Merge tag 'wireless-drivers-for-davem-2016-06-21' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for 4.7

iwlwifi

* fix the scan timeout for long scans
* fix an RCU splat caused when updating the TKIP key
* fix a potential NULL dereference introduced recently
* fix an IGTK key bug that has existed since the MVM driver was introduced
* fix some fw capabilities checks that got accidentally inverted

rtl8xxxu

* fix a typo in a variable name

ath10k

* fix deadlock when peer cannot be created
* fix crash related to printing features
* fix deadlock while processing rx_in_ord_ind

ath9k

* fix GPIO mask regression for AR9462 and AR9565
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/ABI/stable/sysfs-class-ubi b/Documentation/ABI/stable/sysfs-class-ubi
index 18d471d..a6b3240 100644
--- a/Documentation/ABI/stable/sysfs-class-ubi
+++ b/Documentation/ABI/stable/sysfs-class-ubi
@@ -107,6 +107,15 @@
 Description:
 		Number of physical eraseblocks reserved for bad block handling.
 
+What:		/sys/class/ubi/ubiX/ro_mode
+Date:		April 2016
+KernelVersion:	4.7
+Contact:	linux-mtd@lists.infradead.org
+Description:
+		Contains ASCII "1\n" if the read-only flag is set on this
+		device, and "0\n" if it is cleared. UBI devices mark themselves
+		as read-only when they detect an unrecoverable error.
+
 What:		/sys/class/ubi/ubiX/total_eraseblocks
 Date:		July 2006
 KernelVersion:	2.6.22
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index de79efd..8c68768 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -128,16 +128,44 @@
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
      </sect1>
-     <sect1><title>Device Drivers DMA Management</title>
+     <sect1>
+       <title>Buffer Sharing and Synchronization</title>
+       <para>
+         The dma-buf subsystem provides the framework for sharing buffers
+         for hardware (DMA) access across multiple device drivers and
+         subsystems, and for synchronizing asynchronous hardware access.
+       </para>
+       <para>
+         This is used, for example, by drm "prime" multi-GPU support, but
+         is of course not limited to GPU use cases.
+       </para>
+       <para>
+         The three main components of this are: (1) dma-buf, representing
+         an sg_table and exposed to userspace as a file descriptor to allow
+         passing between devices, (2) fence, which provides a mechanism
+         to signal when one device has finished access, and (3) reservation,
+         which manages the shared or exclusive fence(s) associated with
+         the buffer.
+       </para>
+       <sect2><title>dma-buf</title>
 !Edrivers/dma-buf/dma-buf.c
-!Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
-!Iinclude/linux/fence.h
-!Iinclude/linux/seqno-fence.h
+!Iinclude/linux/dma-buf.h
+       </sect2>
+       <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
 !Edrivers/dma-buf/reservation.c
 !Iinclude/linux/reservation.h
+       </sect2>
+       <sect2><title>fence</title>
+!Edrivers/dma-buf/fence.c
+!Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
+!Iinclude/linux/seqno-fence.h
 !Edrivers/dma-buf/sync_file.c
 !Iinclude/linux/sync_file.h
+       </sect2>
+     </sect1>
+     <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
      </sect1>
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 1692c4d..7586bf75 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -1617,12 +1617,23 @@
 !Iinclude/drm/drm_fb_helper.h
     </sect2>
     <sect2>
+      <title>Framebuffer CMA Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_cma_helper.c framebuffer cma helper functions
+!Edrivers/gpu/drm/drm_fb_cma_helper.c
+    </sect2>
+    <sect2>
       <title>Display Port Helper Functions Reference</title>
 !Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
 !Iinclude/drm/drm_dp_helper.h
 !Edrivers/gpu/drm/drm_dp_helper.c
     </sect2>
     <sect2>
+      <title>Display Port Dual Mode Adaptor Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers
+!Iinclude/drm/drm_dp_dual_mode_helper.h
+!Edrivers/gpu/drm/drm_dp_dual_mode_helper.c
+    </sect2>
+    <sect2>
       <title>Display Port MST Helper Functions Reference</title>
 !Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
 !Iinclude/drm/drm_dp_mst_helper.h
@@ -1671,17 +1682,23 @@
 !Pdrivers/gpu/drm/drm_crtc.c Tile group
     </sect2>
     <sect2>
-	<title>Bridges</title>
+      <title>Bridges</title>
       <sect3>
-	 <title>Overview</title>
+        <title>Overview</title>
 !Pdrivers/gpu/drm/drm_bridge.c overview
       </sect3>
       <sect3>
-	 <title>Default bridge callback sequence</title>
+        <title>Default bridge callback sequence</title>
 !Pdrivers/gpu/drm/drm_bridge.c bridge callbacks
       </sect3>
 !Edrivers/gpu/drm/drm_bridge.c
     </sect2>
+    <sect2>
+      <title>Panel Helper Reference</title>
+!Iinclude/drm/drm_panel.h
+!Edrivers/gpu/drm/drm_panel.c
+!Pdrivers/gpu/drm/drm_panel.c drm panel
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
@@ -1817,7 +1834,7 @@
 	</tr>
 	<tr>
 	<td rowspan="42" valign="top" >DRM</td>
-	<td valign="top" >Generic</td>
+	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >“rotation”</td>
 	<td valign="top" >BITMASK</td>
 	<td valign="top" >{ 0, "rotate-0" },
@@ -1832,6 +1849,13 @@
 	image along the specified axis prior to rotation</td>
 	</tr>
 	<tr>
+	<td valign="top" >“scaling mode”</td>
+	<td valign="top" >ENUM</td>
+	<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
+	<td valign="top" >Connector</td>
+	<td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
+	</tr>
+	<tr>
 	<td rowspan="5" valign="top" >Connector</td>
 	<td valign="top" >“EDID”</td>
 	<td valign="top" >BLOB | IMMUTABLE</td>
@@ -2068,21 +2092,12 @@
 	<td valign="top" >property to suggest an Y offset for a connector</td>
 	</tr>
 	<tr>
-	<td rowspan="8" valign="top" >Optional</td>
-	<td valign="top" >“scaling mode”</td>
-	<td valign="top" >ENUM</td>
-	<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
-	<td valign="top" >Connector</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
+	<td rowspan="7" valign="top" >Optional</td>
 	<td valign="top" >"aspect ratio"</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "None", "4:3", "16:9" }</td>
 	<td valign="top" >Connector</td>
-	<td valign="top" >DRM property to set aspect ratio from user space app.
-		This enum is made generic to allow addition of custom aspect
-		ratios.</td>
+	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
 	<td valign="top" >“dirty”</td>
@@ -2153,7 +2168,11 @@
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
 	<td valign="top" >Connector</td>
-	<td valign="top" >TBD</td>
+	<td valign="top" >When this property is set to Limited 16:235
+		and CTM is set, the hardware will be programmed with the
+		result of the multiplication of CTM by the limited range
+		matrix to ensure the pixels normally in the range 0..1.0 are
+		remapped to the range 16/255..235/255.</td>
 	</tr>
 	<tr>
 	<td valign="top" >“audio”</td>
@@ -3334,7 +3353,7 @@
 	<title>Video BIOS Table (VBT)</title>
 !Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
 !Idrivers/gpu/drm/i915/intel_bios.c
-!Idrivers/gpu/drm/i915/intel_bios.h
+!Idrivers/gpu/drm/i915/intel_vbt_defs.h
       </sect2>
     </sect1>
 
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index c6938e5..4da60b4 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -56,6 +56,7 @@
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
 | Cavium         | ThunderX SMMUv2 | #27704          | N/A		       |
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index 9f97df4..a5ea451 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -35,12 +35,22 @@
 		  as an interrupt/status bit in the HDMI controller
 		  itself).  See bindings/pinctrl/brcm,bcm2835-gpio.txt
 
+Required properties for DPI:
+- compatible:	Should be "brcm,bcm2835-dpi"
+- reg:		Physical base address and length of the registers
+- clocks:	a) core: The core clock the unit runs on
+		b) pixel: The pixel clock that feeds the pixelvalve
+- port:		Port node with a single endpoint connecting to the panel
+		  device, as defined in [1]
+
 Required properties for V3D:
 - compatible:	Should be "brcm,bcm2835-v3d"
 - reg:		Physical base address and length of the V3D's registers
 - interrupts:	The interrupt number
 		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
 
+[1] Documentation/devicetree/bindings/media/video-interfaces.txt
+
 Example:
 pixelvalve@7e807000 {
 	compatible = "brcm,bcm2835-pixelvalve2";
@@ -66,6 +76,22 @@
 	clock-names = "pixel", "hdmi";
 };
 
+dpi: dpi@7e208000 {
+	compatible = "brcm,bcm2835-dpi";
+	reg = <0x7e208000 0x8c>;
+	clocks = <&clocks BCM2835_CLOCK_VPU>,
+	         <&clocks BCM2835_CLOCK_DPI>;
+	clock-names = "core", "pixel";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	port {
+		dpi_out: endpoint@0 {
+			remote-endpoint = <&panel_in>;
+		};
+	};
+};
+
 v3d: v3d@7ec00000 {
 	compatible = "brcm,bcm2835-v3d";
 	reg = <0x7ec00000 0x1000>;
@@ -75,3 +101,13 @@
 vc4: gpu {
 	compatible = "brcm,bcm2835-vc4";
 };
+
+panel: panel {
+	compatible = "ontat,yx700wv03", "simple-panel";
+
+	port {
+		panel_in: endpoint {
+			remote-endpoint = <&dpi_out>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
new file mode 100644
index 0000000..4f2ba8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
@@ -0,0 +1,52 @@
+Analogix Display Port bridge bindings
+
+Required properties for dp-controller:
+	-compatible:
+		platform specific such as:
+		 * "samsung,exynos5-dp"
+		 * "rockchip,rk3288-dp"
+	-reg:
+		physical base address of the controller and length
+		of memory mapped region.
+	-interrupts:
+		interrupt combiner values.
+	-clocks:
+		from common clock binding: handle to dp clock.
+	-clock-names:
+		from common clock binding: Shall be "dp".
+	-interrupt-parent:
+		phandle to Interrupt combiner node.
+	-phys:
+		from general PHY binding: the phandle for the PHY device.
+	-phy-names:
+		from general PHY binding: Should be "dp".
+
+Optional properties for dp-controller:
+	-force-hpd:
+		Indicates that the driver needs to force hpd when hpd detection
+		fails; this is used for some eDP screens which do not have an
+		hpd signal.
+	-hpd-gpios:
+		Hotplug detect GPIO.
+		Indicates which GPIO should be used for hotplug detection
+	-port@[X]: SoC specific port nodes with endpoint definitions as defined
+		in Documentation/devicetree/bindings/media/video-interfaces.txt,
+		please refer to the SoC specific binding document:
+		* Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+		* Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+-------------------------------------------------------------------------------
+
+Example:
+
+	dp-controller {
+		compatible = "samsung,exynos5-dp";
+		reg = <0x145b0000 0x10000>;
+		interrupts = <10 3>;
+		interrupt-parent = <&combiner>;
+		clocks = <&clock 342>;
+		clock-names = "dp";
+
+		phys = <&dp_phy>;
+		phy-names = "dp";
+	};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt b/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
index 377afbf..c9fd7b3 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
@@ -5,7 +5,8 @@
 buffer to an external LCD interface.
 
 Required properties:
-- compatible: value should be "samsung,exynos5433-decon";
+- compatible: value should be one of:
+	"samsung,exynos5433-decon", "samsung,exynos5433-decon-tv";
 - reg: physical base address and length of the DECON registers set.
 - interrupts: should contain a list of all DECON IP block interrupts in the
 	      order: VSYNC, LCD_SYSTEM. The interrupt specifier format
@@ -16,7 +17,7 @@
 - clocks: must include clock specifiers corresponding to entries in the
 	  clock-names property.
 - clock-names: list of clock names sorted in the same order as the clocks
-	       property. Must contain "aclk_decon", "aclk_smmu_decon0x",
+	       property. Must contain "pclk", "aclk_decon", "aclk_smmu_decon0x",
 	       "aclk_xiu_decon0x", "pclk_smmu_decon0x", clk_decon_vclk",
 	       "sclk_decon_eclk"
 - ports: contains a port which is connected to mic node. address-cells and
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index fe4a7a2..ade5d8e 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -1,20 +1,3 @@
-Device-Tree bindings for Samsung Exynos Embedded DisplayPort Transmitter(eDP)
-
-DisplayPort is industry standard to accommodate the growing board adoption
-of digital display technology within the PC and CE industries.
-It consolidates the internal and external connection methods to reduce device
-complexity and cost. It also supports necessary features for important cross
-industry applications and provides performance scalability to enable the next
-generation of displays that feature higher color depths, refresh rates, and
-display resolutions.
-
-eDP (embedded display port) device is compliant with Embedded DisplayPort
-standard as follows,
-- DisplayPort standard 1.1a for Exynos5250 and Exynos5260.
-- DisplayPort standard 1.3 for Exynos5422s and Exynos5800.
-
-eDP resides between FIMD and panel or FIMD and bridge such as LVDS.
-
 The Exynos display port interface should be configured based on
 the type of panel connected to it.
 
@@ -48,26 +31,6 @@
 		from general PHY binding: the phandle for the PHY device.
 	-phy-names:
 		from general PHY binding: Should be "dp".
-	-samsung,color-space:
-		input video data format.
-			COLOR_RGB = 0, COLOR_YCBCR422 = 1, COLOR_YCBCR444 = 2
-	-samsung,dynamic-range:
-		dynamic range for input video data.
-			VESA = 0, CEA = 1
-	-samsung,ycbcr-coeff:
-		YCbCr co-efficients for input video.
-			COLOR_YCBCR601 = 0, COLOR_YCBCR709 = 1
-	-samsung,color-depth:
-		number of bits per colour component.
-			COLOR_6 = 0, COLOR_8 = 1, COLOR_10 = 2, COLOR_12 = 3
-	-samsung,link-rate:
-		link rate supported by the panel.
-			LINK_RATE_1_62GBPS = 0x6, LINK_RATE_2_70GBPS = 0x0A
-	-samsung,lane-count:
-		number of lanes supported by the panel.
-			LANE_COUNT1 = 1, LANE_COUNT2 = 2, LANE_COUNT4 = 4
-	- display-timings: timings for the connected panel as described by
-		Documentation/devicetree/bindings/display/display-timing.txt
 
 Optional properties for dp-controller:
 	-interlaced:
@@ -83,17 +46,31 @@
 		Hotplug detect GPIO.
 			Indicates which GPIO should be used for hotplug
 			detection
-Video interfaces:
-  Device node can contain video interface port nodes according to [1].
-  The following are properties specific to those nodes:
+	-video interfaces: Device node can contain video interface port
+			nodes according to [1].
+	- display-timings: timings for the connected panel as described by
+		Documentation/devicetree/bindings/display/panel/display-timing.txt
 
-  endpoint node connected to bridge or panel node:
-   - remote-endpoint: specifies the endpoint in panel or bridge node.
-		      This node is required in all kinds of exynos dp
-		      to represent the connection between dp and bridge
-		      or dp and panel.
+For the below properties, please refer to Analogix DP binding document:
+ * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+	-phys (required)
+	-phy-names (required)
+	-hpd-gpios (optional)
+	-force-hpd (optional)
 
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+Deprecated properties for DisplayPort:
+-interlaced:            deprecated prop that can be parsed from drm_display_mode.
+-vsync-active-high:     deprecated prop that can be parsed from drm_display_mode.
+-hsync-active-high:     deprecated prop that can be parsed from drm_display_mode.
+-samsung,ycbcr-coeff:   deprecated prop that can be parsed from drm_display_mode.
+-samsung,dynamic-range: deprecated prop that can be parsed from drm_display_mode.
+-samsung,color-space:   deprecated prop that can be parsed from drm_display_info.
+-samsung,color-depth:   deprecated prop that can be parsed from drm_display_info.
+-samsung,link-rate:     deprecated prop that can be read from the monitor via DPCD.
+-samsung,lane-count:    deprecated prop that can be read from the monitor via DPCD.
+-samsung,hpd-gpio:      deprecated name for hpd-gpios.
+
+-------------------------------------------------------------------------------
 
 Example:
 
@@ -112,13 +89,6 @@
 
 Board Specific portion:
 	dp-controller {
-		samsung,color-space = <0>;
-		samsung,dynamic-range = <0>;
-		samsung,ycbcr-coeff = <0>;
-		samsung,color-depth = <1>;
-		samsung,link-rate = <0x0a>;
-		samsung,lane-count = <4>;
-
 		display-timings {
 			native-mode = <&lcd_timing>;
 			lcd_timing: 1366x768 {
@@ -135,18 +105,9 @@
 		};
 
 		ports {
-			port {
+			port@0 {
 				dp_out: endpoint {
-					remote-endpoint = <&dp_in>;
-				};
-			};
-		};
-
-		panel {
-			...
-			port {
-				dp_in: endpoint {
-					remote-endpoint = <&dp_out>;
+					remote-endpoint = <&bridge_in>;
 				};
 			};
 		};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
index d474f59..a2ec4c1 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
@@ -5,6 +5,7 @@
 	1) "samsung,exynos4210-hdmi"
 	2) "samsung,exynos4212-hdmi"
 	3) "samsung,exynos5420-hdmi"
+	4) "samsung,exynos5433-hdmi"
 - reg: physical base address of the hdmi and length of memory mapped
 	region.
 - interrupts: interrupt number to the cpu.
@@ -12,6 +13,11 @@
 	a) phandle of the gpio controller node.
 	b) pin number within the gpio controller.
 	c) optional flags and pull up/down.
+- ddc: phandle to the hdmi ddc node
+- phy: phandle to the hdmi phy node
+- samsung,syscon-phandle: phandle for system controller node for PMU.
+
+Required properties for Exynos 4210, 4212, 5420 and 5433:
 - clocks: list of clock IDs from SoC clock driver.
 	a) hdmi: Gate of HDMI IP bus clock.
 	b) sclk_hdmi: Gate of HDMI special clock.
@@ -25,9 +31,24 @@
 		sclk_pixel.
 - clock-names: aliases as per driver requirements for above clock IDs:
 	"hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy" and "mout_hdmi".
-- ddc: phandle to the hdmi ddc node
-- phy: phandle to the hdmi phy node
-- samsung,syscon-phandle: phandle for system controller node for PMU.
+
+Required properties for Exynos 5433:
+- clocks: list of clock specifiers according to common clock bindings.
+	a) hdmi_pclk: Gate of HDMI IP APB bus.
+	b) hdmi_i_pclk: Gate of HDMI-PHY IP APB bus.
+	c) i_tmds_clk: Gate of HDMI TMDS clock.
+	d) i_pixel_clk: Gate of HDMI pixel clock.
+	e) i_spdif_clk: Gate of HDMI SPDIF clock.
+	f) oscclk: Oscillator clock, used as parent of the following *_user clocks
+		in case HDMI-PHY is not operational.
+	g) tmds_clko: TMDS clock generated by HDMI-PHY.
+	h) tmds_clko_user: MUX used to switch between oscclk and tmds_clko,
+		respectively if HDMI-PHY is off and operational.
+	i) pixel_clko: Pixel clock generated by HDMI-PHY.
+	j) pixel_clko_user: MUX used to switch between oscclk and pixel_clko,
+		respectively if HDMI-PHY is off and operational.
+- clock-names: aliases for the above clock specifiers (see the sketch below).
+- samsung,sysreg: handle to syscon used to control the system registers.
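+
+As a sketch, the clock-names list derived from the entries above would read as
+follows (the corresponding clock phandles come from the SoC clock controller
+and are not shown here):
+
+	clock-names = "hdmi_pclk", "hdmi_i_pclk", "i_tmds_clk", "i_pixel_clk",
+		      "i_spdif_clk", "oscclk", "tmds_clko", "tmds_clko_user",
+		      "pixel_clko", "pixel_clko_user";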
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/display/fsl,dcu.txt b/Documentation/devicetree/bindings/display/fsl,dcu.txt
index ebf1be9..ae55cde 100644
--- a/Documentation/devicetree/bindings/display/fsl,dcu.txt
+++ b/Documentation/devicetree/bindings/display/fsl,dcu.txt
@@ -6,17 +6,24 @@
 	* "fsl,vf610-dcu".
 
 - reg:			Address and length of the register set for dcu.
-- clocks:		From common clock binding: handle to dcu clock.
-- clock-names:		From common clock binding: Shall be "dcu".
+- clocks:		Handles to the "dcu" and "pix" clocks (in that order).
+			These can be the same clock (e.g. on LS1021A).
+			See ../clocks/clock-bindings.txt for details.
+- clock-names:		Should be "dcu" and "pix"
+			See ../clocks/clock-bindings.txt for details.
 - big-endian		Boolean property, LS1021A DCU registers are big-endian.
 - fsl,panel:		The phandle to panel node.
 
+Optional properties:
+- fsl,tcon:		The phandle to the timing controller node.
+
 Examples:
 dcu: dcu@2ce0000 {
 	compatible = "fsl,ls1021a-dcu";
 	reg = <0x0 0x2ce0000 0x0 0x10000>;
-	clocks = <&platform_clk 0>;
-	clock-names = "dcu";
+	clocks = <&platform_clk 0>, <&platform_clk 0>;
+	clock-names = "dcu", "pix";
 	big-endian;
 	fsl,panel = <&panel>;
+	fsl,tcon = <&tcon>;
 };
diff --git a/Documentation/devicetree/bindings/display/fsl,tcon.txt b/Documentation/devicetree/bindings/display/fsl,tcon.txt
new file mode 100644
index 0000000..6fa4ab6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/fsl,tcon.txt
@@ -0,0 +1,18 @@
+Device Tree bindings for Freescale TCON Driver
+
+Required properties:
+- compatible:		Should be one of
+	* "fsl,vf610-tcon".
+
+- reg:			Address and length of the register set for tcon.
+- clocks:		From common clock binding: handle to tcon ipg clock.
+- clock-names:		From common clock binding: Shall be "ipg".
+
+Examples:
+timing-controller@4003d000 {
+	compatible = "fsl,vf610-tcon";
+	reg = <0x4003d000 0x1000>;
+	clocks = <&clks VF610_CLK_TCON0>;
+	clock-names = "ipg";
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/display/hisilicon/dw-dsi.txt b/Documentation/devicetree/bindings/display/hisilicon/dw-dsi.txt
new file mode 100644
index 0000000..d270bfe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/hisilicon/dw-dsi.txt
@@ -0,0 +1,72 @@
+Device-Tree bindings for DesignWare DSI Host Controller v1.20a driver
+
+A DSI Host Controller resides between the display controller and an external
+HDMI converter or panel.
+
+Required properties:
+- compatible: value should be "hisilicon,hi6220-dsi".
+- reg: physical base address and length of dsi controller's registers.
+- clocks: contains APB clock phandle + clock-specifier pair.
+- clock-names: should be "pclk".
+- ports: contains DSI controller input and output sub port.
+  The input port connects to ADE output port with the reg value "0".
+  The output port, with the reg value "1", can connect to a panel or
+  any other bridge endpoint.
+  See Documentation/devicetree/bindings/graph.txt for more device graph info.
+
+An example of HiKey board hi6220 SoC and board specific DT entries:
+Example:
+
+SoC specific:
+	dsi: dsi@f4107800 {
+		compatible = "hisilicon,hi6220-dsi";
+		reg = <0x0 0xf4107800 0x0 0x100>;
+		clocks = <&media_ctrl  HI6220_DSI_PCLK>;
+		clock-names = "pclk";
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			/* 0 for input port */
+			port@0 {
+				reg = <0>;
+				dsi_in: endpoint {
+					remote-endpoint = <&ade_out>;
+				};
+			};
+		};
+	};
+
+
+Board specific:
+	&dsi {
+		status = "ok";
+
+		ports {
+			/* 1 for output port */
+			port@1 {
+				reg = <1>;
+
+				dsi_out0: endpoint@0 {
+					remote-endpoint = <&adv7533_in>;
+				};
+			};
+		};
+	};
+
+	&i2c2 {
+		...
+
+		adv7533: adv7533@39 {
+			...
+
+			port {
+				adv7533_in: endpoint {
+					remote-endpoint = <&dsi_out0>;
+				};
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
new file mode 100644
index 0000000..38dc9d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
@@ -0,0 +1,64 @@
+Device-Tree bindings for hisilicon ADE display controller driver
+
+ADE (Advanced Display Engine) is the display controller which grabs image
+data from memory, does composition and post image processing, generates the
+RGB timing stream and transfers it to DSI.
+
+Required properties:
+- compatible: value should be "hisilicon,hi6220-ade".
+- reg: physical base address and length of the ADE controller's registers.
+- hisilicon,noc-syscon: ADE NOC QoS syscon.
+- resets: The ADE reset controller node.
+- interrupts: the ldi vblank interrupt number used.
+- clocks: a list of phandle + clock-specifier pairs, one for each entry
+  in clock-names.
+- clock-names: should contain:
+  "clk_ade_core" for the ADE core clock.
+  "clk_codec_jpeg" for the media NOC QoS clock, which use the same clock with
+  jpeg codec.
+  "clk_ade_pix" for the ADE pixel clok.
+- assigned-clocks: Should contain "clk_ade_core" and "clk_codec_jpeg" clocks'
+  phandle + clock-specifier pairs.
+- assigned-clock-rates: clock rates, one for each entry in assigned-clocks.
+  The rate of "clk_ade_core" could be "360000000" or "180000000";
+  The rate of "clk_codec_jpeg" could be "1440000000" or less.
+  These rate values could be configured according to performance and power
+  consumption.
+- port: the output port. This contains one endpoint subnode, with its
+  remote-endpoint set to the phandle of the connected DSI input endpoint.
+  See Documentation/devicetree/bindings/graph.txt for more device graph info.
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent.
+
+
+An example of HiKey board hi6220 SoC specific DT entry:
+Example:
+
+	ade: ade@f4100000 {
+		compatible = "hisilicon,hi6220-ade";
+		reg = <0x0 0xf4100000 0x0 0x7800>;
+		reg-names = "ade_base";
+		hisilicon,noc-syscon = <&medianoc_ade>;
+		resets = <&media_ctrl MEDIA_ADE>;
+		interrupts = <0 115 4>; /* ldi interrupt */
+
+		clocks = <&media_ctrl HI6220_ADE_CORE>,
+			 <&media_ctrl HI6220_CODEC_JPEG>,
+			 <&media_ctrl HI6220_ADE_PIX_SRC>;
+		/*clock name*/
+		clock-names  = "clk_ade_core",
+			       "clk_codec_jpeg",
+			       "clk_ade_pix";
+
+		assigned-clocks = <&media_ctrl HI6220_ADE_CORE>,
+			<&media_ctrl HI6220_CODEC_JPEG>;
+		assigned-clock-rates = <360000000>, <288000000>;
+		dma-coherent;
+
+		port {
+			ade_out: endpoint {
+				remote-endpoint = <&dsi_in>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt
index 0a175d9..a407462 100644
--- a/Documentation/devicetree/bindings/display/imx/ldb.txt
+++ b/Documentation/devicetree/bindings/display/imx/ldb.txt
@@ -62,6 +62,7 @@
    display-timings are used instead.
 
 Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
  - display-timings : A node that describes the display timings as defined in
    Documentation/devicetree/bindings/display/display-timing.txt.
  - fsl,data-mapping : should be "spwg" or "jeida"
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
new file mode 100644
index 0000000..db6e77e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -0,0 +1,203 @@
+Mediatek display subsystem
+==========================
+
+The Mediatek display subsystem consists of various DISP function blocks in the
+MMSYS register space. The connections between them can be configured by output
+and input selectors in the MMSYS_CONFIG register space. Pixel clock and start
+of frame signal are distributed to the other function blocks by a DISP_MUTEX
+function block.
+
+All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
+For a description of the MMSYS_CONFIG binding, see
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+
+DISP function blocks
+====================
+
+A display stream starts at a source function block that reads pixel data from
+memory and ends with a sink function block that drives pixels on a display
+interface, or writes pixels back to memory. All DISP function blocks have
+their own register space, interrupt, and clock gate. The blocks that can
+access memory additionally have to list the IOMMU and local arbiter they are
+connected to.
+
+For a description of the display interface sink function blocks, see
+Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
+Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
+
+Required properties (all function blocks):
+- compatible: "mediatek,<chip>-disp-<function>", one of
+	"mediatek,<chip>-disp-ovl"   - overlay (4 layers, blending, csc)
+	"mediatek,<chip>-disp-rdma"  - read DMA / line buffer
+	"mediatek,<chip>-disp-wdma"  - write DMA
+	"mediatek,<chip>-disp-color" - color processor
+	"mediatek,<chip>-disp-aal"   - adaptive ambient light controller
+	"mediatek,<chip>-disp-gamma" - gamma correction
+	"mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
+	"mediatek,<chip>-disp-split" - split stream to two encoders
+	"mediatek,<chip>-disp-ufoe"  - data compression engine
+	"mediatek,<chip>-dsi"        - DSI controller, see mediatek,dsi.txt
+	"mediatek,<chip>-dpi"        - DPI controller, see mediatek,dpi.txt
+	"mediatek,<chip>-disp-mutex" - display mutex
+	"mediatek,<chip>-disp-od"    - overdrive
+- reg: Physical base address and length of the function block register space
+- interrupts: The interrupt signal from the function block (required, except for
+  merge and split function blocks).
+- clocks: device clocks
+  See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+  For most function blocks this is just a single clock input. Only the DSI and
+  DPI controller nodes have multiple clock inputs. These are documented in
+  mediatek,dsi.txt and mediatek,dpi.txt, respectively.
+
+Required properties (DMA function blocks):
+- compatible: Should be one of
+	"mediatek,<chip>-disp-ovl"
+	"mediatek,<chip>-disp-rdma"
+	"mediatek,<chip>-disp-wdma"
+- larb: Should contain a phandle pointing to the local arbiter device as defined
+  in Documentation/devicetree/bindings/soc/mediatek/mediatek,smi-larb.txt
+- iommus: Should point to the respective IOMMU block with master port as
+  argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+  for details.
+
+Examples:
+
+mmsys: clock-controller@14000000 {
+	compatible = "mediatek,mt8173-mmsys", "syscon";
+	reg = <0 0x14000000 0 0x1000>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	#clock-cells = <1>;
+};
+
+ovl0: ovl@1400c000 {
+	compatible = "mediatek,mt8173-disp-ovl";
+	reg = <0 0x1400c000 0 0x1000>;
+	interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_OVL0>;
+	iommus = <&iommu M4U_PORT_DISP_OVL0>;
+	mediatek,larb = <&larb0>;
+};
+
+ovl1: ovl@1400d000 {
+	compatible = "mediatek,mt8173-disp-ovl";
+	reg = <0 0x1400d000 0 0x1000>;
+	interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_OVL1>;
+	iommus = <&iommu M4U_PORT_DISP_OVL1>;
+	mediatek,larb = <&larb4>;
+};
+
+rdma0: rdma@1400e000 {
+	compatible = "mediatek,mt8173-disp-rdma";
+	reg = <0 0x1400e000 0 0x1000>;
+	interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+	iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+	mediatek,larb = <&larb0>;
+};
+
+rdma1: rdma@1400f000 {
+	compatible = "mediatek,mt8173-disp-rdma";
+	reg = <0 0x1400f000 0 0x1000>;
+	interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_RDMA1>;
+	iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+	mediatek,larb = <&larb4>;
+};
+
+rdma2: rdma@14010000 {
+	compatible = "mediatek,mt8173-disp-rdma";
+	reg = <0 0x14010000 0 0x1000>;
+	interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_RDMA2>;
+	iommus = <&iommu M4U_PORT_DISP_RDMA2>;
+	mediatek,larb = <&larb4>;
+};
+
+wdma0: wdma@14011000 {
+	compatible = "mediatek,mt8173-disp-wdma";
+	reg = <0 0x14011000 0 0x1000>;
+	interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_WDMA0>;
+	iommus = <&iommu M4U_PORT_DISP_WDMA0>;
+	mediatek,larb = <&larb0>;
+};
+
+wdma1: wdma@14012000 {
+	compatible = "mediatek,mt8173-disp-wdma";
+	reg = <0 0x14012000 0 0x1000>;
+	interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_WDMA1>;
+	iommus = <&iommu M4U_PORT_DISP_WDMA1>;
+	mediatek,larb = <&larb4>;
+};
+
+color0: color@14013000 {
+	compatible = "mediatek,mt8173-disp-color";
+	reg = <0 0x14013000 0 0x1000>;
+	interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_COLOR0>;
+};
+
+color1: color@14014000 {
+	compatible = "mediatek,mt8173-disp-color";
+	reg = <0 0x14014000 0 0x1000>;
+	interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_COLOR1>;
+};
+
+aal@14015000 {
+	compatible = "mediatek,mt8173-disp-aal";
+	reg = <0 0x14015000 0 0x1000>;
+	interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_AAL>;
+};
+
+gamma@14016000 {
+	compatible = "mediatek,mt8173-disp-gamma";
+	reg = <0 0x14016000 0 0x1000>;
+	interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_GAMMA>;
+};
+
+ufoe@1401a000 {
+	compatible = "mediatek,mt8173-disp-ufoe";
+	reg = <0 0x1401a000 0 0x1000>;
+	interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_UFOE>;
+};
+
+dsi0: dsi@1401b000 {
+	/* See mediatek,dsi.txt for details */
+};
+
+dpi0: dpi@1401d000 {
+	/* See mediatek,dpi.txt for details */
+};
+
+mutex: mutex@14020000 {
+	compatible = "mediatek,mt8173-disp-mutex";
+	reg = <0 0x14020000 0 0x1000>;
+	interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_MUTEX_32K>;
+};
+
+od@14023000 {
+	compatible = "mediatek,mt8173-disp-od";
+	reg = <0 0x14023000 0 0x1000>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	clocks = <&mmsys CLK_MM_DISP_OD>;
+};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
new file mode 100644
index 0000000..b6a7e73
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
@@ -0,0 +1,35 @@
+Mediatek DPI Device
+===================
+
+The Mediatek DPI function block is a sink of the display subsystem and
+provides 8-bit RGB/YUV444 or 8/10/10-bit YUV422 pixel data on a parallel
+output bus.
+
+Required properties:
+- compatible: "mediatek,<chip>-dpi"
+- reg: Physical base address and length of the controller's registers
+- interrupts: The interrupt signal from the function block.
+- clocks: device clocks
+  See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- clock-names: must contain "pixel", "engine", and "pll"
+- port: Output port node with endpoint definitions as described in
+  Documentation/devicetree/bindings/graph.txt. This port should be connected
+  to the input port of an attached HDMI or LVDS encoder chip.
+
+Example:
+
+dpi0: dpi@1401d000 {
+	compatible = "mediatek,mt8173-dpi";
+	reg = <0 0x1401d000 0 0x1000>;
+	interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+	clocks = <&mmsys CLK_MM_DPI_PIXEL>,
+		 <&mmsys CLK_MM_DPI_ENGINE>,
+		 <&apmixedsys CLK_APMIXED_TVDPLL>;
+	clock-names = "pixel", "engine", "pll";
+
+	port {
+		dpi0_out: endpoint {
+			remote-endpoint = <&hdmi0_in>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
new file mode 100644
index 0000000..2b1585a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -0,0 +1,60 @@
+Mediatek DSI Device
+===================
+
+The Mediatek DSI function block is a sink of the display subsystem and can
+drive up to 4-lane MIPI DSI output. Two DSIs can be synchronized for dual-
+channel output.
+
+Required properties:
+- compatible: "mediatek,<chip>-dsi"
+- reg: Physical base address and length of the controller's registers
+- interrupts: The interrupt signal from the function block.
+- clocks: device clocks
+  See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- clock-names: must contain "engine", "digital", and "hs"
+- phys: phandle link to the MIPI D-PHY controller.
+- phy-names: must contain "dphy"
+- port: Output port node with endpoint definitions as described in
+  Documentation/devicetree/bindings/graph.txt. This port should be connected
+  to the input port of an attached DSI panel or DSI-to-eDP encoder chip.
+
+MIPI TX Configuration Module
+============================
+
+The MIPI TX configuration module controls the MIPI D-PHY.
+
+Required properties:
+- compatible: "mediatek,<chip>-mipi-tx"
+- reg: Physical base address and length of the controller's registers
+- clocks: PLL reference clock
+- clock-output-names: name of the output clock line to the DSI encoder
+- #clock-cells: must be <0>;
+- #phy-cells: must be <0>.
+
+Example:
+
+mipi_tx0: mipi-dphy@10215000 {
+	compatible = "mediatek,mt8173-mipi-tx";
+	reg = <0 0x10215000 0 0x1000>;
+	clocks = <&clk26m>;
+	clock-output-names = "mipi_tx0_pll";
+	#clock-cells = <0>;
+	#phy-cells = <0>;
+};
+
+dsi0: dsi@1401b000 {
+	compatible = "mediatek,mt8173-dsi";
+	reg = <0 0x1401b000 0 0x1000>;
+	interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_LOW>;
+	clocks = <&mmsys MM_DSI0_ENGINE>, <&mmsys MM_DSI0_DIGITAL>,
+		 <&mipi_tx0>;
+	clock-names = "engine", "digital", "hs";
+	phys = <&mipi_tx0>;
+	phy-names = "dphy";
+
+	port {
+		dsi0_out: endpoint {
+			remote-endpoint = <&panel_in>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt b/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
new file mode 100644
index 0000000..3e10cd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
@@ -0,0 +1,7 @@
+Innolux AT070TN92 7.0" WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,at070tn92"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
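+
+Example (a minimal sketch; the power-supply and backlight phandles shown are
+optional properties from the simple-panel binding and are illustrative only):
+
+	panel {
+		compatible = "innolux,at070tn92";
+		power-supply = <&panel_supply>;
+		backlight = <&backlight>;
+	};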
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
new file mode 100644
index 0000000..74540a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
@@ -0,0 +1,7 @@
+Olimex 4.3" TFT LCD panel
+
+Required properties:
+- compatible: should be "olimex,lcd-olinuxino-43-ts"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt b/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
new file mode 100644
index 0000000..3d8a5e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
@@ -0,0 +1,7 @@
+On Tat Industrial Company 7" DPI TFT panel.
+
+Required properties:
+- compatible: should be "ontat,yx700wv03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
new file mode 100644
index 0000000..a2613b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
@@ -0,0 +1,8 @@
+TPK U.S.A. LLC Fusion 7" integrated projected capacitive touch display with
+800 x 480 (WVGA) LCD panel.
+
+Required properties:
+- compatible: should be "tpk,f07a-0102"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
new file mode 100644
index 0000000..b9d0511
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
@@ -0,0 +1,8 @@
+TPK U.S.A. LLC Fusion 10.1" integrated projected capacitive touch display with
+1024 x 600 (WSVGA) LCD panel.
+
+Required properties:
+- compatible: should be "tpk,f10a-0102"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
new file mode 100644
index 0000000..e832ff9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
@@ -0,0 +1,92 @@
+Rockchip RK3288 specific extensions to the Analogix Display Port
+================================
+
+Required properties:
+- compatible: "rockchip,rk3288-dp";
+
+- reg: physical base address of the controller and length
+	  of memory mapped region.
+
+- clocks: from common clock binding: handle to dp clock.
+
+- clock-names: from common clock binding:
+	       Required elements: "dp" "pclk"
+
+- resets: Must contain an entry for each entry in reset-names.
+	  See ../reset/reset.txt for details.
+
+- pinctrl-names: Names corresponding to the chip hotplug pinctrl states.
+- pinctrl-0: pin-control mode. should be <&edp_hpd>
+
+- reset-names: Must include the name "dp"
+
+- rockchip,grf: phandle to the GRF syscon node; this SoC needs to set some GRF
+  registers, so the GRF is referenced here.
+
+- ports: there are 2 port nodes with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt.
+    Port 0: contains 2 endpoints, connecting to the output of the vop.
+    Port 1: contains 1 endpoint, connecting to the input of the panel.
+
+For the below properties, please refer to Analogix DP binding document:
+ * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+- phys (required)
+- phy-names (required)
+- hpd-gpios (optional)
+- force-hpd (optional)
+-------------------------------------------------------------------------------
+
+Example:
+	dp-controller: dp@ff970000 {
+		compatible = "rockchip,rk3288-dp";
+		reg = <0xff970000 0x4000>;
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
+		clock-names = "dp", "pclk";
+		phys = <&dp_phy>;
+		phy-names = "dp";
+
+		rockchip,grf = <&grf>;
+		resets = <&cru 111>;
+		reset-names = "dp";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&edp_hpd>;
+
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			edp_in: port@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				edp_in_vopb: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vopb_out_edp>;
+				};
+				edp_in_vopl: endpoint@1 {
+					reg = <1>;
+					remote-endpoint = <&vopl_out_edp>;
+				};
+			};
+
+			edp_out: port@1 {
+				reg = <1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				edp_out_panel: endpoint {
+					reg = <0>;
+					remote-endpoint = <&panel_in_edp>;
+				};
+			};
+		};
+	};
+
+	pinctrl {
+		edp {
+			edp_hpd: edp-hpd {
+				rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/snps,arcpgu.txt b/Documentation/devicetree/bindings/display/snps,arcpgu.txt
new file mode 100644
index 0000000..c5c7dfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/snps,arcpgu.txt
@@ -0,0 +1,35 @@
+ARC PGU
+
+This is a display controller found on several development boards produced
+by Synopsys. The ARC PGU is an RGB streamer that reads the data from a
+framebuffer and sends it to a single digital encoder (usually HDMI).
+
+Required properties:
+  - compatible: "snps,arcpgu"
+  - reg: Physical base address and length of the controller's registers.
+  - clocks: A list of phandle + clock-specifier pairs, one for each
+    entry in 'clock-names'.
+  - clock-names: A list of clock names. For ARC PGU it should contain:
+      - "pxlclk" for the clock feeding the output PLL of the controller.
+
+Required sub-nodes:
+  - port: The PGU connection to an encoder chip.
+
+Example:
+
+/ {
+	...
+
+	pgu@XXXXXXXX {
+		compatible = "snps,arcpgu";
+		reg = <0xXXXXXXXX 0x400>;
+		clocks = <&clock_node>;
+		clock-names = "pxlclk";
+
+		port {
+			pgu_output: endpoint {
+				remote-endpoint = <&hdmi_enc_input>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
new file mode 100644
index 0000000..df8f4ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -0,0 +1,258 @@
+Allwinner A10 Display Pipeline
+==============================
+
+The Allwinner A10 Display pipeline is composed of several components
+that are going to be documented below:
+
+TV Encoder
+----------
+
+The TV Encoder supports the composite and VGA output. It is one end of
+the pipeline.
+
+Required properties:
+ - compatible: value should be "allwinner,sun4i-a10-tv-encoder".
+ - reg: base address and size of memory-mapped region
+ - clocks: the clocks driving the TV encoder
+ - resets: phandle to the reset controller driving the encoder
+
+- ports: A ports node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt. The
+  first port should be the input endpoint.
+
+TCON
+----
+
+The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
+
+Required properties:
+ - compatible: value should be "allwinner,sun5i-a13-tcon".
+ - reg: base address and size of memory-mapped region
+ - interrupts: interrupt associated to this IP
+ - clocks: phandles to the clocks feeding the TCON. Three are needed:
+   - 'ahb': the interface clocks
+   - 'tcon-ch0': The clock driving the TCON channel 0
+   - 'tcon-ch1': The clock driving the TCON channel 1
+ - resets: phandles to the reset controllers driving the encoder
+   - "lcd": the reset line for the TCON channel 0
+
+ - clock-names: the clock names mentioned above
+ - reset-names: the reset names mentioned above
+ - clock-output-names: Name of the pixel clock created
+
+- ports: A ports node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt. The
+  first port should be the input endpoint, the second one the output
+
+  The output should have two endpoints. The first is the block
+  connected to the TCON channel 0 (usually a panel or a bridge), the
+  second the block connected to the TCON channel 1 (usually the TV
+  encoder)
+
+
+Display Engine Backend
+----------------------
+
+The display engine backend exposes layers and sprites to the
+system.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun5i-a13-display-backend
+  - reg: base address and size of the memory-mapped region.
+  - clocks: phandles to the clocks feeding the frontend and backend
+    * ahb: the backend interface clock
+    * mod: the backend module clock
+    * ram: the backend DRAM clock
+  - clock-names: the clock names mentioned above
+  - resets: phandles to the reset controllers driving the backend
+
+- ports: A ports node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt. The
+  first port should be the input endpoint, the second one the output
+
+Display Engine Frontend
+-----------------------
+
+The display engine frontend does formats conversion, scaling,
+deinterlacing and color space conversion.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun5i-a13-display-frontend
+  - reg: base address and size of the memory-mapped region.
+  - interrupts: interrupt associated to this IP
+  - clocks: phandles to the clocks feeding the frontend and backend
+    * ahb: the backend interface clock
+    * mod: the backend module clock
+    * ram: the backend DRAM clock
+  - clock-names: the clock names mentioned above
+  - resets: phandles to the reset controllers driving the backend
+
+- ports: A ports node with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt. The
+  first port should be the input endpoint, the second one the outputs
+
+
+Display Engine Pipeline
+-----------------------
+
+The display engine pipeline (and its entry point, since it can be
+either directly the backend or the frontend) is represented as an
+extra node.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun5i-a13-display-engine
+
+  - allwinner,pipelines: list of phandles to the available display engine
+    frontends.
+
+Example:
+
+panel: panel {
+	compatible = "olimex,lcd-olinuxino-43-ts";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		panel_input: endpoint {
+			remote-endpoint = <&tcon0_out_panel>;
+		};
+	};
+};
+
+tve0: tv-encoder@01c0a000 {
+	compatible = "allwinner,sun4i-a10-tv-encoder";
+	reg = <0x01c0a000 0x1000>;
+	clocks = <&ahb_gates 34>;
+	resets = <&tcon_ch0_clk 0>;
+
+	port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		tve0_in_tcon0: endpoint@0 {
+			reg = <0>;
+			remote-endpoint = <&tcon0_out_tve0>;
+		};
+	};
+};
+
+tcon0: lcd-controller@1c0c000 {
+	compatible = "allwinner,sun5i-a13-tcon";
+	reg = <0x01c0c000 0x1000>;
+	interrupts = <44>;
+	resets = <&tcon_ch0_clk 1>;
+	reset-names = "lcd";
+	clocks = <&ahb_gates 36>,
+		 <&tcon_ch0_clk>,
+		 <&tcon_ch1_clk>;
+	clock-names = "ahb",
+		      "tcon-ch0",
+		      "tcon-ch1";
+	clock-output-names = "tcon-pixel-clock";
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		tcon0_in: port@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			tcon0_in_be0: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&be0_out_tcon0>;
+			};
+		};
+
+		tcon0_out: port@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			tcon0_out_panel: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&panel_input>;
+			};
+
+			tcon0_out_tve0: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&tve0_in_tcon0>;
+			};
+		};
+	};
+};
+
+fe0: display-frontend@1e00000 {
+	compatible = "allwinner,sun5i-a13-display-frontend";
+	reg = <0x01e00000 0x20000>;
+	interrupts = <47>;
+	clocks = <&ahb_gates 46>, <&de_fe_clk>,
+		 <&dram_gates 25>;
+	clock-names = "ahb", "mod",
+		      "ram";
+	resets = <&de_fe_clk>;
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		fe0_out: port@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			fe0_out_be0: endpoint {
+				remote-endpoint = <&be0_in_fe0>;
+			};
+		};
+	};
+};
+
+be0: display-backend@1e60000 {
+	compatible = "allwinner,sun5i-a13-display-backend";
+	reg = <0x01e60000 0x10000>;
+	clocks = <&ahb_gates 44>, <&de_be_clk>,
+		 <&dram_gates 26>;
+	clock-names = "ahb", "mod",
+		      "ram";
+	resets = <&de_be_clk>;
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		be0_in: port@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			be0_in_fe0: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&fe0_out_be0>;
+			};
+		};
+
+		be0_out: port@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			be0_out_tcon0: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&tcon0_in_be0>;
+			};
+		};
+	};
+};
+
+display-engine {
+	compatible = "allwinner,sun5i-a13-display-engine";
+	allwinner,pipelines = <&fe0>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
index ef37528..dd031fc 100644
--- a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
@@ -33,7 +33,7 @@
 	gpio-controller;
 	interrupt-controller;
 	#interrupt-cells = <2>;
-	clocks = <&PBCLK4>;
+	clocks = <&rootclk PB4CLK>;
 	microchip,gpio-bank = <0>;
 	gpio-ranges = <&pic32_pinctrl 0 0 16>;
 };
diff --git a/Documentation/devicetree/bindings/bus/ti-gpmc.txt b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
similarity index 88%
rename from Documentation/devicetree/bindings/bus/ti-gpmc.txt
rename to Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
index 0168370..21055e21 100644
--- a/Documentation/devicetree/bindings/bus/ti-gpmc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
@@ -32,6 +32,19 @@
 			bootloader) are used for the physical address decoding.
 			As this will change in the future, filling correct
 			values here is a requirement.
+ - interrupt-controller: The GPMC driver implements an interrupt controller for
+			the NAND events "fifoevent" and "termcount" plus the
+			rising/falling edges on the GPMC_WAIT pins.
+			The interrupt number mapping is as follows
+			0 - NAND_fifoevent
+			1 - NAND_termcount
+			2 - GPMC_WAIT0 pin edge
+			3 - GPMC_WAIT1 pin edge, and so on.
+ - #interrupt-cells:	Must be set to 2
+ - gpio-controller:	The GPMC driver implements a GPIO controller for the
+			GPMC WAIT pins that can be used as general purpose inputs.
+			0 maps to GPMC_WAIT0 pin.
+ - #gpio-cells:		Must be set to 2
 
 Timing properties for child nodes. All are optional and default to 0.
 
@@ -130,6 +143,10 @@
 		#address-cells = <2>;
 		#size-cells = <1>;
 		ranges = <0 0 0x08000000 0x10000000>; /* CS0 @addr 0x8000000, size 0x10000000 */
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
 
 		/* child nodes go here */
 	};
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
index fc149f3..f080f06 100644
--- a/Documentation/devicetree/bindings/mips/cpu_irq.txt
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -13,7 +13,7 @@
 - compatible : Should be "mti,cpu-interrupt-controller"
 
 Example devicetree:
-	cpu-irq: cpu-irq@0 {
+	cpu-irq: cpu-irq {
 		#address-cells = <0>;
 
 		interrupt-controller;
diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
index 71ad57e..3149297 100644
--- a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
+++ b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
@@ -20,7 +20,7 @@
 		compatible = "microchip,pic32mzda-sdhci";
 		reg = <0x1f8ec000 0x100>;
 		interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&REFCLKO4>, <&PBCLK5>;
+		clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
 		clock-names = "base_clk", "sys_clk";
 		bus-width = <4>;
 		cap-sd-highspeed;
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index d53aba9..3e7ee99 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -39,7 +39,7 @@
 
 Nand Flash Controller(NFC) is an optional sub-node
 Required properties:
-- compatible : "atmel,sama5d3-nfc" or "atmel,sama5d4-nfc".
+- compatible : "atmel,sama5d3-nfc".
 - reg : should specify the address and size used for NFC command registers,
         NFC registers and NFC SRAM. NFC SRAM address and size can be absent
         if don't want to use it.
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 0f6985b..7066597 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -24,6 +24,7 @@
                          brcm,brcmnand-v5.0
                          brcm,brcmnand-v6.0
                          brcm,brcmnand-v6.1
+                         brcm,brcmnand-v6.2
                          brcm,brcmnand-v7.0
                          brcm,brcmnand-v7.1
                          brcm,brcmnand
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index fb733c4..3ee7e20 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -13,7 +13,11 @@
 
 Required properties:
 
- - reg:		The CS line the peripheral is connected to
+ - compatible:	"ti,omap2-nand"
+ - reg:		range id (CS number), base offset and length of the
+		NAND I/O space
+ - interrupt-parent: must point to gpmc node
+ - interrupts:	Two interrupt specifiers, one for fifoevent, one for termcount.
 
 Optional properties:
 
@@ -44,6 +48,7 @@
 		locating ECC errors for BCHx algorithms. SoC devices which have
 		ELM hardware engines should specify this device node in .dtsi
 		Using ELM for ECC error correction frees some CPU cycles.
+ - rb-gpios:	GPIO specifier for the ready/busy# pin.
 
 For inline partition table parsing (optional):
 
@@ -55,20 +60,26 @@
 	gpmc: gpmc@50000000 {
 		compatible = "ti,am3352-gpmc";
 		ti,hwmods = "gpmc";
-		reg = <0x50000000 0x1000000>;
+		reg = <0x50000000 0x36c>;
 		interrupts = <100>;
 		gpmc,num-cs = <8>;
 		gpmc,num-waitpins = <2>;
 		#address-cells = <2>;
 		#size-cells = <1>;
-		ranges = <0 0 0x08000000 0x2000>;	/* CS0: NAND */
+		ranges = <0 0 0x08000000 0x1000000>;	/* CS0 space, 16MB */
 		elm_id = <&elm>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
 
 		nand@0,0 {
-			reg = <0 0 0>; /* CS0, offset 0 */
+			compatible = "ti,omap2-nand";
+			reg = <0 0 4>;		/* CS0, offset 0, NAND I/O window 4 */
+			interrupt-parent = <&gpmc>;
+			interrupts = <0 IRQ_TYPE_NONE>, <1 IRQ_TYPE_NONE>;
 			nand-bus-width = <16>;
 			ti,nand-ecc-opt = "bch8";
 			ti,nand-xfer-type = "polled";
+			rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 
 			gpmc,sync-clk-ps = <0>;
 			gpmc,cs-on-ns = <0>;
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index b53f92e..3733300 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -1,8 +1,31 @@
-* MTD generic binding
+* NAND chip and NAND controller generic binding
+
+NAND controller/NAND chip representation:
+
+The NAND controller should be represented with its own DT node, and all
+NAND chips attached to this controller should be defined as children nodes
+of the NAND controller. This representation should be enforced even for
+simple controllers supporting only one chip.
+
+Mandatory NAND controller properties:
+- #address-cells: depends on your controller. Should at least be 1 to
+		  encode the CS line id.
+- #size-cells: depends on your controller. Put zero unless you need a
+	       mapping between CS lines and dedicated memory regions
+
+Optional NAND controller properties:
+- ranges: only needed if you need to define a mapping between CS lines and
+	  memory regions
+
+Optional NAND chip properties:
 
 - nand-ecc-mode : String, operation mode of the NAND ecc mode.
-  Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first",
-  "soft_bch".
+		  Supported values are: "none", "soft", "hw", "hw_syndrome",
+		  "hw_oob_first".
+		  Deprecated values:
+		  "soft_bch": use "soft" and nand-ecc-algo instead
+- nand-ecc-algo: string, algorithm of NAND ECC.
+		 Supported values are: "hamming", "bch".
 - nand-bus-width : 8 or 16 bus width if not present 8
 - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
 
@@ -19,3 +42,20 @@
 The interpretation of these parameters is implementation-defined, so not all
 implementations must support all possible combinations. However, implementations
 are encouraged to further specify the value(s) they support.
+
+Example:
+
+	nand-controller {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* controller specific properties */
+
+		nand@0 {
+			reg = <0>;
+			nand-ecc-mode = "soft";
+			nand-ecc-algo = "bch";
+
+			/* controller specific properties */
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
index 4b5efa5..29b72e3 100644
--- a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
@@ -34,7 +34,7 @@
 	#size-cells = <1>;
 	compatible = "microchip,pic32mzda-pinctrl";
 	reg = <0x1f801400 0x400>;
-	clocks = <&PBCLK1>;
+	clocks = <&rootclk PB1CLK>;
 
 	pinctrl_uart2: pinctrl_uart2 {
 		uart2-tx {
diff --git a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
index 65b38bf..7a34345 100644
--- a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
+++ b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
@@ -20,7 +20,7 @@
 		interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
 			<113 IRQ_TYPE_LEVEL_HIGH>,
 			<114 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_uart1
 				&pinctrl_uart1_cts
diff --git a/Documentation/devicetree/bindings/sound/max98371.txt b/Documentation/devicetree/bindings/sound/max98371.txt
new file mode 100644
index 0000000..6c28523
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98371.txt
@@ -0,0 +1,17 @@
+max98371 codec
+
+This device supports I2C mode only.
+
+Required properties:
+
+- compatible : "maxim,max98371"
+- reg : The I2C address of the device
+
+Example:
+
+&i2c {
+	max98371: max98371@31 {
+		compatible = "maxim,max98371";
+		reg = <0x31>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index f205ce9..ac28cdb 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -1,15 +1,16 @@
-MT8173 with RT5650 RT5676 CODECS
+MT8173 with RT5650 RT5676 CODECS and HDMI via I2S
 
 Required properties:
 - compatible : "mediatek,mt8173-rt5650-rt5676"
 - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+			and of the hdmi encoder node
 - mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
 	sound {
 		compatible = "mediatek,mt8173-rt5650-rt5676";
-		mediatek,audio-codec = <&rt5650 &rt5676>;
+		mediatek,audio-codec = <&rt5650 &rt5676 &hdmi0>;
 		mediatek,platform = <&afe>;
 	};
 
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
index fe5a5ef..5bfa6b6 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
@@ -5,11 +5,21 @@
 - mediatek,audio-codec: the phandles of rt5650 codecs
 - mediatek,platform: the phandle of MT8173 ASoC platform
 
+Optional subnodes:
+- codec-capture : the subnode of rt5650 codec capture
+Required codec-capture subnode properties:
+- sound-dai: audio codec dai name on capture path
+  <&rt5650 0> : Default setting. Connect rt5650 I2S1 for capture. (dai_name = rt5645-aif1)
+  <&rt5650 1> : Connect rt5650 I2S2 for capture. (dai_name = rt5645-aif2)
+
 Example:
 
 	sound {
 		compatible = "mediatek,mt8173-rt5650";
 		mediatek,audio-codec = <&rt5650>;
 		mediatek,platform = <&afe>;
+		codec-capture {
+			sound-dai = <&rt5650 1>;
+		};
 	};
 
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
index 028fa1c..4d9a83d 100644
--- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -37,17 +37,18 @@
 
   - dai-name: DAI name that describes the IP.
 
+  - IP mode: IP working mode depending on associated codec.
+	"HDMI" connected to HDMI codec and supports IEC HDMI formats (player only).
+	"SPDIF" connected to SPDIF codec and supports SPDIF formats (player only).
+	"PCM" PCM standard mode for I2S or TDM bus.
+	"TDM" TDM mode for TDM bus.
+
 Required properties ("st,sti-uni-player" compatibility only):
   - clocks: CPU_DAI IP clock source, listed in the same order than the
 	    CPU_DAI properties.
 
   - uniperiph-id: internal SOC IP instance ID.
 
-  - IP mode: IP working mode depending on associated codec.
-	"HDMI" connected to HDMI codec IP and IEC HDMI formats.
-	"SPDIF"connected to SPDIF codec and support SPDIF formats.
-	"PCM"  PCM standard mode for I2S or TDM bus.
-
 Optional properties:
   - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
 	       external codecs connection.
@@ -56,6 +57,22 @@
 
 Example:
 
+	sti_uni_player1: sti-uni-player@1 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_PCM_1>;
+		reg = <0x8D81000 0x158>;
+		interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 3 0 1>;
+		st,dai-name = "Uni Player #1 (I2S)";
+		dma-names = "tx";
+		st,uniperiph-id = <1>;
+		st,version = <5>;
+		st,mode = "TDM";
+	};
+
 	sti_uni_player2: sti-uni-player@2 {
 		compatible = "st,sti-uni-player";
 		status = "okay";
@@ -65,7 +82,7 @@
 		reg = <0x8D82000 0x158>;
 		interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
 		dmas = <&fdma0 4 0 1>;
-		dai-name = "Uni Player #1 (DAC)";
+		dai-name = "Uni Player #2 (DAC)";
 		dma-names = "tx";
 		uniperiph-id = <2>;
 		version = <5>;
@@ -82,7 +99,7 @@
 		interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
 		dmas = <&fdma0 7 0 1>;
 		dma-names = "tx";
-		dai-name = "Uni Player #1 (PIO)";
+		dai-name = "Uni Player #3 (SPDIF)";
 		uniperiph-id = <3>;
 		version = <5>;
 		mode = "SPDIF";
@@ -99,6 +116,7 @@
 		dma-names = "rx";
 		dai-name = "Uni Reader #1 (HDMI RX)";
 		version = <3>;
+		st,mode = "PCM";
 	};
 
 2) sti-sas-codec: internal audio codec IPs driver
@@ -152,4 +170,20 @@
 				sound-dai = <&sti_sasg_codec 0>;
 			};
 		};
+		simple-audio-card,dai-link@2 {
+			/* TDM playback  */
+			format = "left_j";
+			frame-inversion = <1>;
+			cpu {
+				sound-dai = <&sti_uni_player1>;
+				dai-tdm-slot-num = <16>;
+				dai-tdm-slot-width = <16>;
+				dai-tdm-slot-tx-mask =
+					<1 1 1 1 0 0 0 0 0 0 1 1 0 0 1 1>;
+			};
+
+			codec {
+				sound-dai = <&sti_sasg_codec 3>;
+			};
+		};
 	};
diff --git a/Documentation/devicetree/bindings/sound/tas571x.txt b/Documentation/devicetree/bindings/sound/tas571x.txt
index 0ac31d8..b4959f1 100644
--- a/Documentation/devicetree/bindings/sound/tas571x.txt
+++ b/Documentation/devicetree/bindings/sound/tas571x.txt
@@ -1,4 +1,4 @@
-Texas Instruments TAS5711/TAS5717/TAS5719 stereo power amplifiers
+Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 stereo power amplifiers
 
 The codec is controlled through an I2C interface.  It also has two other
 signals that can be wired up to GPIOs: reset (strongly recommended), and
@@ -6,7 +6,11 @@
 
 Required properties:
 
-- compatible: "ti,tas5711", "ti,tas5717", or "ti,tas5719"
+- compatible: should be one of the following:
+  - "ti,tas5711",
+  - "ti,tas5717",
+  - "ti,tas5719",
+  - "ti,tas5721"
 - reg: The I2C address of the device
 - #sound-dai-cells: must be equal to 0
 
@@ -25,6 +29,8 @@
 - PVDD_B-supply: regulator phandle for the PVDD_B supply (5711)
 - PVDD_C-supply: regulator phandle for the PVDD_C supply (5711)
 - PVDD_D-supply: regulator phandle for the PVDD_D supply (5711)
+- DRVDD-supply: regulator phandle for the DRVDD supply (5721)
+- PVDD-supply: regulator phandle for the PVDD supply (5721)
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt
new file mode 100644
index 0000000..806ea73
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas5720.txt
@@ -0,0 +1,25 @@
+Texas Instruments TAS5720 Mono Audio amplifier
+
+The TAS5720 serial control bus communicates through the I2C protocol only. The
+serial bus is also used for periodic codec fault checking/reporting during
+audio playback. For more product information please see the links below:
+
+http://www.ti.com/product/TAS5720L
+http://www.ti.com/product/TAS5720M
+
+Required properties:
+
+- compatible : "ti,tas5720"
+- reg : I2C slave address
+- dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
+- pvdd-supply : phandle to a supply used for the Class-D amp and the analog
+
+Example:
+
+tas5720: tas5720@6c {
+	status = "okay";
+	compatible = "ti,tas5720";
+	reg = <0x6c>;
+	dvdd-supply = <&vdd_3v3_reg>;
+	pvdd-supply = <&amp_supply_reg>;
+};
diff --git a/Documentation/devicetree/bindings/spi/microchip,spi-pic32.txt b/Documentation/devicetree/bindings/spi/microchip,spi-pic32.txt
new file mode 100644
index 0000000..79de379f
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/microchip,spi-pic32.txt
@@ -0,0 +1,34 @@
+Microchip PIC32 SPI Master controller
+
+Required properties:
+- compatible: Should be "microchip,pic32mzda-spi".
+- reg: Address and length of register space for the device.
+- interrupts: Should contain all three spi interrupts in sequence
+              of <fault-irq>, <receive-irq>, <transmit-irq>.
+- interrupt-names: Should be "fault", "rx", "tx" in order.
+- clocks: Phandle of the clock generating SPI clock on the bus.
+- clock-names: Should be "mck0".
+- cs-gpios: Specifies the gpio pins to be used for chipselects.
+            See: Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Optional properties:
+- dmas: Two or more DMA channel specifiers following the convention outlined
+        in Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: Names for the dma channels. There must be at least one channel
+             named "spi-tx" for transmit and named "spi-rx" for receive.
+
+Example:
+
+spi1: spi@1f821000 {
+        compatible = "microchip,pic32mzda-spi";
+        reg = <0x1f821000 0x200>;
+        interrupts = <109 IRQ_TYPE_LEVEL_HIGH>,
+                     <110 IRQ_TYPE_LEVEL_HIGH>,
+                     <111 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "fault", "rx", "tx";
+        clocks = <&PBCLK2>;
+        clock-names = "mck0";
+        cs-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
+        dmas = <&dma 134>, <&dma 135>;
+        dma-names = "spi-rx", "spi-tx";
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
index 1ad0fe3..ff5893d 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
@@ -16,8 +16,7 @@
 
 Optional property:
 - big-endian: If present the dspi device's registers are implemented
-  in big endian mode, otherwise in native mode(same with CPU), for more
-  detail please see: Documentation/devicetree/bindings/regmap/regmap.txt.
+  in big endian mode.
 
 Optional SPI slave node properties:
 - fsl,spi-cs-sck-delay: a delay in nanoseconds between activating chip
diff --git a/Documentation/devicetree/bindings/spi/sqi-pic32.txt b/Documentation/devicetree/bindings/spi/sqi-pic32.txt
new file mode 100644
index 0000000..c82d021
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/sqi-pic32.txt
@@ -0,0 +1,18 @@
+Microchip PIC32 Quad SPI controller
+-----------------------------------
+Required properties:
+- compatible: Should be "microchip,pic32mzda-sqi".
+- reg: Address and length of SQI controller register space.
+- interrupts: Should contain SQI interrupt.
+- clocks: Should contain phandles of two clocks in sequence, one that drives
+          the clock on the SPI bus and the other that drives the SQI controller.
+- clock-names: Should be "spi_ck" and "reg_ck" in order.
+
+Example:
+	sqi1: spi@1f8e2000 {
+		compatible = "microchip,pic32mzda-sqi";
+		reg = <0x1f8e2000 0x200>;
+		clocks = <&rootclk REF2CLK>, <&rootclk PB5CLK>;
+		clock-names = "spi_ck", "reg_ck";
+		interrupts = <169 IRQ_TYPE_LEVEL_HIGH>;
+	};
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
index 6908d3a..edebfa0 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
@@ -26,6 +26,10 @@
     of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
     list of valid values when referring to thermal sensors.
 
+Note:
+- the "critical" type trip points will be set to SOC_THERM hardware as the
+shut down temperature. Once the temperature of this thermal zone is higher
+than it, the system will be shutdown or reset by hardware.
 
 Example :
 
@@ -51,5 +55,13 @@
 
                         thermal-sensors =
                                 <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+
+			trips {
+				cpu_shutdown_trip: shutdown-trip {
+					temperature = <102500>;
+					hysteresis = <1000>;
+					type = "critical";
+				};
+			};
                 };
 	};
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index e5ee3f1..a8e52c8 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -11,7 +11,6 @@
 			    - "renesas,thermal-r8a7791" (R-Car M2-W)
 			    - "renesas,thermal-r8a7792" (R-Car V2H)
 			    - "renesas,thermal-r8a7793" (R-Car M2-N)
-			    - "renesas,thermal-r8a7794" (R-Car E2)
 - reg			: Address range of the thermal registers.
 			  The 1st reg will be recognized as common register
 			  if it has "interrupts".
diff --git a/Documentation/devicetree/bindings/thermal/tango-thermal.txt b/Documentation/devicetree/bindings/thermal/tango-thermal.txt
new file mode 100644
index 0000000..212198d
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tango-thermal.txt
@@ -0,0 +1,17 @@
+* Tango Thermal
+
+The SMP8758 SoC includes 3 instances of this temperature sensor
+(in the CPU, video decoder, and PCIe controller).
+
+Required properties:
+- #thermal-sensor-cells: Should be 0 (see thermal.txt)
+- compatible: "sigma,smp8758-thermal"
+- reg: Address range of the thermal registers
+
+Example:
+
+	cpu_temp: thermal@920100 {
+		#thermal-sensor-cells = <0>;
+		compatible = "sigma,smp8758-thermal";
+		reg = <0x920100 12>;
+	};
diff --git a/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
new file mode 100644
index 0000000..d7235550
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
@@ -0,0 +1,89 @@
+General Purpose Analog To Digital Converter (ADC) based thermal sensor.
+
+On some platforms, thermal sensors such as thermistors are connected to
+one of the ADC channels, and the sensor resistance is read via the voltage
+across the sensor resistor. The voltage read across the sensor is mapped to
+temperature using a voltage-temperature lookup table.
+
+Required properties:
+===================
+- compatible:		     Must be "generic-adc-thermal".
+- temperature-lookup-table:  Two-dimensional array of integers; lookup table
+			     mapping the relation between ADC value and
+			     temperature. When the ADC is read, the value is
+			     looked up in the table to get the equivalent
+			     temperature.
+			     The first value of each row is the temperature
+			     in millicelsius and the second value of each
+			     row is the ADC read value.
+- #thermal-sensor-cells:     Should be 1. See ./thermal.txt for a description
+			     of this property.
+
+Example :
+#include <dt-bindings/thermal/thermal.h>
+
+i2c@7000c400 {
+	ads1015: ads1015@4a {
+		reg = <0x4a>;
+		compatible = "ads1015";
+		sampling-frequency = <3300>;
+		#io-channel-cells = <1>;
+	};
+};
+
+tboard_thermistor: thermal-sensor {
+	compatible = "generic-adc-thermal";
+	#thermal-sensor-cells = <0>;
+	io-channels = <&ads1015 1>;
+	io-channel-names = "sensor-channel";
+	temperature-lookup-table = <    (-40000) 2578
+					(-39000) 2577
+					(-38000) 2576
+					(-37000) 2575
+					(-36000) 2574
+					(-35000) 2573
+					(-34000) 2572
+					(-33000) 2571
+					(-32000) 2569
+					(-31000) 2568
+					(-30000) 2567
+					::::::::::
+					118000 254
+					119000 247
+					120000 240
+					121000 233
+					122000 226
+					123000 220
+					124000 214
+					125000 208>;
+};
+
+dummy_cool_dev: dummy-cool-dev {
+	compatible = "dummy-cooling-dev";
+	#cooling-cells = <2>; /* min followed by max */
+};
+
+thermal-zones {
+	Tboard {
+		polling-delay = <15000>; /* milliseconds */
+		polling-delay-passive = <0>; /* milliseconds */
+		thermal-sensors = <&tboard_thermistor>;
+
+		trips {
+			therm_est_trip: therm_est_trip {
+				temperature = <40000>;
+				type = "active";
+				hysteresis = <1000>;
+			};
+		};
+
+		cooling-maps {
+			map0 {
+				trip = <&therm_est_trip>;
+				cooling-device = <&dummy_cool_dev THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				contribution = <100>;
+			};
+
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4454483..a7440bc 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -23,6 +23,7 @@
 ampire	Ampire Co., Ltd.
 ams	AMS AG
 amstaos	AMS-Taos Inc.
+analogix	Analogix Semiconductor, Inc.
 apm	Applied Micro Circuits Corporation (APM)
 aptina	Aptina Imaging
 arasan	Arasan Chip Systems
@@ -185,6 +186,7 @@
 olimex	OLIMEX Ltd.
 onion	Onion Corporation
 onnn	ON Semiconductor Corp.
+ontat	On Tat Industrial Company
 opencores	OpenCores.org
 option	Option NV
 ortustech	Ortus Technology Co., Ltd.
@@ -261,6 +263,7 @@
 toshiba	Toshiba Corporation
 toumaz	Toumaz
 tplink	TP-LINK Technologies Co., Ltd.
+tpk	TPK U.S.A. LLC
 tronfy	Tronfy
 tronsmart	Tronsmart
 truly	Truly Semiconductors Limited
diff --git a/Documentation/devicetree/bindings/video/bridge/anx7814.txt b/Documentation/devicetree/bindings/video/bridge/anx7814.txt
new file mode 100644
index 0000000..b2a22c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/bridge/anx7814.txt
@@ -0,0 +1,40 @@
+Analogix ANX7814 SlimPort (Full-HD Transmitter)
+-----------------------------------------------
+
+The ANX7814 is an ultra-low power Full-HD (1080p60) SlimPort transmitter
+designed for portable devices.
+
+Required properties:
+
+ - compatible		: "analogix,anx7814"
+ - reg			: I2C address of the device
+ - interrupt-parent	: Should be the phandle of the interrupt controller
+			  that services interrupts for this device
+ - interrupts		: Should contain the INTP interrupt
+ - hpd-gpios		: Which GPIO to use for hpd
+ - pd-gpios		: Which GPIO to use for power down
+ - reset-gpios		: Which GPIO to use for reset
+
+Optional properties:
+
+ - dvdd10-supply	: Regulator for 1.0V digital core power.
+ - Video port for HDMI input, using the DT bindings defined in [1].
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+	anx7814: anx7814@38 {
+		compatible = "analogix,anx7814";
+		reg = <0x38>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <99 IRQ_TYPE_LEVEL_LOW>;   /* INTP */
+		hpd-gpios = <&pio 36 GPIO_ACTIVE_HIGH>;
+		pd-gpios = <&pio 33 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&pio 98 GPIO_ACTIVE_HIGH>;
+		port {
+			anx7814_in: endpoint {
+				remote-endpoint = <&hdmi0_out>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
index 8dab6fd..107280e 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
@@ -5,10 +5,12 @@
 - reg : Should contain WDT registers location and length
 - interrupts : Should contain WDT interrupt
 
-Optional property:
+Optional properties:
 - big-endian: If present the watchdog device's registers are implemented
   in big endian mode, otherwise in native mode(same with CPU), for more
   detail please see: Documentation/devicetree/bindings/regmap/regmap.txt.
+- fsl,ext-reset-output: If present the watchdog device is configured to
+  assert its external reset (WDOG_B) instead of issuing a software reset.
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
index 852f694..49485f8 100644
--- a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
@@ -8,12 +8,12 @@
 - compatible: must be "microchip,pic32mzda-dmt".
 - reg: physical base address of the controller and length of memory mapped
   region.
-- clocks: phandle of parent clock (should be &PBCLK7).
+- clocks: phandle of source clk. Should be <&rootclk PB7CLK>.
 
 Example:
 
 	watchdog@1f800a00 {
 		compatible = "microchip,pic32mzda-dmt";
 		reg = <0x1f800a00 0x80>;
-		clocks = <&PBCLK7>;
+		clocks = <&rootclk PB7CLK>;
 	};
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
index d140103..f03a29a 100644
--- a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
@@ -7,12 +7,12 @@
 - compatible: must be "microchip,pic32mzda-wdt".
 - reg: physical base address of the controller and length of memory mapped
   region.
-- clocks: phandle of source clk. should be <&LPRC> clk.
+- clocks: phandle of source clk. Should be <&rootclk LPRCCLK>.
 
 Example:
 
 	watchdog@1f800800 {
 		compatible = "microchip,pic32mzda-wdt";
 		reg = <0x1f800800 0x200>;
-		clocks = <&LPRC>;
+		clocks = <&rootclk LPRCCLK>;
 	};
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
new file mode 100644
index 0000000..b9512f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -0,0 +1,25 @@
+Renesas Watchdog Timer (WDT) Controller
+
+Required properties:
+- compatible : Should be "renesas,r8a7795-wdt", or "renesas,rcar-gen3-wdt"
+
+  When compatible with the generic version, nodes must list the SoC-specific
+  version corresponding to the platform first, followed by the generic
+  version.
+
+- reg : Should contain WDT registers location and length
+- clocks : the clock feeding the watchdog timer.
+
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+- power-domains : the power domain the WDT belongs to
+
+Examples:
+
+	wdt0: watchdog@e6020000 {
+		compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt";
+		reg = <0 0xe6020000 0 0x0c>;
+		clocks = <&cpg CPG_MOD 402>;
+		power-domains = <&cpg>;
+		timeout-sec = <60>;
+	};
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 7bde640..ce4587d 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -79,6 +79,38 @@
 - ext4: the fourth extended filesystem, see Documentation/filesystems/ext4.txt
 
 
+Handling Media Errors
+---------------------
+
+The libnvdimm subsystem stores a record of known media error locations for
+each pmem block device (in gendisk->badblocks). If we fault at such location,
+or one with a latent error not yet discovered, the application can expect
+to receive a SIGBUS. Libnvdimm also allows clearing of these errors by simply
+writing the affected sectors (through the pmem driver, and if the underlying
+NVDIMM supports the clear_poison DSM defined by ACPI).
+
+Since DAX IO normally doesn't go through the driver/bio path, applications or
+sysadmins have an option to restore the lost data from a prior backup/inbuilt
+redundancy in the following ways:
+
+1. Delete the affected file, and restore from a backup (sysadmin route):
+   This will free the file system blocks that were being used by the file,
+   and the next time they're allocated, they will be zeroed first, which
+   happens through the driver, and will clear bad sectors.
+
+2. Truncate or hole-punch the part of the file that has a bad-block (at least
+   an entire aligned sector has to be hole-punched, but not necessarily an
+   entire filesystem block).
+
+These are the two basic paths that allow DAX filesystems to continue operating
+in the presence of media errors. More robust error recovery mechanisms can be
+built on top of this in the future, for example, involving redundancy/mirroring
+provided at the block layer through DM, or additionally, at the filesystem
+level. These would have to rely on the above two tenets, that error clearing
+can happen either by sending an IO through the driver, or zeroing (also through
+the driver).
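+
+For illustration only (a sketch, not part of the DAX or libnvdimm
+interfaces themselves; the file descriptor and byte range are assumed to
+come from the application), option 2 could be done with fallocate(2):
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+
+	/* Punch out the range containing the known-bad sector.  The range
+	 * must cover at least one whole aligned sector.  The freed blocks
+	 * are zeroed through the pmem driver when they are next allocated,
+	 * which clears the bad sectors.
+	 */
+	static int punch_bad_range(int fd, off_t offset, off_t len)
+	{
+		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				 offset, len);
+	}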
+
+
 Shortcomings
 ------------
 
diff --git a/Documentation/filesystems/devpts.txt b/Documentation/filesystems/devpts.txt
index 30d2fcb..9f94fe27 100644
--- a/Documentation/filesystems/devpts.txt
+++ b/Documentation/filesystems/devpts.txt
@@ -1,141 +1,26 @@
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indices allocated in one mount are independent from ptys
+and their indices in all other mounts.
 
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
 
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility, a ptmx device node (aka any node
+created with "mknod name c 5 2"), when opened, will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
 
-	- CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
-	- '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
-	$ chmod 0666 /dev/pts/ptmx
-	$ rm /dev/ptmx
-	$ ln -s pts/ptmx /dev/ptmx
-	$ ns_exec -cm /bin/bash
-
-	# We are now in new container
-
-	$ umount /dev/pts
-	$ mount -t devpts -o newinstance lxcpts /dev/pts
-	$ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process.  A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+As an option, instead of placing a /dev/ptmx device node at /dev/ptmx
+it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or
+to bind mount /dev/pts/ptmx to /dev/ptmx.  If you opt for using
+the devpts filesystem in this manner, devpts should be mounted with
+the ptmxmode=0666 option, or chmod 0666 /dev/pts/ptmx should be called.
 
 Total count of pty pairs in all instances is limited by sysctls:
 kernel.pty.max = 4096		- global limit
-kernel.pty.reserve = 1024	- reserve for initial instance
+kernel.pty.reserve = 1024	- reserved for filesystems mounted from the initial mount namespace
 kernel.pty.nr			- current count of ptys
 
 Per-instance limit could be set by adding mount option "max=<count>".
 This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
 In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
-   and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
-   administrators or startup scripts should "redirect" open of /dev/ptmx to
-   /dev/pts/ptmx using either a bind mount or symlink.
-
-	$ mount -t devpts -o newinstance devpts /dev/pts
-
-   followed by either
-
-	$ rm /dev/ptmx
-	$ ln -s pts/ptmx /dev/ptmx
-	$ chmod 666 /dev/pts/ptmx
-   or
-	$ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
-   enables better error-reporting and treats both single-instance and
-   multi-instance mounts similarly.
-
-   But this method requires that system-startup scripts set the mode of
-   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
-   mode by, either
-
-   	- adding ptmxmode mount option to devpts entry in /etc/fstab, or
-	- using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
-   startup scripts have not yet been updated, container-startup scripts
-   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
-   instance mounts.
-
-   Or, in general, container-startup scripts should use:
-
-	mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
-	if [ ! -L /dev/ptmx ]; then
-		mount -o bind /dev/pts/ptmx /dev/ptmx
-	fi
-
-   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
-   a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
-   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
-	mount -t devpts -o newinstance lxcpts /dev/pts
-
-   immediately followed by:
-
-	open("/dev/ptmx")
-
-    would create a pty, say /dev/pts/7, in the initial kernel mount.
-    But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
-   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
-	mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
-   The permissions can be later be changed as usual with 'chmod'.
-
-	chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
-   initial kernel mount.  This behavior while preserving legacy semantics,
-   does not provide strict isolation in a container environment. i.e by
-   mounting devpts without the 'newinstance' option, a container could
-   get visibility into the 'host' or root container's devpts.
-   
-   To workaround this and have strict isolation, all mounts of devpts,
-   including the mount in the root container, should use the newinstance
-   option.
diff --git a/Documentation/filesystems/directory-locking b/Documentation/filesystems/directory-locking
index 09bbf9a..c314bad 100644
--- a/Documentation/filesystems/directory-locking
+++ b/Documentation/filesystems/directory-locking
@@ -1,30 +1,37 @@
 	Locking scheme used for directory operations is based on two
-kinds of locks - per-inode (->i_mutex) and per-filesystem
+kinds of locks - per-inode (->i_rwsem) and per-filesystem
 (->s_vfs_rename_mutex).
 
-	When taking the i_mutex on multiple non-directory objects, we
+	When taking the i_rwsem on multiple non-directory objects, we
 always acquire the locks in order by increasing address.  We'll call
 that "inode pointer" order in the following.
 
 	For our purposes all operations fall in 5 classes:
 
 1) read access.  Locking rules: caller locks directory we are accessing.
+The lock is taken shared.
 
-2) object creation.  Locking rules: same as above.
+2) object creation.  Locking rules: same as above, but the lock is taken
+exclusive.
 
 3) object removal.  Locking rules: caller locks parent, finds victim,
-locks victim and calls the method.
+locks victim and calls the method.  Locks are exclusive.
 
 4) rename() that is _not_ cross-directory.  Locking rules: caller locks
-the parent and finds source and target.  If target already exists, lock
-it.  If source is a non-directory, lock it.  If that means we need to
-lock both, lock them in inode pointer order.
+the parent and finds source and target.  In case of exchange (with
+RENAME_EXCHANGE in rename2() flags argument) lock both.  In any case,
+if the target already exists, lock it.  If the source is a non-directory,
+lock it.  If we need to lock both, lock them in inode pointer order.
+Then call the method.  All locks are exclusive.
+NB: we might get away with locking the source (and target in the exchange
+case) shared.
 
 5) link creation.  Locking rules:
 	* lock parent
 	* check that source is not a directory
 	* lock source
 	* call the method.
+All locks are exclusive.
 
 6) cross-directory rename.  The trickiest in the whole bunch.  Locking
 rules:
@@ -35,11 +42,12 @@
 		fail with -ENOTEMPTY
 	* if new parent is equal to or is a descendent of source
 		fail with -ELOOP
-	* If target exists, lock it.  If source is a non-directory, lock
-	  it.  In case that means we need to lock both source and target,
-	  do so in inode pointer order.
+	* If it's an exchange, lock both the source and the target.
+	* If the target exists, lock it.  If the source is a non-directory,
+	  lock it.  If we need to lock both, do so in inode pointer order.
 	* call the method.
-
+All ->i_rwsem are taken exclusive.  Again, we might get away with locking
+the source (and target in the exchange case) shared.
 
 The rules above obviously guarantee that all directories that are going to be
 read, modified or removed by method will be locked by caller.
@@ -73,7 +81,7 @@
 attempt to acquire some lock and already holds at least one lock.  Let's
 consider the set of contended locks.  First of all, filesystem lock is
 not contended, since any process blocked on it is not holding any locks.
-Thus all processes are blocked on ->i_mutex.
+Thus all processes are blocked on ->i_rwsem.
 
 	By (3), any process holding a non-directory lock can only be
 waiting on another non-directory lock with a larger address.  Therefore
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 41c3d33..5b21ef7 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -268,3 +268,8 @@
                                     ( regular file, directory, or symlink )
 
 For detail on the format of each file, please see include/linux/nilfs2_fs.h.
+
+There are no patents or other intellectual property that we protect
+with regard to the design of NILFS2.  Anyone is allowed to replicate the
+design in the hope that other operating systems can share (mount, read,
+write, etc.) data stored in this format.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 2809145..d6259c7 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -194,15 +194,6 @@
 "break" the link.  Changes will not be propagated to other names
 referring to the same inode.
 
-Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
-object in overlayfs will not contain valid absolute paths, only
-relative paths leading up to the filesystem's root.  This will be
-fixed in the future.
-
-Some operations are not atomic, for example a crash during copy_up or
-rename will leave the filesystem in an inconsistent state.  This will
-be addressed in the future.
-
 Changes to underlying filesystems
 ---------------------------------
 
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 46f3bb7..a5fb89c 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -578,3 +578,10 @@
 --
 [mandatory]
 	->atomic_open() calls without O_CREAT may happen in parallel.
+--
+[mandatory]
+	->setxattr() and xattr_handler.set() get dentry and inode passed separately.
+	The dentry might not yet be attached to the inode, so do _not_ use its
+	->d_inode in the instances.  Rationale: security_d_instantiate() needs
+	to be called before we attach the dentry to the inode, and smack's
+	->d_instantiate() uses not just ->getxattr() but ->setxattr() as well.
diff --git a/Documentation/gdb-kernel-debugging.txt b/Documentation/gdb-kernel-debugging.txt
index 7050ce8..4ab7d43 100644
--- a/Documentation/gdb-kernel-debugging.txt
+++ b/Documentation/gdb-kernel-debugging.txt
@@ -139,6 +139,27 @@
       start_comm = "swapper/2\000\000\000\000\000\000"
     }
 
+ o Dig into a radix tree data structure, such as the IRQ descriptors:
+    (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
+    $6 = {
+      irq_common_data = {
+        state_use_accessors = 67584,
+        handler_data = 0x0 <__vectors_start>,
+        msi_desc = 0x0 <__vectors_start>,
+        affinity = {{
+            bits = {65535}
+          }}
+      },
+      irq_data = {
+        mask = 0,
+        irq = 18,
+        hwirq = 27,
+        common = 0xee803d80,
+        chip = 0xc0eb0854 <gic_data>,
+        domain = 0xee808000,
+        parent_data = 0x0 <__vectors_start>,
+        chip_data = 0xc0eb0854 <gic_data>
+      } <... trimmed ...>
 
 List of commands and functions
 ------------------------------
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index f5b1fca..9ba6587 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -5,17 +5,17 @@
   * Maxim MAX34440
     Prefixes: 'max34440'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34440.pdf
   * Maxim MAX34441
     PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
     Prefixes: 'max34441'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34441.pdf
   * Maxim MAX34446
     PMBus Power-Supply Data Logger
     Prefixes: 'max34446'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
   * Maxim MAX34460
     PMBus 12-Channel Voltage Monitor & Sequencer
     Prefix: 'max34460'
diff --git a/Documentation/infiniband/sysfs.txt b/Documentation/infiniband/sysfs.txt
index 3ecf0c3..45bcafe 100644
--- a/Documentation/infiniband/sysfs.txt
+++ b/Documentation/infiniband/sysfs.txt
@@ -56,6 +56,18 @@
   ports/1/pkeys/10 contains the value at index 10 in port 1's P_Key
   table.
 
+  There is an optional "hw_counters" subdirectory that may be present under
+  either the parent device or the port subdirectories or both.  If present,
+  it contains a list of counters provided by the hardware.  They may match
+  some of the counters in the counters directory, but they often include
+  many other counters.  In addition to the various counters, there will
+  be a file named "lifespan" that configures how frequently the core
+  should update the counters when they are being accessed (counters are
+  not updated if they are not being accessed).  The lifespan is in
+  milliseconds and defaults to 10 unless set to something else by the
+  driver.  Users may echo a value between 0 and 10000 to the lifespan
+  file to set the length of time between updates in milliseconds.
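+
+  For example, a small C helper (an illustrative sketch only; the sysfs
+  path below, including the "mlx5_0" device name, is made up) could set a
+  2 second lifespan like this:
+
+	#include <stdio.h>
+
+	/* path: e.g. "/sys/class/infiniband/mlx5_0/ports/1/hw_counters/lifespan" */
+	static int set_lifespan(const char *path, int ms)
+	{
+		FILE *f = fopen(path, "w");
+
+		if (!f)
+			return -1;
+		fprintf(f, "%d\n", ms);
+		return fclose(f);
+	}
+
+	/* set_lifespan(path, 2000) asks the core not to refresh the counters
+	 * more than once every 2000 milliseconds while they are being read. */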
+
 MTHCA
 
   The Mellanox HCA driver also creates the files:
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index c52856d..db10185 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -241,9 +241,8 @@
 	depends on !MODULES
 
 MODVERSIONS directly depends on MODULES, this means it's only visible if
-MODULES is different from 'n'. The comment on the other hand is always
-visible when MODULES is visible (the (empty) dependency of MODULES is
-also part of the comment dependencies).
+MODULES is different from 'n'. The comment on the other hand is only
+visible when MODULES is set to 'n'.
 
 
 Kconfig syntax
@@ -285,12 +284,17 @@
 	"endchoice"
 
 This defines a choice group and accepts any of the above attributes as
-options. A choice can only be of type bool or tristate, while a boolean
-choice only allows a single config entry to be selected, a tristate
-choice also allows any number of config entries to be set to 'm'. This
-can be used if multiple drivers for a single hardware exists and only a
-single driver can be compiled/loaded into the kernel, but all drivers
-can be compiled as modules.
+options. A choice can only be of type bool or tristate.  If no type is
+specified for a choice, its type will be determined by the type of
+the first choice element in the group, or remain unknown if none of the
+choice elements have a type specified either.
+
+While a boolean choice only allows a single config entry to be
+selected, a tristate choice also allows any number of config entries
+to be set to 'm'. This can be used if multiple drivers for a single piece
+of hardware exist and only a single driver can be compiled/loaded into
+the kernel, but all drivers can be compiled as modules.
+
 A choice accepts another option "optional", which allows to set the
 choice to 'n' and no entry needs to be selected.
 If no [symbol] is associated with a choice, then you can not have multiple
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index 9b9b454..220d0a8 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -15,15 +15,16 @@
 
 define bttnobp
 	set $tasks_off=((size_t)&((struct task_struct *)0)->tasks)
-	set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next)
+	set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next)
 	set $init_t=&init_task
 	set $next_t=(((char *)($init_t->tasks).next) - $tasks_off)
+	set var $stacksize = sizeof(union thread_union)
 	while ($next_t != $init_t)
 		set $next_t=(struct task_struct *)$next_t
 		printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm
 		printf "===================\n"
-		set var $stackp = $next_t.thread.esp
-		set var $stack_top = ($stackp & ~4095) + 4096
+		set var $stackp = $next_t.thread.sp
+		set var $stack_top = ($stackp & ~($stacksize - 1)) + $stacksize
 
 		while ($stackp < $stack_top)
 			if (*($stackp) > _stext && *($stackp) < _sinittext)
@@ -31,13 +32,13 @@
 			end
 			set $stackp += 4
 		end
-		set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off)
+		set $next_th=(((char *)$next_t->thread_group.next) - $pid_off)
 		while ($next_th != $next_t)
 			set $next_th=(struct task_struct *)$next_th
 			printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm
 			printf "===================\n"
-			set var $stackp = $next_t.thread.esp
-			set var $stack_top = ($stackp & ~4095) + 4096
+			set var $stackp = $next_t.thread.sp
+			set var $stack_top = ($stackp & ~($stacksize - 1)) + $stacksize
 
 			while ($stackp < $stack_top)
 				if (*($stackp) > _stext && *($stackp) < _sinittext)
@@ -45,7 +46,7 @@
 				end
 				set $stackp += 4
 			end
-			set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off)
+			set $next_th=(((char *)$next_th->thread_group.next) - $pid_off)
 		end
 		set $next_t=(char *)($next_t->tasks.next) - $tasks_off
 	end
@@ -54,42 +55,44 @@
 	dump all thread stack traces on a kernel compiled with !CONFIG_FRAME_POINTER
 end
 
+define btthreadstack
+	set var $pid_task = $arg0
+
+	printf "\npid %d; comm %s:\n", $pid_task.pid, $pid_task.comm
+	printf "task struct: "
+	print $pid_task
+	printf "===================\n"
+	set var $stackp = $pid_task.thread.sp
+	set var $stacksize = sizeof(union thread_union)
+	set var $stack_top = ($stackp & ~($stacksize - 1)) + $stacksize
+	set var $stack_bot = ($stackp & ~($stacksize - 1))
+
+	set $stackp = *((unsigned long *) $stackp)
+	while (($stackp < $stack_top) && ($stackp > $stack_bot))
+		set var $addr = *(((unsigned long *) $stackp) + 1)
+		info symbol $addr
+		set $stackp = *((unsigned long *) $stackp)
+	end
+end
+document btthreadstack
+	 dump a thread stack using the given task structure pointer
+end
+
+
 define btt
 	set $tasks_off=((size_t)&((struct task_struct *)0)->tasks)
-	set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next)
+	set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next)
 	set $init_t=&init_task
 	set $next_t=(((char *)($init_t->tasks).next) - $tasks_off)
 	while ($next_t != $init_t)
 		set $next_t=(struct task_struct *)$next_t
-		printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm
-		printf "===================\n"
-		set var $stackp = $next_t.thread.esp
-		set var $stack_top = ($stackp & ~4095) + 4096
-		set var $stack_bot = ($stackp & ~4095)
+		btthreadstack $next_t
 
-		set $stackp = *($stackp)
-		while (($stackp < $stack_top) && ($stackp > $stack_bot))
-			set var $addr = *($stackp + 4)
-			info symbol $addr
-			set $stackp = *($stackp)
-		end
-
-		set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off)
+		set $next_th=(((char *)$next_t->thread_group.next) - $pid_off)
 		while ($next_th != $next_t)
 			set $next_th=(struct task_struct *)$next_th
-			printf "\npid %d; comm %s:\n", $next_t.pid, $next_t.comm
-			printf "===================\n"
-			set var $stackp = $next_t.thread.esp
-			set var $stack_top = ($stackp & ~4095) + 4096
-			set var $stack_bot = ($stackp & ~4095)
-
-			set $stackp = *($stackp)
-			while (($stackp < $stack_top) && ($stackp > $stack_bot))
-				set var $addr = *($stackp + 4)
-				info symbol $addr
-				set $stackp = *($stackp)
-			end
-			set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off)
+			btthreadstack $next_th
+			set $next_th=(((char *)$next_th->thread_group.next) - $pid_off)
 		end
 		set $next_t=(char *)($next_t->tasks.next) - $tasks_off
 	end
@@ -101,7 +104,7 @@
 define btpid
 	set var $pid = $arg0
 	set $tasks_off=((size_t)&((struct task_struct *)0)->tasks)
-	set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next)
+	set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next)
 	set $init_t=&init_task
 	set $next_t=(((char *)($init_t->tasks).next) - $tasks_off)
 	set var $pid_task = 0
@@ -113,29 +116,18 @@
 			set $pid_task = $next_t
 		end
 
-		set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off)
+		set $next_th=(((char *)$next_t->thread_group.next) - $pid_off)
 		while ($next_th != $next_t)
 			set $next_th=(struct task_struct *)$next_th
 			if ($next_th.pid == $pid)
 				set $pid_task = $next_th
 			end
-			set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off)
+			set $next_th=(((char *)$next_th->thread_group.next) - $pid_off)
 		end
 		set $next_t=(char *)($next_t->tasks.next) - $tasks_off
 	end
 
-	printf "\npid %d; comm %s:\n", $pid_task.pid, $pid_task.comm
-	printf "===================\n"
-	set var $stackp = $pid_task.thread.esp
-	set var $stack_top = ($stackp & ~4095) + 4096
-	set var $stack_bot = ($stackp & ~4095)
-
-	set $stackp = *($stackp)
-	while (($stackp < $stack_top) && ($stackp > $stack_bot))
-		set var $addr = *($stackp + 4)
-		info symbol $addr
-		set $stackp = *($stackp)
-	end
+	btthreadstack $pid_task
 end
 document btpid
 	backtrace of pid
@@ -145,7 +137,7 @@
 define trapinfo
 	set var $pid = $arg0
 	set $tasks_off=((size_t)&((struct task_struct *)0)->tasks)
-	set $pid_off=((size_t)&((struct task_struct *)0)->pids[1].pid_list.next)
+	set $pid_off=((size_t)&((struct task_struct *)0)->thread_group.next)
 	set $init_t=&init_task
 	set $next_t=(((char *)($init_t->tasks).next) - $tasks_off)
 	set var $pid_task = 0
@@ -157,13 +149,13 @@
 			set $pid_task = $next_t
 		end
 
-		set $next_th=(((char *)$next_t->pids[1].pid_list.next) - $pid_off)
+		set $next_th=(((char *)$next_t->thread_group.next) - $pid_off)
 		while ($next_th != $next_t)
 			set $next_th=(struct task_struct *)$next_th
 			if ($next_th.pid == $pid)
 				set $pid_task = $next_th
 			end
-			set $next_th=(((char *)$next_th->pids[1].pid_list.next) - $pid_off)
+			set $next_th=(((char *)$next_th->thread_group.next) - $pid_off)
 		end
 		set $next_t=(char *)($next_t->tasks.next) - $tasks_off
 	end
@@ -178,21 +170,92 @@
 	address the kernel panicked.
 end
 
+define dump_log_idx
+	set $idx = $arg0
+	if ($argc > 1)
+		set $prev_flags = $arg1
+	else
+		set $prev_flags = 0
+	end
+	set $msg = ((struct printk_log *) (log_buf + $idx))
+	set $prefix = 1
+	set $newline = 1
+	set $log = log_buf + $idx + sizeof(*$msg)
+
+	# prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+	if (($prev_flags & 8) && !($msg->flags & 4))
+		set $prefix = 0
+	end
+
+	# msg->flags & LOG_CONT
+	if ($msg->flags & 8)
+		# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+		if (($prev_flags & 8) && !($prev_flags & 2))
+			set $prefix = 0
+		end
+		# (!(msg->flags & LOG_NEWLINE))
+		if (!($msg->flags & 2))
+			set $newline = 0
+		end
+	end
+
+	if ($prefix)
+		printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+	end
+	if ($msg->text_len != 0)
+		eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+	end
+	if ($newline)
+		printf "\n"
+	end
+	if ($msg->dict_len > 0)
+		set $dict = $log + $msg->text_len
+		set $idx = 0
+		set $line = 1
+		while ($idx < $msg->dict_len)
+			if ($line)
+				printf " "
+				set $line = 0
+			end
+			set $c = $dict[$idx]
+			if ($c == '\0')
+				printf "\n"
+				set $line = 1
+			else
+				if ($c < ' ' || $c >= 127 || $c == '\\')
+					printf "\\x%02x", $c
+				else
+					printf "%c", $c
+				end
+			end
+			set $idx = $idx + 1
+		end
+		printf "\n"
+	end
+end
+document dump_log_idx
+	Dump a single log given its index in the log buffer.  The first
+	parameter is the index into log_buf, the second is optional and
+	specifies the previous log entry's flags, used for properly
+	formatting continued lines.
+end
 
 define dmesg
-	set $i = 0
-	set $end_idx = (log_end - 1) & (log_buf_len - 1)
+	set $i = log_first_idx
+	set $end_idx = log_first_idx
+	set $prev_flags = 0
 
-	while ($i < logged_chars)
-		set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
-
-		if ($idx + 100 <= $end_idx) || \
-		   ($end_idx <= $idx && $idx + 100 < log_buf_len)
-			printf "%.100s", &log_buf[$idx]
-			set $i = $i + 100
+	while (1)
+		set $msg = ((struct printk_log *) (log_buf + $i))
+		if ($msg->len == 0)
+			set $i = 0
 		else
-			printf "%c", log_buf[$idx]
-			set $i = $i + 1
+			dump_log_idx $i $prev_flags
+			set $i = $i + $msg->len
+			set $prev_flags = $msg->flags
+		end
+		if ($i == $end_idx)
+			loop_break
 		end
 	end
 end
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6c7f365b..9ae9293 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1036,15 +1036,17 @@
 
 shared_media - BOOLEAN
 	Send(router) or accept(host) RFC1620 shared media redirects.
-	Overrides ip_secure_redirects.
+	Overrides secure_redirects.
 	shared_media for the interface will be enabled if at least one of
 	conf/{all,interface}/shared_media is set to TRUE,
 	it will be disabled otherwise
 	default TRUE
 
 secure_redirects - BOOLEAN
-	Accept ICMP redirect messages only for gateways,
-	listed in default gateway list.
+	Accept ICMP redirect messages only to gateways listed in the
+	interface's current gateway list. Even if disabled, RFC1122 redirect
+	rules still apply.
+	Overridden by shared_media.
 	secure_redirects for the interface will be enabled if at least one of
 	conf/{all,interface}/secure_redirects is set to TRUE,
 	it will be disabled otherwise
diff --git a/Documentation/pwm.txt b/Documentation/pwm.txt
index ca895fd..789b27c 100644
--- a/Documentation/pwm.txt
+++ b/Documentation/pwm.txt
@@ -42,9 +42,26 @@
 
 After being requested, a PWM has to be configured using:
 
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
+int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
 
-To start/stop toggling the PWM output use pwm_enable()/pwm_disable().
+This API controls both the PWM period/duty_cycle config and the
+enable/disable state.
+
+The pwm_config(), pwm_enable() and pwm_disable() functions are just wrappers
+around pwm_apply_state() and should not be used if the user wants to change
+several parameter at once. For example, if you see pwm_config() and
+pwm_{enable,disable}() calls in the same function, this probably means you
+should switch to pwm_apply_state().
+
+The PWM user API also allows one to query the PWM state with pwm_get_state().
+
+In addition to the PWM state, the PWM API also exposes PWM arguments, which
+are the reference PWM config one should use on this PWM.
+PWM arguments are usually platform-specific and allow the PWM user to only
+care about the duty cycle relative to the full period (e.g. duty = 50% of the
+period). struct pwm_args contains 2 fields (period and polarity) and should
+be used to set the initial PWM config (usually done in the probe function
+of the PWM user). PWM arguments are retrieved with pwm_get_args().
 
 Using PWMs with the sysfs interface
 -----------------------------------
@@ -105,6 +122,15 @@
 polarity starts low for the duration of the duty cycle and goes high for the
 remainder of the period.
 
+Drivers are encouraged to implement ->apply() instead of the legacy
+->enable(), ->disable() and ->config() methods. Doing that should provide
+atomicity in the PWM config workflow, which is required when the PWM controls
+a critical device (like a regulator).
+
+The implementation of ->get_state() (a method used to retrieve initial PWM
+state) is also encouraged for the same reason: letting the PWM user know
+about the current PWM state allows it to avoid glitches.
+
 Locking
 -------
 
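
As a side note to the pwm.txt changes above, a typical consumer of the atomic
API would read the reference arguments and the current state, then adjust and
apply everything in one call. The following is only a hedged sketch of a
hypothetical consumer (the device handle, error handling and the 50%
duty-cycle choice are assumptions, not part of this patch):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pwm.h>

    /* Hypothetical consumer: program a 50% duty cycle and enable the PWM
     * with a single pwm_apply_state() call. */
    static int example_pwm_setup(struct device *dev)
    {
            struct pwm_device *pwm;
            struct pwm_state state;
            struct pwm_args args;

            pwm = devm_pwm_get(dev, NULL);
            if (IS_ERR(pwm))
                    return PTR_ERR(pwm);

            /* Reference config provided by the platform (period, polarity). */
            pwm_get_args(pwm, &args);

            /* Start from the current hardware state instead of guessing it. */
            pwm_get_state(pwm, &state);

            state.period = args.period;
            state.duty_cycle = args.period / 2;     /* 50% of the period */
            state.polarity = args.polarity;
            state.enabled = true;

            /* Period, duty cycle, polarity and enable are applied atomically. */
            return pwm_apply_state(pwm, &state);
    }
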
diff --git a/Documentation/scsi/tcm_qla2xxx.txt b/Documentation/scsi/tcm_qla2xxx.txt
new file mode 100644
index 0000000..c3a670a
--- /dev/null
+++ b/Documentation/scsi/tcm_qla2xxx.txt
@@ -0,0 +1,22 @@
+tcm_qla2xxx jam_host attribute
+------------------------------
+There is now a new module endpoint attribute called jam_host
+attribute: jam_host: boolean=0/1
+This attribute and the accompanying code are only included if the
+Kconfig parameter TCM_QLA2XXX_DEBUG is set to Y.
+By default this jammer code and functionality is disabled.
+
+Use this attribute to control the discarding of SCSI commands to a
+selected host.
+This may be useful for testing error handling and simulating slow drain
+and other fabric issues.
+
+Setting a boolean of 1 for the jam_host attribute for a particular host
+will discard the commands for that host.
+Reset back to 0 to stop the jamming.
+
+Enable host 4 to be jammed
+echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
+
+Disable jamming on host 4
+echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 20d0571..3849814 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -826,7 +826,8 @@
  (*) Compute a Diffie-Hellman shared secret or public key
 
        long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-		   char *buffer, size_t buflen);
+		   char *buffer, size_t buflen,
+		   void *reserved);
 
      The params struct contains serial numbers for three keys:
 
@@ -843,6 +844,8 @@
      public key.  If the base is the remote public key, the result is
      the shared secret.
 
+     The reserved argument must be set to NULL.
+
      The buffer length must be at least the length of the prime, or zero.
 
      If the buffer length is nonzero, the length of the result is
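
To make the updated prototype above concrete, a userspace caller would pass
NULL for the new reserved argument. This is a hedged sketch using the raw
syscall; the helper name and the assumption that the three key serial numbers
were created earlier with add_key() are illustrative, not from this patch.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef KEYCTL_DH_COMPUTE
    #define KEYCTL_DH_COMPUTE 23
    #endif

    /* Mirrors struct keyctl_dh_params from <linux/keyctl.h>. */
    struct dh_params {
            int32_t priv;           /* named "private" in the uapi header */
            int32_t prime;
            int32_t base;
    };

    /* The three serials are assumed to reference keys added beforehand. */
    static long dh_compute(int32_t private_key, int32_t prime_key,
                           int32_t base_key, char *buf, size_t buflen)
    {
            struct dh_params params = {
                    .priv  = private_key,
                    .prime = prime_key,
                    .base  = base_key,
            };

            /* The fifth (reserved) argument must be NULL, as documented. */
            return syscall(SYS_keyctl, KEYCTL_DH_COMPUTE, &params, buf,
                           buflen, NULL);
    }
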
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index daabdd7..a3683ce 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -61,6 +61,7 @@
 - perf_cpu_time_max_percent
 - perf_event_paranoid
 - perf_event_max_stack
+- perf_event_max_contexts_per_stack
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -668,6 +669,19 @@
 
 ==============================================================
 
+perf_event_max_contexts_per_stack:
+
+Controls the maximum number of stack frame context entries for
+events configured with (attr.sample_type & PERF_SAMPLE_CALLCHAIN), for
+instance, when using 'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 8.
+
+==============================================================
+
 pid_max:
 
 PID allocation wrap value.  When the kernel's next PID value
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 7d370c9..94bf694 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -294,8 +294,6 @@
 	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
 	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
 	buf += "	.release_cmd			= " + fabric_mod_name + "_release_cmd,\n"
-	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
-	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
 	buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
 	buf += "	.sess_get_initiator_sid		= NULL,\n"
 	buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
@@ -467,20 +465,6 @@
 			buf += "}\n\n"
 			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
 
-		if re.search('shutdown_session\)\(', fo):
-			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
-			buf += "{\n"
-			buf += "	return 0;\n"
-			buf += "}\n\n"
-			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
-
-		if re.search('close_session\)\(', fo):
-			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
-			buf += "{\n"
-			buf += "	return;\n"
-			buf += "}\n\n"
-			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
-
 		if re.search('sess_get_index\)\(', fo):
 			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
 			buf += "{\n"
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index ed419d6..efc3f3d 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -69,8 +69,8 @@
 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
     This interface function removes the thermal zone device.
-    It deletes the corresponding entry form /sys/class/thermal folder and
-    unbind all the thermal cooling devices it uses.
+    It deletes the corresponding entry from /sys/class/thermal folder and
+    unbinds all the thermal cooling devices it uses.
 
 1.1.3 struct thermal_zone_device *thermal_zone_of_sensor_register(
 		struct device *dev, int sensor_id, void *data,
@@ -146,32 +146,32 @@
 
     This interface function adds a new thermal cooling device (fan/processor/...)
     to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
-    to all the thermal zone devices register at the same time.
+    to all the thermal zone devices registered at the same time.
     name: the cooling device name.
     devdata: device private data.
     ops: thermal cooling devices call-backs.
 	.get_max_state: get the Maximum throttle state of the cooling device.
-	.get_cur_state: get the Current throttle state of the cooling device.
+	.get_cur_state: get the Currently requested throttle state of the cooling device.
 	.set_cur_state: set the Current throttle state of the cooling device.
 
 1.2.2 void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
 
-    This interface function remove the thermal cooling device.
-    It deletes the corresponding entry form /sys/class/thermal folder and
-    unbind itself from all the thermal zone devices using it.
+    This interface function removes the thermal cooling device.
+    It deletes the corresponding entry from /sys/class/thermal folder and
+    unbinds itself from all the thermal zone devices using it.
 
 1.3 interface for binding a thermal zone device with a thermal cooling device
 1.3.1 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
 	int trip, struct thermal_cooling_device *cdev,
 	unsigned long upper, unsigned long lower, unsigned int weight);
 
-    This interface function bind a thermal cooling device to the certain trip
+    This interface function binds a thermal cooling device to a particular trip
     point of a thermal zone device.
     This function is usually called in the thermal zone device .bind callback.
     tz: the thermal zone device
     cdev: thermal cooling device
-    trip: indicates which trip point the cooling devices is associated with
-	  in this thermal zone.
+    trip: indicates which trip point in this thermal zone the cooling device
+          is associated with.
     upper:the Maximum cooling state for this trip point.
           THERMAL_NO_LIMIT means no upper limit,
 	  and the cooling device can be in max_state.
@@ -184,13 +184,13 @@
 1.3.2 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
 		int trip, struct thermal_cooling_device *cdev);
 
-    This interface function unbind a thermal cooling device from the certain
+    This interface function unbinds a thermal cooling device from a particular
     trip point of a thermal zone device. This function is usually called in
     the thermal zone device .unbind callback.
     tz: the thermal zone device
     cdev: thermal cooling device
-    trip: indicates which trip point the cooling devices is associated with
-	  in this thermal zone.
+    trip: indicates which trip point in this thermal zone the cooling device
+          is associated with.
 
 1.4 Thermal Zone Parameters
 1.4.1 struct thermal_bind_params
@@ -210,13 +210,13 @@
                this thermal zone and cdev, for a particular trip point.
                If nth bit is set, then the cdev and thermal zone are bound
                for trip point n.
-    .limits: This is an array of cooling state limits. Must have exactly
-         2 * thermal_zone.number_of_trip_points. It is an array consisting
-         of tuples <lower-state upper-state> of state limits. Each trip
-         will be associated with one state limit tuple when binding.
-         A NULL pointer means <THERMAL_NO_LIMITS THERMAL_NO_LIMITS>
-         on all trips. These limits are used when binding a cdev to a
-         trip point.
+    .binding_limits: This is an array of cooling state limits. Must have
+                     exactly 2 * thermal_zone.number_of_trip_points. It is an
+                     array consisting of tuples <lower-state upper-state> of
+                     state limits. Each trip will be associated with one state
+                     limit tuple when binding. A NULL pointer means
+                     <THERMAL_NO_LIMITS THERMAL_NO_LIMITS> on all trips.
+                     These limits are used when binding a cdev to a trip point.
     .match: This call back returns success(0) if the 'tz and cdev' need to
 	    be bound, as per platform data.
 1.4.2 struct thermal_zone_params
@@ -351,8 +351,8 @@
 	RO, Optional
 
 cdev[0-*]_trip_point
-	The trip point with which cdev[0-*] is associated in this thermal
-	zone; -1 means the cooling device is not associated with any trip
+	The trip point in this thermal zone which cdev[0-*] is associated
+	with; -1 means the cooling device is not associated with any trip
 	point.
 	RO, Optional
 
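
Tying the corrected wording above to code, a thermal zone driver's
.bind/.unbind callbacks are where thermal_zone_bind_cooling_device() and its
unbind counterpart are normally called. A minimal sketch of a hypothetical
driver follows; the trip index 0 and the function names are assumptions used
only for illustration.

    #include <linux/thermal.h>

    #define EXAMPLE_PASSIVE_TRIP    0       /* assumed trip point index */

    /* Hypothetical .bind callback: associate the cooling device with trip 0,
     * letting the governor use the cooling device's full state range. */
    static int example_tz_bind(struct thermal_zone_device *tz,
                               struct thermal_cooling_device *cdev)
    {
            return thermal_zone_bind_cooling_device(tz, EXAMPLE_PASSIVE_TRIP,
                                                    cdev, THERMAL_NO_LIMIT,
                                                    THERMAL_NO_LIMIT,
                                                    THERMAL_WEIGHT_DEFAULT);
    }

    /* Matching .unbind callback. */
    static int example_tz_unbind(struct thermal_zone_device *tz,
                                 struct thermal_cooling_device *cdev)
    {
            return thermal_zone_unbind_cooling_device(tz, EXAMPLE_PASSIVE_TRIP,
                                                      cdev);
    }
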
diff --git a/Documentation/watchdog/hpwdt.txt b/Documentation/watchdog/hpwdt.txt
index 9488078..a40398c 100644
--- a/Documentation/watchdog/hpwdt.txt
+++ b/Documentation/watchdog/hpwdt.txt
@@ -1,64 +1,67 @@
-Last reviewed: 06/02/2009
+Last reviewed: 04/04/2016
 
-                     HP iLO2 NMI Watchdog Driver
-              NMI sourcing for iLO2 based ProLiant Servers
+                     HPE iLO NMI Watchdog Driver
+              NMI sourcing for iLO based ProLiant Servers
                      Documentation and Driver by
-              Thomas Mingarelli <thomas.mingarelli@hp.com>
+              Thomas Mingarelli <thomas.mingarelli@hpe.com>
 
- The HP iLO2 NMI Watchdog driver is a kernel module that provides basic
+ The HPE iLO NMI Watchdog driver is a kernel module that provides basic
  watchdog functionality and the added benefit of NMI sourcing. Both the
  watchdog functionality and the NMI sourcing capability need to be enabled
  by the user. Remember that the two modes are not dependent on one another.
  A user can have the NMI sourcing without the watchdog timer and vice-versa.
+ All references to iLO in this document also apply to iLO2 and all
+ subsequent iLO generations.
 
  Watchdog functionality is enabled like any other common watchdog driver. That
  is, an application needs to be started that kicks off the watchdog timer. A
  basic application exists in the Documentation/watchdog/src directory called
  watchdog-test.c. Simply compile the C file and kick it off. If the system
- gets into a bad state and hangs, the HP ProLiant iLO 2 timer register will
+ gets into a bad state and hangs, the HPE ProLiant iLO timer register will
  not be updated in a timely fashion and a hardware system reset (also known as
  an Automatic Server Recovery (ASR)) event will occur.
 
- The hpwdt driver also has four (4) module parameters. They are the following:
+ The hpwdt driver also has three (3) module parameters. They are the following:
 
- soft_margin - allows the user to set the watchdog timer value
- allow_kdump - allows the user to save off a kernel dump image after an NMI
+ soft_margin - allows the user to set the watchdog timer value.
+               Default value is 30 seconds.
+ allow_kdump - allows the user to save off a kernel dump image after an NMI.
+               Default value is 1/ON
  nowayout    - basic watchdog parameter that does not allow the timer to
                be restarted or an impending ASR to be escaped.
- priority    - determines whether or not the hpwdt driver is first on the
-               die_notify list to handle NMIs or last. The default value
-               for this module parameter is 0 or LAST. If the user wants to
-               enable NMI sourcing then reload the hpwdt driver with
-               priority=1 (and boot with nmi_watchdog=0).
+               Default value is set when compiling the kernel. If it is set
+               to "Y", then there is no way of disabling the watchdog once
+               it has been started.
 
  NOTE: More information about watchdog drivers in general, including the ioctl
        interface to /dev/watchdog can be found in
        Documentation/watchdog/watchdog-api.txt and Documentation/IPMI.txt.
 
- The priority parameter was introduced due to other kernel software that relied
- on handling NMIs (like oprofile). Keeping hpwdt's priority at 0 (or LAST)
- enables the users of NMIs for non critical events to be work as expected.
-
  The NMI sourcing capability is disabled by default due to the inability to
  distinguish between "NMI Watchdog Ticks" and "HW generated NMI events" in the
  Linux kernel. What this means is that the hpwdt nmi handler code is called
  each time the NMI signal fires off. This could amount to several thousands of
  NMIs in a matter of seconds. If a user sees the Linux kernel's "dazed and
  confused" message in the logs or if the system gets into a hung state, then
- the hpwdt driver can be reloaded with the "priority" module parameter set
- (priority=1).
+ the hpwdt driver can be reloaded.
 
  1. If the kernel has not been booted with nmi_watchdog turned off then
-    edit /boot/grub/menu.lst and place the nmi_watchdog=0 at the end of the
-    currently booting kernel line.
+    edit the boot loader configuration and place nmi_watchdog=0 at the end
+    of the currently booting kernel line. Depending on your Linux
+    distribution and platform setup, this file is:
+    For non-UEFI systems
+       /boot/grub/grub.conf   or
+       /boot/grub/menu.lst
+    For UEFI systems
+      /boot/efi/EFI/distroname/grub.conf   or
+      /boot/efi/efi/distroname/elilo.conf
 2. reboot the server
- 3. Once the system comes up perform a rmmod hpwdt
- 4. insmod /lib/modules/`uname -r`/kernel/drivers/char/watchdog/hpwdt.ko priority=1
+ 3. Once the system comes up perform a modprobe -r hpwdt
+ 4. modprobe /lib/modules/`uname -r`/kernel/drivers/watchdog/hpwdt.ko
 
  Now, the hpwdt can successfully receive and source the NMI and provide a log
- message that details the reason for the NMI (as determined by the HP BIOS).
+ message that details the reason for the NMI (as determined by the HPE BIOS).
 
- Below is a list of NMIs the HP BIOS understands along with the associated
+ Below is a list of NMIs the HPE BIOS understands along with the associated
  code (reason):
 
 	No source found                00h
@@ -92,4 +95,4 @@
 
 
  -- Tom Mingarelli
-    (thomas.mingarelli@hp.com)
+    (thomas.mingarelli@hpe.com)
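
For reference, the watchdog-test application mentioned in the hpwdt text
above boils down to opening /dev/watchdog and kicking it on a regular basis.
A minimal, hedged sketch follows; the 10-second kick interval and the
explicit WDIOC_SETTIMEOUT call are illustrative choices, not part of this
patch.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int timeout = 30;       /* matches hpwdt's default soft_margin */
            int fd = open("/dev/watchdog", O_WRONLY);

            if (fd < 0) {
                    perror("open /dev/watchdog");
                    return 1;
            }

            /* Optionally program the timeout explicitly. */
            ioctl(fd, WDIOC_SETTIMEOUT, &timeout);

            for (;;) {
                    /* Kick the timer; if this stops, ASR resets the system. */
                    ioctl(fd, WDIOC_KEEPALIVE, 0);
                    sleep(10);
            }

            return 0;       /* not reached */
    }
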
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index c161399..a8d3642 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -86,6 +86,10 @@
 davinci_wdt:
 heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
 -------------------------------------------------
+ebc-c384_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=15300, default=60)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
 ep93xx_wdt:
 nowayout: Watchdog cannot be stopped once started
 timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=TBD)
diff --git a/Kbuild b/Kbuild
index f55cefd..3d0ae15 100644
--- a/Kbuild
+++ b/Kbuild
@@ -5,6 +5,7 @@
 # 2) Generate timeconst.h
 # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
 # 4) Check for missing system calls
+# 5) Generate constants.py (may need bounds.h)
 
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
@@ -96,5 +97,14 @@
 missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
 	$(call cmd,syscalls)
 
+#####
+# 5) Generate constants for Python GDB integration
+#
+
+extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
+
+build_constants_py: $(obj)/$(timeconst-file) $(obj)/$(bounds-file)
+	@$(MAKE) $(build)=scripts/gdb/linux $@
+
 # Keep these three files during make clean
 no-clean-files := $(bounds-file) $(offsets-file) $(timeconst-file)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7e47346..2ebe195 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -856,6 +856,12 @@
 F:	drivers/net/arcnet/
 F:	include/uapi/linux/if_arcnet.h
 
+ARC PGU DRM DRIVER
+M:	Alexey Brodkin <abrodkin@synopsys.com>
+S:	Supported
+F:	drivers/gpu/drm/arc/
+F:	Documentation/devicetree/bindings/display/snps,arcpgu.txt
+
 ARM HDLCD DRM DRIVER
 M:	Liviu Dudau <liviu.dudau@arm.com>
 S:	Supported
@@ -1945,6 +1951,16 @@
 S:	Maintained
 F:	drivers/platform/x86/asus-wireless.c
 
+ASYMMETRIC KEYS
+M:	David Howells <dhowells@redhat.com>
+L:	keyrings@vger.kernel.org
+S:	Maintained
+F:	Documentation/crypto/asymmetric-keys.txt
+F:	include/linux/verification.h
+F:	include/crypto/public_key.h
+F:	include/crypto/pkcs7.h
+F:	crypto/asymmetric_keys/
+
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 R:	Dan Williams <dan.j.williams@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
@@ -2288,7 +2304,7 @@
 M:	Kent Overstreet <kent.overstreet@gmail.com>
 L:	linux-bcache@vger.kernel.org
 W:	http://bcache.evilpiepirate.org
-S:	Maintained
+S:	Orphan
 F:	drivers/md/bcache/
 
 BDISP ST MEDIA DRIVER
@@ -2489,6 +2505,7 @@
 M:	Rafał Miłecki <zajec5@gmail.com>
 L:	linux-mips@linux-mips.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/mips/brcm/
 F:	arch/mips/bcm47xx/*
 F:	arch/mips/include/asm/mach-bcm47xx/*
 
@@ -3069,6 +3086,7 @@
 L:	linux-clk@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/clock/
 F:	drivers/clk/
 X:	drivers/clk/clkdev.c
 F:	include/linux/clk-pr*
@@ -3837,9 +3855,25 @@
 S:	Maintained
 F:	drivers/gpu/drm/
 F:	drivers/gpu/vga/
+F:	Documentation/DocBook/gpu.*
 F:	include/drm/
 F:	include/uapi/drm/
 
+DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
+M:	Dave Airlie <airlied@redhat.com>
+S:	Odd Fixes
+F:	drivers/gpu/drm/ast/
+
+DRM DRIVER FOR BOCHS VIRTUAL GPU
+M:	Gerd Hoffmann <kraxel@redhat.com>
+S:	Odd Fixes
+F:	drivers/gpu/drm/bochs/
+
+DRM DRIVER FOR QEMU'S CIRRUS DEVICE
+M:	Dave Airlie <airlied@redhat.com>
+S:	Odd Fixes
+F:	drivers/gpu/drm/cirrus/
+
 RADEON and AMDGPU DRM DRIVERS
 M:	Alex Deucher <alexander.deucher@amd.com>
 M:	Christian König <christian.koenig@amd.com>
@@ -3847,9 +3881,9 @@
 T:	git git://people.freedesktop.org/~agd5f/linux
 S:	Supported
 F:	drivers/gpu/drm/radeon/
-F:	include/uapi/drm/radeon*
+F:	include/uapi/drm/radeon_drm.h
 F:	drivers/gpu/drm/amd/
-F:	include/uapi/drm/amdgpu*
+F:	include/uapi/drm/amdgpu_drm.h
 
 DRM PANEL DRIVERS
 M:	Thierry Reding <thierry.reding@gmail.com>
@@ -3872,7 +3906,7 @@
 S:	Supported
 F:	drivers/gpu/drm/i915/
 F:	include/drm/i915*
-F:	include/uapi/drm/i915*
+F:	include/uapi/drm/i915_drm.h
 
 DRM DRIVERS FOR ATMEL HLCDC
 M:	Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -3881,6 +3915,13 @@
 F:	drivers/gpu/drm/atmel-hlcdc/
 F:	Documentation/devicetree/bindings/drm/atmel/
 
+DRM DRIVERS FOR ALLWINNER A10
+M:	Maxime Ripard  <maxime.ripard@free-electrons.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/sun4i/
+F:	Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+
 DRM DRIVERS FOR EXYNOS
 M:	Inki Dae <inki.dae@samsung.com>
 M:	Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3890,8 +3931,8 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
 S:	Supported
 F:	drivers/gpu/drm/exynos/
-F:	include/drm/exynos*
-F:	include/uapi/drm/exynos*
+F:	include/uapi/drm/exynos_drm.h
+F:	Documentation/devicetree/bindings/display/exynos/
 
 DRM DRIVERS FOR FREESCALE DCU
 M:	Stefan Agner <stefan@agner.ch>
@@ -3900,6 +3941,7 @@
 S:	Supported
 F:	drivers/gpu/drm/fsl-dcu/
 F:	Documentation/devicetree/bindings/display/fsl,dcu.txt
+F:	Documentation/devicetree/bindings/display/fsl,tcon.txt
 F:	Documentation/devicetree/bindings/display/panel/nec,nl4827hc19_05b.txt
 
 DRM DRIVERS FOR FREESCALE IMX
@@ -3915,12 +3957,45 @@
 L:	dri-devel@lists.freedesktop.org
 T:	git git://github.com/patjak/drm-gma500
 S:	Maintained
-F:	drivers/gpu/drm/gma500
-F:	include/drm/gma500*
+F:	drivers/gpu/drm/gma500/
+
+DRM DRIVERS FOR HISILICON
+M:	Xinliang Liu <z.liuxinliang@hisilicon.com>
+R:	Xinwei Kong <kong.kongxinwei@hisilicon.com>
+R:	Chen Feng <puck.chen@hisilicon.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git git://github.com/xin3liang/linux.git
+S:	Maintained
+F:	drivers/gpu/drm/hisilicon/
+F:	Documentation/devicetree/bindings/display/hisilicon/
+
+DRM DRIVER FOR INTEL I810 VIDEO CARDS
+S:	Orphan / Obsolete
+F:	drivers/gpu/drm/i810/
+F:	include/uapi/drm/i810_drm.h
+
+DRM DRIVER FOR MSM ADRENO GPU
+M:	Rob Clark <robdclark@gmail.com>
+L:	linux-arm-msm@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+L:	freedreno@lists.freedesktop.org
+T:	git git://people.freedesktop.org/~robclark/linux
+S:	Maintained
+F:	drivers/gpu/drm/msm/
+F:	include/uapi/drm/msm_drm.h
+F:	Documentation/devicetree/bindings/display/msm/
+
+DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
+M:	Ben Skeggs <bskeggs@redhat.com>
+L:	dri-devel@lists.freedesktop.org
+L:	nouveau@lists.freedesktop.org
+T:	git git://github.com/skeggsb/linux
+S:	Supported
+F:	drivers/gpu/drm/nouveau/
+F:	include/uapi/drm/nouveau_drm.h
 
 DRM DRIVERS FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
-M:	Terje Bergström <tbergstrom@nvidia.com>
 L:	dri-devel@lists.freedesktop.org
 L:	linux-tegra@vger.kernel.org
 T:	git git://anongit.freedesktop.org/tegra/linux.git
@@ -3931,22 +4006,54 @@
 F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
 
+DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
+S:	Orphan / Obsolete
+F:	drivers/gpu/drm/mga/
+F:	include/uapi/drm/mga_drm.h
+
+DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
+M:	Dave Airlie <airlied@redhat.com>
+S:	Odd Fixes
+F:	drivers/gpu/drm/mgag200/
+
+DRM DRIVER FOR RAGE 128 VIDEO CARDS
+S:	Orphan / Obsolete
+F:	drivers/gpu/drm/r128/
+F:	include/uapi/drm/r128_drm.h
+
 DRM DRIVERS FOR RENESAS
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:	dri-devel@lists.freedesktop.org
 L:	linux-renesas-soc@vger.kernel.org
-T:	git git://people.freedesktop.org/~airlied/linux
+T:	git git://linuxtv.org/pinchartl/fbdev
 S:	Supported
 F:	drivers/gpu/drm/rcar-du/
 F:	drivers/gpu/drm/shmobile/
 F:	include/linux/platform_data/shmob_drm.h
+F:	Documentation/devicetree/bindings/display/renesas,du.txt
+
+DRM DRIVER FOR QXL VIRTUAL GPU
+M:	Dave Airlie <airlied@redhat.com>
+S:	Odd Fixes
+F:	drivers/gpu/drm/qxl/
+F:	include/uapi/drm/qxl_drm.h
 
 DRM DRIVERS FOR ROCKCHIP
 M:	Mark Yao <mark.yao@rock-chips.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 F:	drivers/gpu/drm/rockchip/
-F:	Documentation/devicetree/bindings/display/rockchip*
+F:	Documentation/devicetree/bindings/display/rockchip/
+
+DRM DRIVER FOR SAVAGE VIDEO CARDS
+S:	Orphan / Obsolete
+F:	drivers/gpu/drm/savage/
+F:	include/uapi/drm/savage_drm.h
+
+DRM DRIVER FOR SIS VIDEO CARDS
+S:	Orphan / Obsolete
+F:	drivers/gpu/drm/sis/
+F:	include/uapi/drm/sis_drm.h
 
 DRM DRIVERS FOR STI
 M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
@@ -3957,14 +4064,43 @@
 F:	drivers/gpu/drm/sti
 F:	Documentation/devicetree/bindings/display/st,stih4xx.txt
 
+DRM DRIVER FOR TDFX VIDEO CARDS
+S:	Orphan / Obsolete
+F:	drivers/gpu/drm/tdfx/
+
+DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
+M:	Dave Airlie <airlied@redhat.com>
+S:	Odd Fixes
+F:	drivers/gpu/drm/udl/
+
 DRM DRIVERS FOR VIVANTE GPU IP
 M:	Lucas Stach <l.stach@pengutronix.de>
 R:	Russell King <linux+etnaviv@armlinux.org.uk>
 R:	Christian Gmeiner <christian.gmeiner@gmail.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
-F:	drivers/gpu/drm/etnaviv
-F:	Documentation/devicetree/bindings/display/etnaviv
+F:	drivers/gpu/drm/etnaviv/
+F:	include/uapi/drm/etnaviv_drm.h
+F:	Documentation/devicetree/bindings/display/etnaviv/
+
+DRM DRIVER FOR VMWARE VIRTUAL GPU
+M:	"VMware Graphics" <linux-graphics-maintainer@vmware.com>
+M:	Sinclair Yeh <syeh@vmware.com>
+M:	Thomas Hellstrom <thellstrom@vmware.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git git://people.freedesktop.org/~syeh/repos_linux
+T:	git git://people.freedesktop.org/~thomash/linux
+S:	Supported
+F:	drivers/gpu/drm/vmwgfx/
+F:	include/uapi/drm/vmwgfx_drm.h
+
+DRM DRIVERS FOR VC4
+M:	Eric Anholt <eric@anholt.net>
+T:	git git://github.com/anholt/linux
+S:	Supported
+F:	drivers/gpu/drm/vc4/
+F:	include/uapi/drm/vc4_drm.h
+F:	Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
 
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
@@ -4859,6 +4995,7 @@
 
 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M:	Jan Kiszka <jan.kiszka@siemens.com>
+M:	Kieran Bingham <kieran@bingham.xyz>
 S:	Supported
 F:	scripts/gdb/
 
@@ -5173,6 +5310,13 @@
 F:	include/linux/cciss_ioctl.h
 F:	include/uapi/linux/cciss_ioctl.h
 
+HFI1 DRIVER
+M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
+M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+F:	drivers/infiniband/hw/hfi1
+
 HFS FILESYSTEM
 L:	linux-fsdevel@vger.kernel.org
 S:	Orphan
@@ -5702,7 +5846,6 @@
 S:	Supported
 F:	Documentation/infiniband/
 F:	drivers/infiniband/
-F:	drivers/staging/rdma/
 F:	include/uapi/linux/if_infiniband.h
 F:	include/uapi/rdma/
 F:	include/rdma/
@@ -5961,6 +6104,14 @@
 F:	arch/x86/include/asm/intel_telemetry.h
 F:	drivers/platform/x86/intel_telemetry*
 
+INTEL PMC CORE DRIVER
+M:	Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+M:	Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+L:	platform-driver-x86@vger.kernel.org
+S:	Maintained
+F:	arch/x86/include/asm/pmc_core.h
+F:	drivers/platform/x86/intel_pmc_core*
+
 IOC3 ETHERNET DRIVER
 M:	Ralf Baechle <ralf@linux-mips.org>
 L:	linux-mips@linux-mips.org
@@ -6278,8 +6429,9 @@
 F:	scripts/kconfig/
 
 KDUMP
-M:	Vivek Goyal <vgoyal@redhat.com>
-M:	Haren Myneni <hbabu@us.ibm.com>
+M:	Dave Young <dyoung@redhat.com>
+M:	Baoquan He <bhe@redhat.com>
+R:	Vivek Goyal <vgoyal@redhat.com>
 L:	kexec@lists.infradead.org
 W:	http://lse.sourceforge.net/kdump/
 S:	Maintained
@@ -6356,6 +6508,7 @@
 F:	include/linux/kvm*
 F:	include/uapi/linux/kvm*
 F:	virt/kvm/
+F:	tools/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M:	Joerg Roedel <joro@8bytes.org>
@@ -6424,7 +6577,7 @@
 S:	Maintained
 F:	include/linux/kexec.h
 F:	include/uapi/linux/kexec.h
-F:	kernel/kexec.c
+F:	kernel/kexec*
 
 KEYS/KEYRINGS:
 M:	David Howells <dhowells@redhat.com>
@@ -6433,6 +6586,8 @@
 F:	Documentation/security/keys.txt
 F:	include/linux/key.h
 F:	include/linux/key-type.h
+F:	include/linux/keyctl.h
+F:	include/uapi/linux/keyctl.h
 F:	include/keys/
 F:	security/keys/
 
@@ -7005,6 +7160,8 @@
 M:	Russell King <rmk+kernel@armlinux.org.uk>
 S:	Maintained
 F:	drivers/gpu/drm/armada/
+F:	include/uapi/drm/armada_drm.h
+F:	Documentation/devicetree/bindings/display/armada/
 
 MARVELL 88E6352 DSA support
 M:	Guenter Roeck <linux@roeck-us.net>
@@ -7366,6 +7523,7 @@
 T:	git git://git.linux-mips.org/pub/scm/ralf/linux.git
 Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
 S:	Supported
+F:	Documentation/devicetree/bindings/mips/
 F:	Documentation/mips/
 F:	arch/mips/
 
@@ -7902,6 +8060,7 @@
 M:	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
 L:	linux-nilfs@vger.kernel.org
 W:	http://nilfs.sourceforge.net/
+W:	http://nilfs.osdn.jp/
 T:	git git://github.com/konis/nilfs2.git
 S:	Supported
 F:	Documentation/filesystems/nilfs2.txt
@@ -8743,6 +8902,7 @@
 F:	arch/*/kernel/*/*/perf_event*.c
 F:	arch/*/include/asm/perf_event.h
 F:	arch/*/kernel/perf_callchain.c
+F:	arch/*/events/*
 F:	tools/perf/
 
 PERSONALITY HANDLING
@@ -8787,6 +8947,7 @@
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/pinctrl/
 F:	drivers/pinctrl/
 F:	include/linux/pinctrl/
 
@@ -10771,12 +10932,6 @@
 S:	Odd Fixes
 F:	drivers/staging/xgifb/
 
-HFI1 DRIVER
-M:	Mike Marciniszyn <infinipath@intel.com>
-L:	linux-rdma@vger.kernel.org
-S:	Supported
-F:	drivers/staging/rdma/hfi1
-
 STARFIRE/DURALAN NETWORK DRIVER
 M:	Ion Badulescu <ionut@badula.org>
 S:	Odd Fixes
@@ -11157,6 +11312,7 @@
 
 TI BANDGAP AND THERMAL DRIVER
 M:	Eduardo Valentin <edubezval@gmail.com>
+M:	Keerthy <j-keerthy@ti.com>
 L:	linux-pm@vger.kernel.org
 L:	linux-omap@vger.kernel.org
 S:	Maintained
@@ -12207,6 +12363,7 @@
 W:	http://www.linux-watchdog.org/
 T:	git git://www.linux-watchdog.org/linux-watchdog.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/watchdog/
 F:	Documentation/watchdog/
 F:	drivers/watchdog/
 F:	include/linux/watchdog.h
diff --git a/Makefile b/Makefile
index 0f9cb36..8d1301a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
-PATCHLEVEL = 6
+PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Charred Weasel
+EXTRAVERSION = -rc2
+NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -128,6 +128,10 @@
 # Cancel implicit rules on top Makefile
 $(CURDIR)/Makefile Makefile: ;
 
+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
+  $(error main directory cannot contain spaces nor colons)
+endif
+
 ifneq ($(KBUILD_OUTPUT),)
 # Invoke a second make in the output directory, passing relevant variables
 # check that the output directory actually exists
@@ -142,7 +146,7 @@
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
 	@:
 
-sub-make: FORCE
+sub-make:
 	$(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
 	-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
@@ -364,7 +368,7 @@
 LDFLAGS_MODULE  =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage
+CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV	= -fsanitize-coverage=trace-pc
 
 
@@ -617,7 +621,11 @@
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
-KBUILD_CFLAGS	+= -O2
+ifdef CONFIG_PROFILE_ALL_BRANCHES
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
+else
+KBUILD_CFLAGS   += -O2
+endif
 endif
 
 # Tell gcc to never replace conditional load with a non-conditional one
@@ -697,9 +705,10 @@
 KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
 else
 
-# This warning generated too much noise in a regular build.
-# Use make W=1 to enable this warning (see scripts/Makefile.build)
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.build)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 endif
 
 ifdef CONFIG_FRAME_POINTER
@@ -926,27 +935,41 @@
 
 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
 
-# Final link of vmlinux
-      cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
-quiet_cmd_link-vmlinux = LINK    $@
-
-# Include targets which we want to
-# execute if the rest of the kernel build went well.
-vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
+# Include targets which we want to execute sequentially if the rest of the
+# kernel build went well. If CONFIG_TRIM_UNUSED_KSYMS is set, this might be
+# evaluated more than once.
+PHONY += vmlinux_prereq
+vmlinux_prereq: $(vmlinux-deps) FORCE
 ifdef CONFIG_HEADERS_CHECK
 	$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
 endif
-ifdef CONFIG_SAMPLES
-	$(Q)$(MAKE) $(build)=samples
-endif
 ifdef CONFIG_BUILD_DOCSRC
 	$(Q)$(MAKE) $(build)=Documentation
 endif
 ifdef CONFIG_GDB_SCRIPTS
 	$(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py
 endif
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
+	  "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq"
+endif
+
+# standalone target for easier testing
+include/generated/autoksyms.h: FORCE
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true
+
+# Final link of vmlinux
+      cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
+quiet_cmd_link-vmlinux = LINK    $@
+
+vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
 	+$(call if_changed,link-vmlinux)
 
+# Build samples along with the rest of the kernel
+ifdef CONFIG_SAMPLES
+vmlinux-dirs += samples
+endif
+
 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
@@ -998,10 +1021,12 @@
 prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
                    include/config/auto.conf
 	$(cmd_crmodverdir)
+	$(Q)test -e include/generated/autoksyms.h || \
+	    touch   include/generated/autoksyms.h
 
 archprepare: archheaders archscripts prepare1 scripts_basic
 
-prepare0: archprepare FORCE
+prepare0: archprepare
 	$(Q)$(MAKE) $(build)=.
 
 # All the preparing..
@@ -1061,7 +1086,7 @@
 export INSTALL_FW_PATH
 
 PHONY += firmware_install
-firmware_install: FORCE
+firmware_install:
 	@mkdir -p $(objtree)/firmware
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install
 
@@ -1081,7 +1106,7 @@
 archscripts:
 
 PHONY += __headers
-__headers: $(version_h) scripts_basic asm-generic archheaders archscripts FORCE
+__headers: $(version_h) scripts_basic asm-generic archheaders archscripts
 	$(Q)$(MAKE) $(build)=scripts build_unifdef
 
 PHONY += headers_install_all
@@ -1192,7 +1217,8 @@
 # Modules not configured
 # ---------------------------------------------------------------------------
 
-modules modules_install: FORCE
+PHONY += modules modules_install
+modules modules_install:
 	@echo >&2
 	@echo >&2 "The present kernel configuration has modules disabled."
 	@echo >&2 "Type 'make config' and enable loadable module support."
@@ -1283,6 +1309,7 @@
 board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
 board-dirs := $(sort $(notdir $(board-dirs:/=)))
 
+PHONY += help
 help:
 	@echo  'Cleaning targets:'
 	@echo  '  clean		  - Remove most generated files but keep the config and'
@@ -1453,6 +1480,7 @@
 clean:	rm-dirs := $(MODVERDIR)
 clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers
 
+PHONY += help
 help:
 	@echo  '  Building external modules.'
 	@echo  '  Syntax: make -C path/to/kernel/src M=$$PWD target'
diff --git a/arch/Kconfig b/arch/Kconfig
index b16e74e..d794384 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -598,6 +598,14 @@
 	  Architecture supports the 'objtool check' host tool command, which
 	  performs compile-time stack metadata validation.
 
+config HAVE_ARCH_HASH
+	bool
+	default n
+	help
+	  If this is set, the architecture provides an <asm/hash.h>
+	  file which provides platform-specific implementations of some
+	  functions in <linux/hash.h> or fs/namei.c.
+
 #
 # ABI hall of shame
 #
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0dcbacf..0d3e59f 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -61,7 +61,7 @@
 	def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-	def_bool y
+	def_bool n
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
@@ -186,9 +186,6 @@
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-	def_bool n
-
 config ARC_MCIP
 	bool "ARConnect Multicore IP (MCIP) Support "
 	depends on ISA_ARCV2
@@ -366,25 +363,10 @@
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	bool "Setup Timer IRQ as high Priority"
 	default n
-	# Timer HAS to be high priority, for any other high priority config
-	select ARC_IRQ3_LV2
 	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-	bool
-
-config ARC_IRQ5_LV2
-	bool
-
-config ARC_IRQ6_LV2
-	bool
-
-endif	#ARC_COMPACT_IRQ_LEVELS
+	depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
 	bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@
 	default y
 	depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-	bool "Workaround for llock/scond livelock"
-	default n
-	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -471,7 +448,7 @@
 
 config HIGHMEM
 	bool "High Memory Support"
-	select DISCONTIGMEM
+	select ARCH_DISCONTIGMEM_ENABLE
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 02fabef..d4df6be 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -127,7 +127,7 @@
 
 boot		:= arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE	:= bootpImage
 
 all:	$(KBUILD_IMAGE)
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 3942634..02410b2 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -23,8 +23,6 @@
 
 
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
 
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index b046722..f9e7686 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -23,8 +23,6 @@
 
 
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
 
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 40bcecf..6ae2c47 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -15,7 +15,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <750000000>;	/* 750 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 
@@ -101,8 +100,26 @@
 	memory {
 		#address-cells = <1>;
 		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x40000000>;
+		ranges = <0x00000000 0x80000000 0x20000000>;
 		device_type = "memory";
-		reg = <0x80000000 0x20000000>;	/* 512MiB */
+		reg = <0x80000000 0x1b000000>;	/* (512 - 32) MiB */
+	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		/*
+		 * We just move the frame buffer area to the very end of
+		 * available DDR. Even though in the case of ARC770 there's
+		 * no strict requirement for a frame buffer to be in any
+		 * particular location, it allows us to use the same
+		 * base board's DT node for ARC PGU as for ARC HS38.
+		 */
+		frame_buffer: frame_buffer@9e000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x9e000000 0x2000000>;
+			no-map;
+		};
 	};
 };
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index cabe0de..14df46f 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
@@ -108,4 +107,18 @@
 		device_type = "memory";
 		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		/*
+		 * Move frame buffer out of IOC aperture (0x8z-0xAz).
+		 */
+		frame_buffer: frame_buffer@be000000 {
+			compatible = "shared-dma-pool";
+			reg = <0xbe000000 0x2000000>;
+			no-map;
+		};
+	};
 };
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index ed1674b..3d6cfa3 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
@@ -131,4 +130,18 @@
 		device_type = "memory";
 		reg = <0x80000000 0x20000000>;	/* 512MiB */
 	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		/*
+		 * Move frame buffer out of IOC aperture (0x8z-0xAz).
+		 */
+		frame_buffer: frame_buffer@be000000 {
+			compatible = "shared-dma-pool";
+			reg = <0xbe000000 0x2000000>;
+			no-map;
+		};
+	};
 };
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 68c84a2..d6c1bbc 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -47,6 +47,12 @@
 				clock-frequency = <50000000>;
 				#clock-cells = <0>;
 			};
+
+			pguclk: pguclk {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <74440000>;
+			};
 		};
 
 		ethernet@0x18000 {
@@ -160,6 +166,37 @@
 			clocks = <&i2cclk>;
 			interrupts = <16>;
 
+			adv7511:adv7511@39{
+				compatible="adi,adv7511";
+				reg = <0x39>;
+				interrupts = <23>;
+				adi,input-depth = <8>;
+				adi,input-colorspace = "rgb";
+				adi,input-clock = "1x";
+				adi,clock-delay = <0x03>;
+
+				ports {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					/* RGB/YUV input */
+					port@0 {
+						reg = <0>;
+						adv7511_input:endpoint {
+						remote-endpoint = <&pgu_output>;
+						};
+					};
+
+					/* HDMI output */
+					port@1 {
+						reg = <1>;
+						adv7511_output: endpoint {
+							remote-endpoint = <&hdmi_connector_in>;
+						};
+					};
+				};
+			};
+
 			eeprom@0x54{
 				compatible = "24c01";
 				reg = <0x54>;
@@ -173,6 +210,16 @@
 			};
 		};
 
+		hdmi0: connector {
+			compatible = "hdmi-connector";
+			type = "a";
+			port {
+				hdmi_connector_in: endpoint {
+					remote-endpoint = <&adv7511_output>;
+				};
+			};
+		};
+
 		gpio0:gpio@13000 {
 			compatible = "snps,dw-apb-gpio";
 			reg = <0x13000 0x1000>;
@@ -234,5 +281,19 @@
 				reg = <2>;
 			};
 		};
+
+		pgu@17000 {
+			compatible = "snps,arcpgu";
+			reg = <0x17000 0x400>;
+			encoder-slave = <&adv7511>;
+			clocks = <&pguclk>;
+			clock-names = "pxlclk";
+			memory-region = <&frame_buffer>;
+			port {
+				pgu_output: endpoint {
+					remote-endpoint = <&adv7511_input>;
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts
index b89f6c3..1e0d225 100644
--- a/arch/arc/boot/dts/eznps.dts
+++ b/arch/arc/boot/dts/eznps.dts
@@ -18,7 +18,6 @@
 
 / {
 	compatible = "ezchip,arc-nps";
-	clock-frequency = <83333333>;	/* 83.333333 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&intc>;
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5d5e373..6397051 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsim";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index b5b060a..763d66c 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 325e730..4eb97c5 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index ee03d71..853f897 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -11,7 +11,6 @@
 
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <5000000>;	/* 5 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 3a10cc6..65808fe 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -13,7 +13,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 71fd308..2dfe803 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -8,7 +8,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index d1cb25a..4c11079 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -8,7 +8,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index ad4ee43..0fd6ba9 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -14,7 +14,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index a3cb626..82214cd 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -15,7 +15,6 @@
 
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbb..dd68399 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay = 1, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bz	4f			\n"				\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"4: ; --- success ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "+&r" (delay),[tmp] "=&r"	(tmp)				\
-
-#else	/* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"	bnz     1b			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int val;				                \
-	SCOND_FAIL_RETRY_VAR_DEF                                        \
+	unsigned int val;						\
 									\
 	__asm__ __volatile__(						\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
@@ -77,8 +44,7 @@
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int val;				                \
-	SCOND_FAIL_RETRY_VAR_DEF                                        \
+	unsigned int val;						\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -90,11 +56,8 @@
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val)						\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter),					\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf..14c310f 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
 	 * We need to be a bit more cautious here. What if a kernel bug in
 	 * L1 ISR, caused SP to go whaco (some small value which looks like
 	 * USER stk) and then we take L2 ISR.
-	 * Above brlo alone would treat it as a valid L1-L2 sceanrio
-	 * instead of shouting alound
+	 * Above brlo alone would treat it as a valid L1-L2 scenario
+	 * instead of shouting around
 	 * The only feasible way is to make sure this L2 happened in
 	 * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
 	 * L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467e..b0b87f2 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@
 		local_flush_tlb_all();
 
 		/*
-		 * Above checke for rollover of 8 bit ASID in 32 bit container.
+		 * Above check for rollover of 8 bit ASID in 32 bit container.
 		 * If the container itself wrapped around, set it to a non zero
 		 * "generation" to distinguish from no context
 		 */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc..858f98e 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  *      seperate PD0 and PD1, which combined forms a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994..16b630f 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)		(tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 9913804..89fdd1b 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@
  * (1) These insn were introduced only in 4.10 release. So for older released
  *	support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *	gaurantted by the platform (not something which core handles).
  *	Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *	disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c4..cded4a9 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"					\n"				\
-	"4: ; --- done ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 * 	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
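For reference, the removed CONFIG_ARC_STAR_9000923308 variants retried a failed SCOND after an exponentially growing delay. A minimal C sketch of that backoff idea follows; the function and helper choices are illustrative only, not the actual ARC implementation:

#include <linux/atomic.h>

/*
 * Illustrative rendering of the removed SCOND_FAIL_RETRY_* logic: on a
 * failed acquire, spin for 'delay' iterations, double the delay, and
 * retry until the store-conditional succeeds, giving the coherency
 * pipeline time to drain under contention.
 */
static inline void example_lock_with_backoff(atomic_t *lock)
{
	unsigned int delay = 1;
	unsigned int t;

	for (;;) {
		/* stands in for the LLOCK/SCOND acquire sequence */
		if (atomic_cmpxchg(lock, 0, 1) == 0)
			return;			/* got the lock */

		for (t = delay; t; t--)
			cpu_relax();		/* back off */

		delay <<= 1;			/* double and retry */
	}
}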
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af6745..2d79e52 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */
 
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da603..a78d567 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
 #define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want to do:
  * 	(start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a..71f3918 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
 	__tmp ^ __in;						\
 })
 
-#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)						\
 ({									\
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 39e58d1..41fa2ec 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -15,6 +15,7 @@
 #if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
+#define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_SYS_EXECVE
 #define __ARCH_WANT_SYS_CLONE
 #define __ARCH_WANT_SYS_VFORK
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 0cb0aba..98812c1 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -91,27 +91,13 @@
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-.rept   25
+.rept   28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
 
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index c5cceca..ce9deb9 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -28,10 +28,8 @@
 {
 	int level_mask = 0;
 
-       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+       /* Is the timer a high priority Interrupt (Level2 in ARCompact jargon) */
+	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
 	/*
 	 * Write to register, even if no LV2 IRQs configured to reset it
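With a single Kconfig option, IS_ENABLED() keeps the mask computation free of #ifdefs; a worked illustration of that expansion (nothing here beyond the hunk above, the 1/0 values are just what the macro evaluates to):

/*
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 when CONFIG_FOO is =y or =m and
 * to 0 otherwise, so
 *
 *	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 *
 * reduces to "level_mask |= 1 << TIMER0_IRQ" when the option is set and
 * to the no-op "level_mask |= 0 << TIMER0_IRQ" when it is not.
 */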
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8b134cf..08f03d9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -48,7 +48,7 @@
 static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
-	struct perf_callchain_entry *entry = ctrl->perf_stuff;
+	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -69,7 +69,7 @@
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -108,7 +108,7 @@
 	int64_t delta = new_raw_count - prev_raw_count;
 
 	/*
-	 * We don't afaraid of hwc->prev_count changing beneath our feet
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
 	 * because there's no way for us to re-enter this function anytime.
 	 */
 	local64_set(&hwc->prev_count, new_raw_count);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index f63b8bf..2ee7a4d 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -392,7 +392,7 @@
 		/*
 		 * If we are here, it is established that @uboot_arg didn't
 		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * Appent to embedded DT cmdline.
+		 * append to embedded DT cmdline.
 		 * setup_machine_fdt() would have populated @boot_command_line
 		 */
 		if (uboot_tag == 1) {
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 004b7f0..6cb3736 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -34,7 +34,7 @@
  *  -ViXS were still seeing crashes when using insmod to load drivers.
  *   It turned out that the code to change Execute permssions for TLB entries
  *   of user was not guarded for interrupts (mod_tlb_permission)
- *   This was cauing TLB entries to be overwritten on unrelated indexes
+ *   This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a6f91e8..934150e 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -276,7 +276,7 @@
 	return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
 				char __user *user_buf,	/* user buffer */
 				size_t len,		/* length of buffer */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9e5eddb..5a294b2 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -215,7 +215,7 @@
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@
 
 	/*
 	 * This is technically for MMU v4, using the MMU v3 programming model
-	 * Special work for HS38 aliasing I-cache configuratino with PAE40
+	 * Special work for HS38 aliasing I-cache configuration with PAE40
 	 *   - upper 8 bits of paddr need to be written into PTAG_HI
 	 *   - (and needs to be written before the lower 32 bits)
 	 * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@
 			      ic->ver, CONFIG_ARC_MMU_VER);
 
 		/*
-		 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
 		 */
 		if (is_isa_arcv2() && ic->alias)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c8e36f..73d7e4c 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 446705a..5be33a2 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -82,7 +82,6 @@
 
 $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/bootp $@
-	@:
 
 $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile
index 5761f00..5e4acd2 100644
--- a/arch/arm/boot/bootp/Makefile
+++ b/arch/arm/boot/bootp/Makefile
@@ -17,7 +17,6 @@
 # Note that bootp.lds picks up kernel.o and initrd.o
 $(obj)/bootp:	$(src)/bootp.lds $(addprefix $(obj)/,init.o kernel.o initrd.o) FORCE
 	$(call if_changed,ld)
-	@:
 
 # kernel.o and initrd.o includes a binary image using
 # .incbin, a dependency which is not tracked automatically
@@ -26,4 +25,4 @@
 
 $(obj)/initrd.o: $(INITRD) FORCE
 
-PHONY += $(INITRD) FORCE
+PHONY += $(INITRD)
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 0f89d87..06b6c2d 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -399,6 +399,7 @@
 	imx6ul-tx6ul-mainboard.dtb
 dtb-$(CONFIG_SOC_IMX7D) += \
 	imx7d-cl-som-imx7.dtb \
+	imx7d-nitrogen7.dtb \
 	imx7d-sbc-imx7.dtb \
 	imx7d-sdb.dtb
 dtb-$(CONFIG_SOC_LS1021A) += \
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 267f81a..8c89062 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -14,6 +14,7 @@
 
 /dts-v1/;
 #include "exynos3250.dtsi"
+#include "exynos4412-ppmu-common.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/clock/samsung,s2mps11.h>
@@ -156,6 +157,12 @@
 	};
 };
 
+&bus_dmc {
+	devfreq-events = <&ppmu_dmc0_3>, <&ppmu_dmc1_3>;
+	vdd-supply = <&buck1_reg>;
+	status = "okay";
+};
+
 &cpu0 {
 	cpu0-supply = <&buck2_reg>;
 };
@@ -458,46 +465,6 @@
 	status = "okay";
 };
 
-&ppmu_dmc0 {
-	status = "okay";
-
-	events {
-		ppmu_dmc0_3: ppmu-event3-dmc0 {
-			event-name = "ppmu-event3-dmc0";
-		};
-	};
-};
-
-&ppmu_dmc1 {
-	status = "okay";
-
-	events {
-		ppmu_dmc1_3: ppmu-event3-dmc1 {
-			event-name = "ppmu-event3-dmc1";
-		};
-	};
-};
-
-&ppmu_leftbus {
-	status = "okay";
-
-	events {
-		ppmu_leftbus_3: ppmu-event3-leftbus {
-			event-name = "ppmu-event3-leftbus";
-		};
-	};
-};
-
-&ppmu_rightbus {
-	status = "okay";
-
-	events {
-		ppmu_rightbus_3: ppmu-event3-rightbus {
-			event-name = "ppmu-event3-rightbus";
-		};
-	};
-};
-
 &xusbxti {
 	clock-frequency = <24000000>;
 };
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 31eb09b..e4228195 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -14,6 +14,7 @@
 
 /dts-v1/;
 #include "exynos3250.dtsi"
+#include "exynos4412-ppmu-common.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/clock/samsung,s2mps11.h>
@@ -147,6 +148,53 @@
 	};
 };
 
+&bus_dmc {
+	devfreq-events = <&ppmu_dmc0_3>, <&ppmu_dmc1_3>;
+	vdd-supply = <&buck1_reg>;
+	status = "okay";
+};
+
+&bus_leftbus {
+	devfreq-events = <&ppmu_leftbus_3>, <&ppmu_rightbus_3>;
+	vdd-supply = <&buck3_reg>;
+	status = "okay";
+};
+
+&bus_rightbus {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_lcd0 {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_fsys {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_mcuisp {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_isp {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_peril {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_mfc {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
 &cpu0 {
 	cpu0-supply = <&buck2_reg>;
 };
@@ -635,46 +683,6 @@
 	status = "okay";
 };
 
-&ppmu_dmc0 {
-	status = "okay";
-
-	events {
-		ppmu_dmc0_3: ppmu-event3-dmc0 {
-			event-name = "ppmu-event3-dmc0";
-		};
-	};
-};
-
-&ppmu_dmc1 {
-	status = "okay";
-
-	events {
-		ppmu_dmc1_3: ppmu-event3-dmc1 {
-			event-name = "ppmu-event3-dmc1";
-		};
-	};
-};
-
-&ppmu_leftbus {
-	status = "okay";
-
-	events {
-		ppmu_leftbus_3: ppmu-event3-leftbus {
-			event-name = "ppmu-event3-leftbus";
-		};
-	};
-};
-
-&ppmu_rightbus {
-	status = "okay";
-
-	events {
-		ppmu_rightbus_3: ppmu-event3-rightbus {
-			event-name = "ppmu-event3-rightbus";
-		};
-	};
-};
-
 &xusbxti {
 	clock-frequency = <24000000>;
 };
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 094782b..62f3dcd 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -713,6 +713,187 @@
 			clock-names = "ppmu";
 			status = "disabled";
 		};
+
+		bus_dmc: bus_dmc {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu_dmc CLK_DIV_DMC>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_dmc_opp_table>;
+			status = "disabled";
+		};
+
+		bus_dmc_opp_table: opp_table1 {
+			compatible = "operating-points-v2";
+			opp-shared;
+
+			opp@50000000 {
+				opp-hz = /bits/ 64 <50000000>;
+				opp-microvolt = <800000>;
+			};
+			opp@100000000 {
+				opp-hz = /bits/ 64 <100000000>;
+				opp-microvolt = <800000>;
+			};
+			opp@134000000 {
+				opp-hz = /bits/ 64 <134000000>;
+				opp-microvolt = <800000>;
+			};
+			opp@200000000 {
+				opp-hz = /bits/ 64 <200000000>;
+				opp-microvolt = <825000>;
+			};
+			opp@400000000 {
+				opp-hz = /bits/ 64 <400000000>;
+				opp-microvolt = <875000>;
+			};
+		};
+
+		bus_leftbus: bus_leftbus {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_GDL>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_leftbus_opp_table>;
+			status = "disabled";
+		};
+
+		bus_rightbus: bus_rightbus {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_GDR>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_leftbus_opp_table>;
+			status = "disabled";
+		};
+
+		bus_lcd0: bus_lcd0 {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_ACLK_160>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_leftbus_opp_table>;
+			status = "disabled";
+		};
+
+		bus_fsys: bus_fsys {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_ACLK_200>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_leftbus_opp_table>;
+			status = "disabled";
+		};
+
+		bus_mcuisp: bus_mcuisp {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_ACLK_400_MCUISP>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_mcuisp_opp_table>;
+			status = "disabled";
+		};
+
+		bus_isp: bus_isp {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_ACLK_266>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_isp_opp_table>;
+			status = "disabled";
+		};
+
+		bus_peril: bus_peril {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_DIV_ACLK_100>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_peril_opp_table>;
+			status = "disabled";
+		};
+
+		bus_mfc: bus_mfc {
+			compatible = "samsung,exynos-bus";
+			clocks = <&cmu CLK_SCLK_MFC>;
+			clock-names = "bus";
+			operating-points-v2 = <&bus_leftbus_opp_table>;
+			status = "disabled";
+		};
+
+		bus_leftbus_opp_table: opp_table2 {
+			compatible = "operating-points-v2";
+			opp-shared;
+
+			opp@50000000 {
+				opp-hz = /bits/ 64 <50000000>;
+				opp-microvolt = <900000>;
+			};
+			opp@80000000 {
+				opp-hz = /bits/ 64 <80000000>;
+				opp-microvolt = <900000>;
+			};
+			opp@100000000 {
+				opp-hz = /bits/ 64 <100000000>;
+				opp-microvolt = <1000000>;
+			};
+			opp@134000000 {
+				opp-hz = /bits/ 64 <134000000>;
+				opp-microvolt = <1000000>;
+			};
+			opp@200000000 {
+				opp-hz = /bits/ 64 <200000000>;
+				opp-microvolt = <1000000>;
+			};
+		};
+
+		bus_mcuisp_opp_table: opp_table3 {
+			compatible = "operating-points-v2";
+			opp-shared;
+
+			opp@50000000 {
+				opp-hz = /bits/ 64 <50000000>;
+			};
+			opp@80000000 {
+				opp-hz = /bits/ 64 <80000000>;
+			};
+			opp@100000000 {
+				opp-hz = /bits/ 64 <100000000>;
+			};
+			opp@200000000 {
+				opp-hz = /bits/ 64 <200000000>;
+			};
+			opp@400000000 {
+				opp-hz = /bits/ 64 <400000000>;
+			};
+		};
+
+		bus_isp_opp_table: opp_table4 {
+			compatible = "operating-points-v2";
+			opp-shared;
+
+			opp@50000000 {
+				opp-hz = /bits/ 64 <50000000>;
+			};
+			opp@80000000 {
+				opp-hz = /bits/ 64 <80000000>;
+			};
+			opp@100000000 {
+				opp-hz = /bits/ 64 <100000000>;
+			};
+			opp@200000000 {
+				opp-hz = /bits/ 64 <200000000>;
+			};
+			opp@300000000 {
+				opp-hz = /bits/ 64 <300000000>;
+			};
+		};
+
+		bus_peril_opp_table: opp_table5 {
+			compatible = "operating-points-v2";
+			opp-shared;
+
+			opp@50000000 {
+				opp-hz = /bits/ 64 <50000000>;
+			};
+			opp@80000000 {
+				opp-hz = /bits/ 64 <80000000>;
+			};
+			opp@100000000 {
+				opp-hz = /bits/ 64 <100000000>;
+			};
+		};
 	};
 };
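For context, a minimal consumer-side sketch of how such an operating-points-v2 table is ingested through the generic OPP library; the function name and error handling are illustrative and not part of this patch (the real consumer is the exynos-bus devfreq driver):

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Illustrative only: parse the operating-points-v2 table referenced by
 * a bus node and report how many frequency/voltage pairs it carries. */
static int example_parse_bus_opp_table(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_of_add_table(dev);	/* walk the DT OPP table */
	if (ret)
		return ret;

	dev_info(dev, "registered %d bus OPPs\n",
		 dev_pm_opp_get_opp_count(dev));
	return 0;
}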
 
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index c1cb8df..2d9b029 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -257,6 +257,165 @@
 		power-domains = <&pd_lcd1>;
 		#iommu-cells = <0>;
 	};
+
+	bus_dmc: bus_dmc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_DMC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_dmc_opp_table>;
+		status = "disabled";
+	};
+
+	bus_acp: bus_acp {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_ACP>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_acp_opp_table>;
+		status = "disabled";
+	};
+
+	bus_peri: bus_peri {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK100>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_peri_opp_table>;
+		status = "disabled";
+	};
+
+	bus_fsys: bus_fsys {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK133>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_fsys_opp_table>;
+		status = "disabled";
+	};
+
+	bus_display: bus_display {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK160>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_display_opp_table>;
+		status = "disabled";
+	};
+
+	bus_lcd0: bus_lcd0 {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK200>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_leftbus: bus_leftbus {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_GDL>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_rightbus: bus_rightbus {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_GDR>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_mfc: bus_mfc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_SCLK_MFC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_dmc_opp_table: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+			opp-microvolt = <1025000>;
+		};
+		opp@267000000 {
+			opp-hz = /bits/ 64 <267000000>;
+			opp-microvolt = <1050000>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = <1150000>;
+		};
+	};
+
+	bus_acp_opp_table: opp_table2 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+		};
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
+
+	bus_peri_opp_table: opp_table3 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@5000000 {
+			opp-hz = /bits/ 64 <5000000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+	};
+
+	bus_fsys_opp_table: opp_table4 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@10000000 {
+			opp-hz = /bits/ 64 <10000000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+		};
+	};
+
+	bus_display_opp_table: opp_table5 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+		};
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+		};
+	};
+
+	bus_leftbus_opp_table: opp_table6 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
 };
 
 &gic {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index cab0f07..ec7619a 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -11,6 +11,7 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/clock/maxim,max77686.h>
 #include "exynos4412.dtsi"
+#include "exynos4412-ppmu-common.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
@@ -108,6 +109,53 @@
 	};
 };
 
+&bus_dmc {
+	devfreq-events = <&ppmu_dmc0_3>, <&ppmu_dmc1_3>;
+	vdd-supply = <&buck1_reg>;
+	status = "okay";
+};
+
+&bus_acp {
+	devfreq = <&bus_dmc>;
+	status = "okay";
+};
+
+&bus_c2c {
+	devfreq = <&bus_dmc>;
+	status = "okay";
+};
+
+&bus_leftbus {
+	devfreq-events = <&ppmu_leftbus_3>, <&ppmu_rightbus_3>;
+	vdd-supply = <&buck3_reg>;
+	status = "okay";
+};
+
+&bus_rightbus {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_display {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_fsys {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_peri {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_mfc {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
 &cpu0 {
 	cpu0-supply = <&buck2_reg>;
 };
@@ -359,8 +407,8 @@
 
 			buck1_reg: BUCK1 {
 				regulator-name = "vdd_mif";
-				regulator-min-microvolt = <1000000>;
-				regulator-max-microvolt = <1000000>;
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1100000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
@@ -375,8 +423,8 @@
 
 			buck3_reg: BUCK3 {
 				regulator-name = "vdd_int";
-				regulator-min-microvolt = <1000000>;
-				regulator-max-microvolt = <1000000>;
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1050000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
diff --git a/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi b/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi
new file mode 100644
index 0000000..16e4b77
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi
@@ -0,0 +1,50 @@
+/*
+ * Device tree sources for Exynos4412 PPMU common device tree
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&ppmu_dmc0 {
+       status = "okay";
+
+       events {
+	       ppmu_dmc0_3: ppmu-event3-dmc0 {
+		       event-name = "ppmu-event3-dmc0";
+	       };
+       };
+};
+
+&ppmu_dmc1 {
+       status = "okay";
+
+       events {
+	       ppmu_dmc1_3: ppmu-event3-dmc1 {
+		       event-name = "ppmu-event3-dmc1";
+	       };
+       };
+};
+
+&ppmu_leftbus {
+       status = "okay";
+
+       events {
+	       ppmu_leftbus_3: ppmu-event3-leftbus {
+		       event-name = "ppmu-event3-leftbus";
+	       };
+       };
+};
+
+&ppmu_rightbus {
+       status = "okay";
+
+       events {
+	       ppmu_rightbus_3: ppmu-event3-rightbus {
+		       event-name = "ppmu-event3-rightbus";
+	       };
+       };
+};
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 5d1eaea..9336fd4 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -14,6 +14,7 @@
 
 /dts-v1/;
 #include "exynos4412.dtsi"
+#include "exynos4412-ppmu-common.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/clock/maxim,max77686.h>
@@ -288,6 +289,53 @@
 	status = "okay";
 };
 
+&bus_dmc {
+	devfreq-events = <&ppmu_dmc0_3>, <&ppmu_dmc1_3>;
+	vdd-supply = <&buck1_reg>;
+	status = "okay";
+};
+
+&bus_acp {
+	devfreq = <&bus_dmc>;
+	status = "okay";
+};
+
+&bus_c2c {
+	devfreq = <&bus_dmc>;
+	status = "okay";
+};
+
+&bus_leftbus {
+	devfreq-events = <&ppmu_leftbus_3>, <&ppmu_rightbus_3>;
+	vdd-supply = <&buck3_reg>;
+	status = "okay";
+};
+
+&bus_rightbus {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_display {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_fsys {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_peri {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_mfc {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
 &cpu0 {
 	cpu0-supply = <&buck2_reg>;
 };
@@ -871,46 +919,6 @@
 	assigned-clock-parents =  <&clock CLK_XUSBXTI>;
 };
 
-&ppmu_dmc0 {
-	status = "okay";
-
-	events {
-		ppmu_dmc0_3: ppmu-event3-dmc0 {
-			event-name = "ppmu-event3-dmc0";
-		};
-	};
-};
-
-&ppmu_dmc1 {
-	status = "okay";
-
-	events {
-		ppmu_dmc1_3: ppmu-event3-dmc1 {
-			event-name = "ppmu-event3-dmc1";
-		};
-	};
-};
-
-&ppmu_leftbus {
-	status = "okay";
-
-	events {
-		ppmu_leftbus_3: ppmu-event3-leftbus {
-			event-name = "ppmu-event3-leftbus";
-		};
-	};
-};
-
-&ppmu_rightbus {
-	status = "okay";
-
-	events {
-		ppmu_rightbus_3: ppmu-event3-rightbus {
-			event-name = "ppmu-event3-rightbus";
-		};
-	};
-};
-
 &pinctrl_0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sleep0>;
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index b7490ea0..c452499 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -281,6 +281,180 @@
 		clocks = <&clock CLK_SMMU_LITE1>, <&clock CLK_FIMC_LITE1>;
 		#iommu-cells = <0>;
 	};
+
+	bus_dmc: bus_dmc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_DMC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_dmc_opp_table>;
+		status = "disabled";
+	};
+
+	bus_acp: bus_acp {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_ACP>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_acp_opp_table>;
+		status = "disabled";
+	};
+
+	bus_c2c: bus_c2c {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_C2C>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_dmc_opp_table>;
+		status = "disabled";
+	};
+
+	bus_dmc_opp_table: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@267000000 {
+			opp-hz = /bits/ 64 <267000000>;
+			opp-microvolt = <950000>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = <1050000>;
+		};
+	};
+
+	bus_acp_opp_table: opp_table2 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+		};
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+		};
+		opp@267000000 {
+			opp-hz = /bits/ 64 <267000000>;
+		};
+	};
+
+	bus_leftbus: bus_leftbus {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_GDL>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_rightbus: bus_rightbus {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DIV_GDR>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_display: bus_display {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK160>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_display_opp_table>;
+		status = "disabled";
+	};
+
+	bus_fsys: bus_fsys {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK133>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_fsys_opp_table>;
+		status = "disabled";
+	};
+
+	bus_peri: bus_peri {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_ACLK100>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_peri_opp_table>;
+		status = "disabled";
+	};
+
+	bus_mfc: bus_mfc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_SCLK_MFC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_leftbus_opp_table: opp_table3 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+			opp-microvolt = <925000>;
+		};
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+			opp-microvolt = <950000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+			opp-microvolt = <1000000>;
+		};
+	};
+
+	bus_display_opp_table: opp_table4 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@160000000 {
+			opp-hz = /bits/ 64 <160000000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
+
+	bus_fsys_opp_table: opp_table5 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+		};
+	};
+
+	bus_peri_opp_table: opp_table6 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@50000000 {
+			opp-hz = /bits/ 64 <50000000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+	};
 };
 
 &combiner {
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 1e25152..85dad29 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -124,8 +124,6 @@
 &dp {
 	status = "okay";
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <4>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 0e2eb3f..b7992b1 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -80,8 +80,6 @@
 
 &dp {
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <4>;
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index c9889b1..ddfe1f5 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -236,12 +236,10 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&dp_hpd>;
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <2>;
-	samsung,hpd-gpio = <&gpx0 7 GPIO_ACTIVE_HIGH>;
+	hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
 
 	ports {
 		port0 {
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index 273d662..ac291f5 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -74,12 +74,10 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&dp_hpd_gpio>;
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <1>;
-	samsung,hpd-gpio = <&gpc3 0 GPIO_ACTIVE_HIGH>;
+	hpd-gpios = <&gpc3 0 GPIO_ACTIVE_HIGH>;
 };
 
 &ehci {
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 8811e17..f9d2e4f 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -157,12 +157,10 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&dp_hpd_gpio>;
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x06>;
 	samsung,lane-count = <2>;
-	samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
+	hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
 
 	ports {
 		port0 {
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index 9b77940..2e748d1 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -102,8 +102,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&dp_hpd>;
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <4>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 4c85234..c6e05eb 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -294,6 +294,42 @@
 		};
 	};
 
+	nocp_mem0_0: nocp@10CA1000 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x10CA1000 0x200>;
+		status = "disabled";
+	};
+
+	nocp_mem0_1: nocp@10CA1400 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x10CA1400 0x200>;
+		status = "disabled";
+	};
+
+	nocp_mem1_0: nocp@10CA1800 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x10CA1800 0x200>;
+		status = "disabled";
+	};
+
+	nocp_mem1_1: nocp@10CA1C00 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x10CA1C00 0x200>;
+		status = "disabled";
+	};
+
+	nocp_g3d_0: nocp@11A51000 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x11A51000 0x200>;
+		status = "disabled";
+	};
+
+	nocp_g3d_1: nocp@11A51400 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x11A51400 0x200>;
+		status = "disabled";
+	};
+
 	gsc_pd: power-domain@10044000 {
 		compatible = "samsung,exynos4210-pd";
 		reg = <0x10044000 0x20>;
@@ -1188,6 +1224,377 @@
 		power-domains = <&disp_pd>;
 		#iommu-cells = <0>;
 	};
+
+	bus_wcore: bus_wcore {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK400_WCORE>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_wcore_opp_table>;
+		status = "disabled";
+	};
+
+	bus_noc: bus_noc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK100_NOC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_noc_opp_table>;
+		status = "disabled";
+	};
+
+	bus_fsys_apb: bus_fsys_apb {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_PCLK200_FSYS>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_fsys_apb_opp_table>;
+		status = "disabled";
+	};
+
+	bus_fsys: bus_fsys {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK200_FSYS>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_fsys_apb_opp_table>;
+		status = "disabled";
+	};
+
+	bus_fsys2: bus_fsys2 {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK200_FSYS2>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_fsys2_opp_table>;
+		status = "disabled";
+	};
+
+	bus_mfc: bus_mfc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK333>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_mfc_opp_table>;
+		status = "disabled";
+	};
+
+	bus_gen: bus_gen {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK266>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_gen_opp_table>;
+		status = "disabled";
+	};
+
+	bus_peri: bus_peri {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK66>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_peri_opp_table>;
+		status = "disabled";
+	};
+
+	bus_g2d: bus_g2d {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK333_G2D>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_g2d_opp_table>;
+		status = "disabled";
+	};
+
+	bus_g2d_acp: bus_g2d_acp {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK266_G2D>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_g2d_acp_opp_table>;
+		status = "disabled";
+	};
+
+	bus_jpeg: bus_jpeg {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK300_JPEG>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_jpeg_opp_table>;
+		status = "disabled";
+	};
+
+	bus_jpeg_apb: bus_jpeg_apb {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK166>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_jpeg_apb_opp_table>;
+		status = "disabled";
+	};
+
+	bus_disp1_fimd: bus_disp1_fimd {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK300_DISP1>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_disp1_fimd_opp_table>;
+		status = "disabled";
+	};
+
+	bus_disp1: bus_disp1 {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK400_DISP1>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_disp1_opp_table>;
+		status = "disabled";
+	};
+
+	bus_gscl_scaler: bus_gscl_scaler {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK300_GSCL>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_gscl_opp_table>;
+		status = "disabled";
+	};
+
+	bus_mscl: bus_mscl {
+		compatible = "samsung,exynos-bus";
+		clocks = <&clock CLK_DOUT_ACLK400_MSCL>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_mscl_opp_table>;
+		status = "disabled";
+	};
+
+	bus_wcore_opp_table: opp_table2 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <84000000>;
+			opp-microvolt = <925000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <111000000>;
+			opp-microvolt = <950000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <222000000>;
+			opp-microvolt = <950000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <333000000>;
+			opp-microvolt = <950000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = <987500>;
+		};
+	};
+
+	bus_noc_opp_table: opp_table3 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <67000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <75000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <86000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+	};
+
+	bus_fsys_apb_opp_table: opp_table4 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
+
+	bus_fsys2_opp_table: opp_table5 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <75000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <150000000>;
+		};
+	};
+
+	bus_mfc_opp_table: opp_table6 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <96000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <167000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <333000000>;
+		};
+	};
+
+	bus_gen_opp_table: opp_table7 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <89000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <133000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <178000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <267000000>;
+		};
+	};
+
+	bus_peri_opp_table: opp_table8 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <67000000>;
+		};
+	};
+
+	bus_g2d_opp_table: opp_table9 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <84000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <167000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <333000000>;
+		};
+	};
+
+	bus_g2d_acp_opp_table: opp_table10 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <67000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <133000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <178000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <267000000>;
+		};
+	};
+
+	bus_jpeg_opp_table: opp_table11 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <75000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <150000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_jpeg_apb_opp_table: opp_table12 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <84000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <111000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <134000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <167000000>;
+		};
+	};
+
+	bus_disp1_fimd_opp_table: opp_table13 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <120000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+	};
+
+	bus_disp1_opp_table: opp_table14 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <120000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_gscl_opp_table: opp_table15 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <150000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_mscl_opp_table: opp_table16 {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <84000000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <167000000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <222000000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <333000000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <400000000>;
+		};
+	};
 };
 
 &dp {
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 20fa761..2a4e10b 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -56,6 +56,89 @@
 	};
 };
 
+&bus_wcore {
+	devfreq-events = <&nocp_mem0_0>, <&nocp_mem0_1>,
+			<&nocp_mem1_0>, <&nocp_mem1_1>;
+	vdd-supply = <&buck3_reg>;
+	exynos,saturation-ratio = <100>;
+	status = "okay";
+};
+
+&bus_noc {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_fsys_apb {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_fsys {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_fsys2 {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_mfc {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_gen {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_peri {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_g2d {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_g2d_acp {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_jpeg {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_jpeg_apb {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_disp1_fimd {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_disp1 {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_gscl_scaler {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
+&bus_mscl {
+	devfreq = <&bus_wcore>;
+	status = "okay";
+};
+
 &clock_audss {
 	assigned-clocks = <&clock_audss EXYNOS_MOUT_AUDSS>,
 			<&clock_audss EXYNOS_MOUT_I2S>,
@@ -361,6 +444,22 @@
 	vqmmc-supply = <&ldo13_reg>;
 };
 
+&nocp_mem0_0 {
+	status = "okay";
+};
+
+&nocp_mem0_1 {
+	status = "okay";
+};
+
+&nocp_mem1_0 {
+	status = "okay";
+};
+
+&nocp_mem1_1 {
+	status = "okay";
+};
+
 &pinctrl_0 {
 	hdmi_hpd_irq: hdmi-hpd-irq {
 		samsung,pins = "gpx3-7";
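The child buses above point at &bus_wcore through the "devfreq" property; a hedged sketch of the kernel-side pairing, simplified and using the generic devfreq "passive" governor (identifiers prefixed example_ are illustrative, not part of this patch):

#include <linux/devfreq.h>
#include <linux/err.h>

/* Illustrative only: a bus that names a parent via "devfreq = <...>" is
 * registered with the "passive" governor, so it follows the parent's
 * frequency decisions instead of monitoring its own load counters. */
static struct devfreq_passive_data example_passive_data;

static int example_register_passive_bus(struct device *dev,
					struct devfreq_dev_profile *profile,
					struct devfreq *parent)
{
	struct devfreq *df;

	example_passive_data.parent = parent;
	df = devfreq_add_device(dev, profile, "passive",
				&example_passive_data);
	return PTR_ERR_OR_ZERO(df);
}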
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index f959925..62ceb89 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -157,8 +157,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&dp_hpd_gpio>;
 	samsung,color-space = <0>;
-	samsung,dynamic-range = <0>;
-	samsung,ycbcr-coeff = <0>;
 	samsung,color-depth = <1>;
 	samsung,link-rate = <0x0a>;
 	samsung,lane-count = <2>;
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
new file mode 100644
index 0000000..1ce9780
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -0,0 +1,745 @@
+/*
+ * Copyright 2016 Boundary Devices, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include "imx7d.dtsi"
+
+/ {
+	model = "Boundary Devices i.MX7 Nitrogen7 Board";
+	compatible = "boundary,imx7d-nitrogen7", "fsl,imx7d";
+
+	aliases {
+		fb_lcd = &lcdif;
+		t_lcd = &t_lcd;
+	};
+
+	memory {
+		reg = <0x80000000 0x40000000>;
+	};
+
+	backlight-j9 {
+		compatible = "gpio-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_backlight_j9>;
+		gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+		default-on;
+	};
+
+	backlight-j20 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm1 0 5000000>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <6>;
+		status = "okay";
+	};
+
+	reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg2_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_can2_3v3: regulator-can2-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "can2-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio2 14 GPIO_ACTIVE_LOW>;
+	};
+
+	reg_vref_1v8: regulator-vref-1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "vref-1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	reg_vref_3v3: regulator-vref-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vref-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	reg_wlan: regulator-wlan {
+		compatible = "regulator-fixed";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "slow";
+		regulator-name = "reg_wlan";
+		startup-delay-us = <70000>;
+		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&adc1 {
+	vref-supply = <&reg_vref_1v8>;
+	status = "okay";
+};
+
+&adc2 {
+	vref-supply = <&reg_vref_1v8>;
+	status = "okay";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+			  <&clks IMX7D_CLKO2_ROOT_DIV>;
+	assigned-clock-parents = <&clks IMX7D_CKIL>;
+	assigned-clock-rates = <0>, <32768>;
+};
+
+&cpu0 {
+	arm-supply = <&sw1a_reg>;
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1>;
+	assigned-clocks = <&clks IMX7D_ENET1_TIME_ROOT_SRC>,
+			  <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
+	assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+	assigned-clock-rates = <0>, <100000000>;
+	phy-mode = "rgmii";
+	phy-handle = <&ethphy0>;
+	fsl,magic-packet;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy0: ethernet-phy@4 {
+			reg = <4>;
+		};
+	};
+};
+
+&flexcan2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan2>;
+	xceiver-supply = <&reg_can2_3v3>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	pmic: pfuze3000@08 {
+		compatible = "fsl,pfuze3000";
+		reg = <0x08>;
+
+		regulators {
+			sw1a_reg: sw1a {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			/* use sw1c_reg to align with pfuze100/pfuze200 */
+			sw1c_reg: sw1b {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw2_reg: sw2 {
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1850000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			sw3a_reg: sw3 {
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1650000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			swbst_reg: swbst {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5150000>;
+			};
+
+			snvs_reg: vsnvs {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vref_reg: vrefddr {
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vgen1_reg: vldo1 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen2_reg: vldo2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+				regulator-always-on;
+			};
+
+			vgen3_reg: vccsd {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen4_reg: v33 {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen5_reg: vldo3 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen6_reg: vldo4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	rtc@68 {
+		compatible = "rv4162";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_i2c2_rv4162>;
+		reg = <0x68>;
+		interrupts-extended = <&gpio2 15 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&i2c3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	touch@48 {
+		compatible = "ti,tsc2004";
+		reg = <0x48>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_i2c3_tsc2004>;
+		interrupts-extended = <&gpio3 4 IRQ_TYPE_EDGE_FALLING>;
+		wakeup-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
+	};
+};
+
+&i2c4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c4>;
+	status = "okay";
+
+	codec: wm8960@1a {
+		compatible = "wlf,wm8960";
+		reg = <0x1a>;
+		clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+		clock-names = "mclk";
+		wlf,shared-lrclk;
+	};
+};
+
+&lcdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_lcdif_dat
+		     &pinctrl_lcdif_ctrl>;
+	lcd-supply = <&reg_vref_3v3>;
+	display = <&display0>;
+	status = "okay";
+
+	display0: lcd-display {
+		bits-per-pixel = <16>;
+		bus-width = <18>;
+
+		display-timings {
+			native-mode = <&t_lcd>;
+			t_lcd: t_lcd_default {
+				/* default to Okaya display */
+				clock-frequency = <30000000>;
+				hactive = <800>;
+				vactive = <480>;
+				hfront-porch = <40>;
+				hback-porch = <40>;
+				hsync-len = <48>;
+				vback-porch = <29>;
+				vfront-porch = <13>;
+				vsync-len = <3>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&pwm1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm1>;
+	status = "okay";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>;
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	assigned-clocks = <&clks IMX7D_UART1_ROOT_SRC>;
+	assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	assigned-clocks = <&clks IMX7D_UART2_ROOT_SRC>;
+	assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	assigned-clocks = <&clks IMX7D_UART3_ROOT_SRC>;
+	assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+	status = "okay";
+};
+
+&uart6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart6>;
+	assigned-clocks = <&clks IMX7D_UART6_ROOT_SRC>;
+	assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&usbotg1 {
+	vbus-supply = <&reg_usb_otg1_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg1>;
+	status = "okay";
+};
+
+&usbotg2 {
+	vbus-supply = <&reg_usb_otg2_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg2>;
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&vgen3_reg>;
+	bus-width = <4>;
+	fsl,tuning-step = <2>;
+	wakeup-source;
+	keep-power-in-suspend;
+	status = "okay";
+};
+
+&usdhc2 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	bus-width = <4>;
+	non-removable;
+	vmmc-supply = <&reg_wlan>;
+	cap-power-off-card;
+	keep-power-in-suspend;
+	status = "okay";
+
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1271";
+		reg = <2>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
+		ref-clock-frequency = <38400000>;
+	};
+};
+
+&usdhc3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>;
+	assigned-clock-rates = <400000000>;
+	bus-width = <8>;
+	fsl,tuning-step = <2>;
+	non-removable;
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog1>;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog_1 &pinctrl_j2>;
+
+	pinctrl_hog_1: hoggrp-1 {
+		fsl,pins = <
+			MX7D_PAD_SD3_RESET_B__GPIO6_IO11	0x5d
+			MX7D_PAD_GPIO1_IO13__GPIO1_IO13		0x7d
+			MX7D_PAD_ECSPI2_MISO__GPIO4_IO22	0x7d
+		>;
+	};
+
+	pinctrl_enet1: enet1grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO10__ENET1_MDIO			0x3
+			MX7D_PAD_GPIO1_IO11__ENET1_MDC			0x3
+			MX7D_PAD_GPIO1_IO12__CCM_ENET_REF_CLK1		0x3
+			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x71
+			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x71
+			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x71
+			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x71
+			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x71
+			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x71
+			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x71
+			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x11
+			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x11
+			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x11
+			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x71
+			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x11
+			MX7D_PAD_SD3_STROBE__GPIO6_IO10			0x75
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX	0x7d
+			MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX	0x7d
+			MX7D_PAD_EPDC_DATA14__GPIO2_IO14	0x7d
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX7D_PAD_I2C1_SDA__I2C1_SDA		0x4000007f
+			MX7D_PAD_I2C1_SCL__I2C1_SCL		0x4000007f
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX7D_PAD_I2C2_SDA__I2C2_SDA		0x4000007f
+			MX7D_PAD_I2C2_SCL__I2C2_SCL		0x4000007f
+		>;
+	};
+
+	pinctrl_i2c2_rv4162: i2c2-rv4162grp {
+		fsl,pins = <
+			MX7D_PAD_EPDC_DATA15__GPIO2_IO15	0x7d
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX7D_PAD_I2C3_SDA__I2C3_SDA		0x4000007f
+			MX7D_PAD_I2C3_SCL__I2C3_SCL		0x4000007f
+		>;
+	};
+
+	pinctrl_i2c3_tsc2004: i2c3tsc2004grp {
+		fsl,pins = <
+			MX7D_PAD_LCD_RESET__GPIO3_IO4		0x79
+			MX7D_PAD_SD2_WP__GPIO5_IO10		0x7d
+		>;
+	};
+
+	pinctrl_i2c4: i2c4grp {
+		fsl,pins = <
+			MX7D_PAD_I2C4_SDA__I2C4_SDA		0x4000007f
+			MX7D_PAD_I2C4_SCL__I2C4_SCL		0x4000007f
+		>;
+	};
+
+	pinctrl_j2: j2grp {
+		fsl,pins = <
+			MX7D_PAD_SAI1_TX_DATA__GPIO6_IO15	0x7d
+			MX7D_PAD_EPDC_BDR0__GPIO2_IO28		0x7d
+			MX7D_PAD_SAI1_RX_DATA__GPIO6_IO12	0x7d
+			MX7D_PAD_EPDC_BDR1__GPIO2_IO29		0x7d
+			MX7D_PAD_SD1_WP__GPIO5_IO1		0x7d
+			MX7D_PAD_EPDC_SDSHR__GPIO2_IO19		0x7d
+			MX7D_PAD_SD1_RESET_B__GPIO5_IO2		0x7d
+			MX7D_PAD_SD2_RESET_B__GPIO5_IO11	0x7d
+			MX7D_PAD_EPDC_DATA07__GPIO2_IO7		0x7d
+			MX7D_PAD_EPDC_DATA08__GPIO2_IO8		0x7d
+			MX7D_PAD_EPDC_DATA09__GPIO2_IO9		0x7d
+			MX7D_PAD_EPDC_DATA10__GPIO2_IO10	0x7d
+			MX7D_PAD_EPDC_DATA11__GPIO2_IO11	0x7d
+			MX7D_PAD_EPDC_DATA12__GPIO2_IO12	0x7d
+			MX7D_PAD_SAI1_TX_SYNC__GPIO6_IO14	0x7d
+			MX7D_PAD_EPDC_DATA13__GPIO2_IO13	0x7d
+			MX7D_PAD_SAI1_TX_BCLK__GPIO6_IO13	0x7d
+			MX7D_PAD_SD2_CD_B__GPIO5_IO9		0x7d
+			MX7D_PAD_EPDC_GDCLK__GPIO2_IO24		0x7d
+			MX7D_PAD_SAI2_RX_DATA__GPIO6_IO21	0x7d
+			MX7D_PAD_EPDC_GDOE__GPIO2_IO25		0x7d
+			MX7D_PAD_EPDC_GDRL__GPIO2_IO26		0x7d
+			MX7D_PAD_SAI2_TX_DATA__GPIO6_IO22	0x7d
+			MX7D_PAD_EPDC_SDCE0__GPIO2_IO20		0x7d
+			MX7D_PAD_SAI2_TX_BCLK__GPIO6_IO20	0x7d
+			MX7D_PAD_EPDC_SDCE1__GPIO2_IO21		0x7d
+			MX7D_PAD_SAI2_TX_SYNC__GPIO6_IO19	0x7d
+			MX7D_PAD_EPDC_SDCE2__GPIO2_IO22		0x7d
+			MX7D_PAD_EPDC_SDCE3__GPIO2_IO23		0x7d
+			MX7D_PAD_EPDC_GDSP__GPIO2_IO27		0x7d
+			MX7D_PAD_EPDC_SDCLK__GPIO2_IO16		0x7d
+			MX7D_PAD_EPDC_SDLE__GPIO2_IO17		0x7d
+			MX7D_PAD_EPDC_SDOE__GPIO2_IO18		0x7d
+			MX7D_PAD_EPDC_PWR_COM__GPIO2_IO30	0x7d
+			MX7D_PAD_EPDC_PWR_STAT__GPIO2_IO31	0x7d
+		>;
+	};
+
+	pinctrl_lcdif_dat: lcdifdatgrp {
+		fsl,pins = <
+			MX7D_PAD_LCD_DATA00__LCD_DATA0		0x79
+			MX7D_PAD_LCD_DATA01__LCD_DATA1		0x79
+			MX7D_PAD_LCD_DATA02__LCD_DATA2		0x79
+			MX7D_PAD_LCD_DATA03__LCD_DATA3		0x79
+			MX7D_PAD_LCD_DATA04__LCD_DATA4		0x79
+			MX7D_PAD_LCD_DATA05__LCD_DATA5		0x79
+			MX7D_PAD_LCD_DATA06__LCD_DATA6		0x79
+			MX7D_PAD_LCD_DATA07__LCD_DATA7		0x79
+			MX7D_PAD_LCD_DATA08__LCD_DATA8		0x79
+			MX7D_PAD_LCD_DATA09__LCD_DATA9		0x79
+			MX7D_PAD_LCD_DATA10__LCD_DATA10		0x79
+			MX7D_PAD_LCD_DATA11__LCD_DATA11		0x79
+			MX7D_PAD_LCD_DATA12__LCD_DATA12		0x79
+			MX7D_PAD_LCD_DATA13__LCD_DATA13		0x79
+			MX7D_PAD_LCD_DATA14__LCD_DATA14		0x79
+			MX7D_PAD_LCD_DATA15__LCD_DATA15		0x79
+			MX7D_PAD_LCD_DATA16__LCD_DATA16		0x79
+			MX7D_PAD_LCD_DATA17__LCD_DATA17		0x79
+			MX7D_PAD_LCD_DATA18__LCD_DATA18		0x79
+			MX7D_PAD_LCD_DATA19__LCD_DATA19		0x79
+			MX7D_PAD_LCD_DATA20__LCD_DATA20		0x79
+			MX7D_PAD_LCD_DATA21__LCD_DATA21		0x79
+			MX7D_PAD_LCD_DATA22__LCD_DATA22		0x79
+			MX7D_PAD_LCD_DATA23__LCD_DATA23		0x79
+		>;
+	};
+
+	pinctrl_lcdif_ctrl: lcdifctrlgrp {
+		fsl,pins = <
+			MX7D_PAD_LCD_CLK__LCD_CLK		0x79
+			MX7D_PAD_LCD_ENABLE__LCD_ENABLE		0x79
+			MX7D_PAD_LCD_VSYNC__LCD_VSYNC		0x79
+			MX7D_PAD_LCD_HSYNC__LCD_HSYNC		0x79
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO09__PWM2_OUT		0x7d
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX	0x79
+			MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX	0x79
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX7D_PAD_UART2_TX_DATA__UART2_DCE_TX	0x79
+			MX7D_PAD_UART2_RX_DATA__UART2_DCE_RX	0x79
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX7D_PAD_UART3_TX_DATA__UART3_DCE_TX	0x79
+			MX7D_PAD_UART3_RX_DATA__UART3_DCE_RX	0x79
+			MX7D_PAD_EPDC_DATA04__GPIO2_IO4		0x7d
+		>;
+	};
+
+	pinctrl_uart6: uart6grp {
+		fsl,pins = <
+			MX7D_PAD_ECSPI1_MOSI__UART6_DCE_TX	0x79
+			MX7D_PAD_ECSPI1_SCLK__UART6_DCE_RX	0x79
+			MX7D_PAD_ECSPI1_SS0__UART6_DCE_CTS	0x79
+			MX7D_PAD_ECSPI1_MISO__UART6_DCE_RTS	0x79
+		>;
+	};
+
+	pinctrl_usbotg2: usbotg2grp {
+		fsl,pins = <
+			MX7D_PAD_UART3_RTS_B__USB_OTG2_OC	0x7d
+			MX7D_PAD_UART3_CTS_B__GPIO4_IO7		0x14
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX7D_PAD_SD1_CMD__SD1_CMD		0x59
+			MX7D_PAD_SD1_CLK__SD1_CLK		0x19
+			MX7D_PAD_SD1_DATA0__SD1_DATA0		0x59
+			MX7D_PAD_SD1_DATA1__SD1_DATA1		0x59
+			MX7D_PAD_SD1_DATA2__SD1_DATA2		0x59
+			MX7D_PAD_SD1_DATA3__SD1_DATA3		0x59
+			MX7D_PAD_GPIO1_IO08__SD1_VSELECT	0x75
+			MX7D_PAD_SD1_CD_B__GPIO5_IO0		0x75
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX7D_PAD_SD2_CMD__SD2_CMD		0x59
+			MX7D_PAD_SD2_CLK__SD2_CLK		0x19
+			MX7D_PAD_SD2_DATA0__SD2_DATA0		0x59
+			MX7D_PAD_SD2_DATA1__SD2_DATA1		0x59
+			MX7D_PAD_SD2_DATA2__SD2_DATA2		0x59
+			MX7D_PAD_SD2_DATA3__SD2_DATA3		0x59
+			MX7D_PAD_ECSPI2_SCLK__GPIO4_IO20	0x59
+			MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21	0x59
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX7D_PAD_SD3_CMD__SD3_CMD		0x59
+			MX7D_PAD_SD3_CLK__SD3_CLK		0x19
+			MX7D_PAD_SD3_DATA0__SD3_DATA0		0x59
+			MX7D_PAD_SD3_DATA1__SD3_DATA1		0x59
+			MX7D_PAD_SD3_DATA2__SD3_DATA2		0x59
+			MX7D_PAD_SD3_DATA3__SD3_DATA3		0x59
+			MX7D_PAD_SD3_DATA4__SD3_DATA4		0x59
+			MX7D_PAD_SD3_DATA5__SD3_DATA5		0x59
+			MX7D_PAD_SD3_DATA6__SD3_DATA6		0x59
+			MX7D_PAD_SD3_DATA7__SD3_DATA7		0x59
+		>;
+	};
+};
+
+&iomuxc_lpsr {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog_2>;
+
+	pinctrl_hog_2: hoggrp-2 {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO02__GPIO1_IO2		0x7d
+			MX7D_PAD_GPIO1_IO03__CCM_CLKO2		0x7d
+		>;
+	};
+
+	pinctrl_backlight_j9: backlightj9grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO07__GPIO1_IO7		0x7d
+		>;
+	};
+
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO01__PWM1_OUT		0x7d
+		>;
+	};
+
+	pinctrl_usbotg1: usbotg1grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO04__USB_OTG1_OC	0x7d
+			MX7D_PAD_GPIO1_IO05__GPIO1_IO5		0x14
+		>;
+	};
+
+	pinctrl_wdog1: wdog1grp {
+		fsl,pins = <
+			MX7D_PAD_GPIO1_IO00__WDOD1_WDOG_B	0x75
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index b5a50e0..6b3faa2 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -651,6 +651,17 @@
 				#pwm-cells = <2>;
 				status = "disabled";
 			};
+
+			lcdif: lcdif@30730000 {
+				compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
+				reg = <0x30730000 0x10000>;
+				interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
+					<&clks IMX7D_CLK_DUMMY>,
+					<&clks IMX7D_CLK_DUMMY>;
+				clock-names = "pix", "axi", "disp_axi";
+				status = "disabled";
+			};
 		};
 
 		aips3: aips-bus@30800000 {
@@ -693,6 +704,26 @@
 				status = "disabled";
 			};
 
+			flexcan1: can@30a00000 {
+				compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
+				reg = <0x30a00000 0x10000>;
+				interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_CLK_DUMMY>,
+					<&clks IMX7D_CAN1_ROOT_CLK>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+
+			flexcan2: can@30a10000 {
+				compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
+				reg = <0x30a10000 0x10000>;
+				interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_CLK_DUMMY>,
+					<&clks IMX7D_CAN2_ROOT_CLK>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+
 			i2c1: i2c@30a20000 {
 				#address-cells = <1>;
 				#size-cells = <0>;
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 0c82097..b9bbcce 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -14,6 +14,7 @@
 #include <dt-bindings/clock/r8a7779-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/r8a7779-sysc.h>
 
 / {
 	compatible = "renesas,r8a7779";
@@ -34,18 +35,21 @@
 			compatible = "arm,cortex-a9";
 			reg = <1>;
 			clock-frequency = <1000000000>;
+			power-domains = <&sysc R8A7779_PD_ARM1>;
 		};
 		cpu@2 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <2>;
 			clock-frequency = <1000000000>;
+			power-domains = <&sysc R8A7779_PD_ARM2>;
 		};
 		cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <3>;
 			clock-frequency = <1000000000>;
+			power-domains = <&sysc R8A7779_PD_ARM3>;
 		};
 	};
 
@@ -173,7 +177,7 @@
 		reg = <0xffc70000 0x1000>;
 		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -184,7 +188,7 @@
 		reg = <0xffc71000 0x1000>;
 		interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -195,7 +199,7 @@
 		reg = <0xffc72000 0x1000>;
 		interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -206,7 +210,7 @@
 		reg = <0xffc73000 0x1000>;
 		interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -218,7 +222,7 @@
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF0>,
 			 <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
 		clock-names = "fck", "brg_int", "scif_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -230,7 +234,7 @@
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF1>,
 			 <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
 		clock-names = "fck", "brg_int", "scif_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -242,7 +246,7 @@
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF2>,
 			 <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
 		clock-names = "fck", "brg_int", "scif_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -254,7 +258,7 @@
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF3>,
 			 <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
 		clock-names = "fck", "brg_int", "scif_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -266,7 +270,7 @@
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF4>,
 			 <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
 		clock-names = "fck", "brg_int", "scif_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -278,7 +282,7 @@
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF5>,
 			 <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
 		clock-names = "fck", "brg_int", "scif_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -300,7 +304,7 @@
 			     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 
 		#renesas,channels = <3>;
 
@@ -315,7 +319,7 @@
 			     <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_TMU1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 
 		#renesas,channels = <3>;
 
@@ -330,7 +334,7 @@
 			     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_TMU2>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 
 		#renesas,channels = <3>;
 
@@ -342,7 +346,7 @@
 		reg = <0xfc600000 0x2000>;
 		interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7779_CLK_SATA>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 	};
 
 	sdhi0: sd@ffe4c000 {
@@ -350,7 +354,7 @@
 		reg = <0xffe4c000 0x100>;
 		interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -359,7 +363,7 @@
 		reg = <0xffe4d000 0x100>;
 		interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -368,7 +372,7 @@
 		reg = <0xffe4e000 0x100>;
 		interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -377,7 +381,7 @@
 		reg = <0xffe4f000 0x100>;
 		interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -388,7 +392,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -399,7 +403,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -410,7 +414,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -419,7 +423,7 @@
 		reg = <0 0xfff80000 0 0x40000>;
 		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7779_CLK_DU>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		ports {
@@ -585,4 +589,10 @@
 				"mmc1", "mmc0";
 		};
 	};
+
+	sysc: system-controller@ffd85000 {
+		compatible = "renesas,r8a7779-sysc";
+		reg = <0xffd85000 0x0200>;
+		#power-domain-cells = <1>;
+	};
 };
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 935064f..83cf23c 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -13,6 +13,7 @@
 #include <dt-bindings/clock/r8a7790-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/r8a7790-sysc.h>
 
 / {
 	compatible = "renesas,r8a7790";
@@ -52,6 +53,7 @@
 			voltage-tolerance = <1>; /* 1% */
 			clocks = <&cpg_clocks R8A7790_CLK_Z>;
 			clock-latency = <300000>; /* 300 us */
+			power-domains = <&sysc R8A7790_PD_CA15_CPU0>;
 			next-level-cache = <&L2_CA15>;
 
 			/* kHz - uV - OPPs unknown yet */
@@ -68,6 +70,7 @@
 			compatible = "arm,cortex-a15";
 			reg = <1>;
 			clock-frequency = <1300000000>;
+			power-domains = <&sysc R8A7790_PD_CA15_CPU1>;
 			next-level-cache = <&L2_CA15>;
 		};
 
@@ -76,6 +79,7 @@
 			compatible = "arm,cortex-a15";
 			reg = <2>;
 			clock-frequency = <1300000000>;
+			power-domains = <&sysc R8A7790_PD_CA15_CPU2>;
 			next-level-cache = <&L2_CA15>;
 		};
 
@@ -84,6 +88,7 @@
 			compatible = "arm,cortex-a15";
 			reg = <3>;
 			clock-frequency = <1300000000>;
+			power-domains = <&sysc R8A7790_PD_CA15_CPU3>;
 			next-level-cache = <&L2_CA15>;
 		};
 
@@ -92,6 +97,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0x100>;
 			clock-frequency = <780000000>;
+			power-domains = <&sysc R8A7790_PD_CA7_CPU0>;
 			next-level-cache = <&L2_CA7>;
 		};
 
@@ -100,6 +106,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0x101>;
 			clock-frequency = <780000000>;
+			power-domains = <&sysc R8A7790_PD_CA7_CPU1>;
 			next-level-cache = <&L2_CA7>;
 		};
 
@@ -108,6 +115,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0x102>;
 			clock-frequency = <780000000>;
+			power-domains = <&sysc R8A7790_PD_CA7_CPU2>;
 			next-level-cache = <&L2_CA7>;
 		};
 
@@ -116,6 +124,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0x103>;
 			clock-frequency = <780000000>;
+			power-domains = <&sysc R8A7790_PD_CA7_CPU3>;
 			next-level-cache = <&L2_CA7>;
 		};
 	};
@@ -141,12 +150,14 @@
 
 	L2_CA15: cache-controller@0 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7790_PD_CA15_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
 
 	L2_CA7: cache-controller@1 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7790_PD_CA7_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
@@ -173,7 +184,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	gpio1: gpio@e6051000 {
@@ -186,7 +197,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	gpio2: gpio@e6052000 {
@@ -199,7 +210,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	gpio3: gpio@e6053000 {
@@ -212,7 +223,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	gpio4: gpio@e6054000 {
@@ -225,7 +236,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	gpio5: gpio@e6055000 {
@@ -238,7 +249,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	thermal: thermal@e61f0000 {
@@ -248,7 +259,7 @@
 		reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
 		interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#thermal-sensor-cells = <0>;
 	};
 
@@ -267,7 +278,7 @@
 			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -287,7 +298,7 @@
 			     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_CMT1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -304,7 +315,7 @@
 			     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7790_CLK_IRQC>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	dmac0: dma-controller@e6700000 {
@@ -333,7 +344,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -364,7 +375,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -393,7 +404,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7790_CLK_AUDIO_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -422,7 +433,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7790_CLK_AUDIO_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -434,7 +445,7 @@
 			      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -446,7 +457,7 @@
 			      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -458,7 +469,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <110>;
 		status = "disabled";
 	};
@@ -470,7 +481,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -482,7 +493,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -494,7 +505,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <110>;
 		status = "disabled";
 	};
@@ -508,7 +519,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
 		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -521,7 +532,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_IIC1>;
 		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -534,7 +545,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_IIC2>;
 		dmas = <&dmac0 0x69>, <&dmac0 0x6a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -547,7 +558,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_IICDVFS>;
 		dmas = <&dmac0 0x77>, <&dmac0 0x78>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -558,7 +569,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
 		dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		reg-io-width = <4>;
 		status = "disabled";
 		max-frequency = <97500000>;
@@ -571,7 +582,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_MMCIF1>;
 		dmas = <&dmac0 0xe1>, <&dmac0 0xe2>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		reg-io-width = <4>;
 		status = "disabled";
 		max-frequency = <97500000>;
@@ -590,7 +601,7 @@
 		dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx";
 		max-frequency = <195000000>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -602,7 +613,7 @@
 		dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
 		dma-names = "tx", "rx";
 		max-frequency = <195000000>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -614,7 +625,7 @@
 		dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx";
 		max-frequency = <97500000>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -626,7 +637,7 @@
 		dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx";
 		max-frequency = <97500000>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -639,7 +650,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -652,7 +663,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -665,7 +676,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -678,7 +689,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -691,7 +702,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -704,7 +715,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -718,7 +729,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -732,7 +743,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -746,7 +757,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -760,7 +771,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -774,7 +785,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -783,7 +794,7 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -796,7 +807,7 @@
 		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
 		interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -807,7 +818,7 @@
 		reg = <0 0xee300000 0 0x2000>;
 		interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_SATA0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -816,7 +827,7 @@
 		reg = <0 0xee500000 0 0x2000>;
 		interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_SATA1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -828,7 +839,7 @@
 		dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
 		       <&usb_dmac1 0>, <&usb_dmac1 1>;
 		dma-names = "ch0", "ch1", "ch2", "ch3";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		renesas,buswait = <4>;
 		phys = <&usb0 1>;
 		phy-names = "usb";
@@ -842,7 +853,7 @@
 		#size-cells = <0>;
 		clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
 		clock-names = "usbhs";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		usb0: usb-channel@0 {
@@ -860,7 +871,7 @@
 		reg = <0 0xe6ef0000 0 0x1000>;
 		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -869,7 +880,7 @@
 		reg = <0 0xe6ef1000 0 0x1000>;
 		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_VIN1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -878,7 +889,7 @@
 		reg = <0 0xe6ef2000 0 0x1000>;
 		interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_VIN2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -887,7 +898,7 @@
 		reg = <0 0xe6ef3000 0 0x1000>;
 		interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_VIN3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -896,7 +907,7 @@
 		reg = <0 0xfe920000 0 0x8000>;
 		interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_R>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		renesas,has-sru;
 		renesas,#rpf = <5>;
@@ -909,7 +920,7 @@
 		reg = <0 0xfe928000 0 0x8000>;
 		interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		renesas,has-lut;
 		renesas,has-sru;
@@ -923,7 +934,7 @@
 		reg = <0 0xfe930000 0 0x8000>;
 		interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -937,7 +948,7 @@
 		reg = <0 0xfe938000 0 0x8000>;
 		interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -992,7 +1003,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_RCAN0>,
 			 <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1003,7 +1014,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_RCAN1>,
 			 <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1012,7 +1023,7 @@
 		reg = <0 0xfe980000 0 0x10300>;
 		interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_JPU>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 	};
 
 	clocks {
@@ -1447,6 +1458,12 @@
 		};
 	};
 
+	sysc: system-controller@e6180000 {
+		compatible = "renesas,r8a7790-sysc";
+		reg = <0 0xe6180000 0 0x0200>;
+		#power-domain-cells = <1>;
+	};
+
 	qspi: spi@e6b10000 {
 		compatible = "renesas,qspi-r8a7790", "renesas,qspi";
 		reg = <0 0xe6b10000 0 0x2c>;
@@ -1454,7 +1471,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_QSPI_MOD>;
 		dmas = <&dmac0 0x17>, <&dmac0 0x18>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -1468,7 +1485,7 @@
 		clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
 		dmas = <&dmac0 0x51>, <&dmac0 0x52>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1481,7 +1498,7 @@
 		clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
 		dmas = <&dmac0 0x55>, <&dmac0 0x56>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1494,7 +1511,7 @@
 		clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
 		dmas = <&dmac0 0x41>, <&dmac0 0x42>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1507,7 +1524,7 @@
 		clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
 		dmas = <&dmac0 0x45>, <&dmac0 0x46>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1518,7 +1535,7 @@
 		reg = <0 0xee000000 0 0xc00>;
 		interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_SSUSB>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		phys = <&usb2 1>;
 		phy-names = "usb";
 		status = "disabled";
@@ -1531,7 +1548,7 @@
 		      <0 0xee080000 0 0x1100>;
 		interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		bus-range = <0 0>;
@@ -1566,7 +1583,7 @@
 		      <0 0xee0a0000 0 0x1100>;
 		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		bus-range = <1 1>;
@@ -1584,7 +1601,7 @@
 		compatible = "renesas,pci-r8a7790", "renesas,pci-rcar-gen2";
 		device_type = "pci";
 		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		reg = <0 0xee0d0000 0 0xc00>,
 		      <0 0xee0c0000 0 0x1100>;
 		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
@@ -1637,7 +1654,7 @@
 		interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_PCIEC>, <&pcie_bus_clk>;
 		clock-names = "pcie", "pcie_bus";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1680,7 +1697,7 @@
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 565c270..db67e34 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -13,6 +13,7 @@
 #include <dt-bindings/clock/r8a7791-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/r8a7791-sysc.h>
 
 / {
 	compatible = "renesas,r8a7791";
@@ -51,6 +52,7 @@
 			voltage-tolerance = <1>; /* 1% */
 			clocks = <&cpg_clocks R8A7791_CLK_Z>;
 			clock-latency = <300000>; /* 300 us */
+			power-domains = <&sysc R8A7791_PD_CA15_CPU0>;
 			next-level-cache = <&L2_CA15>;
 
 			/* kHz - uV - OPPs unknown yet */
@@ -67,6 +69,7 @@
 			compatible = "arm,cortex-a15";
 			reg = <1>;
 			clock-frequency = <1500000000>;
+			power-domains = <&sysc R8A7791_PD_CA15_CPU1>;
 			next-level-cache = <&L2_CA15>;
 		};
 	};
@@ -92,6 +95,7 @@
 
 	L2_CA15: cache-controller@0 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7791_PD_CA15_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
@@ -118,7 +122,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio1: gpio@e6051000 {
@@ -131,7 +135,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio2: gpio@e6052000 {
@@ -144,7 +148,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio3: gpio@e6053000 {
@@ -157,7 +161,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio4: gpio@e6054000 {
@@ -170,7 +174,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio5: gpio@e6055000 {
@@ -183,7 +187,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio6: gpio@e6055400 {
@@ -196,7 +200,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO6>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	gpio7: gpio@e6055800 {
@@ -209,7 +213,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO7>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	thermal: thermal@e61f0000 {
@@ -219,7 +223,7 @@
 		reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
 		interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks R8A7791_CLK_THERMAL>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#thermal-sensor-cells = <0>;
 	};
 
@@ -238,7 +242,7 @@
 			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_CMT0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -258,7 +262,7 @@
 			     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_CMT1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -281,7 +285,7 @@
 			     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7791_CLK_IRQC>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	dmac0: dma-controller@e6700000 {
@@ -310,7 +314,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7791_CLK_SYS_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -341,7 +345,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7791_CLK_SYS_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -370,7 +374,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7791_CLK_AUDIO_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -399,7 +403,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7791_CLK_AUDIO_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -411,7 +415,7 @@
 			      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7791_CLK_USBDMAC0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -423,7 +427,7 @@
 			      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7791_CLK_USBDMAC1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -436,7 +440,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -448,7 +452,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -460,7 +464,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -472,7 +476,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -484,7 +488,7 @@
 		reg = <0 0xe6520000 0 0x40>;
 		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -497,7 +501,7 @@
 		reg = <0 0xe6528000 0 0x40>;
 		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <110>;
 		status = "disabled";
 	};
@@ -512,7 +516,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_IICDVFS>;
 		dmas = <&dmac0 0x77>, <&dmac0 0x78>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -525,7 +529,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_IIC0>;
 		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -538,7 +542,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_IIC1>;
 		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -554,7 +558,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_MMCIF0>;
 		dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		reg-io-width = <4>;
 		status = "disabled";
 		max-frequency = <97500000>;
@@ -567,7 +571,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_SDHI0>;
 		dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -578,7 +582,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_SDHI1>;
 		dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -589,7 +593,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_SDHI2>;
 		dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -602,7 +606,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -615,7 +619,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -628,7 +632,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -641,7 +645,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -654,7 +658,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -667,7 +671,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x23>, <&dmac0 0x24>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -680,7 +684,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -693,7 +697,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -706,7 +710,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -720,7 +724,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -734,7 +738,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -748,7 +752,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -762,7 +766,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -776,7 +780,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -790,7 +794,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -804,7 +808,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -818,7 +822,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -832,7 +836,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -841,7 +845,7 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_ETHER>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -854,7 +858,7 @@
 		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
 		interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_ETHERAVB>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -865,7 +869,7 @@
 		reg = <0 0xee300000 0 0x2000>;
 		interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -874,7 +878,7 @@
 		reg = <0 0xee500000 0 0x2000>;
 		interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_SATA1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -886,7 +890,7 @@
 		dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
 		       <&usb_dmac1 0>, <&usb_dmac1 1>;
 		dma-names = "ch0", "ch1", "ch2", "ch3";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		renesas,buswait = <4>;
 		phys = <&usb0 1>;
 		phy-names = "usb";
@@ -900,7 +904,7 @@
 		#size-cells = <0>;
 		clocks = <&mstp7_clks R8A7791_CLK_HSUSB>;
 		clock-names = "usbhs";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		usb0: usb-channel@0 {
@@ -918,7 +922,7 @@
 		reg = <0 0xe6ef0000 0 0x1000>;
 		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_VIN0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -927,7 +931,7 @@
 		reg = <0 0xe6ef1000 0 0x1000>;
 		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_VIN1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -936,7 +940,7 @@
 		reg = <0 0xe6ef2000 0 0x1000>;
 		interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_VIN2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -945,7 +949,7 @@
 		reg = <0 0xfe928000 0 0x8000>;
 		interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_VSP1_S>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		renesas,has-lut;
 		renesas,has-sru;
@@ -959,7 +963,7 @@
 		reg = <0 0xfe930000 0 0x8000>;
 		interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -973,7 +977,7 @@
 		reg = <0 0xfe938000 0 0x8000>;
 		interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -1019,7 +1023,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
 			 <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1030,7 +1034,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_RCAN1>,
 			 <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1039,7 +1043,7 @@
 		reg = <0 0xfe980000 0 0x10300>;
 		interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_JPU>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 	};
 
 	clocks {
@@ -1463,6 +1467,12 @@
 		};
 	};
 
+	sysc: system-controller@e6180000 {
+		compatible = "renesas,r8a7791-sysc";
+		reg = <0 0xe6180000 0 0x0200>;
+		#power-domain-cells = <1>;
+	};
+
 	qspi: spi@e6b10000 {
 		compatible = "renesas,qspi-r8a7791", "renesas,qspi";
 		reg = <0 0xe6b10000 0 0x2c>;
@@ -1470,7 +1480,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_QSPI_MOD>;
 		dmas = <&dmac0 0x17>, <&dmac0 0x18>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -1484,7 +1494,7 @@
 		clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
 		dmas = <&dmac0 0x51>, <&dmac0 0x52>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1497,7 +1507,7 @@
 		clocks = <&mstp2_clks R8A7791_CLK_MSIOF1>;
 		dmas = <&dmac0 0x55>, <&dmac0 0x56>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1510,7 +1520,7 @@
 		clocks = <&mstp2_clks R8A7791_CLK_MSIOF2>;
 		dmas = <&dmac0 0x41>, <&dmac0 0x42>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1521,7 +1531,7 @@
 		reg = <0 0xee000000 0 0xc00>;
 		interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_SSUSB>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		phys = <&usb2 1>;
 		phy-names = "usb";
 		status = "disabled";
@@ -1534,7 +1544,7 @@
 		      <0 0xee080000 0 0x1100>;
 		interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		bus-range = <0 0>;
@@ -1569,7 +1579,7 @@
 		      <0 0xee0c0000 0 0x1100>;
 		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		bus-range = <1 1>;
@@ -1619,7 +1629,7 @@
 		interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_PCIEC>, <&pcie_bus_clk>;
 		clock-names = "pcie", "pcie_bus";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1722,7 +1732,7 @@
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index cf6dc2a..1dd6d20 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -11,6 +11,7 @@
 #include <dt-bindings/clock/r8a7793-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/r8a7793-sysc.h>
 
 / {
 	compatible = "renesas,r8a7793";
@@ -43,6 +44,7 @@
 			voltage-tolerance = <1>; /* 1% */
 			clocks = <&cpg_clocks R8A7793_CLK_Z>;
 			clock-latency = <300000>; /* 300 us */
+			power-domains = <&sysc R8A7793_PD_CA15_CPU0>;
 
 			/* kHz - uV - OPPs unknown yet */
 			operating-points = <1500000 1000000>,
@@ -76,6 +78,7 @@
 
 	L2_CA15: cache-controller@0 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7793_PD_CA15_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
@@ -102,7 +105,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio1: gpio@e6051000 {
@@ -115,7 +118,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio2: gpio@e6052000 {
@@ -128,7 +131,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio3: gpio@e6053000 {
@@ -141,7 +144,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio4: gpio@e6054000 {
@@ -154,7 +157,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio5: gpio@e6055000 {
@@ -167,7 +170,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio6: gpio@e6055400 {
@@ -180,7 +183,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO6>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	gpio7: gpio@e6055800 {
@@ -193,7 +196,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7793_CLK_GPIO7>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	thermal: thermal@e61f0000 {
@@ -203,7 +206,7 @@
 		reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
 		interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks R8A7793_CLK_THERMAL>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		#thermal-sensor-cells = <0>;
 	};
 
@@ -222,7 +225,7 @@
 			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7793_CLK_CMT0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -242,7 +245,7 @@
 			     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7793_CLK_CMT1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -265,7 +268,7 @@
 			     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7793_CLK_IRQC>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 	};
 
 	dmac0: dma-controller@e6700000 {
@@ -294,7 +297,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7793_CLK_SYS_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -325,7 +328,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7793_CLK_SYS_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -354,7 +357,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7793_CLK_AUDIO_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -383,7 +386,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7793_CLK_AUDIO_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -396,7 +399,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7793_CLK_I2C0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -408,7 +411,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7793_CLK_I2C1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -420,7 +423,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7793_CLK_I2C2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -432,7 +435,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7793_CLK_I2C3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -444,7 +447,7 @@
 		reg = <0 0xe6520000 0 0x40>;
 		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7793_CLK_I2C4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <6>;
 		status = "disabled";
 	};
@@ -457,7 +460,7 @@
 		reg = <0 0xe6528000 0 0x40>;
 		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7793_CLK_I2C5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		i2c-scl-internal-delay-ns = <110>;
 		status = "disabled";
 	};
@@ -472,7 +475,7 @@
 		clocks = <&mstp9_clks R8A7793_CLK_IICDVFS>;
 		dmas = <&dmac0 0x77>, <&dmac0 0x78>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -485,7 +488,7 @@
 		clocks = <&mstp3_clks R8A7793_CLK_IIC0>;
 		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -498,7 +501,7 @@
 		clocks = <&mstp3_clks R8A7793_CLK_IIC1>;
 		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -514,7 +517,7 @@
 		clocks = <&mstp3_clks R8A7793_CLK_SDHI0>;
 		dmas = <&dmac0 0xcd>, <&dmac0 0xce>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -525,7 +528,7 @@
 		clocks = <&mstp3_clks R8A7793_CLK_SDHI1>;
 		dmas = <&dmac0 0xc1>, <&dmac0 0xc2>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -536,7 +539,7 @@
 		clocks = <&mstp3_clks R8A7793_CLK_SDHI2>;
 		dmas = <&dmac0 0xd3>, <&dmac0 0xd4>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -549,7 +552,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -562,7 +565,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -575,7 +578,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -588,7 +591,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -601,7 +604,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -614,7 +617,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x23>, <&dmac0 0x24>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -627,7 +630,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -640,7 +643,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -653,7 +656,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -667,7 +670,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -681,7 +684,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -695,7 +698,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -709,7 +712,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -723,7 +726,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -737,7 +740,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -751,7 +754,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -765,7 +768,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -779,7 +782,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -788,7 +791,7 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7793_CLK_ETHER>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -802,7 +805,7 @@
 		clocks = <&mstp9_clks R8A7793_CLK_QSPI_MOD>;
 		dmas = <&dmac0 0x17>, <&dmac0 0x18>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -846,7 +849,7 @@
 		clocks = <&mstp9_clks R8A7793_CLK_RCAN0>,
 			 <&cpg_clocks R8A7793_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -857,7 +860,7 @@
 		clocks = <&mstp9_clks R8A7793_CLK_RCAN1>,
 			 <&cpg_clocks R8A7793_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1221,6 +1224,12 @@
 		};
 	};
 
+	sysc: system-controller@e6180000 {
+		compatible = "renesas,r8a7793-sysc";
+		reg = <0 0xe6180000 0 0x0200>;
+		#power-domain-cells = <1>;
+	};
+
 	ipmmu_sy0: mmu@e6280000 {
 		compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
 		reg = <0 0xe6280000 0 0x1000>;
@@ -1316,7 +1325,7 @@
 				"src.4", "src.3", "src.2", "src.1", "src.0",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index e45b23f..f334a3a 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -12,6 +12,7 @@
 #include <dt-bindings/clock/r8a7794-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/r8a7794-sysc.h>
 
 / {
 	compatible = "renesas,r8a7794";
@@ -42,6 +43,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0>;
 			clock-frequency = <1000000000>;
+			power-domains = <&sysc R8A7794_PD_CA7_CPU0>;
 			next-level-cache = <&L2_CA7>;
 		};
 
@@ -50,12 +52,14 @@
 			compatible = "arm,cortex-a7";
 			reg = <1>;
 			clock-frequency = <1000000000>;
+			power-domains = <&sysc R8A7794_PD_CA7_CPU1>;
 			next-level-cache = <&L2_CA7>;
 		};
 	};
 
 	L2_CA7: cache-controller@1 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7794_PD_CA7_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
@@ -82,7 +86,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	gpio1: gpio@e6051000 {
@@ -95,7 +99,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	gpio2: gpio@e6052000 {
@@ -108,7 +112,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	gpio3: gpio@e6053000 {
@@ -121,7 +125,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	gpio4: gpio@e6054000 {
@@ -134,7 +138,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	gpio5: gpio@e6055000 {
@@ -147,7 +151,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	gpio6: gpio@e6055400 {
@@ -160,7 +164,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7794_CLK_GPIO6>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	cmt0: timer@ffca0000 {
@@ -170,7 +174,7 @@
 			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7794_CLK_CMT0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -190,7 +194,7 @@
 			     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_CMT1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -221,7 +225,7 @@
 			     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7794_CLK_IRQC>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 	};
 
 	pfc: pin-controller@e6060000 {
@@ -255,7 +259,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7794_CLK_SYS_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -286,7 +290,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7794_CLK_SYS_DMAC1>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -300,7 +304,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -313,7 +317,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -326,7 +330,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -339,7 +343,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -352,7 +356,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -365,7 +369,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x23>, <&dmac0 0x24>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -378,7 +382,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -391,7 +395,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -404,7 +408,7 @@
 		clock-names = "fck";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -418,7 +422,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -432,7 +436,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -446,7 +450,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -460,7 +464,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -474,7 +478,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -488,7 +492,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -502,7 +506,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -516,7 +520,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -530,7 +534,7 @@
 		clock-names = "fck", "brg_int", "scif_clk";
 		dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -539,7 +543,7 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7794_CLK_ETHER>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -552,7 +556,7 @@
 		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
 		interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7794_CLK_ETHERAVB>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -564,7 +568,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7794_CLK_I2C0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		i2c-scl-internal-delay-ns = <6>;
@@ -576,7 +580,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7794_CLK_I2C1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		i2c-scl-internal-delay-ns = <6>;
@@ -588,7 +592,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7794_CLK_I2C2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		i2c-scl-internal-delay-ns = <6>;
@@ -600,7 +604,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7794_CLK_I2C3>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		i2c-scl-internal-delay-ns = <6>;
@@ -612,7 +616,7 @@
 		reg = <0 0xe6520000 0 0x40>;
 		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7794_CLK_I2C4>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		i2c-scl-internal-delay-ns = <6>;
@@ -624,7 +628,7 @@
 		reg = <0 0xe6528000 0 0x40>;
 		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7794_CLK_I2C5>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		i2c-scl-internal-delay-ns = <6>;
@@ -638,7 +642,7 @@
 		clocks = <&mstp3_clks R8A7794_CLK_IIC0>;
 		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -651,7 +655,7 @@
 		clocks = <&mstp3_clks R8A7794_CLK_IIC1>;
 		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -664,7 +668,7 @@
 		clocks = <&mstp3_clks R8A7794_CLK_MMCIF0>;
 		dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		reg-io-width = <4>;
 		status = "disabled";
 	};
@@ -674,7 +678,7 @@
 		reg = <0 0xee100000 0 0x200>;
 		interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -683,7 +687,7 @@
 		reg = <0 0xee140000 0 0x100>;
 		interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_SDHI1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -692,7 +696,7 @@
 		reg = <0 0xee160000 0 0x100>;
 		interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_SDHI2>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -703,7 +707,7 @@
 		clocks = <&mstp9_clks R8A7794_CLK_QSPI_MOD>;
 		dmas = <&dmac0 0x17>, <&dmac0 0x18>;
 		dma-names = "tx", "rx";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -715,7 +719,7 @@
 		reg = <0 0xe6ef0000 0 0x1000>;
 		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7794_CLK_VIN0>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -724,7 +728,7 @@
 		reg = <0 0xe6ef1000 0 0x1000>;
 		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7794_CLK_VIN1>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -735,7 +739,7 @@
 		      <0 0xee080000 0 0x1100>;
 		interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7794_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		bus-range = <0 0>;
@@ -770,7 +774,7 @@
 		      <0 0xee0c0000 0 0x1100>;
 		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7794_CLK_EHCI>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		bus-range = <1 1>;
@@ -803,7 +807,7 @@
 		reg = <0 0xe6590000 0 0x100>;
 		interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7794_CLK_HSUSB>;
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		renesas,buswait = <4>;
 		phys = <&usb0 1>;
 		phy-names = "usb";
@@ -817,7 +821,7 @@
 		#size-cells = <0>;
 		clocks = <&mstp7_clks R8A7794_CLK_HSUSB>;
 		clock-names = "usbhs";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 
 		usb0: usb-channel@0 {
@@ -865,7 +869,7 @@
 		clocks = <&mstp9_clks R8A7794_CLK_RCAN0>,
 			 <&cpg_clocks R8A7794_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -876,7 +880,7 @@
 		clocks = <&mstp9_clks R8A7794_CLK_RCAN1>,
 			 <&cpg_clocks R8A7794_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
 
@@ -1213,6 +1217,12 @@
 		};
 	};
 
+	sysc: system-controller@e6180000 {
+		compatible = "renesas,r8a7794-sysc";
+		reg = <0 0xe6180000 0 0x0200>;
+		#power-domain-cells = <1>;
+	};
+
 	ipmmu_sy0: mmu@e6280000 {
 		compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
 		reg = <0 0xe6280000 0 0x1000>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index a99f07a..941f362 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -38,11 +38,17 @@
 		vddio-pex-ctl-supply = <&vdd_3v3_lp0>;
 		avdd-pll-erefe-supply = <&avdd_1v05_run>;
 
+		/* Mini PCIe */
 		pci@1,0 {
+			phys = <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-4}>;
+			phy-names = "pcie-0";
 			status = "okay";
 		};
 
+		/* Gigabit Ethernet */
 		pci@2,0 {
+			phys = <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-2}>;
+			phy-names = "pcie-0";
 			status = "okay";
 		};
 	};
@@ -1677,6 +1683,9 @@
 	sata@0,70020000 {
 		status = "okay";
 
+		phys = <&{/padctl@0,7009f000/pads/sata/lanes/sata-0}>;
+		phy-names = "sata-0";
+
 		hvdd-supply = <&vdd_3v3_lp0>;
 		vddio-supply = <&vdd_1v05_run>;
 		avdd-supply = <&vdd_1v05_run>;
@@ -1689,28 +1698,107 @@
 		status = "okay";
 	};
 
-	padctl@0,7009f000 {
-		pinctrl-0 = <&padctl_default>;
-		pinctrl-names = "default";
+	usb@0,70090000 {
+		phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-0}>, /* Micro A/B */
+		       <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* Mini PCIe */
+		       <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* USB3 */
+		       <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>; /* USB3 */
+		phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0";
 
-		padctl_default: pinmux {
-			usb3 {
-				nvidia,lanes = "pcie-0", "pcie-1";
-				nvidia,function = "usb3";
-				nvidia,iddq = <0>;
+		avddio-pex-supply = <&vdd_1v05_run>;
+		dvddio-pex-supply = <&vdd_1v05_run>;
+		avdd-usb-supply = <&vdd_3v3_lp0>;
+		avdd-pll-utmip-supply = <&vddio_1v8>;
+		avdd-pll-erefe-supply = <&avdd_1v05_run>;
+		avdd-usb-ss-pll-supply = <&vdd_1v05_run>;
+		hvdd-usb-ss-supply = <&vdd_3v3_lp0>;
+		hvdd-usb-ss-pll-e-supply = <&vdd_3v3_lp0>;
+
+		status = "okay";
+	};
+
+	padctl@0,7009f000 {
+		status = "okay";
+
+		pads {
+			usb2 {
+				status = "okay";
+
+				lanes {
+					usb2-0 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-1 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-2 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+				};
 			};
 
 			pcie {
-				nvidia,lanes = "pcie-2", "pcie-3",
-					       "pcie-4";
-				nvidia,function = "pcie";
-				nvidia,iddq = <0>;
+				status = "okay";
+
+				lanes {
+					pcie-0 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+
+					pcie-2 {
+						nvidia,function = "pcie";
+						status = "okay";
+					};
+
+					pcie-4 {
+						nvidia,function = "pcie";
+						status = "okay";
+					};
+				};
 			};
 
 			sata {
-				nvidia,lanes = "sata-0";
-				nvidia,function = "sata";
-				nvidia,iddq = <0>;
+				status = "okay";
+
+				lanes {
+					sata-0 {
+						nvidia,function = "sata";
+						status = "okay";
+					};
+				};
+			};
+		};
+
+		ports {
+			/* Micro A/B */
+			usb2-0 {
+				status = "okay";
+				mode = "otg";
+			};
+
+			/* Mini PCIe */
+			usb2-1 {
+				status = "okay";
+				mode = "host";
+			};
+
+			/* USB3 */
+			usb2-2 {
+				status = "okay";
+				mode = "host";
+
+				vbus-supply = <&vdd_usb3_vbus>;
+			};
+
+			usb3-0 {
+				nvidia,usb2-companion = <2>;
+				status = "okay";
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index 5f1fc14..0710a60 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -224,7 +224,7 @@
 					regulator-always-on;
 				};
 
-				ldo0 {
+				avdd_1v05_run: ldo0 {
 					regulator-name = "+1.05V_RUN_AVDD";
 					regulator-min-microvolt = <1050000>;
 					regulator-max-microvolt = <1050000>;
@@ -368,6 +368,99 @@
 		status = "okay";
 	};
 
+	usb@0,70090000 {
+		phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-0}>, /* 1st USB A */
+		       <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* Internal USB */
+		       <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* 2nd USB A */
+		       <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>, /* 1st USB A */
+		       <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-1}>; /* 2nd USB A */
+		phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0", "usb3-1";
+
+		avddio-pex-supply = <&vdd_1v05_run>;
+		dvddio-pex-supply = <&vdd_1v05_run>;
+		avdd-usb-supply = <&vdd_3v3_lp0>;
+		avdd-pll-utmip-supply = <&vddio_1v8>;
+		avdd-pll-erefe-supply = <&avdd_1v05_run>;
+		avdd-usb-ss-pll-supply = <&vdd_1v05_run>;
+		hvdd-usb-ss-supply = <&vdd_3v3_lp0>;
+		hvdd-usb-ss-pll-e-supply = <&vdd_3v3_lp0>;
+
+		status = "okay";
+	};
+
+	padctl@0,7009f000 {
+		status = "okay";
+
+		pads {
+			usb2 {
+				status = "okay";
+
+				lanes {
+					usb2-0 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-1 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-2 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+				};
+			};
+
+			pcie {
+				status = "okay";
+
+				lanes {
+					pcie-0 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+
+					pcie-1 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+				};
+			};
+		};
+
+		ports {
+			usb2-0 {
+				vbus-supply = <&vdd_usb1_vbus>;
+				status = "okay";
+				mode = "otg";
+			};
+
+			usb2-1 {
+				vbus-supply = <&vdd_run_cam>;
+				status = "okay";
+				mode = "host";
+			};
+
+			usb2-2 {
+				vbus-supply = <&vdd_usb3_vbus>;
+				status = "okay";
+				mode = "host";
+			};
+
+			usb3-0 {
+				nvidia,usb2-companion = <0>;
+				status = "okay";
+			};
+
+			usb3-1 {
+				nvidia,usb2-companion = <1>;
+				status = "okay";
+			};
+		};
+	};
+
 	sdhci0_pwrseq: sdhci0_pwrseq {
 		compatible = "mmc-pwrseq-simple";
 
@@ -414,33 +507,6 @@
 		};
 	};
 
-	usb@0,7d000000 { /* Rear external USB port. */
-		status = "okay";
-	};
-
-	usb-phy@0,7d000000 {
-		status = "okay";
-		vbus-supply = <&vdd_usb1_vbus>;
-	};
-
-	usb@0,7d004000 { /* Internal webcam. */
-		status = "okay";
-	};
-
-	usb-phy@0,7d004000 {
-		status = "okay";
-		vbus-supply = <&vdd_run_cam>;
-	};
-
-	usb@0,7d008000 { /* Left external USB port. */
-		status = "okay";
-	};
-
-	usb-phy@0,7d008000 {
-		status = "okay";
-		vbus-supply = <&vdd_usb3_vbus>;
-	};
-
 	backlight: backlight {
 		compatible = "pwm-backlight";
 
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 0318258..973446d 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -757,7 +757,7 @@
 					regulator-always-on;
 				};
 
-				ldo0 {
+				avdd_1v05_run: ldo0 {
 					regulator-name = "+1.05V_RUN_AVDD";
 					regulator-min-microvolt = <1050000>;
 					regulator-max-microvolt = <1050000>;
@@ -899,6 +899,100 @@
 		status = "okay";
 	};
 
+	usb@0,70090000 {
+		phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-0}>, /* 1st USB A */
+		       <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* Internal USB */
+		       <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* 2nd USB A */
+		       <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>, /* 1st USB A */
+		       <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-1}>; /* 2nd USB A */
+		phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0", "usb3-1";
+
+		avddio-pex-supply = <&vdd_1v05_run>;
+		dvddio-pex-supply = <&vdd_1v05_run>;
+		avdd-usb-supply = <&vdd_3v3_lp0>;
+		avdd-pll-utmip-supply = <&vddio_1v8>;
+		avdd-pll-erefe-supply = <&avdd_1v05_run>;
+		avdd-usb-ss-pll-supply = <&vdd_1v05_run>;
+		hvdd-usb-ss-supply = <&vdd_3v3_lp0>;
+		hvdd-usb-ss-pll-e-supply = <&vdd_3v3_lp0>;
+
+		status = "okay";
+	};
+
+	padctl@0,7009f000 {
+		pads {
+			usb2 {
+				status = "okay";
+
+				lanes {
+					usb2-0 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-1 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-2 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+				};
+			};
+
+			pcie {
+				status = "okay";
+
+				lanes {
+					pcie-0 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+
+					pcie-1 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+				};
+			};
+		};
+
+		ports {
+			usb2-0 {
+				status = "okay";
+				mode = "otg";
+
+				vbus-supply = <&vdd_usb1_vbus>;
+			};
+
+			usb2-1 {
+				status = "okay";
+				mode = "host";
+
+				vbus-supply = <&vdd_run_cam>;
+			};
+
+			usb2-2 {
+				status = "okay";
+				mode = "host";
+
+				vbus-supply = <&vdd_usb3_vbus>;
+			};
+
+			usb3-0 {
+				nvidia,usb2-companion = <0>;
+				status = "okay";
+			};
+
+			usb3-1 {
+				nvidia,usb2-companion = <2>;
+				status = "okay";
+			};
+		};
+	};
+
 	sdhci@0,700b0400 {
 		cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
 		power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index e4eac1f..ea48118 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -2,7 +2,6 @@
 #include <dt-bindings/gpio/tegra-gpio.h>
 #include <dt-bindings/memory/tegra124-mc.h>
 #include <dt-bindings/pinctrl/pinctrl-tegra.h>
-#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/reset/tegra124-car.h>
 #include <dt-bindings/thermal/tegra124-soctherm.h>
@@ -51,9 +50,6 @@
 		reset-names = "pex", "afi", "pcie_x";
 		status = "disabled";
 
-		phys = <&padctl TEGRA_XUSB_PADCTL_PCIE>;
-		phy-names = "pcie";
-
 		pci@1,0 {
 			device_type = "pci";
 			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
@@ -622,8 +618,6 @@
 			 <&tegra_car 123>,
 			 <&tegra_car 129>;
 		reset-names = "sata", "sata-oob", "sata-cold";
-		phys = <&padctl TEGRA_XUSB_PADCTL_SATA>;
-		phy-names = "sata-phy";
 		status = "disabled";
 	};
 
@@ -642,13 +636,172 @@
 		status = "disabled";
 	};
 
+	usb@0,70090000 {
+		compatible = "nvidia,tegra124-xusb";
+		reg = <0x0 0x70090000 0x0 0x8000>,
+		      <0x0 0x70098000 0x0 0x1000>,
+		      <0x0 0x70099000 0x0 0x1000>;
+		reg-names = "hcd", "fpci", "ipfs";
+
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&tegra_car TEGRA124_CLK_XUSB_HOST>,
+			 <&tegra_car TEGRA124_CLK_XUSB_HOST_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_FALCON_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_SS>,
+			 <&tegra_car TEGRA124_CLK_XUSB_SS_DIV2>,
+			 <&tegra_car TEGRA124_CLK_XUSB_SS_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_HS_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_FS_SRC>,
+			 <&tegra_car TEGRA124_CLK_PLL_U_480M>,
+			 <&tegra_car TEGRA124_CLK_CLK_M>,
+			 <&tegra_car TEGRA124_CLK_PLL_E>;
+		clock-names = "xusb_host", "xusb_host_src",
+			      "xusb_falcon_src", "xusb_ss",
+			      "xusb_ss_div2", "xusb_ss_src",
+			      "xusb_hs_src", "xusb_fs_src",
+			      "pll_u_480m", "clk_m", "pll_e";
+		resets = <&tegra_car 89>, <&tegra_car 156>,
+			 <&tegra_car 143>;
+		reset-names = "xusb_host", "xusb_ss", "xusb_src";
+
+		nvidia,xusb-padctl = <&padctl>;
+
+		status = "disabled";
+	};
+
 	padctl: padctl@0,7009f000 {
 		compatible = "nvidia,tegra124-xusb-padctl";
 		reg = <0x0 0x7009f000 0x0 0x1000>;
 		resets = <&tegra_car 142>;
 		reset-names = "padctl";
 
-		#phy-cells = <1>;
+		pads {
+			usb2 {
+				status = "disabled";
+
+				lanes {
+					usb2-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-2 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			ulpi {
+				status = "disabled";
+
+				lanes {
+					ulpi-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			hsic {
+				status = "disabled";
+
+				lanes {
+					hsic-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					hsic-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			pcie {
+				status = "disabled";
+
+				lanes {
+					pcie-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-2 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-3 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-4 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			sata {
+				status = "disabled";
+
+				lanes {
+					sata-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+		};
+
+		ports {
+			usb2-0 {
+				status = "disabled";
+			};
+
+			usb2-1 {
+				status = "disabled";
+			};
+
+			usb2-2 {
+				status = "disabled";
+			};
+
+			ulpi-0 {
+				status = "disabled";
+			};
+
+			hsic-0 {
+				status = "disabled";
+			};
+
+			hsic-1 {
+				status = "disabled";
+			};
+
+			usb3-0 {
+				status = "disabled";
+			};
+
+			usb3-1 {
+				status = "disabled";
+			};
+		};
 	};
 
 	sdhci@0,700b0000 {
diff --git a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
index 4d8b7f6..a8a8e43 100644
--- a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
@@ -50,6 +50,11 @@
 		clock-frequency = <16000000>;
 	};
 
+	panel: panel {
+		compatible = "edt,et057090dhu";
+		backlight = <&bl>;
+	};
+
 	reg_3v3: regulator-3v3 {
 		compatible = "regulator-fixed";
 		regulator-name = "3.3V";
@@ -83,6 +88,13 @@
 	status  = "okay";
 };
 
+&dcu0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_dcu0_1>;
+	fsl,panel = <&panel>;
+	status = "okay";
+};
+
 &dspi1 {
 	status = "okay";
 
@@ -134,6 +146,10 @@
 	vin-supply = <&reg_3v3>;
 };
 
+&tcon0 {
+	status = "okay";
+};
+
 &uart0 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/vf-colibri.dtsi b/arch/arm/boot/dts/vf-colibri.dtsi
index 226a86f..b741709 100644
--- a/arch/arm/boot/dts/vf-colibri.dtsi
+++ b/arch/arm/boot/dts/vf-colibri.dtsi
@@ -222,6 +222,39 @@
 			>;
 		};
 
+		pinctrl_dcu0_1: dcu0grp_1 {
+			fsl,pins = <
+				VF610_PAD_PTE0__DCU0_HSYNC	0x1902
+				VF610_PAD_PTE1__DCU0_VSYNC	0x1902
+				VF610_PAD_PTE2__DCU0_PCLK	0x1902
+				VF610_PAD_PTE4__DCU0_DE		0x1902
+				VF610_PAD_PTE5__DCU0_R0		0x1902
+				VF610_PAD_PTE6__DCU0_R1		0x1902
+				VF610_PAD_PTE7__DCU0_R2		0x1902
+				VF610_PAD_PTE8__DCU0_R3		0x1902
+				VF610_PAD_PTE9__DCU0_R4		0x1902
+				VF610_PAD_PTE10__DCU0_R5	0x1902
+				VF610_PAD_PTE11__DCU0_R6	0x1902
+				VF610_PAD_PTE12__DCU0_R7	0x1902
+				VF610_PAD_PTE13__DCU0_G0	0x1902
+				VF610_PAD_PTE14__DCU0_G1	0x1902
+				VF610_PAD_PTE15__DCU0_G2	0x1902
+				VF610_PAD_PTE16__DCU0_G3	0x1902
+				VF610_PAD_PTE17__DCU0_G4	0x1902
+				VF610_PAD_PTE18__DCU0_G5	0x1902
+				VF610_PAD_PTE19__DCU0_G6	0x1902
+				VF610_PAD_PTE20__DCU0_G7	0x1902
+				VF610_PAD_PTE21__DCU0_B0	0x1902
+				VF610_PAD_PTE22__DCU0_B1	0x1902
+				VF610_PAD_PTE23__DCU0_B2	0x1902
+				VF610_PAD_PTE24__DCU0_B3	0x1902
+				VF610_PAD_PTE25__DCU0_B4	0x1902
+				VF610_PAD_PTE26__DCU0_B5	0x1902
+				VF610_PAD_PTE27__DCU0_B6	0x1902
+				VF610_PAD_PTE28__DCU0_B7	0x1902
+			>;
+		};
+
 		pinctrl_dspi1: dspi1grp {
 			fsl,pins = <
 				VF610_PAD_PTD5__DSPI1_CS0		0x33e2
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 04ef54d..2c13ec6 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -311,6 +311,14 @@
 							<20000000>;
 			};
 
+			tcon0: timing-controller@4003d000 {
+				compatible = "fsl,vf610-tcon";
+				reg = <0x4003d000 0x1000>;
+				clocks = <&clks VF610_CLK_TCON0>;
+				clock-names = "ipg";
+				status = "disabled";
+			};
+
 			wdoga5: wdog@4003e000 {
 				compatible = "fsl,vf610-wdt", "fsl,imx21-wdt";
 				reg = <0x4003e000 0x1000>;
@@ -416,6 +424,17 @@
 				status = "disabled";
 			};
 
+			dcu0: dcu@40058000 {
+				compatible = "fsl,vf610-dcu";
+				reg = <0x40058000 0x1200>;
+				interrupts = <30 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks VF610_CLK_DCU0>,
+					<&clks VF610_CLK_DCU0_DIV>;
+				clock-names = "dcu", "pix";
+				fsl,tcon = <&tcon0>;
+				status = "disabled";
+			};
+
 			i2c0: i2c@40066000 {
 				#address-cells = <1>;
 				#size-cells = <0>;
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 7117662..909049a2 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -12,7 +12,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
diff --git a/arch/arm/configs/zx_defconfig b/arch/arm/configs/zx_defconfig
index ab683fb..d6253a4 100644
--- a/arch/arm/configs/zx_defconfig
+++ b/arch/arm/configs/zx_defconfig
@@ -7,7 +7,6 @@
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_NAMESPACES=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 0df6b1f..96387d4 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -41,6 +41,8 @@
 
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 
+#define KVM_REQ_VCPU_EXIT	8
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -226,6 +228,10 @@
 
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index d8e90c8..f3a7de7 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,6 +28,9 @@
 	bool sign_extend;
 };
 
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa);
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 27563be..22bf1f6 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct frame_tail buftail;
 	unsigned long err;
@@ -59,7 +59,7 @@
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct frame_tail __user *tail;
 
@@ -75,7 +75,7 @@
 
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-	while ((entry->nr < sysctl_perf_event_max_stack) &&
+	while ((entry->nr < entry->max_stack) &&
 	       tail && !((unsigned long)tail & 0x3))
 		tail = user_backtrace(tail, entry);
 }
@@ -89,13 +89,13 @@
 callchain_trace(struct stackframe *fr,
 		void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, fr->pc);
 	return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index a647d66..4a803c5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -420,7 +420,8 @@
 	npages = 1; /* for sigpage */
 	npages += vdso_total_pages;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	hint = sigpage_addr(mm, npages);
 	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ef9119f..4d93758 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -733,8 +733,8 @@
 	if (ret)
 		return ret;
 
-	vfp_flush_hwstate(thread);
 	thread->vfpstate.hard = new_vfp;
+	vfp_flush_hwstate(thread);
 
 	return 0;
 }
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 95a0005..02abfff 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -46,6 +46,13 @@
 	---help---
 	  Provides host support for ARM processors.
 
+config KVM_NEW_VGIC
+	bool "New VGIC implementation"
+	depends on KVM
+	default y
+	---help---
+	  uses the new VGIC implementation
+
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index eb1bf43..a596b58 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,7 +21,18 @@
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+
+ifeq ($(CONFIG_KVM_NEW_VGIC),y)
+obj-y += $(KVM)/arm/vgic/vgic.o
+obj-y += $(KVM)/arm/vgic/vgic-init.o
+obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
+obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
+else
 obj-y += $(KVM)/arm/vgic.o
 obj-y += $(KVM)/arm/vgic-v2.o
 obj-y += $(KVM)/arm/vgic-v2-emul.o
+endif
 obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 237d5d8..893941e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -455,7 +455,7 @@
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	int ret;
+	int ret = 0;
 
 	if (likely(vcpu->arch.has_run_once))
 		return 0;
@@ -478,9 +478,9 @@
 	 * interrupts from the virtual timer with a userspace gic.
 	 */
 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-		kvm_timer_enable(kvm);
+		ret = kvm_timer_enable(vcpu);
 
-	return 0;
+	return ret;
 }
 
 bool kvm_arch_intc_initialized(struct kvm *kvm)
@@ -488,30 +488,37 @@
 	return vgic_initialized(kvm);
 }
 
-static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
-static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
-
-static void kvm_arm_halt_guest(struct kvm *kvm)
+void kvm_arm_halt_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		vcpu->arch.pause = true;
-	force_vm_exit(cpu_all_mask);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
 }
 
-static void kvm_arm_resume_guest(struct kvm *kvm)
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pause = true;
+	kvm_vcpu_kick(vcpu);
+}
+
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+	vcpu->arch.pause = false;
+	swake_up(wq);
+}
+
+void kvm_arm_resume_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-		vcpu->arch.pause = false;
-		swake_up(wq);
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arm_resume_vcpu(vcpu);
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 0f6600f..10f80a6 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -23,7 +23,7 @@
 
 #include "trace.h"
 
-static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
 {
 	void *datap = NULL;
 	union {
@@ -55,7 +55,7 @@
 	memcpy(buf, datap, len);
 }
 
-static unsigned long mmio_read_buf(char *buf, unsigned int len)
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
 {
 	unsigned long data = 0;
 	union {
@@ -66,7 +66,7 @@
 
 	switch (len) {
 	case 1:
-		data = buf[0];
+		data = *(u8 *)buf;
 		break;
 	case 2:
 		memcpy(&tmp.hword, buf, len);
@@ -87,11 +87,10 @@
 
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ *			     or in-kernel IO emulation
+ *
  * @vcpu: The VCPU pointer
  * @run:  The VCPU run struct containing the mmio data
- *
- * This should only be called after returning from userspace for MMIO load
- * emulation.
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
@@ -104,7 +103,7 @@
 		if (len > sizeof(unsigned long))
 			return -EINVAL;
 
-		data = mmio_read_buf(run->mmio.data, len);
+		data = kvm_mmio_read_buf(run->mmio.data, len);
 
 		if (vcpu->arch.mmio_decode.sign_extend &&
 		    len < sizeof(unsigned long)) {
@@ -190,7 +189,7 @@
 					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
-		mmio_write_buf(data_buf, len, data);
+		kvm_mmio_write_buf(data_buf, len, data);
 
 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				       data_buf);
@@ -206,18 +205,19 @@
 	run->mmio.is_write	= is_write;
 	run->mmio.phys_addr	= fault_ipa;
 	run->mmio.len		= len;
-	if (is_write)
-		memcpy(run->mmio.data, data_buf, len);
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
+		if (!is_write)
+			memcpy(run->mmio.data, data_buf, len);
 		vcpu->stat.mmio_exit_kernel++;
 		kvm_handle_mmio_return(vcpu, run);
 		return 1;
-	} else {
-		vcpu->stat.mmio_exit_user++;
 	}
 
+	if (is_write)
+		memcpy(run->mmio.data, data_buf, len);
+	vcpu->stat.mmio_exit_user++;
 	run->exit_reason	= KVM_EXIT_MMIO;
 	return 0;
 }
diff --git a/arch/arm/mach-lpc32xx/Makefile b/arch/arm/mach-lpc32xx/Makefile
index c70709a..79b6b07 100644
--- a/arch/arm/mach-lpc32xx/Makefile
+++ b/arch/arm/mach-lpc32xx/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux kernel.
 #
 
-obj-y	:= irq.o common.o serial.o
+obj-y	:= common.o serial.o
 obj-y	+= pm.o suspend.o
 obj-y	+= phy3250.o
diff --git a/arch/arm/mach-lpc32xx/include/mach/irqs.h b/arch/arm/mach-lpc32xx/include/mach/irqs.h
index 9e3b90d..0019053 100644
--- a/arch/arm/mach-lpc32xx/include/mach/irqs.h
+++ b/arch/arm/mach-lpc32xx/include/mach/irqs.h
@@ -112,6 +112,6 @@
 #define IRQ_LPC32XX_GPI_06		LPC32XX_SIC2_IRQ(28)
 #define IRQ_LPC32XX_SYSCLK		LPC32XX_SIC2_IRQ(31)
 
-#define NR_IRQS				96
+#define LPC32XX_NR_IRQS			96
 
 #endif
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
deleted file mode 100644
index 2ae431e..0000000
--- a/arch/arm/mach-lpc32xx/irq.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/irq.c
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-
-#include <mach/irqs.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-#include "common.h"
-
-/*
- * Default value representing the Activation polarity of all internal
- * interrupt sources
- */
-#define MIC_APR_DEFAULT		0x3FF0EFE0
-#define SIC1_APR_DEFAULT	0xFBD27186
-#define SIC2_APR_DEFAULT	0x801810C0
-
-/*
- * Default value representing the Activation Type of all internal
- * interrupt sources. All are level sensitive.
- */
-#define MIC_ATR_DEFAULT		0x00000000
-#define SIC1_ATR_DEFAULT	0x00026000
-#define SIC2_ATR_DEFAULT	0x00000000
-
-static struct irq_domain *lpc32xx_mic_domain;
-static struct device_node *lpc32xx_mic_np;
-
-struct lpc32xx_event_group_regs {
-	void __iomem *enab_reg;
-	void __iomem *edge_reg;
-	void __iomem *maskstat_reg;
-	void __iomem *rawstat_reg;
-};
-
-static const struct lpc32xx_event_group_regs lpc32xx_event_int_regs = {
-	.enab_reg = LPC32XX_CLKPWR_INT_ER,
-	.edge_reg = LPC32XX_CLKPWR_INT_AP,
-	.maskstat_reg = LPC32XX_CLKPWR_INT_SR,
-	.rawstat_reg = LPC32XX_CLKPWR_INT_RS,
-};
-
-static const struct lpc32xx_event_group_regs lpc32xx_event_pin_regs = {
-	.enab_reg = LPC32XX_CLKPWR_PIN_ER,
-	.edge_reg = LPC32XX_CLKPWR_PIN_AP,
-	.maskstat_reg = LPC32XX_CLKPWR_PIN_SR,
-	.rawstat_reg = LPC32XX_CLKPWR_PIN_RS,
-};
-
-struct lpc32xx_event_info {
-	const struct lpc32xx_event_group_regs *event_group;
-	u32 mask;
-};
-
-/*
- * Maps an IRQ number to and event mask and register
- */
-static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = {
-	[IRQ_LPC32XX_GPI_08] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_08_BIT,
-	},
-	[IRQ_LPC32XX_GPI_09] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_09_BIT,
-	},
-	[IRQ_LPC32XX_GPI_19] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_19_BIT,
-	},
-	[IRQ_LPC32XX_GPI_07] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_07_BIT,
-	},
-	[IRQ_LPC32XX_GPI_00] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_00_BIT,
-	},
-	[IRQ_LPC32XX_GPI_01] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_01_BIT,
-	},
-	[IRQ_LPC32XX_GPI_02] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_02_BIT,
-	},
-	[IRQ_LPC32XX_GPI_03] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_03_BIT,
-	},
-	[IRQ_LPC32XX_GPI_04] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_04_BIT,
-	},
-	[IRQ_LPC32XX_GPI_05] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_05_BIT,
-	},
-	[IRQ_LPC32XX_GPI_06] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT,
-	},
-	[IRQ_LPC32XX_GPI_28] = {
-		.event_group = &lpc32xx_event_pin_regs,
-		.mask = LPC32XX_CLKPWR_EXTSRC_GPI_28_BIT,
-	},
-	[IRQ_LPC32XX_GPIO_00] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT,
-	},
-	[IRQ_LPC32XX_GPIO_01] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_GPIO_01_BIT,
-	},
-	[IRQ_LPC32XX_GPIO_02] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_GPIO_02_BIT,
-	},
-	[IRQ_LPC32XX_GPIO_03] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_GPIO_03_BIT,
-	},
-	[IRQ_LPC32XX_GPIO_04] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_GPIO_04_BIT,
-	},
-	[IRQ_LPC32XX_GPIO_05] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_GPIO_05_BIT,
-	},
-	[IRQ_LPC32XX_KEY] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_KEY_BIT,
-	},
-	[IRQ_LPC32XX_ETHERNET] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_MAC_BIT,
-	},
-	[IRQ_LPC32XX_USB_OTG_ATX] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_USBATXINT_BIT,
-	},
-	[IRQ_LPC32XX_USB_HOST] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_USB_BIT,
-	},
-	[IRQ_LPC32XX_RTC] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_RTC_BIT,
-	},
-	[IRQ_LPC32XX_MSTIMER] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_MSTIMER_BIT,
-	},
-	[IRQ_LPC32XX_TS_AUX] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_TS_AUX_BIT,
-	},
-	[IRQ_LPC32XX_TS_P] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_TS_P_BIT,
-	},
-	[IRQ_LPC32XX_TS_IRQ] = {
-		.event_group = &lpc32xx_event_int_regs,
-		.mask = LPC32XX_CLKPWR_INTSRC_ADC_BIT,
-	},
-};
-
-static void get_controller(unsigned int irq, unsigned int *base,
-	unsigned int *irqbit)
-{
-	if (irq < 32) {
-		*base = LPC32XX_MIC_BASE;
-		*irqbit = 1 << irq;
-	} else if (irq < 64) {
-		*base = LPC32XX_SIC1_BASE;
-		*irqbit = 1 << (irq - 32);
-	} else {
-		*base = LPC32XX_SIC2_BASE;
-		*irqbit = 1 << (irq - 64);
-	}
-}
-
-static void lpc32xx_mask_irq(struct irq_data *d)
-{
-	unsigned int reg, ctrl, mask;
-
-	get_controller(d->hwirq, &ctrl, &mask);
-
-	reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) & ~mask;
-	__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
-}
-
-static void lpc32xx_unmask_irq(struct irq_data *d)
-{
-	unsigned int reg, ctrl, mask;
-
-	get_controller(d->hwirq, &ctrl, &mask);
-
-	reg = __raw_readl(LPC32XX_INTC_MASK(ctrl)) | mask;
-	__raw_writel(reg, LPC32XX_INTC_MASK(ctrl));
-}
-
-static void lpc32xx_ack_irq(struct irq_data *d)
-{
-	unsigned int ctrl, mask;
-
-	get_controller(d->hwirq, &ctrl, &mask);
-
-	__raw_writel(mask, LPC32XX_INTC_RAW_STAT(ctrl));
-
-	/* Also need to clear pending wake event */
-	if (lpc32xx_events[d->hwirq].mask != 0)
-		__raw_writel(lpc32xx_events[d->hwirq].mask,
-			lpc32xx_events[d->hwirq].event_group->rawstat_reg);
-}
-
-static void __lpc32xx_set_irq_type(unsigned int irq, int use_high_level,
-	int use_edge)
-{
-	unsigned int reg, ctrl, mask;
-
-	get_controller(irq, &ctrl, &mask);
-
-	/* Activation level, high or low */
-	reg = __raw_readl(LPC32XX_INTC_POLAR(ctrl));
-	if (use_high_level)
-		reg |= mask;
-	else
-		reg &= ~mask;
-	__raw_writel(reg, LPC32XX_INTC_POLAR(ctrl));
-
-	/* Activation type, edge or level */
-	reg = __raw_readl(LPC32XX_INTC_ACT_TYPE(ctrl));
-	if (use_edge)
-		reg |= mask;
-	else
-		reg &= ~mask;
-	__raw_writel(reg, LPC32XX_INTC_ACT_TYPE(ctrl));
-
-	/* Use same polarity for the wake events */
-	if (lpc32xx_events[irq].mask != 0) {
-		reg = __raw_readl(lpc32xx_events[irq].event_group->edge_reg);
-
-		if (use_high_level)
-			reg |= lpc32xx_events[irq].mask;
-		else
-			reg &= ~lpc32xx_events[irq].mask;
-
-		__raw_writel(reg, lpc32xx_events[irq].event_group->edge_reg);
-	}
-}
-
-static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type)
-{
-	switch (type) {
-	case IRQ_TYPE_EDGE_RISING:
-		/* Rising edge sensitive */
-		__lpc32xx_set_irq_type(d->hwirq, 1, 1);
-		irq_set_handler_locked(d, handle_edge_irq);
-		break;
-
-	case IRQ_TYPE_EDGE_FALLING:
-		/* Falling edge sensitive */
-		__lpc32xx_set_irq_type(d->hwirq, 0, 1);
-		irq_set_handler_locked(d, handle_edge_irq);
-		break;
-
-	case IRQ_TYPE_LEVEL_LOW:
-		/* Low level sensitive */
-		__lpc32xx_set_irq_type(d->hwirq, 0, 0);
-		irq_set_handler_locked(d, handle_level_irq);
-		break;
-
-	case IRQ_TYPE_LEVEL_HIGH:
-		/* High level sensitive */
-		__lpc32xx_set_irq_type(d->hwirq, 1, 0);
-		irq_set_handler_locked(d, handle_level_irq);
-		break;
-
-	/* Other modes are not supported */
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state)
-{
-	unsigned long eventreg;
-
-	if (lpc32xx_events[d->hwirq].mask != 0) {
-		eventreg = __raw_readl(lpc32xx_events[d->hwirq].
-			event_group->enab_reg);
-
-		if (state)
-			eventreg |= lpc32xx_events[d->hwirq].mask;
-		else {
-			eventreg &= ~lpc32xx_events[d->hwirq].mask;
-
-			/*
-			 * When disabling the wakeup, clear the latched
-			 * event
-			 */
-			__raw_writel(lpc32xx_events[d->hwirq].mask,
-				lpc32xx_events[d->hwirq].
-				event_group->rawstat_reg);
-		}
-
-		__raw_writel(eventreg,
-			lpc32xx_events[d->hwirq].event_group->enab_reg);
-
-		return 0;
-	}
-
-	/* Clear event */
-	__raw_writel(lpc32xx_events[d->hwirq].mask,
-		lpc32xx_events[d->hwirq].event_group->rawstat_reg);
-
-	return -ENODEV;
-}
-
-static void __init lpc32xx_set_default_mappings(unsigned int apr,
-	unsigned int atr, unsigned int offset)
-{
-	unsigned int i;
-
-	/* Set activation levels for each interrupt */
-	i = 0;
-	while (i < 32) {
-		__lpc32xx_set_irq_type(offset + i, ((apr >> i) & 0x1),
-			((atr >> i) & 0x1));
-		i++;
-	}
-}
-
-static struct irq_chip lpc32xx_irq_chip = {
-	.name = "MIC",
-	.irq_ack = lpc32xx_ack_irq,
-	.irq_mask = lpc32xx_mask_irq,
-	.irq_unmask = lpc32xx_unmask_irq,
-	.irq_set_type = lpc32xx_set_irq_type,
-	.irq_set_wake = lpc32xx_irq_wake
-};
-
-static void lpc32xx_sic1_handler(struct irq_desc *desc)
-{
-	unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC1_BASE));
-
-	while (ints != 0) {
-		int irqno = fls(ints) - 1;
-
-		ints &= ~(1 << irqno);
-
-		generic_handle_irq(LPC32XX_SIC1_IRQ(irqno));
-	}
-}
-
-static void lpc32xx_sic2_handler(struct irq_desc *desc)
-{
-	unsigned long ints = __raw_readl(LPC32XX_INTC_STAT(LPC32XX_SIC2_BASE));
-
-	while (ints != 0) {
-		int irqno = fls(ints) - 1;
-
-		ints &= ~(1 << irqno);
-
-		generic_handle_irq(LPC32XX_SIC2_IRQ(irqno));
-	}
-}
-
-static int __init __lpc32xx_mic_of_init(struct device_node *node,
-					struct device_node *parent)
-{
-	lpc32xx_mic_np = node;
-
-	return 0;
-}
-
-static const struct of_device_id mic_of_match[] __initconst = {
-	{ .compatible = "nxp,lpc3220-mic", .data = __lpc32xx_mic_of_init },
-	{ }
-};
-
-void __init lpc32xx_init_irq(void)
-{
-	unsigned int i;
-
-	/* Setup MIC */
-	__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_MIC_BASE));
-	__raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_MIC_BASE));
-	__raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_MIC_BASE));
-
-	/* Setup SIC1 */
-	__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE));
-	__raw_writel(SIC1_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE));
-	__raw_writel(SIC1_ATR_DEFAULT,
-				LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE));
-
-	/* Setup SIC2 */
-	__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE));
-	__raw_writel(SIC2_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE));
-	__raw_writel(SIC2_ATR_DEFAULT,
-				LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE));
-
-	/* Configure supported IRQ's */
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_set_chip_and_handler(i, &lpc32xx_irq_chip,
-					 handle_level_irq);
-		irq_clear_status_flags(i, IRQ_NOREQUEST);
-	}
-
-	/* Set default mappings */
-	lpc32xx_set_default_mappings(MIC_APR_DEFAULT, MIC_ATR_DEFAULT, 0);
-	lpc32xx_set_default_mappings(SIC1_APR_DEFAULT, SIC1_ATR_DEFAULT, 32);
-	lpc32xx_set_default_mappings(SIC2_APR_DEFAULT, SIC2_ATR_DEFAULT, 64);
-
-	/* Initially disable all wake events */
-	__raw_writel(0, LPC32XX_CLKPWR_P01_ER);
-	__raw_writel(0, LPC32XX_CLKPWR_INT_ER);
-	__raw_writel(0, LPC32XX_CLKPWR_PIN_ER);
-
-	/*
-	 * Default wake activation polarities, all pin sources are low edge
-	 * triggered
-	 */
-	__raw_writel(LPC32XX_CLKPWR_INTSRC_TS_P_BIT |
-		LPC32XX_CLKPWR_INTSRC_MSTIMER_BIT |
-		LPC32XX_CLKPWR_INTSRC_RTC_BIT,
-		LPC32XX_CLKPWR_INT_AP);
-	__raw_writel(0, LPC32XX_CLKPWR_PIN_AP);
-
-	/* Clear latched wake event states */
-	__raw_writel(__raw_readl(LPC32XX_CLKPWR_PIN_RS),
-		LPC32XX_CLKPWR_PIN_RS);
-	__raw_writel(__raw_readl(LPC32XX_CLKPWR_INT_RS),
-		LPC32XX_CLKPWR_INT_RS);
-
-	of_irq_init(mic_of_match);
-
-	lpc32xx_mic_domain = irq_domain_add_legacy(lpc32xx_mic_np, NR_IRQS,
-						   0, 0, &irq_domain_simple_ops,
-						   NULL);
-	if (!lpc32xx_mic_domain)
-		panic("Unable to add MIC irq domain\n");
-
-	/* MIC SUBIRQx interrupts will route handling to the chain handlers */
-	irq_set_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler);
-	irq_set_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler);
-}
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 72918c4..f6ac027 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -97,10 +97,7 @@
 	gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
 
 	memset(&s, 0, sizeof(struct gpmc_settings));
-	if (gpmc_nand_data->of_node)
-		gpmc_read_settings_dt(gpmc_nand_data->of_node, &s);
-	else
-		gpmc_set_legacy(gpmc_nand_data, &s);
+	gpmc_set_legacy(gpmc_nand_data, &s);
 
 	s.device_nand = true;
 
@@ -121,8 +118,6 @@
 	if (err < 0)
 		goto out_free_cs;
 
-	gpmc_update_nand_reg(&gpmc_nand_data->reg, gpmc_nand_data->cs);
-
 	if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
 		pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
 		err = -EINVAL;
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 7ee4652..cd894d6 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -6,6 +6,7 @@
 
 config MACH_PXA27X_DT
 	bool "Support PXA27x platforms from device tree"
+	select PINCTRL
 	select POWER_SUPPLY
 	select PXA27x
 	select USE_OF
@@ -17,6 +18,7 @@
 config MACH_PXA3XX_DT
 	bool "Support PXA3xx platforms from device tree"
 	select CPU_PXA300
+	select PINCTRL
 	select POWER_SUPPLY
 	select PXA3xx
 	select USE_OF
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index e838b11..fa9d71d 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -128,7 +128,7 @@
 /* Some e-series hardware cannot control the 32K clock */
 static void __init __maybe_unused eseries_register_clks(void)
 {
-	clk_register_fixed_rate(NULL, "CLK_CK32K", NULL, CLK_IS_ROOT, 32768);
+	clk_register_fixed_rate(NULL, "CLK_CK32K", NULL, 0, 32768);
 }
 
 #ifdef CONFIG_MACH_E330
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index d9578bc..bd7cd8b 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -763,14 +763,49 @@
 	.pattern	= scan_ff_pattern
 };
 
-static struct nand_ecclayout akita_oobinfo = {
-	.oobfree	= { {0x08, 0x09} },
-	.eccbytes	= 24,
-	.eccpos		= {
-			0x05, 0x01, 0x02, 0x03, 0x06, 0x07, 0x15, 0x11,
-			0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
-			0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37,
-	},
+static int akita_ooblayout_ecc(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	if (section > 12)
+		return -ERANGE;
+
+	switch (section % 3) {
+	case 0:
+		oobregion->offset = 5;
+		oobregion->length = 1;
+		break;
+
+	case 1:
+		oobregion->offset = 1;
+		oobregion->length = 3;
+		break;
+
+	case 2:
+		oobregion->offset = 6;
+		oobregion->length = 2;
+		break;
+	}
+
+	oobregion->offset += (section / 3) * 0x10;
+
+	return 0;
+}
+
+static int akita_ooblayout_free(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 8;
+	oobregion->length = 9;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops akita_ooblayout_ops = {
+	.ecc = akita_ooblayout_ecc,
+	.free = akita_ooblayout_free,
 };
 
 static struct sharpsl_nand_platform_data spitz_nand_pdata = {
@@ -804,11 +839,11 @@
 	} else if (machine_is_akita()) {
 		spitz_nand_partitions[1].size = 58 * 1024 * 1024;
 		spitz_nand_bbt.len = 1;
-		spitz_nand_pdata.ecc_layout = &akita_oobinfo;
+		spitz_nand_pdata.ecc_layout = &akita_ooblayout_ops;
 	} else if (machine_is_borzoi()) {
 		spitz_nand_partitions[1].size = 32 * 1024 * 1024;
 		spitz_nand_bbt.len = 1;
-		spitz_nand_pdata.ecc_layout = &akita_oobinfo;
+		spitz_nand_pdata.ecc_layout = &akita_ooblayout_ops;
 	}
 
 	platform_device_register(&spitz_nand_device);
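The two mtd_ooblayout_ops callbacks above encode the same OOB geometry that the removed static akita_oobinfo table spelled out byte by byte. As a cross-check, the following is a minimal standalone C sketch (not kernel code; it only re-uses the arithmetic from the patch) that walks the same section-to-region mapping and prints the ECC byte positions; for sections 0..11 it reproduces the 24 entries of the old eccpos list.

/* Standalone sketch: reproduce the ECC byte positions described by
 * akita_ooblayout_ecc() above and compare them with the old static
 * eccpos table.  Only the arithmetic from the patch is used here.
 */
#include <stdio.h>

int main(void)
{
	int section, i;

	for (section = 0; section < 12; section++) {
		int offset, length;

		switch (section % 3) {
		case 0:		/* one ECC byte at 0x05 of each 16-byte block */
			offset = 5;
			length = 1;
			break;
		case 1:		/* three ECC bytes at 0x01..0x03 */
			offset = 1;
			length = 3;
			break;
		default:	/* two ECC bytes at 0x06..0x07 */
			offset = 6;
			length = 2;
			break;
		}
		offset += (section / 3) * 0x10;

		for (i = 0; i < length; i++)
			printf("0x%02x ", offset + i);
	}
	printf("\n");	/* prints 0x05 0x01 0x02 0x03 0x06 0x07 0x15 ... 0x37 */
	return 0;
}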
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 774c982..25a139b 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -496,6 +496,12 @@
 		return PTR_ERR(lcd_pwm);
 	}
 
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to
+	 * the atomic PWM API.
+	 */
+	pwm_apply_args(lcd_pwm);
+
 	rx1950_lcd_power(1);
 	rx1950_bl_power(1);
 
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 5766ce2..8409cab 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -547,7 +547,7 @@
 
 	init.name = dev_name(cpu_dev);
 	init.ops = &clk_spc_ops;
-	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+	init.flags = CLK_GET_RATE_NOCACHE;
 	init.num_parents = 0;
 
 	return devm_clk_register(cpu_dev, &spc->hw);
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 1160434..59a8fa7 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -74,5 +74,5 @@
 	@mkdir -p $(MODLIB)/vdso
 
 PHONY += vdso_install
-vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso FORCE
+vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso
 	$(call cmd,vdso_install)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76747d9..5a0a691 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -113,6 +113,18 @@
 config MMU
 	def_bool y
 
+config ARM64_PAGE_SHIFT
+	int
+	default 16 if ARM64_64K_PAGES
+	default 14 if ARM64_16K_PAGES
+	default 12
+
+config ARM64_CONT_SHIFT
+	int
+	default 5 if ARM64_64K_PAGES
+	default 7 if ARM64_16K_PAGES
+	default 4
+
 config ARCH_MMAP_RND_BITS_MIN
        default 14 if ARM64_64K_PAGES
        default 16 if ARM64_16K_PAGES
@@ -426,6 +438,15 @@
 
 	  If unsure, say Y.
 
+config CAVIUM_ERRATUM_23144
+	bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+	depends on NUMA
+	default y
+	help
+	  The ITS SYNC command can hang for cross-node I/O and collection/CPU mappings.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_23154
 	bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
 	default y
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 710fde4..0cc758c 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -12,7 +12,8 @@
 	  who are working in architecture specific areas of the kernel.
 	  It is probably not a good idea to enable this feature in a production
 	  kernel.
-	  If in doubt, say "N"
+
+	  If in doubt, say N.
 
 config PID_IN_CONTEXTIDR
 	bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@
 	  value.
 
 config DEBUG_SET_MODULE_RONX
-        bool "Set loadable kernel module data as NX and text as RO"
-        depends on MODULES
-        help
-          This option helps catch unintended modifications to loadable
-          kernel module's text and read-only data. It also prevents execution
-          of module data. Such protection may interfere with run-time code
-          patching and dynamic kernel tracing - and they might also protect
-          against certain classes of kernel exploits.
-          If in doubt, say "N".
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	default y
+	help
+	  If this is set, kernel module text and rodata will be made read-only.
+	  This is to help catch accidental or malicious attempts to change the
+	  kernel's executable code.
+
+	  If in doubt, say Y.
 
 config DEBUG_RODATA
 	bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@
 	  is to help catch accidental or malicious attempts to change the
 	  kernel's executable code.
 
-	  If in doubt, say Y
+	  If in doubt, say Y.
 
 config DEBUG_ALIGN_RODATA
 	depends on DEBUG_RODATA
@@ -69,7 +70,7 @@
 	  alignment and potentially wasted space. Turn on this option if
 	  performance is more important than memory pressure.
 
-	  If in doubt, say N
+	  If in doubt, say N.
 
 source "drivers/hwtracing/coresight/Kconfig"
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 354d754..7085e32 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -60,7 +60,9 @@
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
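For reference, the new awk expression above draws a random page-aligned offset below 2 MiB: it scales a random fraction by the number of pages in a 2 MiB window, truncates to an integer page index, and multiplies back by the page size. A rough user-space C equivalent of that arithmetic (a sketch only, assuming 64K pages for CONFIG_ARM64_PAGE_SHIFT=16) might look like:

/* Sketch of the TEXT_OFFSET randomization arithmetic; not part of the
 * kernel build.  page_shift stands in for CONFIG_ARM64_PAGE_SHIFT.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	const unsigned int page_shift = 16;		/* assume 64K pages */
	const unsigned long page_size = 1UL << page_shift;
	const unsigned long window = 2UL * 1024 * 1024;	/* 2 MiB window */
	unsigned long offset;

	srand((unsigned int)time(NULL));
	/* pick a random page index in [0, window / page_size) and scale up */
	offset = (unsigned long)(rand() % (window / page_size)) * page_size;
	printf("TEXT_OFFSET := 0x%06lx\n", offset);
	return 0;
}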
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index 7cb2d72..3285a92 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -10,6 +10,7 @@
 
 #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/r8a7795-sysc.h>
 
 / {
 	compatible = "renesas,r8a7795";
@@ -39,6 +40,7 @@
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0x0>;
 			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU0>;
 			next-level-cache = <&L2_CA57>;
 			enable-method = "psci";
 		};
@@ -47,6 +49,7 @@
 			compatible = "arm,cortex-a57","arm,armv8";
 			reg = <0x1>;
 			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU1>;
 			next-level-cache = <&L2_CA57>;
 			enable-method = "psci";
 		};
@@ -54,6 +57,7 @@
 			compatible = "arm,cortex-a57","arm,armv8";
 			reg = <0x2>;
 			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU2>;
 			next-level-cache = <&L2_CA57>;
 			enable-method = "psci";
 		};
@@ -61,6 +65,7 @@
 			compatible = "arm,cortex-a57","arm,armv8";
 			reg = <0x3>;
 			device_type = "cpu";
+			power-domains = <&sysc R8A7795_PD_CA57_CPU3>;
 			next-level-cache = <&L2_CA57>;
 			enable-method = "psci";
 		};
@@ -68,12 +73,14 @@
 
 	L2_CA57: cache-controller@0 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7795_PD_CA57_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
 
 	L2_CA53: cache-controller@1 {
 		compatible = "cache";
+		power-domains = <&sysc R8A7795_PD_CA53_SCU>;
 		cache-unified;
 		cache-level = <2>;
 	};
@@ -168,7 +175,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 912>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio1: gpio@e6051000 {
@@ -182,7 +189,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 911>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio2: gpio@e6052000 {
@@ -196,7 +203,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 910>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio3: gpio@e6053000 {
@@ -210,7 +217,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 909>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio4: gpio@e6054000 {
@@ -224,7 +231,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 908>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio5: gpio@e6055000 {
@@ -238,7 +245,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 907>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio6: gpio@e6055400 {
@@ -252,7 +259,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 906>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		gpio7: gpio@e6055800 {
@@ -266,7 +273,7 @@
 			#interrupt-cells = <2>;
 			interrupt-controller;
 			clocks = <&cpg CPG_MOD 905>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		pmu_a57 {
@@ -302,6 +309,12 @@
 			#power-domain-cells = <0>;
 		};
 
+		sysc: system-controller@e6180000 {
+			compatible = "renesas,r8a7795-sysc";
+			reg = <0 0xe6180000 0 0x0400>;
+			#power-domain-cells = <1>;
+		};
+
 		audma0: dma-controller@ec700000 {
 			compatible = "renesas,rcar-dmac";
 			reg = <0 0xec700000 0 0x10000>;
@@ -329,7 +342,7 @@
 					"ch12", "ch13", "ch14", "ch15";
 			clocks = <&cpg CPG_MOD 502>;
 			clock-names = "fck";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <16>;
 		};
@@ -361,7 +374,7 @@
 					"ch12", "ch13", "ch14", "ch15";
 			clocks = <&cpg CPG_MOD 501>;
 			clock-names = "fck";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <16>;
 		};
@@ -383,7 +396,7 @@
 				      GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
 				      GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 407>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 		};
 
 		dmac0: dma-controller@e6700000 {
@@ -414,7 +427,7 @@
 					"ch12", "ch13", "ch14", "ch15";
 			clocks = <&cpg CPG_MOD 219>;
 			clock-names = "fck";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <16>;
 		};
@@ -447,7 +460,7 @@
 					"ch12", "ch13", "ch14", "ch15";
 			clocks = <&cpg CPG_MOD 218>;
 			clock-names = "fck";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <16>;
 		};
@@ -480,7 +493,7 @@
 					"ch12", "ch13", "ch14", "ch15";
 			clocks = <&cpg CPG_MOD 217>;
 			clock-names = "fck";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <16>;
 		};
@@ -522,7 +535,7 @@
 					  "ch20", "ch21", "ch22", "ch23",
 					  "ch24";
 			clocks = <&cpg CPG_MOD 812>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			phy-mode = "rgmii-id";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -539,7 +552,7 @@
 			clock-names = "clkp1", "clkp2", "can_clk";
 			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
 			assigned-clock-rates = <40000000>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -554,7 +567,7 @@
 			clock-names = "clkp1", "clkp2", "can_clk";
 			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
 			assigned-clock-rates = <40000000>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -570,7 +583,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x31>, <&dmac1 0x30>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -586,7 +599,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x33>, <&dmac1 0x32>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -602,7 +615,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x35>, <&dmac1 0x34>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -618,7 +631,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac0 0x37>, <&dmac0 0x36>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -634,7 +647,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac0 0x39>, <&dmac0 0x38>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -649,7 +662,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x51>, <&dmac1 0x50>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -664,7 +677,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x53>, <&dmac1 0x52>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -679,7 +692,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x13>, <&dmac1 0x12>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -694,7 +707,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac0 0x57>, <&dmac0 0x56>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -709,7 +722,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac0 0x59>, <&dmac0 0x58>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -724,7 +737,7 @@
 			clock-names = "fck", "brg_int", "scif_clk";
 			dmas = <&dmac1 0x5b>, <&dmac1 0x5a>;
 			dma-names = "tx", "rx";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -735,7 +748,7 @@
 			reg = <0 0xe6500000 0 0x40>;
 			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 931>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <110>;
 			status = "disabled";
 		};
@@ -747,7 +760,7 @@
 			reg = <0 0xe6508000 0 0x40>;
 			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 930>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <6>;
 			status = "disabled";
 		};
@@ -759,7 +772,7 @@
 			reg = <0 0xe6510000 0 0x40>;
 			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 929>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <6>;
 			status = "disabled";
 		};
@@ -771,7 +784,7 @@
 			reg = <0 0xe66d0000 0 0x40>;
 			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 928>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <110>;
 			status = "disabled";
 		};
@@ -783,7 +796,7 @@
 			reg = <0 0xe66d8000 0 0x40>;
 			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 927>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <110>;
 			status = "disabled";
 		};
@@ -795,7 +808,7 @@
 			reg = <0 0xe66e0000 0 0x40>;
 			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 919>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <110>;
 			status = "disabled";
 		};
@@ -807,7 +820,7 @@
 			reg = <0 0xe66e8000 0 0x40>;
 			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 918>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			i2c-scl-internal-delay-ns = <6>;
 			status = "disabled";
 		};
@@ -857,7 +870,7 @@
 				      "src.1", "src.0",
 				      "dvc.0", "dvc.1",
 				      "clk_a", "clk_b", "clk_c", "clk_i";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 
 			rcar_sound,dvc {
@@ -991,7 +1004,7 @@
 			reg = <0 0xee000000 0 0xc00>;
 			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 328>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1000,7 +1013,7 @@
 			reg = <0 0xee040000 0 0xc00>;
 			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 327>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1012,7 +1025,7 @@
 				      GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 330>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <2>;
 		};
@@ -1025,7 +1038,7 @@
 				      GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "ch0", "ch1";
 			clocks = <&cpg CPG_MOD 331>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#dma-cells = <1>;
 			dma-channels = <2>;
 		};
@@ -1035,7 +1048,7 @@
 			reg = <0 0xee100000 0 0x2000>;
 			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 314>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1044,7 +1057,7 @@
 			reg = <0 0xee120000 0 0x2000>;
 			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 313>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1053,7 +1066,7 @@
 			reg = <0 0xee140000 0 0x2000>;
 			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 312>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			cap-mmc-highspeed;
 			status = "disabled";
 		};
@@ -1063,7 +1076,7 @@
 			reg = <0 0xee160000 0 0x2000>;
 			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 311>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			cap-mmc-highspeed;
 			status = "disabled";
 		};
@@ -1073,7 +1086,7 @@
 			reg = <0 0xee080200 0 0x700>;
 			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 703>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#phy-cells = <0>;
 			status = "disabled";
 		};
@@ -1082,7 +1095,7 @@
 			compatible = "renesas,usb2-phy-r8a7795";
 			reg = <0 0xee0a0200 0 0x700>;
 			clocks = <&cpg CPG_MOD 702>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#phy-cells = <0>;
 			status = "disabled";
 		};
@@ -1091,7 +1104,7 @@
 			compatible = "renesas,usb2-phy-r8a7795";
 			reg = <0 0xee0c0200 0 0x700>;
 			clocks = <&cpg CPG_MOD 701>;
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			#phy-cells = <0>;
 			status = "disabled";
 		};
@@ -1103,7 +1116,7 @@
 			clocks = <&cpg CPG_MOD 703>;
 			phys = <&usb2_phy0>;
 			phy-names = "usb";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1114,7 +1127,7 @@
 			clocks = <&cpg CPG_MOD 702>;
 			phys = <&usb2_phy1>;
 			phy-names = "usb";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1125,7 +1138,7 @@
 			clocks = <&cpg CPG_MOD 701>;
 			phys = <&usb2_phy2>;
 			phy-names = "usb";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1136,7 +1149,7 @@
 			clocks = <&cpg CPG_MOD 703>;
 			phys = <&usb2_phy0>;
 			phy-names = "usb";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1147,7 +1160,7 @@
 			clocks = <&cpg CPG_MOD 702>;
 			phys = <&usb2_phy1>;
 			phy-names = "usb";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1158,7 +1171,7 @@
 			clocks = <&cpg CPG_MOD 701>;
 			phys = <&usb2_phy2>;
 			phy-names = "usb";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 		pciec0: pcie@fe000000 {
@@ -1182,7 +1195,7 @@
 			interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
 			clock-names = "pcie", "pcie_bus";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -1207,7 +1220,7 @@
 			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
 			clock-names = "pcie", "pcie_bus";
-			power-domains = <&cpg>;
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8917150..fd2d74d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -200,6 +200,8 @@
 CONFIG_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_RENESAS_WDT=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_HI655X_PMIC=y
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7a09c48..579b6e6 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -160,14 +160,14 @@
 #define STACK_RND_MASK			(0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM		("v8b")
 #else
 #define COMPAT_ELF_PLATFORM		("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e63d23b..49095fc 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -43,6 +43,8 @@
 
 #define KVM_VCPU_MAX_FEATURES 4
 
+#define KVM_REQ_VCPU_EXIT	8
+
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -327,6 +329,10 @@
 
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index fe612a9..75ea420 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -30,6 +30,9 @@
 	bool sign_extend;
 };
 
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 72a3025..31b7322 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -55,8 +55,9 @@
 #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
  *		 (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 17b45f7..8472c6d 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -23,16 +23,8 @@
 
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT		16
-#define CONT_SHIFT		5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT		14
-#define CONT_SHIFT		7
-#else
-#define PAGE_SHIFT		12
-#define CONT_SHIFT		4
-#endif
+#define PAGE_SHIFT		CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT		CONFIG_ARM64_CONT_SHIFT
 #define PAGE_SIZE		(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74..9e397a5 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -81,19 +81,6 @@
 #define segment_eq(a, b)	((a) == (b))
 
 /*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr)							\
-({									\
-	unsigned long flag;						\
-	asm("cmp %1, %0; cset %0, lo"					\
-		: "=&r" (flag)						\
-		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
-		: "cc");						\
-	flag;								\
-})
-
-/*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.
  *
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 41e58fe..e78ac26 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		390
+#define __NR_compat_syscalls		394
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5b925b7..b7e8ef1 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -801,6 +801,14 @@
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index 1caadc2..043d17a 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -13,4 +13,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+
+#define __ARCH_WANT_RENAMEAT
+
 #include <asm-generic/unistd.h>
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3808470..c173d32 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -22,6 +22,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>
@@ -104,6 +106,7 @@
 static int c_show(struct seq_file *m, void *v)
 {
 	int i, j;
+	bool compat = personality(current->personality) == PER_LINUX32;
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@
 		 * "processor".  Give glibc what it expects.
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
+		if (compat)
+			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
 			   loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@
 		 * software which does already (at least for 32-bit).
 		 */
 		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
+		if (compat) {
 #ifdef CONFIG_COMPAT
 			for (j = 0; compat_hwcap_str[j]; j++)
 				if (compat_elf_hwcap & (1 << j))
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 32c3c6e..713ca82 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -31,7 +31,7 @@
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct frame_tail buftail;
 	unsigned long err;
@@ -76,7 +76,7 @@
 
 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-		      struct perf_callchain_entry *entry)
+		      struct perf_callchain_entry_ctx *entry)
 {
 	struct compat_frame_tail buftail;
 	unsigned long err;
@@ -106,7 +106,7 @@
 }
 #endif /* CONFIG_COMPAT */
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@
 
 		tail = (struct frame_tail __user *)regs->regs[29];
 
-		while (entry->nr < sysctl_perf_event_max_stack &&
+		while (entry->nr < entry->max_stack &&
 		       tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {
@@ -132,7 +132,7 @@
 
 		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-		while ((entry->nr < sysctl_perf_event_max_stack) &&
+		while ((entry->nr < entry->max_stack) &&
 			tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
 #endif
@@ -146,12 +146,12 @@
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, frame->pc);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	struct stackframe frame;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c539208..f7cf463 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -477,8 +477,9 @@
 	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
-	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-		handler[reason], esr, esr_get_class_string(esr));
+	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+		handler[reason], smp_processor_id(), esr,
+		esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 64fc030..9fefb00 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -95,7 +95,8 @@
 	};
 	void *ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
@@ -163,7 +164,8 @@
 	/* Be sure to map the data page */
 	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		ret = ERR_PTR(vdso_base);
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index aa2e34e..c4f26ef 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -54,6 +54,13 @@
 	  Adds support for a virtual Performance Monitoring Unit (PMU) in
 	  virtual machines.
 
+config KVM_NEW_VGIC
+	bool "New VGIC implementation"
+	depends on KVM
+	default y
+	---help---
+	  Uses the new VGIC implementation.
+
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 122cff4..a7a958c 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,10 +20,22 @@
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
 
+ifeq ($(CONFIG_KVM_NEW_VGIC),y)
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
+else
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+endif
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
 kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index fff7cd4..5f8f80b 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -169,7 +169,8 @@
 	 * Make sure stores to the GIC via the memory mapped interface
 	 * are now visible to the system register interface.
 	 */
-	dsb(st);
+	if (!cpu_if->vgic_sre)
+		dsb(st);
 
 	cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
 
@@ -190,12 +191,11 @@
 			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 				continue;
 
-			if (cpu_if->vgic_elrsr & (1 << i)) {
+			if (cpu_if->vgic_elrsr & (1 << i))
 				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-				continue;
-			}
+			else
+				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 			__gic_v3_set_lr(0, i);
 		}
 
@@ -236,8 +236,12 @@
 
 	val = read_gicreg(ICC_SRE_EL2);
 	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-	isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-	write_gicreg(1, ICC_SRE_EL1);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+		isb();
+		write_gicreg(1, ICC_SRE_EL1);
+	}
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC.
 	 */
-	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-	isb();
+	if (!cpu_if->vgic_sre) {
+		write_gicreg(0, ICC_SRE_EL1);
+		isb();
+	}
 
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@
 	 * (re)distributors. This ensure the guest will read the
 	 * correct values from the memory-mapped interface.
 	 */
-	isb();
-	dsb(sy);
+	if (!cpu_if->vgic_sre) {
+		isb();
+		dsb(sy);
+	}
 	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
 	 * SRE isn't enabled for GICv3 emulation.
 	 */
-	if (!cpu_if->vgic_sre) {
-		write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-			     ICC_SRE_EL2);
-	}
+	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+		     ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 4d1ac81..e9e0e6d 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -162,7 +162,7 @@
 		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
 
 	if (!is_iabt)
-		esr |= ESR_ELx_EC_DABT_LOW;
+		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
 
 	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7bbe3ff..a57d650 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -134,6 +134,17 @@
 	return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+	return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@
 	  access_gic_sgi },
 	/* ICC_SRE_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_gic_sre },
 
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 8404190..ccfde23 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -150,6 +150,7 @@
 
 struct pg_level {
 	const struct prot_bits *bits;
+	const char *name;
 	size_t num;
 	u64 mask;
 };
@@ -157,15 +158,19 @@
 static struct pg_level pg_level[] = {
 	{
 	}, { /* pgd */
+		.name	= "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pud */
+		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pmd */
+		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	}, { /* pte */
+		.name	= "PTE",
 		.bits	= pte_bits,
 		.num	= ARRAY_SIZE(pte_bits),
 	},
@@ -214,7 +219,8 @@
 				delta >>= 10;
 				unit++;
 			}
-			seq_printf(st->seq, "%9lu%c", delta, *unit);
+			seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+				   pg_level[st->level].name);
 			if (pg_level[st->level].bits)
 				dump_prot(st, pg_level[st->level].bits,
 					  pg_level[st->level].num);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index aa8aee7..2e49bd2 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,6 +306,10 @@
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
+		hugetlb_add_hstate(CONT_PTE_SHIFT);
+	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
+		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+		hugetlb_add_hstate(CONT_PMD_SHIFT);
+	return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h
index e7d09a6..12d73d9 100644
--- a/arch/c6x/include/uapi/asm/unistd.h
+++ b/arch/c6x/include/uapi/asm/unistd.h
@@ -14,6 +14,7 @@
  *   more details.
  */
 
+#define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_SYS_CLONE
 
 /* Use the standard ABI for syscalls. */
diff --git a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
index 5aa3f51..3f646c7 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
@@ -157,6 +157,7 @@
 	/* 20 us command delay time */
 	this->chip_delay = 20;
 	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
 
 	/* Enable the following for a flash based bad block table */
 	/* this->bbt_options = NAND_BBT_USE_FLASH; */
diff --git a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
index a7c17b0..a745405 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
@@ -148,6 +148,7 @@
 	/* 20 us command delay time */
 	this->chip_delay = 20;
 	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
 
 	/* Enable the following for a flash based bad block table */
 	/* this->bbt_options = NAND_BBT_USE_FLASH; */
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index aa232de..3ae8525 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -20,6 +20,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_HASH
 	select CPU_NO_EFFICIENT_FFS
 
 config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
index 7643633..613bfe6 100644
--- a/arch/h8300/boot/compressed/Makefile
+++ b/arch/h8300/boot/compressed/Makefile
@@ -23,7 +23,6 @@
 
 $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
 	$(call if_changed,ld)
-	@:
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/h8300/include/asm/hash.h b/arch/h8300/include/asm/hash.h
new file mode 100644
index 0000000..04cfbd2
--- /dev/null
+++ b/arch/h8300/include/asm/hash.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * The later H8SX models have a 32x32-bit multiply, but the H8/300H
+ * and H8S have only 16x16->32.  Since it's tolerably compact, this is
+ * basically an inlined version of the __mulsi3 code.  Since the inputs
+ * are not expected to be small, it's also simplified by skipping the
+ * early-out checks.
+ *
+ * (Since neither CPU has any multi-bit shift instructions, a
+ * shift-and-add version is a non-starter.)
+ *
+ * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
+ * since that is heavily dependent on rotates.  Which, as mentioned, suck
+ * horribly on H8.
+ */
+
+#if defined(CONFIG_CPU_H300H) || defined(CONFIG_CPU_H8S)
+
+#define HAVE_ARCH__HASH_32 1
+
+/*
+ * Multiply by k = 0x61C88647.  Fitting this into three registers requires
+ * one extra instruction, but reducing register pressure will probably
+ * make that back and then some.
+ *
+ * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
+ * low half.  So if %1 is er4, then %e1 is e4 and %f1 is r4.
+ *
+ * This has been designed to modify x in place, since that's the most
+ * common usage, but preserve k, since hash_64() makes two calls in
+ * quick succession.
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+	u32 temp;
+
+	asm(   "mov.w	%e1,%f0"
+	"\n	mulxu.w	%f2,%0"		/* klow * xhigh */
+	"\n	mov.w	%f0,%e1"	/* The extra instruction */
+	"\n	mov.w	%f1,%f0"
+	"\n	mulxu.w	%e2,%0"		/* khigh * xlow */
+	"\n	add.w	%e1,%f0"
+	"\n	mulxu.w	%f2,%1"		/* klow * xlow */
+	"\n	add.w	%f0,%e1"
+	: "=&r" (temp), "=r" (x)
+	: "%r" (GOLDEN_RATIO_32), "1" (x));
+	return x;
+}
+
+#endif
+#endif /* _ASM_HASH_H */
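
As an aside (not part of the patch): the inline asm above relies on the fact that a 32-bit product can be assembled from 16x16->32 partial products, with the high*high term dropped because it only contributes to bits above 31. A few lines of host-side C are enough to sanity-check that identity; GOLDEN_RATIO_32 is taken to be 0x61C88647 as quoted in the comment, and the check models the identity, not the asm itself.

/* Editorial sketch: verify the 16x16->32 decomposition used above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

static uint32_t hash32_via_16x16(uint32_t x)
{
	uint32_t klo = GOLDEN_RATIO_32 & 0xffff, khi = GOLDEN_RATIO_32 >> 16;
	uint32_t xlo = x & 0xffff, xhi = x >> 16;

	/* khi * xhi would only affect bits >= 32 and is dropped. */
	return ((klo * xhi + khi * xlo) << 16) + klo * xlo;
}

int main(void)
{
	uint32_t x = 1;

	for (int i = 0; i < 1000000; i++, x = x * 2654435761u + 1)
		assert(hash32_via_16x16(x) == x * GOLDEN_RATIO_32);
	puts("16x16 decomposition matches");
	return 0;
}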
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
index 7a2eb69..7dd20ef 100644
--- a/arch/h8300/include/uapi/asm/unistd.h
+++ b/arch/h8300/include/uapi/asm/unistd.h
@@ -1,3 +1,5 @@
 #define __ARCH_NOMMU
 
+#define __ARCH_WANT_RENAMEAT
+
 #include <asm-generic/unistd.h>
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index ffee405d..2151760 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -27,6 +27,7 @@
  */
 
 #define sys_mmap2 sys_mmap_pgoff
+#define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_SYS_EXECVE
 #define __ARCH_WANT_SYS_CLONE
 #define __ARCH_WANT_SYS_VFORK
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index 0bf5a87..3ea96841 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -65,7 +65,8 @@
 	unsigned long vdso_base;
 	struct mm_struct *mm = current->mm;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/* Try to get it loaded right near ld.so/glibc. */
 	vdso_base = STACK_TOP;
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 970d0bd..c100d78 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -95,8 +95,8 @@
   echo '* unwcheck	- Check vmlinux for invalid unwind info'
 endef
 
-archprepare: make_nr_irqs_h FORCE
-PHONY += make_nr_irqs_h FORCE
+archprepare: make_nr_irqs_h
+PHONY += make_nr_irqs_h
 
-make_nr_irqs_h: FORCE
+make_nr_irqs_h:
 	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 01729c2..0606a72 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -19,7 +19,6 @@
 
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(obj)/piggy.o FORCE
 	$(call if_changed,ld)
-	@:
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 62d6961..564052e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -164,6 +164,7 @@
 	spin_unlock(&flushcache_lock);
 	preempt_enable();
 }
+EXPORT_SYMBOL(smp_flush_cache_all);
 
 void smp_flush_cache_all_interrupt(void)
 {
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 8ace920..967260f 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -41,6 +41,7 @@
 	select CPU_HAS_NO_UNALIGNED
 	select GENERIC_CSUM
 	select CPU_NO_EFFICIENT_FFS
+	select HAVE_ARCH_HASH
 	help
 	  The Freescale (was Motorola) 68000 CPU is the first generation of
 	  the well known M68K family of processors. The CPU core as well as
diff --git a/arch/m68k/include/asm/hash.h b/arch/m68k/include/asm/hash.h
new file mode 100644
index 0000000..6407af8
--- /dev/null
+++ b/arch/m68k/include/asm/hash.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * If CONFIG_M68000=y (original mc68000/010), this file is #included
+ * to work around the lack of a MULU.L instruction.
+ */
+
+#define HAVE_ARCH__HASH_32 1
+/*
+ * While it would be legal to substitute a different hash operation
+ * entirely, let's keep it simple and just use an optimized multiply
+ * by GOLDEN_RATIO_32 = 0x61C88647.
+ *
+ * The best way to do that appears to be to multiply by 0x8647 with
+ * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
+ *
+ * Because the 68000 has multi-cycle shifts, this addition chain is
+ * chosen to minimise the shift distances.
+ *
+ * Despite every attempt to spoon-feed it simple operations, GCC
+ * 6.1.1 doggedly insists on doing annoying things like converting
+ * "lsl.l #2,<reg>" (12 cycles) to two adds (8+8 cycles).
+ *
+ * It also likes to notice two shifts in a row, like "a = x << 2" and
+ * "a <<= 7", and convert that to "a = x << 9".  But shifts longer
+ * than 8 bits are extra-slow on m68k, so that's a lose.
+ *
+ * Since the 68000 is a very simple in-order processor with no
+ * instruction scheduling effects on execution time, we can safely
+ * take it out of GCC's hands and write one big asm() block.
+ *
+ * Without calling overhead, this operation is 30 bytes (14 instructions
+ * plus one immediate constant) and 166 cycles.
+ *
+ * (Because %2 is fetched twice, it can't be postincrement, and thus it
+ * can't be a fully general "g" or "m".  Register is preferred, but
+ * offsettable memory or immediate will work.)
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+	u32 a, b;
+
+	asm(   "move.l %2,%0"	/* a = x * 0x0001 */
+	"\n	lsl.l #2,%0"	/* a = x * 0x0004 */
+	"\n	move.l %0,%1"
+	"\n	lsl.l #7,%0"	/* a = x * 0x0200 */
+	"\n	add.l %2,%0"	/* a = x * 0x0201 */
+	"\n	add.l %0,%1"	/* b = x * 0x0205 */
+	"\n	add.l %0,%0"	/* a = x * 0x0402 */
+	"\n	add.l %0,%1"	/* b = x * 0x0607 */
+	"\n	lsl.l #5,%0"	/* a = x * 0x8040 */
+	: "=&d,d" (a), "=&r,r" (b)
+	: "r,roi?" (x));	/* a+b = x*0x8647 */
+
+	return ((u16)(x*0x61c8) << 16) + a + b;
+}
+
+#endif	/* _ASM_HASH_H */
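
As an aside (not part of the patch): the commented register values above can be replayed in plain C to confirm that a + b really is x * 0x8647, and that adding the mulu.w result for the 0x61C8 high half gives a full multiply by GOLDEN_RATIO_32 modulo 2^32. This is a host-side model of the sequence, not the asm itself, and assumes GOLDEN_RATIO_32 = 0x61C88647 as stated in the comment.

/* Editorial sketch: model of the 680x0 addition chain above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

static uint32_t hash32_m68k_model(uint32_t x)
{
	uint32_t a, b;

	a = x << 2;		/* a = x * 0x0004 */
	b = a;			/* b = x * 0x0004 */
	a <<= 7;		/* a = x * 0x0200 */
	a += x;			/* a = x * 0x0201 */
	b += a;			/* b = x * 0x0205 */
	a += a;			/* a = x * 0x0402 */
	b += a;			/* b = x * 0x0607 */
	a <<= 5;		/* a = x * 0x8040, so a + b = x * 0x8647 */

	return ((uint32_t)(uint16_t)(x * 0x61c8) << 16) + a + b;
}

int main(void)
{
	uint32_t x = 0xdeadbeef;

	for (int i = 0; i < 1000000; i++, x = x * 69069u + 12345u)
		assert(hash32_m68k_model(x) == x * GOLDEN_RATIO_32);
	puts("m68k addition chain matches");
	return 0;
}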
diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h
index b80b8e8..459b6ec 100644
--- a/arch/metag/include/uapi/asm/unistd.h
+++ b/arch/metag/include/uapi/asm/unistd.h
@@ -7,6 +7,8 @@
  * (at your option) any later version.
  */
 
+#define __ARCH_WANT_RENAMEAT
+
 /* Use the standard ABI for syscalls. */
 #include <asm-generic/unistd.h>
 
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 252abc1..3e8e048 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -29,7 +29,7 @@
 
 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct metag_frame frame;
 	unsigned long calladdr;
@@ -56,7 +56,7 @@
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp = regs->ctx.AX[0].U0;
 	struct metag_frame __user *frame;
@@ -65,7 +65,7 @@
 
 	--frame;
 
-	while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+	while ((entry->nr < entry->max_stack) && frame)
 		frame = user_backtrace(frame, entry);
 }
 
@@ -78,13 +78,13 @@
 callchain_trace(struct stackframe *fr,
 		void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, fr->pc);
 	return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f17c3a4..636e072 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -16,6 +16,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_ARCH_HASH
 	select HAVE_ARCH_KGDB
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
diff --git a/arch/microblaze/include/asm/hash.h b/arch/microblaze/include/asm/hash.h
new file mode 100644
index 0000000..753513a
--- /dev/null
+++ b/arch/microblaze/include/asm/hash.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * Fortunately, most people who want to run Linux on Microblaze enable
+ * both multiplier and barrel shifter, but omitting them is technically
+ * a supported configuration.
+ *
+ * With just a barrel shifter, we can implement an efficient constant
+ * multiply using shifts and adds.  GCC can find a 9-step solution, but
+ * this 6-step solution was found by Yevgen Voronenko's implementation
+ * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
+ *
+ * That software is really not designed for a single multiplier this large,
+ * but if you run it enough times with different seeds, it'll find several
+ * 6-shift, 6-add sequences for computing x * 0x61C88647.  They are all
+ *	c = (x << 19) + x;
+ *	a = (x <<  9) + c;
+ *	b = (x << 23) + a;
+ *	return (a<<11) + (b<<6) + (c<<3) - b;
+ * with variations on the order of the final add.
+ *
+ * Without even a shifter, it's hopeless; any hash function will suck.
+ */
+
+#if CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL == 0
+
+#define HAVE_ARCH__HASH_32 1
+
+/* Multiply by GOLDEN_RATIO_32 = 0x61C88647 */
+static inline u32 __attribute_const__ __hash_32(u32 a)
+{
+#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL
+	unsigned int b, c;
+
+	/* Phase 1: Compute three intermediate values */
+	b =  a << 23;
+	c = (a << 19) + a;
+	a = (a <<  9) + c;
+	b += a;
+
+	/* Phase 2: Compute (a << 11) + (b << 6) + (c << 3) - b */
+	a <<= 5;
+	a += b;		/* (a << 5) + b */
+	a <<= 3;
+	a += c;		/* (a << 8) + (b << 3) + c */
+	a <<= 3;
+	return a - b;	/* (a << 11) + (b << 6) + (c << 3) - b */
+#else
+	/*
+	 * "This is really going to hurt."
+	 *
+	 * Without a barrel shifter, left shifts are implemented as
+	 * repeated additions, and the best we can do is an optimal
+	 * addition-subtraction chain.  This one is not known to be
+	 * optimal, but at 37 steps, it's decent for a 31-bit multiplier.
+	 *
+	 * Question: given its size (37*4 = 148 bytes per instance),
+	 * and slowness, is this worth having inline?
+	 */
+	unsigned int b, c, d;
+
+	b = a << 4;	/* 4    */
+	c = b << 1;	/* 1  5 */
+	b += a;		/* 1  6 */
+	c += b;		/* 1  7 */
+	c <<= 3;	/* 3 10 */
+	c -= a;		/* 1 11 */
+	d = c << 7;	/* 7 18 */
+	d += b;		/* 1 19 */
+	d <<= 8;	/* 8 27 */
+	d += a;		/* 1 28 */
+	d <<= 1;	/* 1 29 */
+	d += b;		/* 1 30 */
+	d <<= 6;	/* 6 36 */
+	return d + c;	/* 1 37 total instructions*/
+#endif
+}
+
+#endif /* !CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL */
+#endif /* _ASM_HASH_H */
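
As an aside (not part of the patch): the 6-shift, 6-add sequence quoted in the header comment is easy to verify on the build host. The sketch below assumes GOLDEN_RATIO_32 = 0x61C88647 as stated above and simply replays the sequence in C.

/* Editorial sketch: check the barrel-shifter sequence above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

static uint32_t hash32_shift_add(uint32_t x)
{
	uint32_t a, b, c;

	c = (x << 19) + x;
	a = (x <<  9) + c;
	b = (x << 23) + a;
	return (a << 11) + (b << 6) + (c << 3) - b;
}

int main(void)
{
	uint32_t x = 3;

	for (int i = 0; i < 1000000; i++, x = x * 1103515245u + 12345u)
		assert(hash32_shift_add(x) == x * GOLDEN_RATIO_32);
	puts("6-shift/6-add chain matches");
	return 0;
}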
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 76ed17b..805ae5d 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         389
+#define __NR_syscalls         392
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index 32850c7..a8bd3fa 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -404,5 +404,8 @@
 #define __NR_memfd_create	386
 #define __NR_bpf		387
 #define __NR_execveat		388
+#define __NR_userfaultfd	389
+#define __NR_membarrier		390
+#define __NR_mlock2		391
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 29c8568..6b3dd99 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -389,3 +389,6 @@
 	.long sys_memfd_create
 	.long sys_bpf
 	.long sys_execveat
+	.long sys_userfaultfd
+	.long sys_membarrier		/* 390 */
+	.long sys_mlock2
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 35654be..14cba60 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -48,6 +48,8 @@
 resource_size_t isa_mem_base;
 
 unsigned long isa_io_base;
+EXPORT_SYMBOL(isa_io_base);
+
 static int pci_bus_count;
 
 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8040fb1..ac91939 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -398,6 +398,7 @@
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MIPS_CPS
 	select SYS_SUPPORTS_MULTITHREADING
+	select SYS_SUPPORTS_RELOCATABLE
 	select SYS_SUPPORTS_ZBOOT
 	select SYS_HAS_EARLY_PRINTK
 	select USE_GENERIC_EARLY_PRINTK_8250
@@ -3117,6 +3118,7 @@
 config BINFMT_ELF32
 	bool
 	default y if MIPS32_O32 || MIPS32_N32
+	select ELFCORE
 
 endmenu
 
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 4a9c8f2..f6ae6ed 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -5,7 +5,7 @@
 	#size-cells = <1>;
 	compatible = "ingenic,jz4740";
 
-	cpuintc: interrupt-controller@0 {
+	cpuintc: interrupt-controller {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index 08bf24f..793c0c7 100644
--- a/arch/mips/boot/dts/ralink/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -9,7 +9,7 @@
 		};
 	};
 
-	cpuintc: cpuintc@0 {
+	cpuintc: cpuintc {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt2880.dtsi b/arch/mips/boot/dts/ralink/rt2880.dtsi
index 182afde..fb2faef 100644
--- a/arch/mips/boot/dts/ralink/rt2880.dtsi
+++ b/arch/mips/boot/dts/ralink/rt2880.dtsi
@@ -9,7 +9,7 @@
 		};
 	};
 
-	cpuintc: cpuintc@0 {
+	cpuintc: cpuintc {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3050.dtsi b/arch/mips/boot/dts/ralink/rt3050.dtsi
index e3203d4..d3cb57f 100644
--- a/arch/mips/boot/dts/ralink/rt3050.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3050.dtsi
@@ -9,7 +9,7 @@
 		};
 	};
 
-	cpuintc: cpuintc@0 {
+	cpuintc: cpuintc {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3883.dtsi b/arch/mips/boot/dts/ralink/rt3883.dtsi
index 3b131dd..3d6fc9a 100644
--- a/arch/mips/boot/dts/ralink/rt3883.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3883.dtsi
@@ -9,7 +9,7 @@
 		};
 	};
 
-	cpuintc: cpuintc@0 {
+	cpuintc: cpuintc {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-controller;
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 686ebd1..48d2112 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -10,7 +10,7 @@
 		reg = <0x0 0x08000000>;
 	};
 
-	cpuintc: interrupt-controller@0 {
+	cpuintc: interrupt-controller {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-controller;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index dff88aa..33aab89 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -384,7 +384,7 @@
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		octeon_update_boot_vector(cpu);
 		break;
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 3bdb72a..f0c8971 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -18,7 +18,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_KMEM=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index f8bf915c..7f95c4b 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -25,7 +25,6 @@
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CPUSETS=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6741673..56584a6 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,28 @@
 #include <asm/asmmacro-64.h>
 #endif
 
+/*
+ * Helper macros for generating raw instruction encodings.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+	.macro	insn32_if_mm enc
+	.insn
+	.hword ((\enc) >> 16)
+	.hword ((\enc) & 0xffff)
+	.endm
+
+	.macro	insn_if_mips enc
+	.endm
+#else
+	.macro	insn32_if_mm enc
+	.endm
+
+	.macro	insn_if_mips enc
+	.insn
+	.word (\enc)
+	.endm
+#endif
+
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	local_irq_enable reg=t0
 	ei
@@ -341,38 +363,6 @@
 	.endm
 #else
 
-#ifdef CONFIG_CPU_MICROMIPS
-#define CFC_MSA_INSN		0x587e0056
-#define CTC_MSA_INSN		0x583e0816
-#define LDB_MSA_INSN		0x58000807
-#define LDH_MSA_INSN		0x58000817
-#define LDW_MSA_INSN		0x58000827
-#define LDD_MSA_INSN		0x58000837
-#define STB_MSA_INSN		0x5800080f
-#define STH_MSA_INSN		0x5800081f
-#define STW_MSA_INSN		0x5800082f
-#define STD_MSA_INSN		0x5800083f
-#define COPY_SW_MSA_INSN	0x58b00056
-#define COPY_SD_MSA_INSN	0x58b80056
-#define INSERT_W_MSA_INSN	0x59300816
-#define INSERT_D_MSA_INSN	0x59380816
-#else
-#define CFC_MSA_INSN		0x787e0059
-#define CTC_MSA_INSN		0x783e0819
-#define LDB_MSA_INSN		0x78000820
-#define LDH_MSA_INSN		0x78000821
-#define LDW_MSA_INSN		0x78000822
-#define LDD_MSA_INSN		0x78000823
-#define STB_MSA_INSN		0x78000824
-#define STH_MSA_INSN		0x78000825
-#define STW_MSA_INSN		0x78000826
-#define STD_MSA_INSN		0x78000827
-#define COPY_SW_MSA_INSN	0x78b00059
-#define COPY_SD_MSA_INSN	0x78b80059
-#define INSERT_W_MSA_INSN	0x79300819
-#define INSERT_D_MSA_INSN	0x79380819
-#endif
-
 	/*
 	 * Temporary until all toolchains in use include MSA support.
 	 */
@@ -380,8 +370,8 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	.insn
-	.word	CFC_MSA_INSN | (\cs << 11)
+	insn_if_mips 0x787e0059 | (\cs << 11)
+	insn32_if_mm 0x587e0056 | (\cs << 11)
 	move	\rd, $1
 	.set	pop
 	.endm
@@ -391,7 +381,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	move	$1, \rs
-	.word	CTC_MSA_INSN | (\cd << 6)
+	insn_if_mips 0x783e0819 | (\cd << 6)
+	insn32_if_mm 0x583e0816 | (\cd << 6)
 	.set	pop
 	.endm
 
@@ -400,7 +391,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	LDB_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000820 | (\wd << 6)
+	insn32_if_mm 0x58000807 | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -409,7 +401,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	LDH_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000821 | (\wd << 6)
+	insn32_if_mm 0x58000817 | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -418,7 +411,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	LDW_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000822 | (\wd << 6)
+	insn32_if_mm 0x58000827 | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -427,7 +421,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	LDD_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000823 | (\wd << 6)
+	insn32_if_mm 0x58000837 | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -436,7 +431,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	STB_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000824 | (\wd << 6)
+	insn32_if_mm 0x5800080f | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -445,7 +441,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	STH_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000825 | (\wd << 6)
+	insn32_if_mm 0x5800081f | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -454,7 +451,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	STW_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000826 | (\wd << 6)
+	insn32_if_mm 0x5800082f | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -463,7 +461,8 @@
 	.set	noat
 	SET_HARDFLOAT
 	PTR_ADDU $1, \base, \off
-	.word	STD_MSA_INSN | (\wd << 6)
+	insn_if_mips 0x78000827 | (\wd << 6)
+	insn32_if_mm 0x5800083f | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -471,8 +470,8 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	.insn
-	.word	COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11)
+	insn_if_mips 0x78b00059 | (\n << 16) | (\ws << 11)
+	insn32_if_mm 0x58b00056 | (\n << 16) | (\ws << 11)
 	.set	pop
 	.endm
 
@@ -480,8 +479,8 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	.insn
-	.word	COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11)
+	insn_if_mips 0x78b80059 | (\n << 16) | (\ws << 11)
+	insn32_if_mm 0x58b80056 | (\n << 16) | (\ws << 11)
 	.set	pop
 	.endm
 
@@ -489,7 +488,8 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	.word	INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
+	insn_if_mips 0x79300819 | (\n << 16) | (\wd << 6)
+	insn32_if_mm 0x59300816 | (\n << 16) | (\wd << 6)
 	.set	pop
 	.endm
 
@@ -497,7 +497,8 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	.word	INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
+	insn_if_mips 0x79380819 | (\n << 16) | (\wd << 6)
+	insn32_if_mm 0x59380816 | (\n << 16) | (\wd << 6)
 	.set	pop
 	.endm
 #endif
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index dbb1eb6..e0fecf2 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -58,8 +58,8 @@
  * address of a label as argument to inline assembler.	Gas otoh has the
  * annoying difference between la and dla which are only usable for 32-bit
  * rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The alternative is switching the assembler to 64-bit code which happens
+ * to work right even for 32-bit code...
  */
 #define instruction_hazard()						\
 do {									\
@@ -133,8 +133,8 @@
  * address of a label as argument to inline assembler.	Gas otoh has the
  * annoying difference between la and dla which are only usable for 32-bit
  * rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The alternative is switching the assembler to 64-bit code which happens
+ * to work right even for 32-bit code...
  */
 #define __instruction_hazard()						\
 do {									\
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index ca8077a..456ddba 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -100,7 +100,7 @@
 	u32	dscr_nxtptr;		/* Next descriptor pointer (mostly) */
 	/*
 	 * First 32 bytes are HW specific!!!
-	 * Lets have some SW data following -- make sure it's 32 bytes.
+	 * Let's have some SW data following -- make sure it's 32 bytes.
 	 */
 	u32	sw_status;
 	u32	sw_context;
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index ce02894..d607d64 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -140,7 +140,7 @@
 * Cases 1 and 3 are intended for boards which want to provide their own
 * GPIO namespace and -operations (i.e. for example you have 8 GPIOs
 * which are in part provided by spare Au1300 GPIO pins and in part by
-* an external FPGA but you still want them to be accssible in linux
+* an external FPGA but you still want them to be accessible in linux
 * as gpio0-7. The board can of course use the alchemy_gpioX_* functions
 * as required).
 */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index 466fc85..c4e856f 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -22,7 +22,7 @@
 	int has_phy_interrupt;
 	int phy_interrupt;
 
-	/* if has_phy, use autonegociated pause parameters or force
+	/* if has_phy, use autonegotiated pause parameters or force
 	 * them */
 	int pause_auto;
 	int pause_rx;
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 1daa644..04d8620 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -64,7 +64,7 @@
 
 static inline int plat_device_is_coherent(struct device *dev)
 {
-	return 1;		/* IP27 non-cohernet mode is unsupported */
+	return 1;		/* IP27 non-coherent mode is unsupported */
 }
 
 #endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 0a0b0e2..7bdf212 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -86,7 +86,7 @@
 
 static inline int plat_device_is_coherent(struct device *dev)
 {
-	return 0;		/* IP32 is non-cohernet */
+	return 0;		/* IP32 is non-coherent */
 }
 
 #endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
index 398733e..7f7b0fc 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
@@ -27,7 +27,7 @@
 
 	unsigned char banks[JZ_NAND_NUM_BANKS];
 
-	void (*ident_callback)(struct platform_device *, struct nand_chip *,
+	void (*ident_callback)(struct platform_device *, struct mtd_info *,
 				struct mtd_partition **, int *num_partitions);
 };
 
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index 7023883..8e9b022 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -22,7 +22,7 @@
 
 /*
  * during early_printk no ioremap possible at this early stage
- * lets use KSEG1 instead
+ * let's use KSEG1 instead
  */
 #define LTQ_ASC0_BASE_ADDR	0x1E100C00
 #define LTQ_EARLY_ASC		KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index f873107..17b41bb 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -75,7 +75,7 @@
 
 /*
  * during early_printk no ioremap is possible
- * lets use KSEG1 instead
+ * let's use KSEG1 instead
  */
 #define LTQ_ASC1_BASE_ADDR	0x1E100C00
 #define LTQ_EARLY_ASC		KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
index 4431fc5..74230d0 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
@@ -24,7 +24,7 @@
 	u8 level;
 };
 
-#define CONSTANT_SPEED_POLICY	0  /* at constent speed */
+#define CONSTANT_SPEED_POLICY	0  /* at constant speed */
 #define STEP_SPEED_POLICY	1  /* use up/down arrays to describe policy */
 #define KERNEL_HELPER_POLICY	2  /* kernel as a helper to fan control */
 
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0cf8622..ab03eb3 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -56,7 +56,7 @@
 		(0 << MIPS_SEGCFG_PA_SHIFT) |				\
 		(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
 	or	t0, t2
-	mtc0	t0, $5, 2
+	mtc0	t0, CP0_SEGCTL0
 
 	/* SegCtl1 */
 	li      t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |	\
@@ -67,7 +67,7 @@
 		(0 << MIPS_SEGCFG_PA_SHIFT) |				\
 		(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
 	ins	t0, t1, 16, 3
-	mtc0	t0, $5, 3
+	mtc0	t0, CP0_SEGCTL1
 
 	/* SegCtl2 */
 	li	t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |	\
@@ -77,7 +77,7 @@
 		(4 << MIPS_SEGCFG_PA_SHIFT) |				\
 		(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
 	or	t0, t2
-	mtc0	t0, $5, 4
+	mtc0	t0, CP0_SEGCTL2
 
 	jal	mips_ihb
 	mfc0    t0, $16, 5
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index f6ba004..aa4cca0 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,5 +1,5 @@
 /*
- * Definitions and decalrations for MIPS MT support that are common between
+ * Definitions and declarations for MIPS MT support that are common between
  * the VSMP, and AP/SP kernel models.
  */
 #ifndef __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 25d0157..e1ca65c 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -48,6 +48,9 @@
 #define CP0_CONF $3
 #define CP0_CONTEXT $4
 #define CP0_PAGEMASK $5
+#define CP0_SEGCTL0 $5, 2
+#define CP0_SEGCTL1 $5, 3
+#define CP0_SEGCTL2 $5, 4
 #define CP0_WIRED $6
 #define CP0_INFO $7
 #define CP0_HWRENA $7, 0
@@ -726,6 +729,8 @@
 #define MIPS_PWFIELD_PTEI_SHIFT	0
 #define MIPS_PWFIELD_PTEI_MASK	0x0000003f
 
+#define MIPS_PWSIZE_PS_SHIFT	30
+#define MIPS_PWSIZE_PS_MASK	0x40000000
 #define MIPS_PWSIZE_GDW_SHIFT	24
 #define MIPS_PWSIZE_GDW_MASK	0x3f000000
 #define MIPS_PWSIZE_UDW_SHIFT	18
@@ -739,6 +744,12 @@
 
 #define MIPS_PWCTL_PWEN_SHIFT	31
 #define MIPS_PWCTL_PWEN_MASK	0x80000000
+#define MIPS_PWCTL_XK_SHIFT	28
+#define MIPS_PWCTL_XK_MASK	0x10000000
+#define MIPS_PWCTL_XS_SHIFT	27
+#define MIPS_PWCTL_XS_MASK	0x08000000
+#define MIPS_PWCTL_XU_SHIFT	26
+#define MIPS_PWCTL_XU_MASK	0x04000000
 #define MIPS_PWCTL_DPH_SHIFT	7
 #define MIPS_PWCTL_DPH_MASK	0x00000080
 #define MIPS_PWCTL_HUGEPG_SHIFT	6
@@ -1046,6 +1057,33 @@
 }
 
 /*
+ * Helper macros for generating raw instruction encodings in inline asm.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+#define _ASM_INSN16_IF_MM(_enc)			\
+	".insn\n\t"				\
+	".hword (" #_enc ")\n\t"
+#define _ASM_INSN32_IF_MM(_enc)			\
+	".insn\n\t"				\
+	".hword ((" #_enc ") >> 16)\n\t"	\
+	".hword ((" #_enc ") & 0xffff)\n\t"
+#else
+#define _ASM_INSN_IF_MIPS(_enc)			\
+	".insn\n\t"				\
+	".word (" #_enc ")\n\t"
+#endif
+
+#ifndef _ASM_INSN16_IF_MM
+#define _ASM_INSN16_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN32_IF_MM
+#define _ASM_INSN32_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN_IF_MIPS
+#define _ASM_INSN_IF_MIPS(_enc)
+#endif
+
+/*
  * TLB Invalidate Flush
  */
 static inline void tlbinvf(void)
@@ -1053,7 +1091,9 @@
 	__asm__ __volatile__(
 		".set push\n\t"
 		".set noreorder\n\t"
-		".word 0x42000004\n\t" /* tlbinvf */
+		"# tlbinvf\n\t"
+		_ASM_INSN_IF_MIPS(0x42000004)
+		_ASM_INSN32_IF_MM(0x0000537c)
 		".set pop");
 }
 
@@ -1274,9 +1314,9 @@
 	"	.set	push					\n"	\
 	"	.set	noat					\n"	\
 	"	.set	mips32r2				\n"	\
-	"	.insn						\n"	\
 	"	# mfhc0 $1, %1					\n"	\
-	"	.word	(0x40410000 | ((%1 & 0x1f) << 11))	\n"	\
+	_ASM_INSN_IF_MIPS(0x40410000 | ((%1 & 0x1f) << 11))		\
+	_ASM_INSN32_IF_MM(0x002000f4 | ((%1 & 0x1f) << 16))		\
 	"	move	%0, $1					\n"	\
 	"	.set	pop					\n"	\
 	: "=r" (__res)							\
@@ -1292,8 +1332,8 @@
 	"	.set	mips32r2				\n"	\
 	"	move	$1, %0					\n"	\
 	"	# mthc0 $1, %1					\n"	\
-	"	.insn						\n"	\
-	"	.word	(0x40c10000 | ((%1 & 0x1f) << 11))	\n"	\
+	_ASM_INSN_IF_MIPS(0x40c10000 | ((%1 & 0x1f) << 11))		\
+	_ASM_INSN32_IF_MM(0x002002f4 | ((%1 & 0x1f) << 16))		\
 	"	.set	pop					\n"	\
 	:								\
 	: "r" (value), "i" (register));					\
@@ -1743,7 +1783,8 @@
 		".set\tpush\n\t"					\
 		".set\tnoat\n\t"					\
 		"# mfgc0\t$1, $%1, %2\n\t"				\
-		".word\t(0x40610000 | %1 << 11 | %2)\n\t"		\
+		_ASM_INSN_IF_MIPS(0x40610000 | %1 << 11 | %2)		\
+		_ASM_INSN32_IF_MM(0x002004fc | %1 << 16 | %2 << 11)	\
 		"move\t%0, $1\n\t"					\
 		".set\tpop"						\
 		: "=r" (__res)						\
@@ -1757,7 +1798,8 @@
 		".set\tpush\n\t"					\
 		".set\tnoat\n\t"					\
 		"# dmfgc0\t$1, $%1, %2\n\t"				\
-		".word\t(0x40610100 | %1 << 11 | %2)\n\t"		\
+		_ASM_INSN_IF_MIPS(0x40610100 | %1 << 11 | %2)		\
+		_ASM_INSN32_IF_MM(0x582004fc | %1 << 16 | %2 << 11)	\
 		"move\t%0, $1\n\t"					\
 		".set\tpop"						\
 		: "=r" (__res)						\
@@ -1770,9 +1812,10 @@
 	__asm__ __volatile__(						\
 		".set\tpush\n\t"					\
 		".set\tnoat\n\t"					\
-		"move\t$1, %0\n\t"					\
+		"move\t$1, %z0\n\t"					\
 		"# mtgc0\t$1, $%1, %2\n\t"				\
-		".word\t(0x40610200 | %1 << 11 | %2)\n\t"		\
+		_ASM_INSN_IF_MIPS(0x40610200 | %1 << 11 | %2)		\
+		_ASM_INSN32_IF_MM(0x002006fc | %1 << 16 | %2 << 11)	\
 		".set\tpop"						\
 		: : "Jr" ((unsigned int)(value)),			\
 		    "i" (register), "i" (sel));				\
@@ -1783,9 +1826,10 @@
 	__asm__ __volatile__(						\
 		".set\tpush\n\t"					\
 		".set\tnoat\n\t"					\
-		"move\t$1, %0\n\t"					\
+		"move\t$1, %z0\n\t"					\
 		"# dmtgc0\t$1, $%1, %2\n\t"				\
-		".word\t(0x40610300 | %1 << 11 | %2)\n\t"		\
+		_ASM_INSN_IF_MIPS(0x40610300 | %1 << 11 | %2)		\
+		_ASM_INSN32_IF_MM(0x582006fc | %1 << 16 | %2 << 11)	\
 		".set\tpop"						\
 		: : "Jr" (value),					\
 		    "i" (register), "i" (sel));				\
@@ -2246,7 +2290,6 @@
 
 #else
 
-#ifdef CONFIG_CPU_MICROMIPS
 #define rddsp(mask)							\
 ({									\
 	unsigned int __res;						\
@@ -2255,8 +2298,8 @@
 	"	.set	push					\n"	\
 	"	.set	noat					\n"	\
 	"	# rddsp $1, %x1					\n"	\
-	"	.hword	((0x0020067c | (%x1 << 14)) >> 16)	\n"	\
-	"	.hword	((0x0020067c | (%x1 << 14)) & 0xffff)	\n"	\
+	_ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16))			\
+	_ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14))			\
 	"	move	%0, $1					\n"	\
 	"	.set	pop					\n"	\
 	: "=r" (__res)							\
@@ -2271,98 +2314,13 @@
 	"	.set	noat					\n"	\
 	"	move	$1, %0					\n"	\
 	"	# wrdsp $1, %x1					\n"	\
-	"	.hword	((0x0020167c | (%x1 << 14)) >> 16)	\n"	\
-	"	.hword	((0x0020167c | (%x1 << 14)) & 0xffff)	\n"	\
+	_ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11))			\
+	_ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14))			\
 	"	.set	pop					\n"	\
 	:								\
 	: "r" (val), "i" (mask));					\
 } while (0)
 
-#define _umips_dsp_mfxxx(ins)						\
-({									\
-	unsigned long __treg;						\
-									\
-	__asm__ __volatile__(						\
-	"	.set	push					\n"	\
-	"	.set	noat					\n"	\
-	"	.hword	0x0001					\n"	\
-	"	.hword	%x1					\n"	\
-	"	move	%0, $1					\n"	\
-	"	.set	pop					\n"	\
-	: "=r" (__treg)							\
-	: "i" (ins));							\
-	__treg;								\
-})
-
-#define _umips_dsp_mtxxx(val, ins)					\
-do {									\
-	__asm__ __volatile__(						\
-	"	.set	push					\n"	\
-	"	.set	noat					\n"	\
-	"	move	$1, %0					\n"	\
-	"	.hword	0x0001					\n"	\
-	"	.hword	%x1					\n"	\
-	"	.set	pop					\n"	\
-	:								\
-	: "r" (val), "i" (ins));					\
-} while (0)
-
-#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
-#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
-
-#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
-#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
-
-#define mflo0() _umips_dsp_mflo(0)
-#define mflo1() _umips_dsp_mflo(1)
-#define mflo2() _umips_dsp_mflo(2)
-#define mflo3() _umips_dsp_mflo(3)
-
-#define mfhi0() _umips_dsp_mfhi(0)
-#define mfhi1() _umips_dsp_mfhi(1)
-#define mfhi2() _umips_dsp_mfhi(2)
-#define mfhi3() _umips_dsp_mfhi(3)
-
-#define mtlo0(x) _umips_dsp_mtlo(x, 0)
-#define mtlo1(x) _umips_dsp_mtlo(x, 1)
-#define mtlo2(x) _umips_dsp_mtlo(x, 2)
-#define mtlo3(x) _umips_dsp_mtlo(x, 3)
-
-#define mthi0(x) _umips_dsp_mthi(x, 0)
-#define mthi1(x) _umips_dsp_mthi(x, 1)
-#define mthi2(x) _umips_dsp_mthi(x, 2)
-#define mthi3(x) _umips_dsp_mthi(x, 3)
-
-#else  /* !CONFIG_CPU_MICROMIPS */
-#define rddsp(mask)							\
-({									\
-	unsigned int __res;						\
-									\
-	__asm__ __volatile__(						\
-	"	.set	push				\n"		\
-	"	.set	noat				\n"		\
-	"	# rddsp $1, %x1				\n"		\
-	"	.word	0x7c000cb8 | (%x1 << 16)	\n"		\
-	"	move	%0, $1				\n"		\
-	"	.set	pop				\n"		\
-	: "=r" (__res)							\
-	: "i" (mask));							\
-	__res;								\
-})
-
-#define wrdsp(val, mask)						\
-do {									\
-	__asm__ __volatile__(						\
-	"	.set	push					\n"	\
-	"	.set	noat					\n"	\
-	"	move	$1, %0					\n"	\
-	"	# wrdsp $1, %x1					\n"	\
-	"	.word	0x7c2004f8 | (%x1 << 11)		\n"	\
-	"	.set	pop					\n"	\
-        :								\
-	: "r" (val), "i" (mask));					\
-} while (0)
-
 #define _dsp_mfxxx(ins)							\
 ({									\
 	unsigned long __treg;						\
@@ -2370,7 +2328,8 @@
 	__asm__ __volatile__(						\
 	"	.set	push					\n"	\
 	"	.set	noat					\n"	\
-	"	.word	(0x00000810 | %1)			\n"	\
+	_ASM_INSN_IF_MIPS(0x00000810 | %X1)				\
+	_ASM_INSN32_IF_MM(0x0001007c | %x1)				\
 	"	move	%0, $1					\n"	\
 	"	.set	pop					\n"	\
 	: "=r" (__treg)							\
@@ -2384,18 +2343,31 @@
 	"	.set	push					\n"	\
 	"	.set	noat					\n"	\
 	"	move	$1, %0					\n"	\
-	"	.word	(0x00200011 | %1)			\n"	\
+	_ASM_INSN_IF_MIPS(0x00200011 | %X1)				\
+	_ASM_INSN32_IF_MM(0x0001207c | %x1)				\
 	"	.set	pop					\n"	\
 	:								\
 	: "r" (val), "i" (ins));					\
 } while (0)
 
+#ifdef CONFIG_CPU_MICROMIPS
+
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 14) | 0x1000)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 14) | 0x0000)
+
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x1000))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x0000))
+
+#else  /* !CONFIG_CPU_MICROMIPS */
+
 #define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
 #define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
 
 #define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
 #define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
 
+#endif /* CONFIG_CPU_MICROMIPS */
+
 #define mflo0() _dsp_mflo(0)
 #define mflo1() _dsp_mflo(1)
 #define mflo2() _dsp_mflo(2)
@@ -2416,7 +2388,6 @@
 #define mthi2(x) _dsp_mthi(x, 2)
 #define mthi3(x) _dsp_mthi(x, 3)
 
-#endif /* CONFIG_CPU_MICROMIPS */
 #endif
 
 /*
@@ -2556,28 +2527,32 @@
 {
 	__asm__ __volatile__(
 		"# tlbgp\n\t"
-		".word 0x42000010");
+		_ASM_INSN_IF_MIPS(0x42000010)
+		_ASM_INSN32_IF_MM(0x0000017c));
 }
 
 static inline void guest_tlb_read(void)
 {
 	__asm__ __volatile__(
 		"# tlbgr\n\t"
-		".word 0x42000009");
+		_ASM_INSN_IF_MIPS(0x42000009)
+		_ASM_INSN32_IF_MM(0x0000117c));
 }
 
 static inline void guest_tlb_write_indexed(void)
 {
 	__asm__ __volatile__(
 		"# tlbgwi\n\t"
-		".word 0x4200000a");
+		_ASM_INSN_IF_MIPS(0x4200000a)
+		_ASM_INSN32_IF_MM(0x0000217c));
 }
 
 static inline void guest_tlb_write_random(void)
 {
 	__asm__ __volatile__(
 		"# tlbgwr\n\t"
-		".word 0x4200000e");
+		_ASM_INSN_IF_MIPS(0x4200000e)
+		_ASM_INSN32_IF_MM(0x0000317c));
 }
 
 /*
@@ -2587,7 +2562,8 @@
 {
 	__asm__ __volatile__(
 		"# tlbginvf\n\t"
-		".word 0x4200000c");
+		_ASM_INSN_IF_MIPS(0x4200000c)
+		_ASM_INSN32_IF_MM(0x0000517c));
 }
 
 #endif	/* !TOOLCHAIN_SUPPORTS_VIRT */
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index 6e4effa..ddf496c 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -192,13 +192,6 @@
  * allow compilation with toolchains that do not support MSA. Once all
  * toolchains in use support MSA these can be removed.
  */
-#ifdef CONFIG_CPU_MICROMIPS
-#define CFC_MSA_INSN	0x587e0056
-#define CTC_MSA_INSN	0x583e0816
-#else
-#define CFC_MSA_INSN	0x787e0059
-#define CTC_MSA_INSN	0x783e0819
-#endif
 
 #define __BUILD_MSA_CTL_REG(name, cs)				\
 static inline unsigned int read_msa_##name(void)		\
@@ -207,11 +200,12 @@
 	__asm__ __volatile__(					\
 	"	.set	push\n"					\
 	"	.set	noat\n"					\
-	"	.insn\n"					\
-	"	.word	%1 | (" #cs " << 11)\n"			\
+	"	# cfcmsa $1, $%1\n"				\
+	_ASM_INSN_IF_MIPS(0x787e0059 | %1 << 11)		\
+	_ASM_INSN32_IF_MM(0x587e0056 | %1 << 11)		\
 	"	move	%0, $1\n"				\
 	"	.set	pop\n"					\
-	: "=r"(reg) : "i"(CFC_MSA_INSN));			\
+	: "=r"(reg) : "i"(cs));					\
 	return reg;						\
 }								\
 								\
@@ -221,10 +215,11 @@
 	"	.set	push\n"					\
 	"	.set	noat\n"					\
 	"	move	$1, %0\n"				\
-	"	.insn\n"					\
-	"	.word	%1 | (" #cs " << 6)\n"			\
+	"	# ctcmsa $%1, $1\n"				\
+	_ASM_INSN_IF_MIPS(0x783e0819 | %1 << 6)			\
+	_ASM_INSN32_IF_MM(0x583e0816 | %1 << 6)			\
 	"	.set	pop\n"					\
-	: : "r"(val), "i"(CTC_MSA_INSN));			\
+	: : "r"(val), "i"(cs));					\
 }
 
 #endif /* !TOOLCHAIN_SUPPORTS_MSA */
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 8d05d90..a07a36f 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -146,7 +146,7 @@
  * This structure contains the global state of all command queues.
  * It is stored in a bootmem named block and shared by all
  * applications running on Octeon. Tickets are stored in a differnet
- * cahce line that queue information to reduce the contention on the
+ * cache line that queue information to reduce the contention on the
  * ll/sc used to get a ticket. If this is not the case, the update
  * of queue state causes the ll/sc to fail quite often.
  */
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 8933203..cda93ae 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -94,7 +94,7 @@
  * @phy_addr:  The address of the PHY to program
  * @link_flags:
  *		    Flags to control autonegotiation.  Bit 0 is autonegotiation
- *		    enable/disable to maintain backware compatibility.
+ *		    enable/disable to maintain backward compatibility.
  * @link_info: Link speed to program. If the speed is zero and autonegotiation
  *		    is enabled, all possible negotiation speeds are advertised.
  *
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index e13490e..cbdc14b 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -39,7 +39,7 @@
 
 enum cvmx_ipd_mode {
    CVMX_IPD_OPC_MODE_STT = 0LL,	  /* All blocks DRAM, not cached in L2 */
-   CVMX_IPD_OPC_MODE_STF = 1LL,	  /* All bloccks into  L2 */
+   CVMX_IPD_OPC_MODE_STF = 1LL,	  /* All blocks into  L2 */
    CVMX_IPD_OPC_MODE_STF1_STT = 2LL,   /* 1st block L2, rest DRAM */
    CVMX_IPD_OPC_MODE_STF2_STT = 3LL    /* 1st, 2nd blocks L2, rest DRAM */
 };
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 5153156..410bb70 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -2051,7 +2051,7 @@
 }
 
 /**
- * Descchedules the current work queue entry.
+ * Deschedules the current work queue entry.
  *
  * @no_sched: no schedule flag value to be set on the work queue
  *	      entry.  If this is set the entry will not be
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 4a9c990..c0e3dc0 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -39,7 +39,7 @@
 	volatile u32 pbdma_dptr;	/* pbus dma channel desc ptr */
 	u32 _unused0[0x1000/4 - 2];	/* padding */
 	volatile u32 pbdma_ctrl;	/* pbus dma channel control register has
-					 * copletely different meaning for read
+					 * completely different meaning for read
 					 * compared with write */
 	/* read */
 #define HPC3_PDMACTRL_INT	0x00000001 /* interrupt (cleared after read) */
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 4e3f9b7a..258fd03 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -48,20 +48,6 @@
 #define QI_LB60_GPIO_KEYIN8		JZ_GPIO_PORTD(26)
 
 /* NAND */
-static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
-	.eccbytes = 36,
-	.eccpos = {
-		6,  7,	8,  9,	10, 11, 12, 13,
-		14, 15, 16, 17, 18, 19, 20, 21,
-		22, 23, 24, 25, 26, 27, 28, 29,
-		30, 31, 32, 33, 34, 35, 36, 37,
-		38, 39, 40, 41
-	},
-	.oobfree = {
-		{ .offset = 2, .length = 4 },
-		{ .offset = 42, .length = 22 }
-	},
-};
 
 /* Early prototypes of the QI LB60 had only 1GB of NAND.
  * In order to support these devices as well the partition and ecc layout is
@@ -84,25 +70,6 @@
 	},
 };
 
-static struct nand_ecclayout qi_lb60_ecclayout_2gb = {
-	.eccbytes = 72,
-	.eccpos = {
-		12, 13, 14, 15, 16, 17, 18, 19,
-		20, 21, 22, 23, 24, 25, 26, 27,
-		28, 29, 30, 31, 32, 33, 34, 35,
-		36, 37, 38, 39, 40, 41, 42, 43,
-		44, 45, 46, 47, 48, 49, 50, 51,
-		52, 53, 54, 55, 56, 57, 58, 59,
-		60, 61, 62, 63, 64, 65, 66, 67,
-		68, 69, 70, 71, 72, 73, 74, 75,
-		76, 77, 78, 79, 80, 81, 82, 83
-	},
-	.oobfree = {
-		{ .offset = 2, .length = 10 },
-		{ .offset = 84, .length = 44 },
-	},
-};
-
 static struct mtd_partition qi_lb60_partitions_2gb[] = {
 	{
 		.name = "NAND BOOT partition",
@@ -121,19 +88,67 @@
 	},
 };
 
+static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 36;
+	oobregion->offset = 6;
+
+	if (mtd->oobsize == 128) {
+		oobregion->length *= 2;
+		oobregion->offset *= 2;
+	}
+
+	return 0;
+}
+
+static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	int eccbytes = 36, eccoff = 6;
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (mtd->oobsize == 128) {
+		eccbytes *= 2;
+		eccoff *= 2;
+	}
+
+	if (!section) {
+		oobregion->offset = 2;
+		oobregion->length = eccoff - 2;
+	} else {
+		oobregion->offset = eccoff + eccbytes;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+	.ecc = qi_lb60_ooblayout_ecc,
+	.free = qi_lb60_ooblayout_free,
+};
+
 static void qi_lb60_nand_ident(struct platform_device *pdev,
-		struct nand_chip *chip, struct mtd_partition **partitions,
+		struct mtd_info *mtd, struct mtd_partition **partitions,
 		int *num_partitions)
 {
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
 	if (chip->page_shift == 12) {
-		chip->ecc.layout = &qi_lb60_ecclayout_2gb;
 		*partitions = qi_lb60_partitions_2gb;
 		*num_partitions = ARRAY_SIZE(qi_lb60_partitions_2gb);
 	} else {
-		chip->ecc.layout = &qi_lb60_ecclayout_1gb;
 		*partitions = qi_lb60_partitions_1gb;
 		*num_partitions = ARRAY_SIZE(qi_lb60_partitions_1gb);
 	}
+
+	mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
 }
 
 static struct jz_nand_platform_data qi_lb60_nand_pdata = {
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index ceca6cc..6dc3f1f 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -481,7 +481,7 @@
 			/*
 			 * OK we are here either because we hit a NAL
 			 * instruction or because we are emulating an
-			 * old bltzal{,l} one. Lets figure out what the
+			 * old bltzal{,l} one. Let's figure out what the
 			 * case really is.
 			 */
 			if (!insn.i_format.rs) {
@@ -515,7 +515,7 @@
 			/*
 			 * OK we are here either because we hit a BAL
 			 * instruction or because we are emulating an
-			 * old bgezal{,l} one. Lets figure out what the
+			 * old bgezal{,l} one. Let's figure out what the
 			 * case really is.
 			 */
 			if (!insn.i_format.rs) {
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 51b98dc..59476a6 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -441,6 +441,21 @@
 	mfc0	t0, CP0_CONFIG
 	mttc0	t0, CP0_CONFIG
 
+	/*
+	 * Copy the EVA config from this VPE if the CPU supports it.
+	 * CONFIG3 must exist to be running MT startup - just read it.
+	 */
+	mfc0	t0, CP0_CONFIG, 3
+	and	t0, t0, MIPS_CONF3_SC
+	beqz	t0, 3f
+	 nop
+	mfc0    t0, CP0_SEGCTL0
+	mttc0	t0, CP0_SEGCTL0
+	mfc0    t0, CP0_SEGCTL1
+	mttc0	t0, CP0_SEGCTL1
+	mfc0    t0, CP0_SEGCTL2
+	mttc0	t0, CP0_SEGCTL2
+3:
 	/* Ensure no software interrupts are pending */
 	mttc0	zero, CP0_CAUSE
 	mttc0	zero, CP0_STATUS
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5ac5c3e..a88d442 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -833,10 +833,8 @@
 		c->options |= MIPS_CPU_MAAR;
 	if (config5 & MIPS_CONF5_LLB)
 		c->options |= MIPS_CPU_RW_LLB;
-#ifdef CONFIG_XPA
 	if (config5 & MIPS_CONF5_MVH)
-		c->options |= MIPS_CPU_XPA;
-#endif
+		c->options |= MIPS_CPU_MVH;
 	if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
 		c->options |= MIPS_CPU_VP;
 
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c3c234d..891f5ee 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -88,7 +88,7 @@
 	elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
 	flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
 
-	/* Lets see if this is an O32 ELF */
+	/* Let's see if this is an O32 ELF */
 	if (elf32) {
 		if (flags & EF_MIPS_FP64) {
 			/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 8eb5af8..f25f7ea 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -54,6 +54,9 @@
 	for (i = 0; i < NR_IRQS; i++)
 		irq_set_noprobe(i);
 
+	if (cpu_has_veic)
+		clear_c0_status(ST0_IM);
+
 	arch_init_irq();
 }
 
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 625ee77..7ff2a55 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -2202,7 +2202,7 @@
 	}
 
 	/*
-	 * Lets not return to userland just yet. It's constly and
+	 * Let's not return to userland just yet. It's costly and
 	 * it's likely we have more R2 instructions to emulate
 	 */
 	if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 5021c54..d64056e 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
  * the user stack callchains, we will add it here.
  */
 
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
-	unsigned long reg29)
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+				    unsigned long reg29)
 {
 	unsigned long *sp = (unsigned long *)reg29;
 	unsigned long addr;
@@ -35,14 +35,14 @@
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
 			perf_callchain_store(entry, addr);
-			if (entry->nr >= sysctl_perf_event_max_stack)
+			if (entry->nr >= entry->max_stack)
 				break;
 		}
 	}
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
-		      struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@
 	}
 	do {
 		perf_callchain_store(entry, pc);
-		if (entry->nr >= sysctl_perf_event_max_stack)
+		if (entry->nr >= entry->max_stack)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 411c971..813ed78 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -345,7 +345,7 @@
 		return 0;
 	if (info->pc_offset < 0) /* leaf */
 		return 1;
-	/* prologue seems boggus... */
+	/* prologue seems bogus... */
 err:
 	return -1;
 }
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index ab04229..ae42314 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -770,15 +770,7 @@
 	sigset_t *oldset = sigmask_to_save();
 	int ret;
 	struct mips_abi *abi = current->thread.abi;
-#ifdef CONFIG_CPU_MICROMIPS
-	void *vdso;
-	unsigned long tmp = (unsigned long)current->mm->context.vdso;
-
-	set_isa16_mode(tmp);
-	vdso = (void *)tmp;
-#else
 	void *vdso = current->mm->context.vdso;
-#endif
 
 	if (regs->regs[0]) {
 		switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 1061bd2..4ed36f2 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -359,8 +359,12 @@
 		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
 	}
 
-	change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
-				 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
+	if (cpu_has_veic)
+		clear_c0_status(ST0_IM);
+	else
+		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+					 STATUSF_IP4 | STATUSF_IP5 |
+					 STATUSF_IP6 | STATUSF_IP7);
 }
 
 static void cps_smp_finish(void)
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 975e997..54e1663 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -104,7 +104,8 @@
 	struct resource gic_res;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/*
 	 * Determine total area size. This includes the VDSO data itself, the
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index b420958..27533c1 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -43,7 +43,7 @@
 {
 	int lineno = *(int *)m->private;
 
-	if (lineno < 0 || lineno > PVC_NLINES) {
+	if (lineno < 0 || lineno >= PVC_NLINES) {
 		printk(KERN_WARNING "proc_read_line: invalid lineno %d\n", lineno);
 		return 0;
 	}
@@ -67,7 +67,7 @@
 	char kbuf[PVC_LINELEN];
 	size_t len;
 
-	BUG_ON(lineno < 0 || lineno > PVC_NLINES);
+	BUG_ON(lineno < 0 || lineno >= PVC_NLINES);
 
 	len = min(count, sizeof(kbuf) - 1);
 	if (copy_from_user(kbuf, buf, len))
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
index beb80f31..927dc94 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-long long __ashldi3(long long u, word_type b)
+long long notrace __ashldi3(long long u, word_type b)
 {
 	DWunion uu, w;
 	word_type bm;
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
index c884a91..9fdf1a5 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-long long __ashrdi3(long long u, word_type b)
+long long notrace __ashrdi3(long long u, word_type b)
 {
 	DWunion uu, w;
 	word_type bm;
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index 77e5f9c1..e3e77aa 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 
-unsigned long long __bswapdi2(unsigned long long u)
+unsigned long long notrace __bswapdi2(unsigned long long u)
 {
 	return (((u) & 0xff00000000000000ull) >> 56) |
 	       (((u) & 0x00ff000000000000ull) >> 40) |
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 2b302ff..530a8af 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 
-unsigned int __bswapsi2(unsigned int u)
+unsigned int notrace __bswapsi2(unsigned int u)
 {
 	return (((u) & 0xff000000) >> 24) |
 	       (((u) & 0x00ff0000) >>  8) |
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
index 8c13064..06857da 100644
--- a/arch/mips/lib/cmpdi2.c
+++ b/arch/mips/lib/cmpdi2.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-word_type __cmpdi2(long long a, long long b)
+word_type notrace __cmpdi2(long long a, long long b)
 {
 	const DWunion au = {
 		.ll = a
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
index dcf8d68..3645474 100644
--- a/arch/mips/lib/lshrdi3.c
+++ b/arch/mips/lib/lshrdi3.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-long long __lshrdi3(long long u, word_type b)
+long long notrace __lshrdi3(long long u, word_type b)
 {
 	DWunion uu, w;
 	word_type bm;
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 9245e17..6c303a9 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -256,7 +256,7 @@
 
 	/*
 	 * Macro to build the __copy_user common code
-	 * Arguements:
+	 * Arguments:
 	 * mode : LEGACY_MODE or EVA_MODE
 	 * from : Source operand. USEROP or KERNELOP
 	 * to   : Destination operand. USEROP or KERNELOP
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
index bb4cb2f..bd599f5 100644
--- a/arch/mips/lib/ucmpdi2.c
+++ b/arch/mips/lib/ucmpdi2.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
 {
 	const DWunion au = {.ll = a};
 	const DWunion bu = {.ll = b};
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index a2631a5..249039a 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -212,7 +212,7 @@
 	/* set hpet base address */
 	smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
 
-	/* enable decodeing of access to HPET MMIO*/
+	/* enable decoding of access to HPET MMIO */
 	smbus_enable(SMBUS_PCI_REG40, (1 << 28));
 
 	/* HPET irq enable */
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index d4ceacd..4707488 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -8,7 +8,7 @@
 #include "ieee754.h"
 
 /*
- * Emulate the arbritrary instruction ir at xcp->cp0_epc.  Required when
+ * Emulate the arbitrary instruction ir at xcp->cp0_epc.  Required when
  * we have to emulate the instruction in a COP1 branch delay slot.  Do
  * not change cp0_epc due to the instruction
  *
@@ -88,7 +88,7 @@
 	fr = (struct emuframe __user *)
 		((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
 
-	/* Verify that the stack pointer is not competely insane */
+	/* Verify that the stack pointer is not completely insane */
 	if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
 		return SIGBUS;
 
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 274da90..4004b65 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -2361,8 +2361,9 @@
 		(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
 
 	config = read_c0_pwsize();
-	pr_debug("PWSize  (0x%0*lx): GDW: 0x%02lx  UDW: 0x%02lx  MDW: 0x%02lx  PTW: 0x%02lx  PTEW: 0x%02lx\n",
+	pr_debug("PWSize  (0x%0*lx): PS: 0x%lx  GDW: 0x%02lx  UDW: 0x%02lx  MDW: 0x%02lx  PTW: 0x%02lx  PTEW: 0x%02lx\n",
 		field, config,
+		(config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
 		(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
 		(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
 		(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
@@ -2370,9 +2371,12 @@
 		(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
 
 	pwctl = read_c0_pwctl();
-	pr_debug("PWCtl   (0x%x): PWEn: 0x%x  DPH: 0x%x  HugePg: 0x%x  Psn: 0x%x\n",
+	pr_debug("PWCtl   (0x%x): PWEn: 0x%x  XK: 0x%x  XS: 0x%x  XU: 0x%x  DPH: 0x%x  HugePg: 0x%x  Psn: 0x%x\n",
 		pwctl,
 		(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+		(pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
+		(pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
+		(pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
 		(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
 		(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
 		(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
@@ -2427,15 +2431,25 @@
 	if (CONFIG_PGTABLE_LEVELS >= 3)
 		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
 
-	pwsize |= ilog2(sizeof(pte_t)/4) << MIPS_PWSIZE_PTEW_SHIFT;
+	/* Set pointer size to size of directory pointers */
+	if (config_enabled(CONFIG_64BIT))
+		pwsize |= MIPS_PWSIZE_PS_MASK;
+	/* PTEs may be multiple pointers long (e.g. with XPA) */
+	pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
+			& MIPS_PWSIZE_PTEW_MASK;
 
 	write_c0_pwsize(pwsize);
 
 	/* Make sure everything is set before we enable the HTW */
 	back_to_back_c0_hazard();
 
-	/* Enable HTW and disable the rest of the pwctl fields */
+	/*
+	 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
+	 * the pwctl fields.
+	 */
 	config = 1 << MIPS_PWCTL_PWEN_SHIFT;
+	if (config_enabled(CONFIG_64BIT))
+		config |= MIPS_PWCTL_XU_MASK;
 	write_c0_pwctl(config);
 	pr_info("Hardware Page Table Walker enabled\n");
 
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index 7c2da27..a4e758a 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -24,7 +24,7 @@
 	unsigned long unit_mask;
 };
 
-/* Per-architecture configury and hooks.  */
+/* Per-architecture configuration and hooks.  */
 struct op_mips_model {
 	void (*reg_setup) (struct op_counter_config *);
 	void (*cpu_setup) (void *dummy);
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 4383194..57e1463 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -33,9 +33,9 @@
  * The Bridge ASIC supports both type 0 and type 1 access.  Type 1 is
  * not really documented, so right now I can't write code which uses it.
  * Therefore we use type 0 accesses for now even though they won't work
- * correcly for PCI-to-PCI bridges.
+ * correctly for PCI-to-PCI bridges.
  *
- * The function is complicated by the ultimate brokeness of the IOC3 chip
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
  * which is used in SGI systems.  The IOC3 can only handle 32-bit PCI
  * accesses and does only decode parts of it's address space.
  */
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index 956c92e..ab79828 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -83,12 +83,16 @@
 	}
 }
 
-void __init plat_mem_setup(void)
+void __init *plat_get_fdt(void)
 {
 	if (fw_arg0 != -2)
 		panic("Device-tree not present");
+	return (void *)fw_arg1;
+}
 
-	__dt_setup_arch((void *)fw_arg1);
+void __init plat_mem_setup(void)
+{
+	__dt_setup_arch(plat_get_fdt());
 
 	plat_setup_iocoherency();
 }
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 88b82fe..d40edda 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -188,6 +188,41 @@
 	FUNC("gpio", 0, 11, 1),
 };
 
+static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
+	FUNC("jtag", 3, 30, 1),
+	FUNC("util", 2, 30, 1),
+	FUNC("gpio", 1, 30, 1),
+	FUNC("p4led_kn", 0, 30, 1),
+};
+
+static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
+	FUNC("jtag", 3, 31, 1),
+	FUNC("util", 2, 31, 1),
+	FUNC("gpio", 1, 31, 1),
+	FUNC("p3led_kn", 0, 31, 1),
+};
+
+static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
+	FUNC("jtag", 3, 32, 1),
+	FUNC("util", 2, 32, 1),
+	FUNC("gpio", 1, 32, 1),
+	FUNC("p2led_kn", 0, 32, 1),
+};
+
+static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
+	FUNC("jtag", 3, 33, 1),
+	FUNC("util", 2, 33, 1),
+	FUNC("gpio", 1, 33, 1),
+	FUNC("p1led_kn", 0, 33, 1),
+};
+
+static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
+	FUNC("jtag", 3, 34, 1),
+	FUNC("rsvd", 2, 34, 1),
+	FUNC("gpio", 1, 34, 1),
+	FUNC("p0led_kn", 0, 34, 1),
+};
+
 static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
 	FUNC("rsvd", 3, 35, 1),
 	FUNC("rsvd", 2, 35, 1),
@@ -195,16 +230,61 @@
 	FUNC("wled_kn", 0, 35, 1),
 };
 
+static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
+	FUNC("jtag", 3, 39, 1),
+	FUNC("util", 2, 39, 1),
+	FUNC("gpio", 1, 39, 1),
+	FUNC("p4led_an", 0, 39, 1),
+};
+
+static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
+	FUNC("jtag", 3, 40, 1),
+	FUNC("util", 2, 40, 1),
+	FUNC("gpio", 1, 40, 1),
+	FUNC("p3led_an", 0, 40, 1),
+};
+
+static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
+	FUNC("jtag", 3, 41, 1),
+	FUNC("util", 2, 41, 1),
+	FUNC("gpio", 1, 41, 1),
+	FUNC("p2led_an", 0, 41, 1),
+};
+
+static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
+	FUNC("jtag", 3, 42, 1),
+	FUNC("util", 2, 42, 1),
+	FUNC("gpio", 1, 42, 1),
+	FUNC("p1led_an", 0, 42, 1),
+};
+
+static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
+	FUNC("jtag", 3, 43, 1),
+	FUNC("rsvd", 2, 43, 1),
+	FUNC("gpio", 1, 43, 1),
+	FUNC("p0led_an", 0, 43, 1),
+};
+
 static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
-	FUNC("rsvd", 3, 35, 1),
-	FUNC("rsvd", 2, 35, 1),
-	FUNC("gpio", 1, 35, 1),
-	FUNC("wled_an", 0, 35, 1),
+	FUNC("rsvd", 3, 44, 1),
+	FUNC("rsvd", 2, 44, 1),
+	FUNC("gpio", 1, 44, 1),
+	FUNC("wled_an", 0, 44, 1),
 };
 
 #define MT7628_GPIO_MODE_MASK		0x3
 
+#define MT7628_GPIO_MODE_P4LED_KN	58
+#define MT7628_GPIO_MODE_P3LED_KN	56
+#define MT7628_GPIO_MODE_P2LED_KN	54
+#define MT7628_GPIO_MODE_P1LED_KN	52
+#define MT7628_GPIO_MODE_P0LED_KN	50
 #define MT7628_GPIO_MODE_WLED_KN	48
+#define MT7628_GPIO_MODE_P4LED_AN	42
+#define MT7628_GPIO_MODE_P3LED_AN	40
+#define MT7628_GPIO_MODE_P2LED_AN	38
+#define MT7628_GPIO_MODE_P1LED_AN	36
+#define MT7628_GPIO_MODE_P0LED_AN	34
 #define MT7628_GPIO_MODE_WLED_AN	32
 #define MT7628_GPIO_MODE_PWM1		30
 #define MT7628_GPIO_MODE_PWM0		28
@@ -223,9 +303,9 @@
 #define MT7628_GPIO_MODE_GPIO		0
 
 static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
-	GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+	GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
 				1, MT7628_GPIO_MODE_PWM1),
-	GRP_G("pmw0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+	GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
 				1, MT7628_GPIO_MODE_PWM0),
 	GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
 				1, MT7628_GPIO_MODE_UART2),
@@ -251,8 +331,28 @@
 				1, MT7628_GPIO_MODE_GPIO),
 	GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
 				1, MT7628_GPIO_MODE_WLED_AN),
+	GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P0LED_AN),
+	GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P1LED_AN),
+	GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P2LED_AN),
+	GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P3LED_AN),
+	GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P4LED_AN),
 	GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
 				1, MT7628_GPIO_MODE_WLED_KN),
+	GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P0LED_KN),
+	GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P1LED_KN),
+	GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P2LED_KN),
+	GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P3LED_KN),
+	GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+				1, MT7628_GPIO_MODE_P4LED_KN),
 	{ 0 }
 };
 
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index 328ceb3..2abe016 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -105,7 +105,7 @@
 	prb.iprb_ff = force_fire_and_forget ? 1 : 0;
 
 	/*
-	 * Set the appropriate number of PIO cresits for the widget.
+	 * Set the appropriate number of PIO credits for the widget.
 	 */
 	prb.iprb_xtalkctr = credits;
 
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index a2358b4..cfceaea 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -23,7 +23,7 @@
 static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /*
- * Lets see what else we need to do here. Set up sp, gp?
+ * Let's see what else we need to do here. Set up sp, gp?
  */
 void nmi_dump(void)
 {
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 20f582a..4fe5678 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -67,7 +67,7 @@
 		return -ENODEV;
 
 	/*
-	 * Okay, here's a xbow. Lets arbitrate and find
+	 * Okay, here's a xbow. Let's arbitrate and find
 	 * out if we should initialize it. Set enabled
 	 * hub connected at highest or lowest widget as
 	 * master.
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a046b30..160b880 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -263,7 +263,7 @@
 		static int spurious_irq_mask;
 		/*
 		 * At this point we can be sure the IRQ is spurious,
-		 * lets ACK and report it. [once per IRQ]
+		 * let's ACK and report it. [once per IRQ]
 		 */
 		if (!(spurious_irq_mask & irqmask)) {
 			printk(KERN_DEBUG
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index b369509..3b4538e 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -5,10 +5,12 @@
 ccflags-vdso := \
 	$(filter -I%,$(KBUILD_CFLAGS)) \
 	$(filter -E%,$(KBUILD_CFLAGS)) \
+	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS))
 cflags-vdso := $(ccflags-vdso) \
 	$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-	-O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \
+	-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+	-DDISABLE_BRANCH_PROFILING \
 	$(call cc-option, -fno-stack-protector)
 aflags-vdso := $(ccflags-vdso) \
 	$(filter -I%,$(KBUILD_CFLAGS)) \
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 05302bf..89bac98 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-2002  MontaVista Software Inc.
  *    Author: Yoichi Yuasa <source@mvista.com>
- *  Copuright (C) 2003-2005  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2003-2005  Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/arch/mn10300/boot/compressed/Makefile b/arch/mn10300/boot/compressed/Makefile
index 08a95e1..5f56f9d 100644
--- a/arch/mn10300/boot/compressed/Makefile
+++ b/arch/mn10300/boot/compressed/Makefile
@@ -8,7 +8,6 @@
 
 $(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
 	$(call if_changed,ld)
-	@:
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index fbb96ae..cd0a6cb 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -11,7 +11,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
index 2328f82..e74afc1 100644
--- a/arch/nios2/Makefile
+++ b/arch/nios2/Makefile
@@ -20,7 +20,7 @@
 
 export MMU
 
-LIBGCC		:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC         := $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 KBUILD_CFLAGS += -pipe -D__linux__ -D__ELF__
 KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MUL_SUPPORT),-mhw-mul,-mno-hw-mul)
@@ -53,7 +53,7 @@
 archclean:
 	$(Q)$(MAKE) $(clean)=$(nios2-boot)
 
-%.dtb:
+%.dtb: | scripts
 	$(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
 
 dtbs:
diff --git a/arch/nios2/boot/compressed/Makefile b/arch/nios2/boot/compressed/Makefile
index 5b0fb34..d5921c9 100644
--- a/arch/nios2/boot/compressed/Makefile
+++ b/arch/nios2/boot/compressed/Makefile
@@ -11,7 +11,6 @@
 
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(obj)/piggy.o FORCE
 	$(call if_changed,ld)
-	@:
 
 LDFLAGS_piggy.o := -r --format binary --oformat elf32-littlenios2 -T
 
diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h
index c4bf795..51a32c7 100644
--- a/arch/nios2/include/uapi/asm/unistd.h
+++ b/arch/nios2/include/uapi/asm/unistd.h
@@ -17,6 +17,8 @@
 
  #define sys_mmap2 sys_mmap_pgoff
 
+#define __ARCH_WANT_RENAMEAT
+
 /* Use the standard ABI for syscalls */
 #include <asm-generic/unistd.h>
 
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index ce40b71..471905b 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -20,6 +20,7 @@
 
 #define sys_mmap2 sys_mmap_pgoff
 
+#define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_CLONE
 
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 3d498a6..dc11738 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -6,6 +6,7 @@
 	select HAVE_OPROFILE
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_SYSCALL_TRACEPOINTS
 	select ARCH_WANT_FRAME_POINTERS
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
@@ -31,6 +32,8 @@
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
 	select ARCH_NO_COHERENT_DMA_MMAP
 	select CPU_NO_EFFICIENT_FFS
 
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 0a90b96..7ada309 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -52,8 +52,7 @@
 /* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
 extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
 				   unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
-				   unsigned long old, unsigned long new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
@@ -61,7 +60,7 @@
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
 #endif
 	case 4: return __cmpxchg_u32((unsigned int *)ptr,
 				     (unsigned int)old, (unsigned int)new_);
@@ -86,7 +85,7 @@
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
-	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+	case 8:	return __cmpxchg_u64((u64 *)ptr, old, new_);
 #endif
 	case 4:	return __cmpxchg_u32(ptr, old, new_);
 	default:
@@ -111,4 +110,6 @@
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 #endif
 
+#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+
 #endif /* _ASM_PARISC_CMPXCHG_H_ */
diff --git a/arch/parisc/include/asm/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h
index 8ce8b85..5637ac9 100644
--- a/arch/parisc/include/asm/eisa_eeprom.h
+++ b/arch/parisc/include/asm/eisa_eeprom.h
@@ -99,7 +99,7 @@
 #define HPEE_MEMORY_DECODE_24BITS 0x04
 #define HPEE_MEMORY_DECODE_32BITS 0x08
 /* byte 2 and 3 are a 16bit LE value
- * containging the memory size in kilobytes */
+ * containing the memory size in kilobytes */
 /* byte 4,5,6 are a 24bit LE value
  * containing the memory base address */
 
@@ -135,7 +135,7 @@
 #define HPEE_PORT_SHARED    0x40
 #define HPEE_PORT_MORE      0x80
 /* byte 1 and 2 is a 16bit LE value
- * conating the start port number */
+ * containing the start port number */
 
 #define HPEE_PORT_INIT_MAX_LEN     60 /* in bytes here */
 /* port init entry byte 0 */
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 24cd81d..d635c6b 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -6,6 +6,8 @@
 
 #define MCOUNT_INSN_SIZE 4
 
+extern unsigned long sys_call_table[];
+
 extern unsigned long return_address(unsigned int);
 
 #define ftrace_return_address(n) return_address(n)
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 49df148..ac8bd58 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -35,70 +35,57 @@
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	unsigned long int flags;
-	u32 val;
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, ret;
+	int oldval, ret;
+	u32 tmp;
+
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
 		return -EFAULT;
 
+	_futex_spin_lock_irqsave(uaddr, &flags);
 	pagefault_disable();
 
-	_futex_spin_lock_irqsave(uaddr, &flags);
+	ret = -EFAULT;
+	if (unlikely(get_user(oldval, uaddr) != 0))
+		goto out_pagefault_enable;
+
+	ret = 0;
+	tmp = oldval;
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		/* *(int *)UADDR2 = OPARG; */
-		ret = get_user(oldval, uaddr);
-		if (!ret)
-			ret = put_user(oparg, uaddr);
+		tmp = oparg;
 		break;
 	case FUTEX_OP_ADD:
-		/* *(int *)UADDR2 += OPARG; */
-		ret = get_user(oldval, uaddr);
-		if (!ret) {
-			val = oldval + oparg;
-			ret = put_user(val, uaddr);
-		}
+		tmp += oparg;
 		break;
 	case FUTEX_OP_OR:
-		/* *(int *)UADDR2 |= OPARG; */
-		ret = get_user(oldval, uaddr);
-		if (!ret) {
-			val = oldval | oparg;
-			ret = put_user(val, uaddr);
-		}
+		tmp |= oparg;
 		break;
 	case FUTEX_OP_ANDN:
-		/* *(int *)UADDR2 &= ~OPARG; */
-		ret = get_user(oldval, uaddr);
-		if (!ret) {
-			val = oldval & ~oparg;
-			ret = put_user(val, uaddr);
-		}
+		tmp &= ~oparg;
 		break;
 	case FUTEX_OP_XOR:
-		/* *(int *)UADDR2 ^= OPARG; */
-		ret = get_user(oldval, uaddr);
-		if (!ret) {
-			val = oldval ^ oparg;
-			ret = put_user(val, uaddr);
-		}
+		tmp ^= oparg;
 		break;
 	default:
 		ret = -ENOSYS;
 	}
 
+	if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+		ret = -EFAULT;
+
+out_pagefault_enable:
+	pagefault_enable();
 	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
-	pagefault_enable();
-
-	if (!ret) {
+	if (ret == 0) {
 		switch (cmp) {
 		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
 		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
@@ -112,12 +99,10 @@
 	return ret;
 }
 
-/* Non-atomic version */
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
-	int ret;
 	u32 val;
 	unsigned long flags;
 
@@ -137,17 +122,20 @@
 	 */
 
 	_futex_spin_lock_irqsave(uaddr, &flags);
+	if (unlikely(get_user(val, uaddr) != 0)) {
+		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		return -EFAULT;
+	}
 
-	ret = get_user(val, uaddr);
-
-	if (!ret && val == oldval)
-		ret = put_user(newval, uaddr);
+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		return -EFAULT;
+	}
 
 	*uval = val;
-
 	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
-	return ret;
+	return 0;
 }
 
 #endif /*__KERNEL__*/
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index 8121aa6..8be707e 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -40,7 +40,7 @@
    memory to indicate to the compiler that the assembly code reads
    or writes to items other than those listed in the input and output
    operands.  This may pessimize the code somewhat but __ldcw is
-   usually used within code blocks surrounded by memory barriors.  */
+   usually used within code blocks surrounded by memory barriers.  */
 #define __ldcw(a) ({						\
 	unsigned __ret;						\
 	__asm__ __volatile__(__LDCW " 0(%1),%0"			\
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 637ce8d..5e0b4e6 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -8,6 +8,8 @@
 #include <linux/err.h>
 #include <asm/ptrace.h>
 
+#define NR_syscalls (__NR_Linux_syscalls)
+
 static inline long syscall_get_nr(struct task_struct *tsk,
 				  struct pt_regs *regs)
 {
@@ -33,12 +35,19 @@
 		args[1] = regs->gr[25];
 	case 1:
 		args[0] = regs->gr[26];
+	case 0:
 		break;
 	default:
 		BUG();
 	}
 }
 
+static inline long syscall_get_return_value(struct task_struct *task,
+						struct pt_regs *regs)
+{
+	return regs->gr[28];
+}
+
 static inline void syscall_set_return_value(struct task_struct *task,
 					    struct pt_regs *regs,
 					    int error, long val)
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index e96e693..7581330e 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -55,6 +55,7 @@
 #define TIF_SINGLESTEP		9	/* single stepping? */
 #define TIF_BLOCKSTEP		10	/* branch stepping? */
 #define TIF_SECCOMP		11	/* secure computing */
+#define TIF_SYSCALL_TRACEPOINT	12	/* syscall tracepoint instrumentation */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -66,12 +67,13 @@
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |	\
 				 _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
-				 _TIF_SECCOMP)
+				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
 
 #ifdef CONFIG_64BIT
 # ifdef CONFIG_COMPAT
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 4736020..5e953ab 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -8,6 +8,8 @@
 void parisc_terminate(char *msg, struct pt_regs *regs,
 		int code, unsigned long offset) __noreturn __cold;
 
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 		unsigned long address);
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 7955e43..0f59fd9 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -40,14 +40,10 @@
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_KERNEL(ptr)		BUILD_BUG()
-#define LDD_USER(ptr)		BUILD_BUG()
-#define STD_KERNEL(x, ptr)	__put_kernel_asm64(x, ptr)
+#define LDD_USER(ptr)		__get_user_asm64(ptr)
 #define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
 #else
-#define LDD_KERNEL(ptr)		__get_kernel_asm("ldd", ptr)
 #define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
-#define STD_KERNEL(x, ptr)	__put_kernel_asm("std", x, ptr)
 #define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
 #endif
 
@@ -80,70 +76,70 @@
 	unsigned long fault_addr;
 };
 
+/*
+ * load_sr2() preloads the space register %%sr2 - based on the value of
+ * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
+ * is 0), or with the current value of %%sr3 to access user space (USER_DS)
+ * memory. The following __get_user_asm() and __put_user_asm() functions have
+ * %%sr2 hard-coded to access the requested memory.
+ */
+#define load_sr2() \
+	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
+		" mfsp %%sr3,%0\n\t"		\
+		" mtsp %0,%%sr2\n\t"		\
+		: : "r"(get_fs()) : )
+
 #define __get_user(x, ptr)                               \
 ({                                                       \
 	register long __gu_err __asm__ ("r8") = 0;       \
 	register long __gu_val __asm__ ("r9") = 0;       \
 							 \
-	if (segment_eq(get_fs(), KERNEL_DS)) {           \
-	    switch (sizeof(*(ptr))) {                    \
-	    case 1: __get_kernel_asm("ldb", ptr); break; \
-	    case 2: __get_kernel_asm("ldh", ptr); break; \
-	    case 4: __get_kernel_asm("ldw", ptr); break; \
-	    case 8: LDD_KERNEL(ptr); break;		 \
-	    default: BUILD_BUG(); break;		 \
-	    }                                            \
-	}                                                \
-	else {                                           \
-	    switch (sizeof(*(ptr))) {                    \
+	load_sr2();					 \
+	switch (sizeof(*(ptr))) {			 \
 	    case 1: __get_user_asm("ldb", ptr); break;   \
 	    case 2: __get_user_asm("ldh", ptr); break;   \
 	    case 4: __get_user_asm("ldw", ptr); break;   \
 	    case 8: LDD_USER(ptr);  break;		 \
 	    default: BUILD_BUG(); break;		 \
-	    }                                            \
 	}                                                \
 							 \
 	(x) = (__force __typeof__(*(ptr))) __gu_val;	 \
 	__gu_err;                                        \
 })
 
-#define __get_kernel_asm(ldx, ptr)                      \
-	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t"		\
+#define __get_user_asm(ldx, ptr)                        \
+	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"	\
 		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
 		: "=r"(__gu_val), "=r"(__gu_err)        \
 		: "r"(ptr), "1"(__gu_err)		\
 		: "r1");
 
-#define __get_user_asm(ldx, ptr)                        \
-	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"	\
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
-		: "=r"(__gu_val), "=r"(__gu_err)        \
+#if !defined(CONFIG_64BIT)
+
+#define __get_user_asm64(ptr) 				\
+	__asm__("\n1:\tldw 0(%%sr2,%2),%0"		\
+		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"		\
+		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
+		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "r"(ptr), "1"(__gu_err)		\
 		: "r1");
 
+#endif /* !defined(CONFIG_64BIT) */
+
+
 #define __put_user(x, ptr)                                      \
 ({								\
 	register long __pu_err __asm__ ("r8") = 0;      	\
         __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
 								\
-	if (segment_eq(get_fs(), KERNEL_DS)) {                  \
-	    switch (sizeof(*(ptr))) {                           \
-	    case 1: __put_kernel_asm("stb", __x, ptr); break;   \
-	    case 2: __put_kernel_asm("sth", __x, ptr); break;   \
-	    case 4: __put_kernel_asm("stw", __x, ptr); break;   \
-	    case 8: STD_KERNEL(__x, ptr); break;		\
-	    default: BUILD_BUG(); break;			\
-	    }                                                   \
-	}                                                       \
-	else {                                                  \
-	    switch (sizeof(*(ptr))) {                           \
+	load_sr2();						\
+	switch (sizeof(*(ptr))) {				\
 	    case 1: __put_user_asm("stb", __x, ptr); break;     \
 	    case 2: __put_user_asm("sth", __x, ptr); break;     \
 	    case 4: __put_user_asm("stw", __x, ptr); break;     \
 	    case 8: STD_USER(__x, ptr); break;			\
 	    default: BUILD_BUG(); break;			\
-	    }                                                   \
 	}                                                       \
 								\
 	__pu_err;						\
@@ -159,17 +155,9 @@
  * r8/r9 are already listed as err/val.
  */
 
-#define __put_kernel_asm(stx, x, ptr)                       \
-	__asm__ __volatile__ (                              \
-		"\n1:\t" stx "\t%2,0(%1)\n\t"		    \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
-		: "=r"(__pu_err)                            \
-		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
-	    	: "r1")
-
 #define __put_user_asm(stx, x, ptr)                         \
 	__asm__ __volatile__ (                              \
-		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"	    \
+		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"	    \
 		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
 		: "=r"(__pu_err)                            \
 		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
@@ -178,21 +166,10 @@
 
 #if !defined(CONFIG_64BIT)
 
-#define __put_kernel_asm64(__val, ptr) do {		    \
-	__asm__ __volatile__ (				    \
-		"\n1:\tstw %2,0(%1)"			    \
-		"\n2:\tstw %R2,4(%1)\n\t"		    \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
-		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
-		: "=r"(__pu_err)                            \
-		: "r"(ptr), "r"(__val), "0"(__pu_err) \
-		: "r1");				    \
-} while (0)
-
 #define __put_user_asm64(__val, ptr) do {	    	    \
 	__asm__ __volatile__ (				    \
-		"\n1:\tstw %2,0(%%sr3,%1)"		    \
-		"\n2:\tstw %R2,4(%%sr3,%1)\n\t"		    \
+		"\n1:\tstw %2,0(%%sr2,%1)"		    \
+		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"		    \
 		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
 		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
 		: "=r"(__pu_err)                            \
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index 702498f..0609ff1 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -59,7 +59,7 @@
 #define PDC_MODEL_GET_BOOT__OP	8	/* returns boot test options	*/
 #define PDC_MODEL_SET_BOOT__OP	9	/* set boot test options	*/
 
-#define PA89_INSTRUCTION_SET	0x4	/* capatibilies returned	*/
+#define PA89_INSTRUCTION_SET	0x4	/* capabilities returned	*/
 #define PA90_INSTRUCTION_SET	0x8
 
 #define PDC_CACHE	5		/* return/set cache (& TLB) info*/
diff --git a/arch/parisc/include/uapi/asm/ptrace.h b/arch/parisc/include/uapi/asm/ptrace.h
index c4fa6c8..02ce2eb 100644
--- a/arch/parisc/include/uapi/asm/ptrace.h
+++ b/arch/parisc/include/uapi/asm/ptrace.h
@@ -13,6 +13,11 @@
  * N.B. gdb/strace care about the size and offsets within this
  * structure. If you change things, you may break object compatibility
  * for those applications.
+ *
+ * Please do NOT use this structure for future programs, but use
+ * user_regs_struct (see below) instead.
+ *
+ * It can be accessed through PTRACE_PEEKUSR/PTRACE_POKEUSR only.
  */
 
 struct pt_regs {
@@ -33,6 +38,45 @@
 	unsigned long ipsw;	/* CR22 */
 };
 
+/**
+ * struct user_regs_struct - User general purpose registers
+ *
+ * This is the user-visible general purpose register state structure
+ * which is used to define the elf_gregset_t.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS
+ * and through PTRACE_GETREGS.
+ */
+struct user_regs_struct {
+	unsigned long gr[32];	/* PSW is in gr[0] */
+	unsigned long sr[8];
+	unsigned long iaoq[2];
+	unsigned long iasq[2];
+	unsigned long sar;	/* CR11 */
+	unsigned long iir;	/* CR19 */
+	unsigned long isr;	/* CR20 */
+	unsigned long ior;	/* CR21 */
+	unsigned long ipsw;	/* CR22 */
+	unsigned long cr0;
+	unsigned long cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
+	unsigned long cr8, cr9, cr12, cr13, cr10, cr15;
+	unsigned long _pad[80-64];	/* pad to ELF_NGREG (80) */
+};
+
+/**
+ * struct user_fp_struct - User floating point registers
+ *
+ * This is the user-visible floating point register state structure.
+ * It uses the same layout and size as elf_fpregset_t.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_PRFPREG
+ * and through PTRACE_GETFPREGS.
+ */
+struct user_fp_struct {
+	__u64 fr[32];
+};
+
+
 /*
  * The numbers chosen here are somewhat arbitrary but absolutely MUST
  * not overlap with any of the number assigned in <linux/ptrace.h>.
@@ -43,5 +87,9 @@
  */
 #define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
 
+#define PTRACE_GETREGS		18
+#define PTRACE_SETREGS		19
+#define PTRACE_GETFPREGS	14
+#define PTRACE_SETFPREGS	15
 
 #endif /* _UAPI_PARISC_PTRACE_H */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index cc0ce92..a9b9407 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -102,7 +102,7 @@
 #define __NR_uselib              (__NR_Linux + 86)
 #define __NR_swapon              (__NR_Linux + 87)
 #define __NR_reboot              (__NR_Linux + 88)
-#define __NR_mmap2             (__NR_Linux + 89)
+#define __NR_mmap2               (__NR_Linux + 89)
 #define __NR_mmap                (__NR_Linux + 90)
 #define __NR_munmap              (__NR_Linux + 91)
 #define __NR_truncate            (__NR_Linux + 92)
@@ -114,7 +114,7 @@
 #define __NR_recv                (__NR_Linux + 98)
 #define __NR_statfs              (__NR_Linux + 99)
 #define __NR_fstatfs            (__NR_Linux + 100)
-#define __NR_stat64           (__NR_Linux + 101)
+#define __NR_stat64             (__NR_Linux + 101)
 /* #define __NR_socketcall         (__NR_Linux + 102) */
 #define __NR_syslog             (__NR_Linux + 103)
 #define __NR_setitimer          (__NR_Linux + 104)
@@ -140,17 +140,17 @@
 #define __NR_adjtimex           (__NR_Linux + 124)
 #define __NR_mprotect           (__NR_Linux + 125)
 #define __NR_sigprocmask        (__NR_Linux + 126)
-#define __NR_create_module      (__NR_Linux + 127)
+#define __NR_create_module      (__NR_Linux + 127) /* not used */
 #define __NR_init_module        (__NR_Linux + 128)
 #define __NR_delete_module      (__NR_Linux + 129)
-#define __NR_get_kernel_syms    (__NR_Linux + 130)
+#define __NR_get_kernel_syms    (__NR_Linux + 130) /* not used */
 #define __NR_quotactl           (__NR_Linux + 131)
 #define __NR_getpgid            (__NR_Linux + 132)
 #define __NR_fchdir             (__NR_Linux + 133)
 #define __NR_bdflush            (__NR_Linux + 134)
 #define __NR_sysfs              (__NR_Linux + 135)
 #define __NR_personality        (__NR_Linux + 136)
-#define __NR_afs_syscall        (__NR_Linux + 137) /* Syscall for Andrew File System */
+#define __NR_afs_syscall        (__NR_Linux + 137) /* not used */
 #define __NR_setfsuid           (__NR_Linux + 138)
 #define __NR_setfsgid           (__NR_Linux + 139)
 #define __NR__llseek            (__NR_Linux + 140)
@@ -180,9 +180,9 @@
 #define __NR_setresuid          (__NR_Linux + 164)
 #define __NR_getresuid          (__NR_Linux + 165)
 #define __NR_sigaltstack        (__NR_Linux + 166)
-#define __NR_query_module       (__NR_Linux + 167)
+#define __NR_query_module       (__NR_Linux + 167) /* not used */
 #define __NR_poll               (__NR_Linux + 168)
-#define __NR_nfsservctl         (__NR_Linux + 169)
+#define __NR_nfsservctl         (__NR_Linux + 169) /* not used */
 #define __NR_setresgid          (__NR_Linux + 170)
 #define __NR_getresgid          (__NR_Linux + 171)
 #define __NR_prctl              (__NR_Linux + 172)
@@ -209,18 +209,16 @@
 #define __NR_shmdt              (__NR_Linux + 193)
 #define __NR_shmget             (__NR_Linux + 194)
 #define __NR_shmctl             (__NR_Linux + 195)
-
-#define __NR_getpmsg		(__NR_Linux + 196) /* Somebody *wants* streams? */
-#define __NR_putpmsg		(__NR_Linux + 197)
-
+#define __NR_getpmsg            (__NR_Linux + 196) /* not used */
+#define __NR_putpmsg            (__NR_Linux + 197) /* not used */
 #define __NR_lstat64            (__NR_Linux + 198)
 #define __NR_truncate64         (__NR_Linux + 199)
 #define __NR_ftruncate64        (__NR_Linux + 200)
 #define __NR_getdents64         (__NR_Linux + 201)
 #define __NR_fcntl64            (__NR_Linux + 202)
-#define __NR_attrctl            (__NR_Linux + 203)
-#define __NR_acl_get            (__NR_Linux + 204)
-#define __NR_acl_set            (__NR_Linux + 205)
+#define __NR_attrctl            (__NR_Linux + 203) /* not used */
+#define __NR_acl_get            (__NR_Linux + 204) /* not used */
+#define __NR_acl_set            (__NR_Linux + 205) /* not used */
 #define __NR_gettid             (__NR_Linux + 206)
 #define __NR_readahead          (__NR_Linux + 207)
 #define __NR_tkill              (__NR_Linux + 208)
@@ -228,8 +226,8 @@
 #define __NR_futex              (__NR_Linux + 210)
 #define __NR_sched_setaffinity  (__NR_Linux + 211)
 #define __NR_sched_getaffinity  (__NR_Linux + 212)
-#define __NR_set_thread_area    (__NR_Linux + 213)
-#define __NR_get_thread_area    (__NR_Linux + 214)
+#define __NR_set_thread_area    (__NR_Linux + 213) /* not used */
+#define __NR_get_thread_area    (__NR_Linux + 214) /* not used */
 #define __NR_io_setup           (__NR_Linux + 215)
 #define __NR_io_destroy         (__NR_Linux + 216)
 #define __NR_io_getevents       (__NR_Linux + 217)
@@ -278,7 +276,7 @@
 #define __NR_mbind		(__NR_Linux + 260)
 #define __NR_get_mempolicy	(__NR_Linux + 261)
 #define __NR_set_mempolicy	(__NR_Linux + 262)
-#define __NR_vserver		(__NR_Linux + 263)
+#define __NR_vserver		(__NR_Linux + 263) /* not used */
 #define __NR_add_key		(__NR_Linux + 264)
 #define __NR_request_key	(__NR_Linux + 265)
 #define __NR_keyctl		(__NR_Linux + 266)
@@ -318,7 +316,7 @@
 #define __NR_kexec_load		(__NR_Linux + 300)
 #define __NR_utimensat		(__NR_Linux + 301)
 #define __NR_signalfd		(__NR_Linux + 302)
-#define __NR_timerfd		(__NR_Linux + 303)
+#define __NR_timerfd		(__NR_Linux + 303) /* not used */
 #define __NR_eventfd		(__NR_Linux + 304)
 #define __NR_fallocate		(__NR_Linux + 305)
 #define __NR_timerfd_create	(__NR_Linux + 306)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 39127d3..baa3d9d 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -667,7 +667,7 @@
 	 * boundary
 	 */
 
-	.text
+	.section .text.hot
 	.align 2048
 
 ENTRY(fault_vector_20)
@@ -2019,6 +2019,7 @@
 	.procend
 ENDPROC(mcount)
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.align 8
 	.globl return_to_handler
 	.type  return_to_handler, @function
@@ -2040,11 +2041,17 @@
 #endif
 
 	/* call ftrace_return_to_handler(0) */
+	.import ftrace_return_to_handler,code
+	load32 ftrace_return_to_handler,%ret0
+	load32 .Lftrace_ret,%r2
 #ifdef CONFIG_64BIT
 	ldo -16(%sp),%ret1		/* Reference param save area */
+	bve	(%ret0)
+#else
+	bv	%r0(%ret0)
 #endif
-	BL ftrace_return_to_handler,%r2
 	ldi 0,%r26
+.Lftrace_ret:
 	copy %ret0,%rp
 
 	/* restore original return values */
@@ -2062,6 +2069,8 @@
 	.procend
 ENDPROC(return_to_handler)
 
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif	/* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSTACKS
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index b13f9ec..a828a0a 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -18,12 +18,15 @@
 #include <asm/ftrace.h>
 
 
+#define __hot __attribute__ ((__section__ (".text.hot")))
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void __hot prepare_ftrace_return(unsigned long *parent,
+					unsigned long self_addr)
 {
 	unsigned long old;
 	struct ftrace_graph_ent trace;
@@ -53,7 +56,7 @@
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-void notrace ftrace_function_trampoline(unsigned long parent,
+void notrace __hot ftrace_function_trampoline(unsigned long parent,
 				unsigned long self_addr,
 				unsigned long org_sp_gr3)
 {
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e81ccf1..5adc339 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -324,8 +324,9 @@
 		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
 		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
-		printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
-			cpunum, coproc_cfg.revision, coproc_cfg.model);
+		if (cpunum == 0)
+			printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
+				cpunum, coproc_cfg.revision, coproc_cfg.model);
 
 		/*
 		** store status register to stack (hopefully aligned)
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 8fb81a3..b5458b3 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -4,18 +4,20 @@
  * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
  * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
  * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
- * Copyright (C) 2008 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
  */
 
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/elf.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/personality.h>
+#include <linux/regset.h>
 #include <linux/security.h>
 #include <linux/seccomp.h>
 #include <linux/compat.h>
@@ -30,6 +32,17 @@
 /* PSW bits we allow the debugger to modify */
 #define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * These are our native regset flavors.
+ */
+enum parisc_regset {
+	REGSET_GENERAL,
+	REGSET_FP
+};
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -114,6 +127,7 @@
 long arch_ptrace(struct task_struct *child, long request,
 		 unsigned long addr, unsigned long data)
 {
+	unsigned long __user *datap = (unsigned long __user *)data;
 	unsigned long tmp;
 	long ret = -EIO;
 
@@ -126,7 +140,7 @@
 		     addr >= sizeof(struct pt_regs))
 			break;
 		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
-		ret = put_user(tmp, (unsigned long __user *) data);
+		ret = put_user(tmp, datap);
 		break;
 
 	/* Write the word at location addr in the USER area.  This will need
@@ -165,6 +179,34 @@
 		}
 		break;
 
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_GENERAL,
+					   0, sizeof(struct user_regs_struct),
+					   datap);
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_GENERAL,
+					     0, sizeof(struct user_regs_struct),
+					     datap);
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_FP,
+					   0, sizeof(struct user_fp_struct),
+					   datap);
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_FP,
+					     0, sizeof(struct user_fp_struct),
+					     datap);
+
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
@@ -283,6 +325,10 @@
 		regs->gr[20] = -1UL;
 		goto out;
 	}
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_enter(regs, regs->gr[20]);
+#endif
 
 #ifdef CONFIG_64BIT
 	if (!is_compat_task())
@@ -311,6 +357,324 @@
 
 	audit_syscall_exit(regs);
 
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_exit(regs, regs->gr[20]);
+#endif
+
 	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, stepping);
 }
+
+
+/*
+ * regset functions.
+ */
+
+static int fpr_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	__u64 *k = kbuf;
+	__u64 __user *u = ubuf;
+	__u64 reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NFPREG; --count)
+			*k++ = regs->fr[pos++];
+	else
+		for (; count > 0 && pos < ELF_NFPREG; --count)
+			if (__put_user(regs->fr[pos++], u++))
+				return -EFAULT;
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NFPREG * sizeof(reg), -1);
+}
+
+static int fpr_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const __u64 *k = kbuf;
+	const __u64 __user *u = ubuf;
+	__u64 reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NFPREG; --count)
+			regs->fr[pos++] = *k++;
+	else
+		for (; count > 0 && pos < ELF_NFPREG; --count) {
+			if (__get_user(reg, u++))
+				return -EFAULT;
+			regs->fr[pos++] = reg;
+		}
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NFPREG * sizeof(reg), -1);
+}
+
+#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))
+
+static unsigned long get_reg(struct pt_regs *regs, int num)
+{
+	switch (num) {
+	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
+	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
+	case RI(iasq[0]):		return regs->iasq[0];
+	case RI(iasq[1]):		return regs->iasq[1];
+	case RI(iaoq[0]):		return regs->iaoq[0];
+	case RI(iaoq[1]):		return regs->iaoq[1];
+	case RI(sar):			return regs->sar;
+	case RI(iir):			return regs->iir;
+	case RI(isr):			return regs->isr;
+	case RI(ior):			return regs->ior;
+	case RI(ipsw):			return regs->ipsw;
+	case RI(cr27):			return regs->cr27;
+	case RI(cr0):			return mfctl(0);
+	case RI(cr24):			return mfctl(24);
+	case RI(cr25):			return mfctl(25);
+	case RI(cr26):			return mfctl(26);
+	case RI(cr28):			return mfctl(28);
+	case RI(cr29):			return mfctl(29);
+	case RI(cr30):			return mfctl(30);
+	case RI(cr31):			return mfctl(31);
+	case RI(cr8):			return mfctl(8);
+	case RI(cr9):			return mfctl(9);
+	case RI(cr12):			return mfctl(12);
+	case RI(cr13):			return mfctl(13);
+	case RI(cr10):			return mfctl(10);
+	case RI(cr15):			return mfctl(15);
+	default:			return 0;
+	}
+}
+
+static void set_reg(struct pt_regs *regs, int num, unsigned long val)
+{
+	switch (num) {
+	case RI(gr[0]): /*
+			 * PSW is in gr[0].
+			 * Allow writing to Nullify, Divide-step-correction,
+			 * and carry/borrow bits.
+			 * BEWARE, if you set N, and then single step, it won't
+			 * stop on the nullified instruction.
+			 */
+			val &= USER_PSW_BITS;
+			regs->gr[0] &= ~USER_PSW_BITS;
+			regs->gr[0] |= val;
+			return;
+	case RI(gr[1]) ... RI(gr[31]):
+			regs->gr[num - RI(gr[0])] = val;
+			return;
+	case RI(iaoq[0]):
+	case RI(iaoq[1]):
+			regs->iaoq[num - RI(iaoq[0])] = val;
+			return;
+	case RI(sar):	regs->sar = val;
+			return;
+	default:	return;
+#if 0
+	/* do not allow to change any of the following registers (yet) */
+	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
+	case RI(iasq[0]):		return regs->iasq[0];
+	case RI(iasq[1]):		return regs->iasq[1];
+	case RI(iir):			return regs->iir;
+	case RI(isr):			return regs->isr;
+	case RI(ior):			return regs->ior;
+	case RI(ipsw):			return regs->ipsw;
+	case RI(cr27):			return regs->cr27;
+        case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
+        case cr8, cr9, cr12, cr13, cr10, cr15;
+#endif
+	}
+}
+
+static int gpr_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	unsigned long *k = kbuf;
+	unsigned long __user *u = ubuf;
+	unsigned long reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			*k++ = get_reg(regs, pos++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			if (__put_user(get_reg(regs, pos++), u++))
+				return -EFAULT;
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NGREG * sizeof(reg), -1);
+}
+
+static int gpr_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const unsigned long *k = kbuf;
+	const unsigned long __user *u = ubuf;
+	unsigned long reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			set_reg(regs, pos++, *k++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count) {
+			if (__get_user(reg, u++))
+				return -EFAULT;
+			set_reg(regs, pos++, reg);
+		}
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NGREG * sizeof(reg), -1);
+}
+
+static const struct user_regset native_regsets[] = {
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+		.size = sizeof(long), .align = sizeof(long),
+		.get = gpr_get, .set = gpr_set
+	},
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+		.size = sizeof(__u64), .align = sizeof(__u64),
+		.get = fpr_get, .set = fpr_set
+	}
+};
+
+static const struct user_regset_view user_parisc_native_view = {
+	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
+	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+#ifdef CONFIG_64BIT
+#include <linux/compat.h>
+
+static int gpr32_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	compat_ulong_t *k = kbuf;
+	compat_ulong_t __user *u = ubuf;
+	compat_ulong_t reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			*k++ = get_reg(regs, pos++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
+				return -EFAULT;
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NGREG * sizeof(reg), -1);
+}
+
+static int gpr32_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const compat_ulong_t *k = kbuf;
+	const compat_ulong_t __user *u = ubuf;
+	compat_ulong_t reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			set_reg(regs, pos++, *k++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count) {
+			if (__get_user(reg, u++))
+				return -EFAULT;
+			set_reg(regs, pos++, reg);
+		}
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NGREG * sizeof(reg), -1);
+}
+
+/*
+ * These are the regset flavors matching the 32bit native set.
+ */
+static const struct user_regset compat_regsets[] = {
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+		.get = gpr32_get, .set = gpr32_set
+	},
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+		.size = sizeof(__u64), .align = sizeof(__u64),
+		.get = fpr_get, .set = fpr_set
+	}
+};
+
+static const struct user_regset_view user_parisc_compat_view = {
+	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
+	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
+};
+#endif	/* CONFIG_64BIT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
+	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
+#ifdef CONFIG_64BIT
+	if (is_compat_task())
+		return &user_parisc_compat_view;
+#endif
+	return &user_parisc_native_view;
+}
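/*
 * Illustrative sketch, not part of the patch above.  The regset views just
 * added hook arch/parisc into the generic regset core, so a tracer can fetch
 * a child's general registers via PTRACE_GETREGSET/NT_PRSTATUS instead of a
 * PTRACE_PEEKUSER loop.  Header choice and error handling are simplified,
 * and struct user_regs_struct is assumed to match the kernel's parisc
 * definition that the BUILD_BUG_ON above checks against.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>		/* NT_PRSTATUS */
#include <asm/ptrace.h>		/* struct user_regs_struct (assumed uapi header) */

static int dump_gprs(pid_t child)
{
	struct user_regs_struct gprs;
	struct iovec iov = {
		.iov_base = &gprs,
		.iov_len  = sizeof(gprs),
	};

	/* addr selects the regset note type, data points at the iovec */
	if (ptrace(PTRACE_GETREGSET, child, (void *)NT_PRSTATUS, &iov) < 0) {
		perror("PTRACE_GETREGSET");
		return -1;
	}

	/* the kernel trims iov.iov_len to the amount of register data copied */
	printf("got %zu bytes of GPR state\n", iov.iov_len);
	return 0;
}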
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 57b4836..d03422e 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -912,6 +912,7 @@
 
 	.align 8
 ENTRY(sys_call_table)
+	.export sys_call_table,data
 #include "syscall_table.S"
 END(sys_call_table)
 
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 400acac0..31ec99a 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -38,6 +38,18 @@
 
 static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
+#ifndef CONFIG_64BIT
+/*
+ * The processor-internal cycle counter (Control Register 16) is used as time
+ * source for the sched_clock() function.  This register is 64bit wide on a
+ * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
+ * requires a 64-bit counter, on a 32-bit kernel we emulate the upper 32 bits
+ * with a per-cpu variable which we increment every time the counter
+ * wraps around (which happens every ~4 seconds).
+ */
+static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
+#endif
+
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
@@ -108,6 +120,12 @@
 	 */
 	mtctl(next_tick, 16);
 
+#if !defined(CONFIG_64BIT)
+	/* check for overflow on a 32bit kernel (every ~4 seconds). */
+	if (unlikely(next_tick < now))
+		this_cpu_inc(cr16_high_32_bits);
+#endif
+
 	/* Skip one clocktick on purpose if we missed next_tick.
 	 * The new CR16 must be "later" than current CR16 otherwise
 	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -219,6 +237,12 @@
 	unsigned int cpu = smp_processor_id();
 	unsigned long next_tick = mfctl(16) + clocktick;
 
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* With multiple 64-bit CPUs online, the cr16s are not synchronized. */
+	if (cpu != 0)
+		clear_sched_clock_stable();
+#endif
+
 	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
 
 	per_cpu(cpu_data, cpu).it_value = next_tick;
@@ -246,15 +270,47 @@
 	}
 }
 
+
+/*
+ * sched_clock() framework
+ */
+
+static u32 cyc2ns_mul __read_mostly;
+static u32 cyc2ns_shift __read_mostly;
+
+u64 sched_clock(void)
+{
+	u64 now;
+
+	/* Get current cycle counter (Control Register 16). */
+#ifdef CONFIG_64BIT
+	now = mfctl(16);
+#else
+	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
+#endif
+
+	/* return the value in ns (cycles_2_ns) */
+	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+}
+
+
+/*
+ * timer interrupt and sched_clock() initialization
+ */
+
 void __init time_init(void)
 {
 	unsigned long current_cr16_khz;
 
+	current_cr16_khz = PAGE0->mem_10msec/10;  /* kHz */
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
 
+	/* calculate mult/shift values for cr16 */
+	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
+				NSEC_PER_MSEC, 0);
+
 	start_cpu_itimer();	/* get CPU 0 started */
 
 	/* register at clocksource framework */
-	current_cr16_khz = PAGE0->mem_10msec/10;  /* kHz */
 	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
 }
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index d7c0acb..2b65c01 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -28,6 +28,7 @@
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
+#include <asm/traps.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
@@ -130,8 +131,6 @@
 
 int unaligned_enabled __read_mostly = 1;
 
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
 static int emulate_ldh(struct pt_regs *regs, int toreg)
 {
 	unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@
 		break;
 	}
 
-	if (modify && R1(regs->iir))
+	if (ret == 0 && modify && R1(regs->iir))
 		regs->gr[R1(regs->iir)] = newbase;
 
 
@@ -677,6 +676,14 @@
 
 	if (ret)
 	{
+		/*
+		 * The unaligned handler failed.
+		 * If we were called by __get_user() or __put_user(), jump
+		 * to its exception fixup handler instead of crashing.
+		 */
+		if (!user_mode(regs) && fixup_exception(regs))
+			return;
+
 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
 		die_if_kernel("Unaligned data reference", regs, 28);
 
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index ddd988b..e278a87 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -75,7 +75,10 @@
 	if (addr >= kernel_unwind_table.start && 
 	    addr <= kernel_unwind_table.end)
 		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
-	else 
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->start && 
 			    addr <= table->end)
@@ -86,6 +89,8 @@
 				break;
 			}
 		}
+		spin_unlock_irqrestore(&unwind_lock, flags);
+	}
 
 	return e;
 }
@@ -303,18 +308,16 @@
 
 			insn = *(unsigned int *)npc;
 
-			if ((insn & 0xffffc000) == 0x37de0000 ||
-			    (insn & 0xffe00000) == 0x6fc00000) {
+			if ((insn & 0xffffc001) == 0x37de0000 ||
+			    (insn & 0xffe00001) == 0x6fc00000) {
 				/* ldo X(sp), sp, or stwm X,D(sp) */
-				frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-					((insn & 0x3fff) >> 1);
+				frame_size += (insn & 0x3fff) >> 1;
 				dbg("analyzing func @ %lx, insn=%08x @ "
 				    "%lx, frame_size = %ld\n", info->ip,
 				    insn, npc, frame_size);
-			} else if ((insn & 0xffe00008) == 0x73c00008) {
+			} else if ((insn & 0xffe00009) == 0x73c00008) {
 				/* std,ma X,D(sp) */
-				frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-					(((insn >> 4) & 0x3ff) << 3);
+				frame_size += ((insn >> 4) & 0x3ff) << 3;
 				dbg("analyzing func @ %lx, insn=%08x @ "
 				    "%lx, frame_size = %ld\n", info->ip,
 				    insn, npc, frame_size);
@@ -333,6 +336,9 @@
 			}
 		}
 
+		if (frame_size > e->Total_frame_size << 3)
+			frame_size = e->Total_frame_size << 3;
+
 		if (!unwind_special(info, e->region_start, frame_size)) {
 			info->prev_sp = info->sp - frame_size;
 			if (e->Millicode)
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 1871188..8e45b0a 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -55,11 +55,10 @@
 }
 
 
-#ifdef CONFIG_64BIT
-unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
 {
 	unsigned long flags;
-	unsigned long prev;
+	u64 prev;
 
 	_atomic_spin_lock_irqsave(ptr, flags);
 	if ((prev = *ptr) == old)
@@ -67,7 +66,6 @@
 	_atomic_spin_unlock_irqrestore(ptr, flags);
 	return prev;
 }
-#endif
 
 unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
 {
diff --git a/arch/parisc/math-emu/fpudispatch.c b/arch/parisc/math-emu/fpudispatch.c
index 673b73e..18df123 100644
--- a/arch/parisc/math-emu/fpudispatch.c
+++ b/arch/parisc/math-emu/fpudispatch.c
@@ -184,7 +184,7 @@
 
 /*
  * this routine will decode the excepting floating point instruction and
- * call the approiate emulation routine.
+ * call the appropriate emulation routine.
  * It is called by decode_fpu with the following parameters:
  * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register)
  * where current_ir is the instruction to be emulated,
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 3dc75de..549c24c 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -190,12 +190,21 @@
 					 /* DMA */ 0x2 &UIC0 0xc 0x4>;
 		};
 
+		AHBDMA: dma@bffd0800 {
+			compatible = "snps,dma-spear1340";
+			reg = <4 0xbffd0800 0x400>;
+			interrupt-parent = <&UIC3>;
+			interrupts = <0x5 0x4>;
+			#dma-cells = <3>;
+		};
+
 		SATA0: sata@bffd1000 {
 			compatible = "amcc,sata-460ex";
-			reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
+			reg = <4 0xbffd1000 0x800>;
 			interrupt-parent = <&UIC3>;
-			interrupts = <0x0 0x4       /* SATA */
-				      0x5 0x4>;     /* AHBDMA */
+			interrupts = <0x0 0x4>;
+			dmas = <&AHBDMA 0 1 0>;
+			dma-names = "sata-dma";
 		};
 
 		POB0: opb {
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c1e82e9..a0948f4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -717,7 +717,7 @@
 #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
 #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1	798
-#define SPRN_MMCR2	769
+#define SPRN_MMCR2	785
 #define SPRN_MMCRA	0x312
 #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -754,13 +754,13 @@
 #define SPRN_PMC6	792
 #define SPRN_PMC7	793
 #define SPRN_PMC8	794
-#define SPRN_SIAR	780
-#define SPRN_SDAR	781
 #define SPRN_SIER	784
 #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_SIAR	796
+#define SPRN_SDAR	797
 #define SPRN_TACR	888
 #define SPRN_TCSCR	889
 #define SPRN_CSIGR	890
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index da51925..ccd2037 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -656,6 +656,7 @@
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
+	W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index def1b8b..6767605 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -195,7 +195,8 @@
 	 * and end up putting it elsewhere.
 	 * Add enough to the size so that the result can be aligned.
 	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      (vdso_pages << PAGE_SHIFT) +
 				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5926896..b2740c6 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -159,6 +159,19 @@
 	},
 };
 
+/*
+ * 'R' and 'C' update notes:
+ *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ *    create writeable HPTEs without C set, because the hcall H_PROTECT
+ *    that we use in that case will not update C
+ *  - This is not a problem, however, because we also don't do the fancy
+ *    "no flush" variant of eviction, and we use H_REMOVE, which will do
+ *    the right thing, so we don't hit that race
+ *
+ *  - Under bare metal, we do have the race, so we need R and C set
+ *  - We make sure R is always set and never lost
+ *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
 unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
 	unsigned long rflags = 0;
@@ -186,9 +199,14 @@
 			rflags |= 0x1;
 	}
 	/*
-	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 * We can't allow hardware to update hpte bits. Hence always
+	 * set the 'R' bit, and set 'C' only if it is a write fault.
+	 * Memory coherence is always enabled.
 	 */
-	rflags |=  HPTE_R_C | HPTE_R_M;
+	rflags |=  HPTE_R_R | HPTE_R_M;
+
+	if (pteflags & _PAGE_DIRTY)
+		rflags |= HPTE_R_C;
 	/*
 	 * Add in WIG bits
 	 */
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index eb44511..6703187 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -33,10 +33,7 @@
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		/*
-		 * Since we are not supporting SW TLB systems, we don't
-		 * have any thing similar to flush_tlb_page_nohash()
-		 */
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 18b2c11..c939e6e 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -296,11 +296,6 @@
 void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
-	/*
-	 * setup LPCR UPRT based on mmu_features
-	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/* PAGE_SIZE mappings */
@@ -343,8 +338,11 @@
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
 	radix_init_page_sizes();
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 		radix_init_partition_table();
+	}
 
 	radix_init_pgtable();
 }
@@ -353,16 +351,15 @@
 {
 	unsigned long lpcr;
 	/*
-	 * setup LPCR UPRT based on mmu_features
+	 * update partition table control register and UPRT
 	 */
-	lpcr = mfspr(SPRN_LPCR);
-	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
-	/*
-	 * update partition table control register, 64 K size.
-	 */
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
+	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+		lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+
 		mtspr(SPRN_PTCR,
 		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+	}
 }
 
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 26d37e6..0fc2671 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -76,7 +76,7 @@
 			next_ip = regs->nip;
 			lr = regs->link;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
 
 		} else {
 			if (level == 0)
@@ -232,7 +232,7 @@
 		puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
@@ -247,7 +247,7 @@
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
@@ -274,7 +274,7 @@
 			    read_user_stack_64(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
 			perf_callchain_store(entry, next_ip);
 			continue;
 		}
@@ -319,7 +319,7 @@
 	return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 					  struct pt_regs *regs)
 {
 }
@@ -439,7 +439,7 @@
 	return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned int sp, next_sp;
@@ -453,7 +453,7 @@
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;
@@ -473,7 +473,7 @@
 			    read_user_stack_32(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
 			perf_callchain_store(entry, next_ip);
 			continue;
 		}
@@ -487,7 +487,7 @@
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (current_is_64bit())
 		perf_callchain_user_64(entry, regs);
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index c50ea76..6081fbd 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -221,7 +221,7 @@
 /* convenience wrappers around the common clk API */
 static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
 {
-	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
 }
 
 static inline struct clk *mpc512x_clk_factor(
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 84fb984..85c85eb 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -172,7 +172,7 @@
 	if (rc < 0)
 		goto out;
 
-	skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
 out:
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ac3ffd9..3998e0f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -53,7 +53,6 @@
 static int ibm_slot_error_detail;
 static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
 static int ibm_configure_pe;
 
 /*
@@ -81,7 +80,14 @@
 	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
 	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
 	ibm_configure_pe		= rtas_token("ibm,configure-pe");
-	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");
+
+	/*
+	 * ibm,configure-pe and ibm,configure-bridge have the same semantics;
+	 * however, ibm,configure-pe can be faster.  If we can't find
+	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
+	 */
+	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+		ibm_configure_pe 	= rtas_token("ibm,configure-bridge");
 
 	/*
 	 * Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@
 	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
 	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE)	||
 	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE	||
-	    (ibm_configure_pe == RTAS_UNKNOWN_SERVICE		&&
-	     ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
 		pr_info("EEH functionality not supported\n");
 		return -EINVAL;
 	}
@@ -615,29 +620,41 @@
 {
 	int config_addr;
 	int ret;
+	/* Waiting 0.2s maximum before skipping configuration */
+	int max_wait = 200;
 
 	/* Figure out the PE address */
 	config_addr = pe->config_addr;
 	if (pe->addr)
 		config_addr = pe->addr;
 
-	/* Use new configure-pe function, if supported */
-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+	while (max_wait > 0) {
 		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
 				config_addr, BUID_HI(pe->phb->buid),
 				BUID_LO(pe->phb->buid));
-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
-				config_addr, BUID_HI(pe->phb->buid),
-				BUID_LO(pe->phb->buid));
-	} else {
-		return -EFAULT;
+
+		if (!ret)
+			return ret;
+
+		/*
+		 * If RTAS returns a delay value that's above 100ms, cut it
+		 * down to 100ms in case firmware made a mistake.  For more
+		 * on how these delay values work, see rtas_busy_delay_time().
+		 */
+		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+		    ret <= RTAS_EXTENDED_DELAY_MAX)
+			ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+		max_wait -= rtas_busy_delay_time(ret);
+
+		if (max_wait < 0)
+			break;
+
+		rtas_busy_delay(ret);
 	}
 
-	if (ret)
-		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
-			__func__, pe->phb->global_number, pe->addr, ret);
-
+	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+		__func__, pe->phb->global_number, pe->addr, ret);
 	return ret;
 }
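/*
 * Illustrative sketch, not part of the patch above.  RTAS "extended delay"
 * status codes ask the caller to wait 10^n milliseconds, where
 * n = status - RTAS_EXTENDED_DELAY_MIN and the valid range is
 * RTAS_EXTENDED_DELAY_MIN..RTAS_EXTENDED_DELAY_MAX (9900..9905).  That is
 * why clamping the status to RTAS_EXTENDED_DELAY_MIN + 2 in the loop above
 * caps each per-iteration wait at 100ms.  Hedged sketch of the mapping that
 * rtas_busy_delay_time() is expected to perform; the constants are the
 * existing kernel definitions, the helper name is made up.
 */
#include <asm/rtas.h>	/* RTAS_EXTENDED_DELAY_{MIN,MAX} */

static unsigned int extended_delay_to_ms(int status)
{
	unsigned int ms = 1;
	int order;

	if (status < RTAS_EXTENDED_DELAY_MIN ||
	    status > RTAS_EXTENDED_DELAY_MAX)
		return 0;	/* not an extended-delay status */

	/* 9900 -> 1ms, 9901 -> 10ms, 9902 -> 100ms, ..., 9905 -> 100s */
	for (order = status - RTAS_EXTENDED_DELAY_MIN; order > 0; order--)
		ms *= 10;

	return ms;
}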
 
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 0d112b9..ff75d70 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -143,7 +143,7 @@
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-		       void __pmem **kaddr, pfn_t *pfn)
+		       void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct axon_ram_bank *bank = device->bd_disk->private_data;
 	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index fac6ac9..1dd2103 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -22,7 +22,6 @@
 LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
 	$(call if_changed,ld)
-	@:
 
 sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p'
 
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0ac42cc..d5ec71b 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -65,6 +63,15 @@
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
 CONFIG_READABLE_ASM=y
 CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@
 CONFIG_SLUB_STATS=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
 CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -651,7 +666,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,7 @@
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index a31dcd5..f46a351 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
 CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_RBTREE_TEST=m
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -597,8 +605,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,7 @@
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 7b73bf3..ba0f2a5 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
 CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -594,8 +604,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,7 @@
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1719843..4366a3e 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,5 +1,5 @@
 # CONFIG_SWAP is not set
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 # CONFIG_FTRACE is not set
-# CONFIG_STRICT_DEVMEM is not set
 # CONFIG_PFAULT is not set
 # CONFIG_S390_HYPFS_FS is not set
 # CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e24f2af..3f571ea 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,8 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_USELIB=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
@@ -52,6 +51,14 @@
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
 CONFIG_L2TP=m
 CONFIG_L2TP_DEBUGFS=m
 CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
@@ -212,8 +222,6 @@
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 2f1b721..0e64f08 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -43,13 +43,13 @@
 	switch (action) {
 	case PM_SUSPEND_PREPARE:
 	case PM_HIBERNATION_PREPARE:
-		if (crashk_res.start)
-			crash_map_reserved_pages();
+		if (kexec_crash_image)
+			arch_kexec_unprotect_crashkres();
 		break;
 	case PM_POST_SUSPEND:
 	case PM_POST_HIBERNATION:
-		if (crashk_res.start)
-			crash_unmap_reserved_pages();
+		if (kexec_crash_image)
+			arch_kexec_protect_crashkres();
 		break;
 	default:
 		return NOTIFY_DONE;
@@ -60,6 +60,8 @@
 static int __init machine_kdump_pm_init(void)
 {
 	pm_notifier(machine_kdump_pm_cb, 0);
+	/* Create initial mapping for crashkernel memory */
+	arch_kexec_unprotect_crashkres();
 	return 0;
 }
 arch_initcall(machine_kdump_pm_init);
@@ -146,6 +148,8 @@
 #endif
 }
 
+#ifdef CONFIG_CRASH_DUMP
+
 /*
  * Map or unmap crashkernel memory
  */
@@ -167,21 +171,25 @@
 }
 
 /*
- * Map crashkernel memory
+ * Unmap crashkernel memory
  */
-void crash_map_reserved_pages(void)
+void arch_kexec_protect_crashkres(void)
 {
-	crash_map_pages(1);
+	if (crashk_res.end)
+		crash_map_pages(0);
 }
 
 /*
- * Unmap crashkernel memory
+ * Map crashkernel memory
  */
-void crash_unmap_reserved_pages(void)
+void arch_kexec_unprotect_crashkres(void)
 {
-	crash_map_pages(0);
+	if (crashk_res.end)
+		crash_map_pages(1);
 }
 
+#endif
+
 /*
  * Give back memory to hypervisor before new kdump is loaded
  */
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c3e4099..87035fa 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,13 +224,13 @@
 
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	perf_callchain_store(entry, address);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	if (user_mode(regs))
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 94495ca..5904abf 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -216,7 +216,8 @@
 	 * it at vdso_base which is the "natural" base for it, but we might
 	 * fail and end up putting it elsewhere.
 	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7a31440..19288c1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -250,6 +250,7 @@
 
 	report_user_fault(regs, SIGSEGV, 1);
 	si.si_signo = SIGSEGV;
+	si.si_errno = 0;
 	si.si_code = si_code;
 	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
 	force_sig_info(SIGSEGV, &si, current);
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f010c93..fda605d 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -37,7 +37,7 @@
  *	      |		      |     |
  *	      +---------------+     |
  *	      | 8 byte skbp   |     |
- * R15+170 -> +---------------+     |
+ * R15+176 -> +---------------+     |
  *	      | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *	      | 4 byte align  |     |
@@ -58,7 +58,7 @@
 #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
-#define STK_OFF_SKBP	170	/* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP	176	/* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
 #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9133b0e..bee281f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@
 	int labels[1];		/* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */
 
 #define SEEN_SKB	1	/* skb access */
 #define SEEN_MEM	2	/* use mem[] for temporary storage */
@@ -450,7 +450,7 @@
 		emit_load_skb_data_hlen(jit);
 	if (jit->seen & SEEN_SKB_CHANGE)
 		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
-		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
 			      STK_OFF_SKBP);
 }
 
diff --git a/arch/score/include/uapi/asm/unistd.h b/arch/score/include/uapi/asm/unistd.h
index 9cb4260..d4008c3 100644
--- a/arch/score/include/uapi/asm/unistd.h
+++ b/arch/score/include/uapi/asm/unistd.h
@@ -1,5 +1,6 @@
 #define __ARCH_HAVE_MMU
 
+#define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_SYSCALL_NO_AT
 #define __ARCH_WANT_SYSCALL_NO_FLAGS
 #define __ARCH_WANT_SYSCALL_OFF_T
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 6df826e..c4c47ea 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -55,7 +55,6 @@
 
 $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
 	$(call if_changed,ld)
-	@:
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile
index 2216ee5..43c4119 100644
--- a/arch/sh/boot/romimage/Makefile
+++ b/arch/sh/boot/romimage/Makefile
@@ -17,7 +17,6 @@
 
 $(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
 	$(call if_changed,ld)
-	@:
 
 OBJCOPYFLAGS += -j .empty_zero_page
 
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index a8d9757..fe45d2c 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -10,7 +10,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_MEMCG=y
 CONFIG_BLK_CGROUP=y
 CONFIG_NAMESPACES=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index e7e56a4..36642ec2 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -17,7 +17,6 @@
 CONFIG_CPUSETS=y
 # CONFIG_PROC_PID_CPUSET is not set
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_MEMCG=y
 CONFIG_CGROUP_MEMCG_SWAP=y
 CONFIG_CGROUP_SCHED=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 6bc30ab..91853a6 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -10,7 +10,6 @@
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_MEMCG=y
 CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index cd6c519..4a4269a 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -12,7 +12,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_MEMCG=y
 CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index 1e843db..01c9a91 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -14,7 +14,6 @@
 CONFIG_CPUSETS=y
 # CONFIG_PROC_PID_CPUSET is not set
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_MEMCG=y
 CONFIG_CGROUP_MEMCG_SWAP=y
 CONFIG_CGROUP_SCHED=y
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index cc80b61..fa2c0cd 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -21,7 +21,7 @@
 
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	if (reliable)
 		perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, regs->pc);
 
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index ea2aa13..cc0cc5b 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -64,7 +64,9 @@
 	unsigned long addr;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
index 10e9dab..f0700cf 100644
--- a/arch/sparc/include/asm/head_64.h
+++ b/arch/sparc/include/asm/head_64.h
@@ -15,6 +15,10 @@
 
 #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
 
+#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
 #define __CHEETAH_ID	0x003e0014
 #define __JALAPENO_ID	0x003e0016
 #define __SERRANO_ID	0x003e0022
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 71b5a67..781b9f1 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -589,8 +589,8 @@
 	 restored;					\
 	nop; nop; nop; nop; nop; nop;			\
 	nop; nop; nop; nop; nop;			\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
 	ba,a,pt	%xcc, user_rtt_fill_fixup;
 
 
@@ -652,8 +652,8 @@
 	 restored;					\
 	nop; nop; nop; nop; nop;			\
 	nop; nop; nop;					\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
+	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
 	ba,a,pt	%xcc, user_rtt_fill_fixup;
 
 
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 7cf9c6e..fdb1332 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -21,6 +21,7 @@
 CFLAGS_REMOVE_pcr.o := -pg
 endif
 
+obj-$(CONFIG_SPARC64)   += urtt_fill.o
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index a4b8b5a..710f327 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1711,7 +1711,7 @@
 }
 pure_initcall(init_hw_perf_events);
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@
 			}
 		}
 #endif
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->nr < entry->max_stack);
 }
 
 static inline int
@@ -1769,7 +1769,7 @@
 	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long ufp;
@@ -1790,10 +1790,10 @@
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->nr < entry->max_stack);
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long ufp;
@@ -1822,11 +1822,11 @@
 			ufp = (unsigned long)sf.fp;
 		}
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->nr < entry->max_stack);
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	u64 saved_fault_address = current_thread_info()->fault_address;
 	u8 saved_fault_code = get_thread_fault_code();
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index d08bdaf..216948c 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -14,10 +14,6 @@
 #include <asm/visasm.h>
 #include <asm/processor.h>
 
-#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-
 #ifdef CONFIG_CONTEXT_TRACKING
 # define SCHEDULE_USER schedule_user
 #else
@@ -242,52 +238,17 @@
 		 wrpr			%g1, %cwp
 		ba,a,pt			%xcc, user_rtt_fill_64bit
 
+user_rtt_fill_fixup_dax:
+		ba,pt	%xcc, user_rtt_fill_fixup_common
+		 mov	1, %g3
+
+user_rtt_fill_fixup_mna:
+		ba,pt	%xcc, user_rtt_fill_fixup_common
+		 mov	2, %g3
+
 user_rtt_fill_fixup:
-		rdpr	%cwp, %g1
-		add	%g1, 1, %g1
-		wrpr	%g1, 0x0, %cwp
-
-		rdpr	%wstate, %g2
-		sll	%g2, 3, %g2
-		wrpr	%g2, 0x0, %wstate
-
-		/* We know %canrestore and %otherwin are both zero.  */
-
-		sethi	%hi(sparc64_kern_pri_context), %g2
-		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
-		mov	PRIMARY_CONTEXT, %g1
-
-661:		stxa	%g2, [%g1] ASI_DMMU
-		.section .sun4v_1insn_patch, "ax"
-		.word	661b
-		stxa	%g2, [%g1] ASI_MMU
-		.previous
-
-		sethi	%hi(KERNBASE), %g1
-		flush	%g1
-
-		or	%g4, FAULT_CODE_WINFIXUP, %g4
-		stb	%g4, [%g6 + TI_FAULT_CODE]
-		stx	%g5, [%g6 + TI_FAULT_ADDR]
-
-		mov	%g6, %l1
-		wrpr	%g0, 0x0, %tl
-
-661:		nop
-		.section		.sun4v_1insn_patch, "ax"
-		.word			661b
-		SET_GL(0)
-		.previous
-
-		wrpr	%g0, RTRAP_PSTATE, %pstate
-
-		mov	%l1, %g6
-		ldx	[%g6 + TI_TASK], %g4
-		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
-		call	do_sparc64_fault
-		 add	%sp, PTREGS_OFF, %o0
-		ba,pt	%xcc, rtrap
-		 nop
+		ba,pt	%xcc, user_rtt_fill_fixup_common
+		 clr	%g3
 
 user_rtt_pre_restore:
 		add			%g1, 1, %g1
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 3c25241..91cc2f4 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -138,12 +138,24 @@
 	return 0;
 }
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+	if ((((unsigned long) fp) & 15) ||
+	    ((unsigned long)fp) > 0x100000000ULL - fplen)
+		return true;
+	return false;
+}
+
 void do_sigreturn32(struct pt_regs *regs)
 {
 	struct signal_frame32 __user *sf;
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
-	unsigned int psr;
+	unsigned int psr, ufp;
 	unsigned int pc, npc;
 	sigset_t set;
 	compat_sigset_t seta;
@@ -158,11 +170,16 @@
 	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 3))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv;
 
-	if (get_user(pc, &sf->info.si_regs.pc) ||
+	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
+		goto segv;
+
+	if (__get_user(pc, &sf->info.si_regs.pc) ||
 	    __get_user(npc, &sf->info.si_regs.npc))
 		goto segv;
 
@@ -227,7 +244,7 @@
 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 {
 	struct rt_signal_frame32 __user *sf;
-	unsigned int psr, pc, npc;
+	unsigned int psr, pc, npc, ufp;
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
 	sigset_t set;
@@ -242,11 +259,16 @@
 	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 3))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv;
 
-	if (get_user(pc, &sf->regs.pc) || 
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
+		goto segv;
+
+	if (__get_user(pc, &sf->regs.pc) || 
 	    __get_user(npc, &sf->regs.npc))
 		goto segv;
 
@@ -307,14 +329,6 @@
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
-	if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
-		return 1;
-	return 0;
-}
-
 static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 52aa5e4..c3c12ef 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -60,10 +60,22 @@
 #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+	if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+		return true;
+
+	return false;
+}
+
 asmlinkage void do_sigreturn(struct pt_regs *regs)
 {
+	unsigned long up_psr, pc, npc, ufp;
 	struct signal_frame __user *sf;
-	unsigned long up_psr, pc, npc;
 	sigset_t set;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
@@ -77,10 +89,13 @@
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv_and_exit;
 
-	if (((unsigned long) sf) & 3)
+	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+		goto segv_and_exit;
+
+	if (ufp & 0x7)
 		goto segv_and_exit;
 
 	err = __get_user(pc,  &sf->info.si_regs.pc);
@@ -127,7 +142,7 @@
 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 {
 	struct rt_signal_frame __user *sf;
-	unsigned int psr, pc, npc;
+	unsigned int psr, pc, npc, ufp;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
 	sigset_t set;
@@ -135,8 +150,13 @@
 
 	synchronize_user_stack();
 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-	    (((unsigned long) sf) & 0x03))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
+		goto segv;
+
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if (ufp & 0x7)
 		goto segv;
 
 	err = __get_user(pc, &sf->regs.pc);
@@ -178,15 +198,6 @@
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
-	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
-		return 1;
-
-	return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp = regs->u_regs[UREG_FP];
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 39aaec1..5ee930c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -234,6 +234,17 @@
 	goto out;
 }
 
+/* Checks if the fp is valid.  We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+	if (((unsigned long) fp) & 15)
+		return true;
+	return false;
+}
+
 struct rt_signal_frame {
 	struct sparc_stackf	ss;
 	siginfo_t		info;
@@ -246,8 +257,8 @@
 
 void do_rt_sigreturn(struct pt_regs *regs)
 {
+	unsigned long tpc, tnpc, tstate, ufp;
 	struct rt_signal_frame __user *sf;
-	unsigned long tpc, tnpc, tstate;
 	__siginfo_fpu_t __user *fpu_save;
 	__siginfo_rwin_t __user *rwin_save;
 	sigset_t set;
@@ -261,10 +272,16 @@
 		(regs->u_regs [UREG_FP] + STACK_BIAS);
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (((unsigned long) sf) & 3)
+	if (invalid_frame_pointer(sf))
 		goto segv;
 
-	err = get_user(tpc, &sf->regs.tpc);
+	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+		goto segv;
+
+	if ((ufp + STACK_BIAS) & 0x7)
+		goto segv;
+
+	err = __get_user(tpc, &sf->regs.tpc);
 	err |= __get_user(tnpc, &sf->regs.tnpc);
 	if (test_thread_flag(TIF_32BIT)) {
 		tpc &= 0xffffffff;
@@ -308,14 +325,6 @@
 	force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
-	if (((unsigned long) fp) & 15)
-		return 1;
-	return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
 	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
index 0f6eebe..e5fe8ce 100644
--- a/arch/sparc/kernel/sigutil_32.c
+++ b/arch/sparc/kernel/sigutil_32.c
@@ -48,6 +48,10 @@
 int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
 	int err;
+
+	if (((unsigned long) fpu) & 3)
+		return -EFAULT;
+
 #ifdef CONFIG_SMP
 	if (test_tsk_thread_flag(current, TIF_USEDFPU))
 		regs->psr &= ~PSR_EF;
@@ -97,7 +101,10 @@
 	struct thread_info *t = current_thread_info();
 	int i, wsaved, err;
 
-	__get_user(wsaved, &rp->wsaved);
+	if (((unsigned long) rp) & 3)
+		return -EFAULT;
+
+	get_user(wsaved, &rp->wsaved);
 	if (wsaved > NSWINS)
 		return -EFAULT;
 
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
index 387834a..36aadcb 100644
--- a/arch/sparc/kernel/sigutil_64.c
+++ b/arch/sparc/kernel/sigutil_64.c
@@ -37,7 +37,10 @@
 	unsigned long fprs;
 	int err;
 
-	err = __get_user(fprs, &fpu->si_fprs);
+	if (((unsigned long) fpu) & 7)
+		return -EFAULT;
+
+	err = get_user(fprs, &fpu->si_fprs);
 	fprs_write(0);
 	regs->tstate &= ~TSTATE_PEF;
 	if (fprs & FPRS_DL)
@@ -72,7 +75,10 @@
 	struct thread_info *t = current_thread_info();
 	int i, wsaved, err;
 
-	__get_user(wsaved, &rp->wsaved);
+	if (((unsigned long) rp) & 7)
+		return -EFAULT;
+
+	get_user(wsaved, &rp->wsaved);
 	if (wsaved > NSWINS)
 		return -EFAULT;
 
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644
index 0000000..5604a2b0
--- /dev/null
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -0,0 +1,98 @@
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+		.text
+		.align	8
+		.globl	user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+		rdpr	%cwp, %g1
+		add	%g1, 1, %g1
+		wrpr	%g1, 0x0, %cwp
+
+		rdpr	%wstate, %g2
+		sll	%g2, 3, %g2
+		wrpr	%g2, 0x0, %wstate
+
+		/* We know %canrestore and %otherwin are both zero.  */
+
+		sethi	%hi(sparc64_kern_pri_context), %g2
+		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+		mov	PRIMARY_CONTEXT, %g1
+
+661:		stxa	%g2, [%g1] ASI_DMMU
+		.section .sun4v_1insn_patch, "ax"
+		.word	661b
+		stxa	%g2, [%g1] ASI_MMU
+		.previous
+
+		sethi	%hi(KERNBASE), %g1
+		flush	%g1
+
+		mov	%g4, %l4
+		mov	%g5, %l5
+		brnz,pn	%g3, 1f
+		 mov	%g3, %l3
+
+		or	%g4, FAULT_CODE_WINFIXUP, %g4
+		stb	%g4, [%g6 + TI_FAULT_CODE]
+		stx	%g5, [%g6 + TI_FAULT_ADDR]
+1:
+		mov	%g6, %l1
+		wrpr	%g0, 0x0, %tl
+
+661:		nop
+		.section		.sun4v_1insn_patch, "ax"
+		.word			661b
+		SET_GL(0)
+		.previous
+
+		wrpr	%g0, RTRAP_PSTATE, %pstate
+
+		mov	%l1, %g6
+		ldx	[%g6 + TI_TASK], %g4
+		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+		brnz,pn	%l3, 1f
+		 nop
+
+		call	do_sparc64_fault
+		 add	%sp, PTREGS_OFF, %o0
+		ba,pt	%xcc, rtrap
+		 nop
+
+1:		cmp	%g3, 2
+		bne,pn	%xcc, 2f
+		 nop
+
+		sethi	%hi(tlb_type), %g1
+		lduw	[%g1 + %lo(tlb_type)], %g1
+		cmp	%g1, 3
+		bne,pt	%icc, 1f
+		 add	%sp, PTREGS_OFF, %o0
+		mov	%l4, %o2
+		call	sun4v_do_mna
+		 mov	%l5, %o1
+		ba,a,pt	%xcc, rtrap
+1:		mov	%l4, %o1
+		mov	%l5, %o2
+		call	mem_address_unaligned
+		 nop
+		ba,a,pt	%xcc, rtrap
+
+2:		sethi	%hi(tlb_type), %g1
+		mov	%l4, %o1
+		lduw	[%g1 + %lo(tlb_type)], %g1
+		mov	%l5, %o2
+		cmp	%g1, 3
+		bne,pt	%icc, 1f
+		 add	%sp, PTREGS_OFF, %o0
+		call	sun4v_data_access_exception
+		 nop
+		ba,a,pt	%xcc, rtrap
+
+1:		call	spitfire_data_access_exception
+		 nop
+		ba,a,pt	%xcc, rtrap
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 652683c..14bb0d5 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2824,9 +2824,10 @@
 	 * the Data-TLB for huge pages.
 	 */
 	if (tlb_type == cheetah_plus) {
+		bool need_context_reload = false;
 		unsigned long ctx;
 
-		spin_lock(&ctx_alloc_lock);
+		spin_lock_irq(&ctx_alloc_lock);
 		ctx = mm->context.sparc64_ctx_val;
 		ctx &= ~CTX_PGSZ_MASK;
 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2845,9 +2846,12 @@
 			 * also executing in this address space.
 			 */
 			mm->context.sparc64_ctx_val = ctx;
-			on_each_cpu(context_reload, mm, 0);
+			need_context_reload = true;
 		}
-		spin_unlock(&ctx_alloc_lock);
+		spin_unlock_irq(&ctx_alloc_lock);
+
+		if (need_context_reload)
+			on_each_cpu(context_reload, mm, 0);
 	}
 }
 #endif
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 76989b87..4820a02 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -3,49 +3,38 @@
 
 config TILE
 	def_bool y
-	select HAVE_EXIT_THREAD
-	select HAVE_PERF_EVENTS
-	select USE_PMC if PERF_EVENTS
-	select HAVE_DMA_API_DEBUG
-	select HAVE_KVM if !TILEGX
-	select GENERIC_FIND_FIRST_BIT
-	select SYSCTL_EXCEPTION_TRACE
-	select CC_OPTIMIZE_FOR_SIZE
-	select HAVE_DEBUG_KMEMLEAK
-	select GENERIC_IRQ_PROBE
-	select GENERIC_PENDING_IRQ if SMP
-	select GENERIC_IRQ_SHOW
-	select HAVE_DEBUG_BUGVERBOSE
-	select VIRT_TO_BUS
-	select SYS_HYPERVISOR
+	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select GENERIC_CLOCKEVENTS
-	select MODULES_USE_ELF_RELA
-	select HAVE_ARCH_TRACEHOOK
-	select HAVE_SYSCALL_TRACEPOINTS
-	select USER_STACKTRACE_SUPPORT
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select HAVE_DEBUG_STACKOVERFLOW
 	select ARCH_WANT_FRAME_POINTERS
-	select HAVE_CONTEXT_TRACKING
-	select HAVE_NMI if USE_PMC
+	select CC_OPTIMIZE_FOR_SIZE
 	select EDAC_SUPPORT
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_IRQ_PROBE
+	select GENERIC_IRQ_SHOW
+	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_ARCH_SECCOMP_FILTER
-
-# FIXME: investigate whether we need/want these options.
-#	select HAVE_IOREMAP_PROT
-#	select HAVE_OPTPROBES
-#	select HAVE_REGS_AND_STACK_ACCESS_API
-#	select HAVE_HW_BREAKPOINT
-#	select PERF_EVENTS
-#	select HAVE_USER_RETURN_NOTIFIER
-#	config NO_BOOTMEM
-#	config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-#	config HUGETLB_PAGE_SIZE_VARIABLE
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_CONTEXT_TRACKING
+	select HAVE_DEBUG_BUGVERBOSE
+	select HAVE_DEBUG_KMEMLEAK
+	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_DMA_API_DEBUG
+	select HAVE_EXIT_THREAD
+	select HAVE_KVM if !TILEGX
+	select HAVE_NMI if USE_PMC
+	select HAVE_PERF_EVENTS
+	select HAVE_SYSCALL_TRACEPOINTS
+	select MODULES_USE_ELF_RELA
+	select SYSCTL_EXCEPTION_TRACE
+	select SYS_HYPERVISOR
+	select USER_STACKTRACE_SUPPORT
+	select USE_PMC if PERF_EVENTS
+	select VIRT_TO_BUS
 
 config MMU
 	def_bool y
@@ -132,17 +121,17 @@
 # 64-bit TILE-Gx toolchain, so force CONFIG_TILEGX on.
 config TILEGX
 	def_bool ARCH != "tilepro"
-	select SPARSE_IRQ
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_KGDB
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
-	select HAVE_ARCH_KGDB
-	select ARCH_SUPPORTS_ATOMIC_RMW
-	select HAVE_ARCH_JUMP_LABEL
+	select SPARSE_IRQ
 
 config TILEPRO
 	def_bool !TILEGX
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 7189055..fd122ef 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -16,7 +16,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
@@ -89,7 +88,6 @@
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index dc85468..eb6a559 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -15,7 +15,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
@@ -85,7 +84,6 @@
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index f102048..34de300 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -122,7 +122,7 @@
 {
 	const int BUFFERS_PER_LINE = 12;
 
-	/* Count the number of cachlines. */
+	/* Count the number of cachelines. */
 	unsigned long lines =
 		(buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;
 
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 51cabc2..b0531a6 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -37,12 +37,25 @@
 	__insn_fetchadd4((void *)&v->counter, i);
 }
 
+/*
+ * Note a subtlety of the locking here.  We are required to provide a
+ * full memory barrier before and after the operation.  However, we
+ * only provide an explicit mb before the operation.  After the
+ * operation, we use barrier() to get a full mb for free, because:
+ *
+ * (1) The barrier directive to the compiler prohibits any instructions
+ * being statically hoisted before the barrier;
+ * (2) the microarchitecture will not issue any further instructions
+ * until the fetchadd result is available for the "+ i" add instruction;
+ * (3) the smp_mb before the fetchadd ensures that no other memory
+ * operations are in flight at this point.
+ */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int val;
 	smp_mb();  /* barrier for proper semantics */
 	val = __insn_fetchadd4((void *)&v->counter, i) + i;
-	barrier();  /* the "+ i" above will wait on memory */
+	barrier();  /* equivalent to smp_mb(); see block comment above */
 	return val;
 }
 
@@ -95,7 +108,7 @@
 	int val;
 	smp_mb();  /* barrier for proper semantics */
 	val = __insn_fetchadd((void *)&v->counter, i) + i;
-	barrier();  /* the "+ i" above will wait on memory */
+	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
 	return val;
 }
 
diff --git a/arch/tile/include/uapi/asm/unistd.h b/arch/tile/include/uapi/asm/unistd.h
index 3866397..24e9187 100644
--- a/arch/tile/include/uapi/asm/unistd.h
+++ b/arch/tile/include/uapi/asm/unistd.h
@@ -12,6 +12,7 @@
  *   more details.
  */
 
+#define __ARCH_WANT_RENAMEAT
 #if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
 /* Use the flavor of this syscall that matches the 32-bit API better. */
 #define __ARCH_WANT_SYNC_FILE_RANGE2
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index aa2b44c..0e7a5d0 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -40,7 +40,7 @@
 #include <arch/sim.h>
 
 /*
- * This file containes the routines to search for PCI buses,
+ * This file contains the routines to search for PCI buses,
  * enumerate the buses, and configure any attached devices.
  */
 
@@ -434,7 +434,7 @@
 
 	/*
 	 * Now determine which PCIe ports are configured to operate in RC
-	 * mode. There is a differece in the port configuration capability
+	 * mode. There is a difference in the port configuration capability
 	 * between the Gx36 and Gx72 devices.
 	 *
 	 * The Gx36 has configuration capability for each of the 3 PCIe
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 8767060..6394c1c 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -941,7 +941,7 @@
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
 		    struct pt_regs *regs)
 {
 	struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@
 	}
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 		    struct pt_regs *regs)
 {
 	perf_callchain(entry, regs);
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 		      struct pt_regs *regs)
 {
 	perf_callchain(entry, regs);
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index 0db5f7c..9772a35 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -188,7 +188,7 @@
 	 * Parse fault bundle, find potential used registers and mark
 	 * corresponding bits in reg_map and alias_map. These 2 bit maps
 	 * are used to find the scratch registers and determine if there
-	 * is register alais.
+	 * is register alias.
 	 */
 	if (bundle & TILEGX_BUNDLE_MODE_MASK) {  /* Y Mode Bundle. */
 
@@ -1529,7 +1529,7 @@
 	}
 
 
-	/* Read the bundle casued the exception! */
+	/* Read the bundle caused the exception! */
 	pc = (tilegx_bundle_bits __user *)(regs->pc);
 	if (get_user(bundle, pc) != 0) {
 		/* Probably never be here since pc is valid user address.*/
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
index a12bf68..5636221 100644
--- a/arch/um/configs/i386_defconfig
+++ b/arch/um/configs/i386_defconfig
@@ -17,7 +17,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
 # CONFIG_PID_NS is not set
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
index 3aab117..7a67b7a 100644
--- a/arch/um/configs/x86_64_defconfig
+++ b/arch/um/configs/x86_64_defconfig
@@ -15,7 +15,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
 # CONFIG_PID_NS is not set
diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h
index f5b7635..a74449b 100644
--- a/arch/um/include/shared/registers.h
+++ b/arch/um/include/shared/registers.h
@@ -9,6 +9,8 @@
 #include <sysdep/ptrace.h>
 #include <sysdep/archsetjmp.h>
 
+extern int save_i387_registers(int pid, unsigned long *fp_regs);
+extern int restore_i387_registers(int pid, unsigned long *fp_regs);
 extern int save_fp_registers(int pid, unsigned long *fp_regs);
 extern int restore_fp_registers(int pid, unsigned long *fp_regs);
 extern int save_fpx_registers(int pid, unsigned long *fp_regs);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0b04711..034b42c7 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -398,6 +398,6 @@
 {
 	int cpu = current_thread_info()->cpu;
 
-	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
 }
 
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 7801666..8acaf4e 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -29,23 +29,29 @@
 
 static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
 {
-	struct uml_pt_regs r;
+	struct uml_pt_regs *r;
 	int save_errno = errno;
 
-	r.is_user = 0;
+	r = malloc(sizeof(struct uml_pt_regs));
+	if (!r)
+		panic("out of memory");
+
+	r->is_user = 0;
 	if (sig == SIGSEGV) {
 		/* For segfaults, we want the data from the sigcontext. */
-		get_regs_from_mc(&r, mc);
-		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
+		get_regs_from_mc(r, mc);
+		GET_FAULTINFO_FROM_MC(r->faultinfo, mc);
 	}
 
 	/* enable signals if sig isn't IRQ signal */
 	if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGALRM))
 		unblock_signals();
 
-	(*sig_info[sig])(sig, si, &r);
+	(*sig_info[sig])(sig, si, r);
 
 	errno = save_errno;
+
+	free(r);
 }
 
 /*
@@ -83,11 +89,17 @@
 
 static void timer_real_alarm_handler(mcontext_t *mc)
 {
-	struct uml_pt_regs regs;
+	struct uml_pt_regs *regs;
+
+	regs = malloc(sizeof(struct uml_pt_regs));
+	if (!regs)
+		panic("out of memory");
 
 	if (mc != NULL)
-		get_regs_from_mc(&regs, mc);
-	timer_handler(SIGALRM, NULL, &regs);
+		get_regs_from_mc(regs, mc);
+	timer_handler(SIGALRM, NULL, regs);
+
+	free(regs);
 }
 
 void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
diff --git a/arch/unicore32/boot/Makefile b/arch/unicore32/boot/Makefile
index ec7fb70..8288550 100644
--- a/arch/unicore32/boot/Makefile
+++ b/arch/unicore32/boot/Makefile
@@ -31,7 +31,7 @@
 	$(call if_changed,uimage)
 	@echo '  Image $@ is ready'
 
-PHONY += initrd FORCE
+PHONY += initrd
 initrd:
 	@test "$(INITRD)" != "" || \
 	(echo You must specify INITRD; exit -1)
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 96494fb..9aecdd3 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -54,7 +54,6 @@
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
 		$(obj)/misc.o FORCE
 	$(call if_changed,ld)
-	@:
 
 # We now have a PIC decompressor implementation.  Decompressors running
 # from RAM should not define ZTEXTADDR.  Decompressors running directly
diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h
index d4cc455..1f63c47 100644
--- a/arch/unicore32/include/uapi/asm/unistd.h
+++ b/arch/unicore32/include/uapi/asm/unistd.h
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#define __ARCH_WANT_RENAMEAT
+
 /* Use the standard ABI for syscalls. */
 #include <asm-generic/unistd.h>
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index cfdd8c3..f135688 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -87,7 +87,6 @@
 
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
 	$(call if_changed,ld)
-	@:
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 265901a..5fa6ee2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -17,7 +17,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 0c8d796..d28bdab 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -16,7 +16,6 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 98df1fa..027aec4 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,16 +8,15 @@
 #include <linux/linkage.h>
 #include "calling.h"
 #include <asm/asm.h>
-#include <asm/frame.h>
 
 	/* rdi:	arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
 	.globl \name
 	.type \name, @function
 \name:
-	FRAME_BEGIN
+	pushq %rbp
+	movq %rsp, %rbp
 
-	/* this one pushes 9 elems, the next one would be %rIP */
 	pushq %rdi
 	pushq %rsi
 	pushq %rdx
@@ -29,8 +28,8 @@
 	pushq %r11
 
 	.if \put_ret_addr_in_rdi
-	/* 9*8(%rsp) is return addr on stack */
-	movq 9*8(%rsp), %rdi
+	/* 8(%rbp) is return addr on stack */
+	movq 8(%rbp), %rdi
 	.endif
 
 	call \func
@@ -65,7 +64,7 @@
 	popq %rdx
 	popq %rsi
 	popq %rdi
-	FRAME_END
+	popq %rbp
 	ret
 	_ASM_NOKPROBE(restore)
 #endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 6874da5..253b72e 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -193,10 +193,10 @@
 $(MODLIB)/vdso: FORCE
 	@mkdir -p $(MODLIB)/vdso
 
-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso
 	$(call cmd,vdso_install)
 
 PHONY += vdso_install $(vdso_img_insttargets)
-vdso_install: $(vdso_img_insttargets) FORCE
+vdso_install: $(vdso_img_insttargets)
 
 clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index b3cf813..ab220ac 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -163,7 +163,8 @@
 		addr = 0;
 	}
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	addr = get_unmapped_area(NULL, addr,
 				 image->size - image->sym_vvar_start, 0, 0);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 73a75aa..33787ee 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
 	/* 32-bit process in 64-bit kernel. */
 	unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@
 
 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		unsigned long bytes;
 		frame.next_frame     = 0;
 		frame.return_address = 0;
@@ -2309,14 +2309,14 @@
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
     return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
 	const void __user *fp;
@@ -2343,7 +2343,7 @@
 		return;
 
 	pagefault_disable();
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		unsigned long bytes;
 		frame.next_frame	     = NULL;
 		frame.return_address = 0;
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 0a5ede1..eb05335 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -826,7 +826,7 @@
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
 		 */
-		 event->attr.config &= P4_CONFIG_MASK;
+		event->attr.config &= P4_CONFIG_MASK;
 
 		rc = p4_validate_raw_event(event);
 		if (rc)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 16c1789..fce7406 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -891,7 +891,7 @@
 		return -ENODEV;
 
 	pkg = topology_phys_to_logical_pkg(phys_id);
-	if (WARN_ON_ONCE(pkg < 0))
+	if (pkg < 0)
 		return -EINVAL;
 
 	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index ae6aad1..cb26f18 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -116,13 +116,13 @@
 	.min_coredump	= PAGE_SIZE
 };
 
-static void set_brk(unsigned long start, unsigned long end)
+static int set_brk(unsigned long start, unsigned long end)
 {
 	start = PAGE_ALIGN(start);
 	end = PAGE_ALIGN(end);
 	if (end <= start)
-		return;
-	vm_brk(start, end - start);
+		return 0;
+	return vm_brk(start, end - start);
 }
 
 #ifdef CONFIG_COREDUMP
@@ -321,7 +321,7 @@
 
 		error = vm_brk(text_addr & PAGE_MASK, map_size);
 
-		if (error != (text_addr & PAGE_MASK))
+		if (error)
 			return error;
 
 		error = read_code(bprm->file, text_addr, 32,
@@ -349,7 +349,10 @@
 #endif
 
 		if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
-			vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
+			error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
+			if (error)
+				return error;
+
 			read_code(bprm->file, N_TXTADDR(ex), fd_offset,
 					ex.a_text+ex.a_data);
 			goto beyond_if;
@@ -372,10 +375,13 @@
 		if (error != N_DATADDR(ex))
 			return error;
 	}
-beyond_if:
-	set_binfmt(&aout_format);
 
-	set_brk(current->mm->start_brk, current->mm->brk);
+beyond_if:
+	error = set_brk(current->mm->start_brk, current->mm->brk);
+	if (error)
+		return error;
+
+	set_binfmt(&aout_format);
 
 	current->mm->start_stack =
 		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
@@ -434,7 +440,9 @@
 			error_time = jiffies;
 		}
 #endif
-		vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
+		retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
+		if (retval)
+			goto out;
 
 		read_code(file, start_addr, N_TXTOFF(ex),
 			  ex.a_text + ex.a_data);
@@ -453,9 +461,8 @@
 	len = PAGE_ALIGN(ex.a_text + ex.a_data);
 	bss = ex.a_text + ex.a_data + ex.a_bss;
 	if (bss > len) {
-		error = vm_brk(start_addr + len, bss - len);
-		retval = error;
-		if (error != start_addr + len)
+		retval = vm_brk(start_addr + len, bss - len);
+		if (retval)
 			goto out;
 	}
 	retval = 0;
diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h
index 08abf63..5490bba 100644
--- a/arch/x86/include/asm/bugs.h
+++ b/arch/x86/include/asm/bugs.h
@@ -1,8 +1,16 @@
 #ifndef _ASM_X86_BUGS_H
 #define _ASM_X86_BUGS_H
 
+#include <asm/processor.h>
+
 extern void check_bugs(void);
 
+#if defined(CONFIG_CPU_SUP_INTEL)
+void check_mpx_erratum(struct cpuinfo_x86 *c);
+#else
+static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {}
+#endif
+
 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 25ebb54..483fb54 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -64,9 +64,9 @@
 	   (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) ||	\
 	   (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) ||	\
 	   (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) ||	\
-	   (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK14)) ||	\
-	   (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK15)) ||	\
-	   (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
+	   (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) ||	\
+	   (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) ||	\
+	   (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
 
 #define DISABLED_MASK_BIT_SET(bit)					\
 	 ( (((bit)>>5)==0  && (1UL<<((bit)&31) & DISABLED_MASK0 )) ||	\
@@ -83,9 +83,9 @@
 	   (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) ||	\
 	   (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) ||	\
 	   (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) ||	\
-	   (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK14)) ||	\
-	   (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK15)) ||	\
-	   (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
+	   (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) ||	\
+	   (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) ||	\
+	   (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
 
 #define cpu_has(c, bit)							\
 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 39343be..911e935 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -29,11 +29,11 @@
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-# define DISABLE_PKU		(1<<(X86_FEATURE_PKU))
-# define DISABLE_OSPKE		(1<<(X86_FEATURE_OSPKE))
-#else
 # define DISABLE_PKU		0
 # define DISABLE_OSPKE		0
+#else
+# define DISABLE_PKU		(1<<(X86_FEATURE_PKU & 31))
+# define DISABLE_OSPKE		(1<<(X86_FEATURE_OSPKE & 31))
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
 /*
diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h
index ed65fe7..85029b5 100644
--- a/arch/x86/include/asm/intel_telemetry.h
+++ b/arch/x86/include/asm/intel_telemetry.h
@@ -99,7 +99,7 @@
 	int (*reset_events)(void);
 };
 
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
 			  struct telemetry_plt_config *pltconfig);
 
 int telemetry_clear_pltdata(void);
diff --git a/arch/x86/include/asm/pmc_core.h b/arch/x86/include/asm/pmc_core.h
new file mode 100644
index 0000000..d4855f1
--- /dev/null
+++ b/arch/x86/include/asm/pmc_core.h
@@ -0,0 +1,27 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ASM_PMC_CORE_H
+#define _ASM_PMC_CORE_H
+
+/* API to read SLP_S0_RESIDENCY counter */
+int intel_pmc_slp_s0_counter_read(u32 *data);
+
+#endif /* _ASM_PMC_CORE_H */
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index b9e9bb2..3725e14 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -2,10 +2,12 @@
 #define _UAPI__SVM_H
 
 #define SVM_EXIT_READ_CR0      0x000
+#define SVM_EXIT_READ_CR2      0x002
 #define SVM_EXIT_READ_CR3      0x003
 #define SVM_EXIT_READ_CR4      0x004
 #define SVM_EXIT_READ_CR8      0x008
 #define SVM_EXIT_WRITE_CR0     0x010
+#define SVM_EXIT_WRITE_CR2     0x012
 #define SVM_EXIT_WRITE_CR3     0x013
 #define SVM_EXIT_WRITE_CR4     0x014
 #define SVM_EXIT_WRITE_CR8     0x018
@@ -80,10 +82,12 @@
 
 #define SVM_EXIT_REASONS \
 	{ SVM_EXIT_READ_CR0,    "read_cr0" }, \
+	{ SVM_EXIT_READ_CR2,    "read_cr2" }, \
 	{ SVM_EXIT_READ_CR3,    "read_cr3" }, \
 	{ SVM_EXIT_READ_CR4,    "read_cr4" }, \
 	{ SVM_EXIT_READ_CR8,    "read_cr8" }, \
 	{ SVM_EXIT_WRITE_CR0,   "write_cr0" }, \
+	{ SVM_EXIT_WRITE_CR2,   "write_cr2" }, \
 	{ SVM_EXIT_WRITE_CR3,   "write_cr3" }, \
 	{ SVM_EXIT_WRITE_CR4,   "write_cr4" }, \
 	{ SVM_EXIT_WRITE_CR8,   "write_cr8" }, \
@@ -91,26 +95,57 @@
 	{ SVM_EXIT_READ_DR1,    "read_dr1" }, \
 	{ SVM_EXIT_READ_DR2,    "read_dr2" }, \
 	{ SVM_EXIT_READ_DR3,    "read_dr3" }, \
+	{ SVM_EXIT_READ_DR4,    "read_dr4" }, \
+	{ SVM_EXIT_READ_DR5,    "read_dr5" }, \
+	{ SVM_EXIT_READ_DR6,    "read_dr6" }, \
+	{ SVM_EXIT_READ_DR7,    "read_dr7" }, \
 	{ SVM_EXIT_WRITE_DR0,   "write_dr0" }, \
 	{ SVM_EXIT_WRITE_DR1,   "write_dr1" }, \
 	{ SVM_EXIT_WRITE_DR2,   "write_dr2" }, \
 	{ SVM_EXIT_WRITE_DR3,   "write_dr3" }, \
+	{ SVM_EXIT_WRITE_DR4,   "write_dr4" }, \
 	{ SVM_EXIT_WRITE_DR5,   "write_dr5" }, \
+	{ SVM_EXIT_WRITE_DR6,   "write_dr6" }, \
 	{ SVM_EXIT_WRITE_DR7,   "write_dr7" }, \
+	{ SVM_EXIT_EXCP_BASE + DE_VECTOR,       "DE excp" }, \
 	{ SVM_EXIT_EXCP_BASE + DB_VECTOR,       "DB excp" }, \
 	{ SVM_EXIT_EXCP_BASE + BP_VECTOR,       "BP excp" }, \
+	{ SVM_EXIT_EXCP_BASE + OF_VECTOR,       "OF excp" }, \
+	{ SVM_EXIT_EXCP_BASE + BR_VECTOR,       "BR excp" }, \
 	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
-	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
 	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
+	{ SVM_EXIT_EXCP_BASE + DF_VECTOR,       "DF excp" }, \
+	{ SVM_EXIT_EXCP_BASE + TS_VECTOR,       "TS excp" }, \
+	{ SVM_EXIT_EXCP_BASE + NP_VECTOR,       "NP excp" }, \
+	{ SVM_EXIT_EXCP_BASE + SS_VECTOR,       "SS excp" }, \
+	{ SVM_EXIT_EXCP_BASE + GP_VECTOR,       "GP excp" }, \
+	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
+	{ SVM_EXIT_EXCP_BASE + MF_VECTOR,       "MF excp" }, \
 	{ SVM_EXIT_EXCP_BASE + AC_VECTOR,       "AC excp" }, \
 	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
+	{ SVM_EXIT_EXCP_BASE + XM_VECTOR,       "XF excp" }, \
 	{ SVM_EXIT_INTR,        "interrupt" }, \
 	{ SVM_EXIT_NMI,         "nmi" }, \
 	{ SVM_EXIT_SMI,         "smi" }, \
 	{ SVM_EXIT_INIT,        "init" }, \
 	{ SVM_EXIT_VINTR,       "vintr" }, \
 	{ SVM_EXIT_CR0_SEL_WRITE, "cr0_sel_write" }, \
+	{ SVM_EXIT_IDTR_READ,   "read_idtr" }, \
+	{ SVM_EXIT_GDTR_READ,   "read_gdtr" }, \
+	{ SVM_EXIT_LDTR_READ,   "read_ldtr" }, \
+	{ SVM_EXIT_TR_READ,     "read_rt" }, \
+	{ SVM_EXIT_IDTR_WRITE,  "write_idtr" }, \
+	{ SVM_EXIT_GDTR_WRITE,  "write_gdtr" }, \
+	{ SVM_EXIT_LDTR_WRITE,  "write_ldtr" }, \
+	{ SVM_EXIT_TR_WRITE,    "write_rt" }, \
+	{ SVM_EXIT_RDTSC,       "rdtsc" }, \
+	{ SVM_EXIT_RDPMC,       "rdpmc" }, \
+	{ SVM_EXIT_PUSHF,       "pushf" }, \
+	{ SVM_EXIT_POPF,        "popf" }, \
 	{ SVM_EXIT_CPUID,       "cpuid" }, \
+	{ SVM_EXIT_RSM,         "rsm" }, \
+	{ SVM_EXIT_IRET,        "iret" }, \
+	{ SVM_EXIT_SWINT,       "swint" }, \
 	{ SVM_EXIT_INVD,        "invd" }, \
 	{ SVM_EXIT_PAUSE,       "pause" }, \
 	{ SVM_EXIT_HLT,         "hlt" }, \
@@ -119,6 +154,7 @@
 	{ SVM_EXIT_IOIO,        "io" }, \
 	{ SVM_EXIT_MSR,         "msr" }, \
 	{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
+	{ SVM_EXIT_FERR_FREEZE, "ferr_freeze" }, \
 	{ SVM_EXIT_SHUTDOWN,    "shutdown" }, \
 	{ SVM_EXIT_VMRUN,       "vmrun" }, \
 	{ SVM_EXIT_VMMCALL,     "hypercall" }, \
@@ -127,14 +163,16 @@
 	{ SVM_EXIT_STGI,        "stgi" }, \
 	{ SVM_EXIT_CLGI,        "clgi" }, \
 	{ SVM_EXIT_SKINIT,      "skinit" }, \
+	{ SVM_EXIT_RDTSCP,      "rdtscp" }, \
+	{ SVM_EXIT_ICEBP,       "icebp" }, \
 	{ SVM_EXIT_WBINVD,      "wbinvd" }, \
 	{ SVM_EXIT_MONITOR,     "monitor" }, \
 	{ SVM_EXIT_MWAIT,       "mwait" }, \
 	{ SVM_EXIT_XSETBV,      "xsetbv" }, \
 	{ SVM_EXIT_NPF,         "npf" }, \
-	{ SVM_EXIT_RSM,         "rsm" }, \
 	{ SVM_EXIT_AVIC_INCOMPLETE_IPI,		"avic_incomplete_ipi" }, \
-	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS,   "avic_unaccelerated_access" }
+	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS,   "avic_unaccelerated_access" }, \
+	{ SVM_EXIT_ERR,         "invalid_guest_state" }
 
 
 #endif /* _UAPI__SVM_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6ef6ed9..0fe6953f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -37,6 +37,7 @@
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
+#include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
@@ -270,6 +271,8 @@
 static __init int setup_disable_smep(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SMEP);
+	/* Check for things that depend on SMEP being enabled: */
+	check_mpx_erratum(&boot_cpu_data);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
@@ -310,6 +313,10 @@
 
 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 {
+	/* check the boot processor, plus compile options for PKU: */
+	if (!cpu_feature_enabled(X86_FEATURE_PKU))
+		return;
+	/* checks the actual processor's cpuid bits: */
 	if (!cpu_has(c, X86_FEATURE_PKU))
 		return;
 	if (pku_disabled)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8dae51f..6e2ffbe 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -25,6 +25,41 @@
 #include <asm/apic.h>
 #endif
 
+/*
+ * Just in case our CPU detection goes bad, or you have a weird system,
+ * allow a way to override the automatic disabling of MPX.
+ */
+static int forcempx;
+
+static int __init forcempx_setup(char *__unused)
+{
+	forcempx = 1;
+
+	return 1;
+}
+__setup("intel-skd-046-workaround=disable", forcempx_setup);
+
+void check_mpx_erratum(struct cpuinfo_x86 *c)
+{
+	if (forcempx)
+		return;
+	/*
+	 * Turn off the MPX feature on CPUs where SMEP is not
+	 * available or disabled.
+	 *
+	 * Works around Intel Erratum SKD046: "Branch Instructions
+	 * May Initialize MPX Bound Registers Incorrectly".
+	 *
+	 * This might falsely disable MPX on systems without
+	 * SMEP, like Atom processors without SMEP.  But there
+	 * is no such hardware known at the moment.
+	 */
+	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
+		setup_clear_cpu_cap(X86_FEATURE_MPX);
+		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
+	}
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
 	u64 misc_enable;
@@ -173,6 +208,8 @@
 		if (edx & (1U << 28))
 			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
 	}
+
+	check_mpx_erratum(c);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index ba7fbba..5a294e4 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -538,3 +538,48 @@
 	return -ENOEXEC;
 }
 #endif /* CONFIG_KEXEC_FILE */
+
+static int
+kexec_mark_range(unsigned long start, unsigned long end, bool protect)
+{
+	struct page *page;
+	unsigned int nr_pages;
+
+	/*
+	 * For physical range: [start, end]. We must skip the unassigned
+	 * crashk resource with zero-valued "end" member.
+	 */
+	if (!end || start > end)
+		return 0;
+
+	page = pfn_to_page(start >> PAGE_SHIFT);
+	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+	if (protect)
+		return set_pages_ro(page, nr_pages);
+	else
+		return set_pages_rw(page, nr_pages);
+}
+
+static void kexec_mark_crashkres(bool protect)
+{
+	unsigned long control;
+
+	kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect);
+
+	/* Don't touch the control code page used in crash_kexec().*/
+	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
+	/* Control code page is located in the 2nd page. */
+	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
+	control += KEXEC_CONTROL_PAGE_SIZE;
+	kexec_mark_range(control, crashk_res.end, protect);
+}
+
+void arch_kexec_protect_crashkres(void)
+{
+	kexec_mark_crashkres(true);
+}
+
+void arch_kexec_unprotect_crashkres(void)
+{
+	kexec_mark_crashkres(false);
+}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6b16c36..6e789ca 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -532,7 +532,7 @@
 
 	switch (code) {
 	case ARCH_SET_GS:
-		if (addr >= TASK_SIZE_OF(task))
+		if (addr >= TASK_SIZE_MAX)
 			return -EPERM;
 		cpu = get_cpu();
 		task->thread.gsindex = 0;
@@ -546,7 +546,7 @@
 	case ARCH_SET_FS:
 		/* Not strictly needed for fs, but do it for symmetry
 		   with gs */
-		if (addr >= TASK_SIZE_OF(task))
+		if (addr >= TASK_SIZE_MAX)
 			return -EPERM;
 		cpu = get_cpu();
 		task->thread.fsindex = 0;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e60ef91..600edd2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -392,7 +392,7 @@
 
 #ifdef CONFIG_X86_64
 	case offsetof(struct user_regs_struct,fs_base):
-		if (value >= TASK_SIZE_OF(child))
+		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		/*
 		 * When changing the segment base, use do_arch_prctl
@@ -406,7 +406,7 @@
 		/*
 		 * Exactly the same here as the %fs handling above.
 		 */
-		if (value >= TASK_SIZE_OF(child))
+		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		if (child->thread.gsbase != value)
 			return do_arch_prctl(child, ARCH_SET_GS, value);
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 6aa0f4d..9911a06 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -23,6 +23,7 @@
 #include <asm/param.h>
 
 /* CPU reference clock frequency: in KHz */
+#define FREQ_80		80000
 #define FREQ_83		83200
 #define FREQ_100	99840
 #define FREQ_133	133200
@@ -56,6 +57,8 @@
 	{ 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
 	/* ANN */
 	{ 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
+	/* AIRMONT */
+	{ 6, 0x4c, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, FREQ_80,	0, 0, 0 } },
 };
 
 static int match_cpu(u8 family, u8 model)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 769af90..7597b42 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -181,19 +181,22 @@
 			     struct kvm_cpuid_entry __user *entries)
 {
 	int r, i;
-	struct kvm_cpuid_entry *cpuid_entries;
+	struct kvm_cpuid_entry *cpuid_entries = NULL;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -ENOMEM;
-	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-	if (!cpuid_entries)
-		goto out;
-	r = -EFAULT;
-	if (copy_from_user(cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-		goto out_free;
+	if (cpuid->nent) {
+		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+					cpuid->nent);
+		if (!cpuid_entries)
+			goto out;
+		r = -EFAULT;
+		if (copy_from_user(cpuid_entries, entries,
+				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+			goto out;
+	}
 	for (i = 0; i < cpuid->nent; i++) {
 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@
 	kvm_x86_ops->cpuid_update(vcpu);
 	r = kvm_update_cpuid(vcpu);
 
-out_free:
-	vfree(cpuid_entries);
 out:
+	vfree(cpuid_entries);
 	return r;
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24e8001..def97b3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -336,12 +336,12 @@
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@
 	 */
 	smp_wmb();
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@
 	ssptep = (union split_spte *)sptep;
 	sspte = (union split_spte)spte;
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
 	/*
 	 * If we map the spte from present to nonpresent, we should clear
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2214214..1163e81 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -84,7 +84,7 @@
 #define TSC_RATIO_MIN		0x0000000000000001ULL
 #define TSC_RATIO_MAX		0x000000ffffffffffULL
 
-#define AVIC_HPA_MASK	~((0xFFFULL << 52) || 0xFFF)
+#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)
 
 /*
  * 0xff is broadcast, so the max index allowed for physical APIC ID
@@ -3597,7 +3597,7 @@
 	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
 	u32 icrl = svm->vmcb->control.exit_info_1;
 	u32 id = svm->vmcb->control.exit_info_2 >> 32;
-	u32 index = svm->vmcb->control.exit_info_2 && 0xFF;
+	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
 	struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e605d1e..fb93010 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2418,7 +2418,9 @@
 
 	if (is_guest_mode(vcpu))
 		msr_bitmap = vmx_msr_bitmap_nested;
-	else if (vcpu->arch.apic_base & X2APIC_ENABLE) {
+	else if (cpu_has_secondary_exec_ctrls() &&
+		 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
 		if (is_long_mode(vcpu))
 			msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
 		else
@@ -4787,6 +4789,19 @@
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
+	if (cpu_has_secondary_exec_ctrls()) {
+		if (kvm_vcpu_apicv_active(vcpu))
+			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+				      SECONDARY_EXEC_APIC_REGISTER_VIRT |
+				      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+		else
+			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+					SECONDARY_EXEC_APIC_REGISTER_VIRT |
+					SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+	}
+
+	if (cpu_has_vmx_msr_bitmap())
+		vmx_set_msr_bitmap(vcpu);
 }
 
 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
@@ -6333,23 +6348,20 @@
 
 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
-	if (enable_apicv) {
-		for (msr = 0x800; msr <= 0x8ff; msr++)
-			vmx_disable_intercept_msr_read_x2apic(msr);
+	for (msr = 0x800; msr <= 0x8ff; msr++)
+		vmx_disable_intercept_msr_read_x2apic(msr);
 
-		/* According SDM, in x2apic mode, the whole id reg is used.
-		 * But in KVM, it only use the highest eight bits. Need to
-		 * intercept it */
-		vmx_enable_intercept_msr_read_x2apic(0x802);
-		/* TMCCT */
-		vmx_enable_intercept_msr_read_x2apic(0x839);
-		/* TPR */
-		vmx_disable_intercept_msr_write_x2apic(0x808);
-		/* EOI */
-		vmx_disable_intercept_msr_write_x2apic(0x80b);
-		/* SELF-IPI */
-		vmx_disable_intercept_msr_write_x2apic(0x83f);
-	}
+	/* According to the SDM, in x2apic mode the whole id reg is used.  But in
+	 * KVM, it only uses the highest eight bits, so we need to intercept it. */
+	vmx_enable_intercept_msr_read_x2apic(0x802);
+	/* TMCCT */
+	vmx_enable_intercept_msr_read_x2apic(0x839);
+	/* TPR */
+	vmx_disable_intercept_msr_write_x2apic(0x808);
+	/* EOI */
+	vmx_disable_intercept_msr_write_x2apic(0x80b);
+	/* SELF-IPI */
+	vmx_disable_intercept_msr_write_x2apic(0x83f);
 
 	if (enable_ept) {
 		kvm_mmu_set_mask_ptes(0ull,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c805cf4..902d9da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2314,6 +2314,7 @@
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
+	case MSR_IA32_PERF_CTL:
 		msr_info->data = 0;
 		break;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@
 			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
+	if (events->exception.injected &&
+	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7825,7 @@
 
 	slot = id_to_memslot(slots, id);
 	if (size) {
-		if (WARN_ON(slot->npages))
+		if (slot->npages)
 			return -EEXIST;
 
 		/*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5ce1ed0..7d1fa7c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -292,7 +292,7 @@
 		return;
 
 	for (address = VMALLOC_START & PMD_MASK;
-	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
 		struct page *page;
 
@@ -854,8 +854,13 @@
 				return;
 		}
 #endif
-		/* Kernel addresses are always protection faults: */
-		if (address >= TASK_SIZE)
+
+		/*
+		 * To avoid leaking information about the kernel page table
+		 * layout, pretend that user-mode accesses to kernel addresses
+		 * are always protection faults.
+		 */
+		if (address >= TASK_SIZE_MAX)
 			error_code |= PF_PROT;
 
 		if (likely(show_unhandled_signals))
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 4bd08b0..99ddab7 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -491,8 +491,11 @@
 #endif
 	__acpi_register_gsi = acpi_register_gsi_xen;
 	__acpi_unregister_gsi = NULL;
-	/* Pre-allocate legacy irqs */
-	for (irq = 0; irq < nr_legacy_irqs(); irq++) {
+	/*
+	 * Pre-allocate the legacy IRQs.  Use NR_IRQS_LEGACY here
+	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
+	 */
+	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
 		int trigger, polarity;
 
 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 92723ae..cd95075 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -11,7 +11,6 @@
 #include <asm/msr.h>
 #include <asm/processor-flags.h>
 #include <asm/page_types.h>
-#include <asm/frame.h>
 
 #define SAVE_XMM			\
 	mov %rsp, %rax;			\
@@ -40,10 +39,10 @@
 	mov (%rsp), %rsp
 
 ENTRY(efi_call)
-	FRAME_BEGIN
+	pushq %rbp
+	movq %rsp, %rbp
 	SAVE_XMM
-	mov (%rsp), %rax
-	mov 8(%rax), %rax
+	mov 16(%rbp), %rax
 	subq $48, %rsp
 	mov %r9, 32(%rsp)
 	mov %rax, 40(%rsp)
@@ -53,6 +52,6 @@
 	call *%rdi
 	addq $48, %rsp
 	RESTORE_XMM
-	FRAME_END
+	popq %rbp
 	ret
 ENDPROC(efi_call)
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 92e3e1d..12734a9 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -26,7 +26,5 @@
 
 $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
 	$(call if_changed,bin2c)
-	@:
-
 
 obj-$(CONFIG_KEXEC_FILE)	+= kexec-purgatory.o
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b959646..c556c5a 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -59,7 +59,6 @@
 targets += realmode.bin
 $(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs FORCE
 	$(call if_changed,objcopy)
-	@:
 
 quiet_cmd_relocs = RELOCS  $@
       cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
index 41bfe84..00f54a9 100644
--- a/arch/x86/um/os-Linux/registers.c
+++ b/arch/x86/um/os-Linux/registers.c
@@ -11,21 +11,56 @@
 #endif
 #include <longjmp.h>
 #include <sysdep/ptrace_user.h>
+#include <sys/uio.h>
+#include <asm/sigcontext.h>
+#include <linux/elf.h>
 
-int save_fp_registers(int pid, unsigned long *fp_regs)
+int have_xstate_support;
+
+int save_i387_registers(int pid, unsigned long *fp_regs)
 {
 	if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
 		return -errno;
 	return 0;
 }
 
-int restore_fp_registers(int pid, unsigned long *fp_regs)
+int save_fp_registers(int pid, unsigned long *fp_regs)
+{
+	struct iovec iov;
+
+	if (have_xstate_support) {
+		iov.iov_base = fp_regs;
+		iov.iov_len = sizeof(struct _xstate);
+		if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
+			return -errno;
+		return 0;
+	} else {
+		return save_i387_registers(pid, fp_regs);
+	}
+}
+
+int restore_i387_registers(int pid, unsigned long *fp_regs)
 {
 	if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
 		return -errno;
 	return 0;
 }
 
+int restore_fp_registers(int pid, unsigned long *fp_regs)
+{
+	struct iovec iov;
+
+	if (have_xstate_support) {
+		iov.iov_base = fp_regs;
+		iov.iov_len = sizeof(struct _xstate);
+		if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
+			return -errno;
+		return 0;
+	} else {
+		return restore_i387_registers(pid, fp_regs);
+	}
+}
+
 #ifdef __i386__
 int have_fpx_regs = 1;
 int save_fpx_registers(int pid, unsigned long *fp_regs)
@@ -85,6 +120,16 @@
 	return restore_fp_registers(pid, regs);
 }
 
+void arch_init_registers(int pid)
+{
+	struct _xstate fp_regs;
+	struct iovec iov;
+
+	iov.iov_base = &fp_regs;
+	iov.iov_len = sizeof(struct _xstate);
+	if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0)
+		have_xstate_support = 1;
+}
 #endif
 
 unsigned long get_thread_reg(int reg, jmp_buf *buf)
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 47c78d5..ebd4dd6 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -194,7 +194,8 @@
 	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
 	struct user_i387_struct fpregs;
 
-	err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
+	err = save_i387_registers(userspace_pid[cpu],
+				  (unsigned long *) &fpregs);
 	if (err)
 		return err;
 
@@ -214,7 +215,7 @@
 	if (n > 0)
 		return -EFAULT;
 
-	return restore_fp_registers(userspace_pid[cpu],
+	return restore_i387_registers(userspace_pid[cpu],
 				    (unsigned long *) &fpregs);
 }
 
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a629694..faab418 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -222,14 +222,14 @@
 static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
 	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
-	long fpregs[HOST_FP_SIZE];
+	struct user_i387_struct fpregs;
 
-	BUG_ON(sizeof(*buf) != sizeof(fpregs));
-	err = save_fp_registers(userspace_pid[cpu], fpregs);
+	err = save_i387_registers(userspace_pid[cpu],
+				  (unsigned long *) &fpregs);
 	if (err)
 		return err;
 
-	n = copy_to_user(buf, fpregs, sizeof(fpregs));
+	n = copy_to_user(buf, &fpregs, sizeof(fpregs));
 	if (n > 0)
 		return -EFAULT;
 
@@ -239,14 +239,14 @@
 static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
 	int n, cpu = ((struct thread_info *) child->stack)->cpu;
-	long fpregs[HOST_FP_SIZE];
+	struct user_i387_struct fpregs;
 
-	BUG_ON(sizeof(*buf) != sizeof(fpregs));
-	n = copy_from_user(fpregs, buf, sizeof(fpregs));
+	n = copy_from_user(&fpregs, buf, sizeof(fpregs));
 	if (n > 0)
 		return -EFAULT;
 
-	return restore_fp_registers(userspace_pid[cpu], fpregs);
+	return restore_i387_registers(userspace_pid[cpu],
+				      (unsigned long *) &fpregs);
 }
 
 long subarch_ptrace(struct task_struct *child, long request,
diff --git a/arch/x86/um/shared/sysdep/ptrace_64.h b/arch/x86/um/shared/sysdep/ptrace_64.h
index 919789f..0dc223a 100644
--- a/arch/x86/um/shared/sysdep/ptrace_64.h
+++ b/arch/x86/um/shared/sysdep/ptrace_64.h
@@ -57,8 +57,6 @@
 #define UPT_SYSCALL_ARG5(r) UPT_R8(r)
 #define UPT_SYSCALL_ARG6(r) UPT_R9(r)
 
-static inline void arch_init_registers(int pid)
-{
-}
+extern void arch_init_registers(int pid);
 
 #endif
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 14fcd01..49e5036 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -225,26 +225,16 @@
 	} else
 #endif
 	{
-		struct user_i387_struct fp;
-
-		err = copy_from_user(&fp, (void *)sc.fpstate,
-				     sizeof(struct user_i387_struct));
+		err = copy_from_user(regs->regs.fp, (void *)sc.fpstate,
+				     sizeof(struct _xstate));
 		if (err)
 			return 1;
-
-		err = restore_fp_registers(pid, (unsigned long *) &fp);
-		if (err < 0) {
-			printk(KERN_ERR "copy_sc_from_user - "
-			       "restore_fp_registers failed, errno = %d\n",
-			       -err);
-			return 1;
-		}
 	}
 	return 0;
 }
 
 static int copy_sc_to_user(struct sigcontext __user *to,
-			   struct _fpstate __user *to_fp, struct pt_regs *regs,
+			   struct _xstate __user *to_fp, struct pt_regs *regs,
 			   unsigned long mask)
 {
 	struct sigcontext sc;
@@ -310,25 +300,22 @@
 			return 1;
 		}
 
-		err = convert_fxsr_to_user(to_fp, &fpx);
+		err = convert_fxsr_to_user(&to_fp->fpstate, &fpx);
 		if (err)
 			return 1;
 
-		err |= __put_user(fpx.swd, &to_fp->status);
-		err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
+		err |= __put_user(fpx.swd, &to_fp->fpstate.status);
+		err |= __put_user(X86_FXSR_MAGIC, &to_fp->fpstate.magic);
 		if (err)
 			return 1;
 
-		if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
+		if (copy_to_user(&to_fp->fpstate._fxsr_env[0], &fpx,
 				 sizeof(struct user_fxsr_struct)))
 			return 1;
 	} else
 #endif
 	{
-		struct user_i387_struct fp;
-
-		err = save_fp_registers(pid, (unsigned long *) &fp);
-		if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
+		if (copy_to_user(to_fp, regs->regs.fp, sizeof(struct _xstate)))
 			return 1;
 	}
 
@@ -337,7 +324,7 @@
 
 #ifdef CONFIG_X86_32
 static int copy_ucontext_to_user(struct ucontext __user *uc,
-				 struct _fpstate __user *fp, sigset_t *set,
+				 struct _xstate __user *fp, sigset_t *set,
 				 unsigned long sp)
 {
 	int err = 0;
@@ -353,7 +340,7 @@
 	char __user *pretcode;
 	int sig;
 	struct sigcontext sc;
-	struct _fpstate fpstate;
+	struct _xstate fpstate;
 	unsigned long extramask[_NSIG_WORDS-1];
 	char retcode[8];
 };
@@ -366,7 +353,7 @@
 	void __user *puc;
 	struct siginfo info;
 	struct ucontext uc;
-	struct _fpstate fpstate;
+	struct _xstate fpstate;
 	char retcode[8];
 };
 
@@ -495,7 +482,7 @@
 	char __user *pretcode;
 	struct ucontext uc;
 	struct siginfo info;
-	struct _fpstate fpstate;
+	struct _xstate fpstate;
 };
 
 int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index 470564b..cb3c223 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,7 @@
 	DEFINE(HOST_GS, GS);
 	DEFINE(HOST_ORIG_AX, ORIG_EAX);
 #else
-	DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
+	DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
 	DEFINE_LONGS(HOST_BX, RBX);
 	DEFINE_LONGS(HOST_CX, RCX);
 	DEFINE_LONGS(HOST_DI, RDI);
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 237c683..6be22f9 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -61,7 +61,8 @@
 	if (!vdso_enabled)
 		return 0;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
 		VM_READ|VM_EXEC|
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 7ab2951..e345891 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -393,6 +393,9 @@
 	unsigned long i = 0;
 	unsigned long n = end_pfn - start_pfn;
 
+	if (remap_pfn == 0)
+		remap_pfn = nr_pages;
+
 	while (i < n) {
 		unsigned long cur_pfn = start_pfn + i;
 		unsigned long left = n - i;
@@ -438,17 +441,29 @@
 	return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
+static unsigned long __init xen_count_remap_pages(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long remap_pages)
+{
+	if (start_pfn >= nr_pages)
+		return remap_pages;
+
+	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+}
+
+static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+			      unsigned long nr_pages, unsigned long last_val))
 {
 	phys_addr_t start = 0;
-	unsigned long last_pfn = nr_pages;
+	unsigned long ret_val = 0;
 	const struct e820entry *entry = xen_e820_map;
 	int i;
 
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
-	 * end of the map) is reached, then set the 1:1 map and
-	 * remap the memory in those non-RAM regions.
+	 * end of the map) is reached, then call the provided function
+	 * to perform its duty on the non-RAM region.
 	 *
 	 * The combined non-RAM regions are rounded to a whole number
 	 * of pages so any partial pages are accessible via the 1:1
@@ -466,14 +481,13 @@
 				end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				last_pfn = xen_set_identity_and_remap_chunk(
-						start_pfn, end_pfn, nr_pages,
-						last_pfn);
+				ret_val = func(start_pfn, end_pfn, nr_pages,
+					       ret_val);
 			start = end;
 		}
 	}
 
-	pr_info("Released %ld page(s)\n", xen_released_pages);
+	return ret_val;
 }
 
 /*
@@ -596,35 +610,6 @@
 	}
 }
 
-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
-{
-	unsigned long extra = 0;
-	unsigned long start_pfn, end_pfn;
-	const struct e820entry *entry = xen_e820_map;
-	int i;
-
-	end_pfn = 0;
-	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
-		start_pfn = PFN_DOWN(entry->addr);
-		/* Adjacent regions on non-page boundaries handling! */
-		end_pfn = min(end_pfn, start_pfn);
-
-		if (start_pfn >= max_pfn)
-			return extra + max_pfn - end_pfn;
-
-		/* Add any holes in map to result. */
-		extra += start_pfn - end_pfn;
-
-		end_pfn = PFN_UP(entry->addr + entry->size);
-		end_pfn = min(end_pfn, max_pfn);
-
-		if (entry->type != E820_RAM)
-			extra += end_pfn - start_pfn;
-	}
-
-	return extra;
-}
-
 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
 {
 	struct e820entry *entry;
@@ -804,7 +789,7 @@
 	max_pages = xen_get_max_pages();
 
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_count_remap_pages(max_pfn);
+	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
 
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;
@@ -922,7 +907,9 @@
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_set_identity_and_remap(max_pfn);
+	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+
+	pr_info("Released %ld page(s)\n", xen_released_pages);
 
 	return "Xen";
 }
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index a0a4e55..6deba5b 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -290,11 +290,11 @@
 	WARN_ON(!clockevent_state_oneshot(evt));
 
 	single.timeout_abs_ns = get_abs_timeout(delta);
-	single.flags = VCPU_SSHOTTMR_future;
+	/* Get an event anyway, even if the timeout is already expired */
+	single.flags = 0;
 
 	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
-
-	BUG_ON(ret != 0 && ret != -ETIME);
+	BUG_ON(ret != 0);
 
 	return ret;
 }
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index f4b7b38..d9444f0 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -11,7 +11,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_NAMESPACES=y
 CONFIG_SCHED_AUTOGROUP=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 22eeacb..61f943c 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -11,7 +11,6 @@
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_NAMESPACES=y
 CONFIG_SCHED_AUTOGROUP=y
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index a6b00b3..ef90479 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -323,23 +323,23 @@
 
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	perf_callchain_store(entry, frame->pc);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
-	xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+	xtensa_backtrace_kernel(regs, entry->max_stack,
 				callchain_trace, NULL, entry);
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+	xtensa_backtrace_user(regs, entry->max_stack,
 			      callchain_trace, entry);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7df9c92..29cbc1b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2020,7 +2020,7 @@
 
 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
 	if (!q->queue_ctx)
-		return ERR_PTR(-ENOMEM);
+		goto err_exit;
 
 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
 						GFP_KERNEL, set->numa_node);
@@ -2084,6 +2084,8 @@
 	kfree(q->queue_hw_ctx);
 err_percpu:
 	free_percpu(q->queue_ctx);
+err_exit:
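+	/* Clear mq_ops so cleanup does not treat this as an initialized mq queue */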
+	q->mq_ops = NULL;
 	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
diff --git a/block/ioctl.c b/block/ioctl.c
index 4ff1f92..ed2397f 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -4,7 +4,6 @@
 #include <linux/gfp.h>
 #include <linux/blkpg.h>
 #include <linux/hdreg.h>
-#include <linux/badblocks.h>
 #include <linux/backing-dev.h>
 #include <linux/fs.h>
 #include <linux/blktrace_api.h>
@@ -407,35 +406,6 @@
 		ret == -ENOIOCTLCMD;
 }
 
-#ifdef CONFIG_FS_DAX
-bool blkdev_dax_capable(struct block_device *bdev)
-{
-	struct gendisk *disk = bdev->bd_disk;
-
-	if (!disk->fops->direct_access)
-		return false;
-
-	/*
-	 * If the partition is not aligned on a page boundary, we can't
-	 * do dax I/O to it.
-	 */
-	if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512))
-			|| (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
-		return false;
-
-	/*
-	 * If the device has known bad blocks, force all I/O through the
-	 * driver / page cache.
-	 *
-	 * TODO: support finer grained dax error handling
-	 */
-	if (disk->bb && disk->bb->count)
-		return false;
-
-	return true;
-}
-#endif
-
 static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
 		unsigned cmd, unsigned long arg)
 {
@@ -598,9 +568,6 @@
 	case BLKTRACESETUP:
 	case BLKTRACETEARDOWN:
 		return blk_trace_ioctl(bdev, cmd, argp);
-	case BLKDAXGET:
-		return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
-		break;
 	case IOC_PR_REGISTER:
 		return blkdev_pr_register(bdev, argp);
 	case IOC_PR_RESERVE:
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index e28e912..331f6ba 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -13,6 +13,7 @@
 	tristate "Asymmetric public-key crypto algorithm subtype"
 	select MPILIB
 	select CRYPTO_HASH_INFO
+	select CRYPTO_AKCIPHER
 	help
 	  This option provides support for asymmetric public key type handling.
 	  If signature generation and/or verification are to be used,
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index 3b92523..1063b64 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -62,7 +62,7 @@
 
 	return verify_pkcs7_signature(NULL, 0,
 				      prep->data, prep->datalen,
-				      NULL, usage,
+				      (void *)1UL, usage,
 				      pkcs7_view_content, prep);
 }
 
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 430f761..e1e2066 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -192,6 +192,8 @@
 
 source "drivers/nvdimm/Kconfig"
 
+source "drivers/dax/Kconfig"
+
 source "drivers/nvmem/Kconfig"
 
 source "drivers/hwtracing/stm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 8f5d076..0b6f3d6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -66,6 +66,7 @@
 obj-$(CONFIG_NVM)		+= lightnvm/
 obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_LIBNVDIMM)		+= nvdimm/
+obj-$(CONFIG_DEV_DAX)		+= dax/
 obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 15e4604..1f41284 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -265,7 +265,7 @@
 	char *p;
 
 	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync tail before inserting logs */
 	smp_mb();
@@ -286,7 +286,7 @@
 	char *p;
 
 	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync head before removing cmds */
 	smp_rmb();
@@ -330,7 +330,7 @@
 				goto again;
 			break;
 		}
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			break;
 		size += ret;
 		count -= ret;
@@ -373,7 +373,7 @@
 			if (ret == 0)
 				goto again;
 		}
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			break;
 		*(msg + size) = (char)ret;
 		size++;
@@ -526,7 +526,7 @@
 	}
 	acpi_aml_io.users++;
 err_lock:
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		if (acpi_aml_active_reader == file)
 			acpi_aml_active_reader = NULL;
 	}
@@ -587,7 +587,7 @@
 	char *p;
 
 	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync head before removing logs */
 	smp_rmb();
@@ -602,7 +602,7 @@
 	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
 	ret = n;
 out:
-	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
+	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
 	return ret;
 }
 
@@ -634,7 +634,7 @@
 					goto again;
 			}
 		}
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			if (!acpi_aml_running())
 				ret = 0;
 			break;
@@ -657,7 +657,7 @@
 	char *p;
 
 	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync tail before inserting cmds */
 	smp_mb();
@@ -672,7 +672,7 @@
 	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
 	ret = n;
 out:
-	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
+	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
 	return n;
 }
 
@@ -704,7 +704,7 @@
 					goto again;
 			}
 		}
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			if (!acpi_aml_running())
 				ret = 0;
 			break;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0d92d0f..c7ba948 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@
 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
 		pr->pblk = object.processor.pblk_address;
-
-		/*
-		 * We don't care about error returns - we just try to mark
-		 * these reserved so that nobody else is confused into thinking
-		 * that this region might be unused..
-		 *
-		 * (In particular, allocating the IO range for Cardbus)
-		 */
-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 	}
 
 	/*
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3d5b8a0..c1d138e 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -754,7 +754,8 @@
 }
 
 int acpi_video_get_levels(struct acpi_device *device,
-			  struct acpi_video_device_brightness **dev_br)
+			  struct acpi_video_device_brightness **dev_br,
+			  int *pmax_level)
 {
 	union acpi_object *obj = NULL;
 	int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@
 
 	br->count = count;
 	*dev_br = br;
+	if (pmax_level)
+		*pmax_level = max_level;
 
 out:
 	kfree(obj);
@@ -869,7 +872,7 @@
 	struct acpi_video_device_brightness *br = NULL;
 	int result = -EINVAL;
 
-	result = acpi_video_get_levels(device->dev, &br);
+	result = acpi_video_get_levels(device->dev, &br, &max_level);
 	if (result)
 		return result;
 	device->brightness = br;
@@ -1737,7 +1740,7 @@
 
 	mutex_lock(&video->device_list_lock);
 	list_for_each_entry(dev, &video->video_device_list, entry) {
-		if (!acpi_video_device_lcd_query_levels(dev, &levels))
+		if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
 			kfree(levels);
 	}
 	mutex_unlock(&video->device_list_lock);
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 0f18dbc..daceb80 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -83,27 +83,22 @@
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-	u64 address;
-
 	if (!reg->access_width) {
+		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+			max_bit_width = 32;
+		}
+
 		/*
 		 * Detect old register descriptors where only the bit_width field
-		 * makes senses. The target address is copied to handle possible
-		 * alignment issues.
+		 * makes sense.
 		 */
-		ACPI_MOVE_64_TO_64(&address, &reg->address);
-		if (!reg->bit_offset && reg->bit_width &&
+		if (reg->bit_width < max_bit_width &&
+		    !reg->bit_offset && reg->bit_width &&
 		    ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-		    ACPI_IS_ALIGNED(reg->bit_width, 8) &&
-		    ACPI_IS_ALIGNED(address, reg->bit_width)) {
+		    ACPI_IS_ALIGNED(reg->bit_width, 8)) {
 			return (reg->bit_width);
-		} else {
-			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-				return (32);
-			} else {
-				return (max_bit_width);
-			}
 		}
+		return (max_bit_width);
 	} else {
 		return (1 << (reg->access_width + 2));
 	}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b719ab3..ab23479 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1316,7 +1316,7 @@
 
 static void __exit acpi_battery_exit(void)
 {
-	async_synchronize_cookie(async_cookie);
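+	/* The cookie is exclusive, so +1 also waits for the probe itself */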
+	async_synchronize_cookie(async_cookie + 1);
 	acpi_bus_unregister_driver(&acpi_battery_driver);
 #ifdef CONFIG_ACPI_PROCFS_POWER
 	acpi_unlock_battery_dir(acpi_battery_dir);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 31e8da6..262ca31 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1051,7 +1051,7 @@
 	 * Maybe EC region is required at bus_scan/acpi_get_devices. So it
 	 * is necessary to enable it as early as possible.
 	 */
-	acpi_boot_ec_enable();
+	acpi_ec_dsdt_probe();
 
 	printk(KERN_INFO PREFIX "Interpreter enabled\n");
 
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index cd2c3d6..993fd31 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -319,6 +319,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
 
 int acpi_device_update_power(struct acpi_device *device, int *state_p)
 {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0e70181..73c76d6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1446,10 +1446,30 @@
 	return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+	{"PNP0C09", 0},
+	{"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-	if (!boot_ec)
+	acpi_status status;
+
+	if (boot_ec)
 		return 0;
+
+	/*
+	 * Find the EC from the DSDT if there is no ECDT EC available. When
+	 * this function is invoked, the ACPI tables have been fully loaded,
+	 * so we can walk the namespace now.
+	 */
+	boot_ec = make_acpi_ec();
+	if (!boot_ec)
+		return -ENOMEM;
+	status = acpi_get_devices(ec_device_ids[0].id,
+				  ec_parse_device, boot_ec, NULL);
+	if (ACPI_FAILURE(status) || !boot_ec->handle)
+		return -ENODEV;
 	if (!ec_install_handlers(boot_ec)) {
 		first_ec = boot_ec;
 		return 0;
@@ -1457,11 +1477,6 @@
 	return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-	{"PNP0C09", 0},
-	{"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9bb0773..27cc7fe 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -181,7 +181,7 @@
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 63cc9db..2215fc8 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -45,6 +45,11 @@
 MODULE_PARM_DESC(scrub_overflow_abort,
 		"Number of times we overflow ARS results before abort");
 
+static bool disable_vendor_specific;
+module_param(disable_vendor_specific, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_vendor_specific,
+		"Limit commands to the publicly specified set\n");
+
 static struct workqueue_struct *nfit_wq;
 
 struct nfit_table_prev {
@@ -171,33 +176,46 @@
 		unsigned int buf_len, int *cmd_rc)
 {
 	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
-	const struct nd_cmd_desc *desc = NULL;
 	union acpi_object in_obj, in_buf, *out_obj;
+	const struct nd_cmd_desc *desc = NULL;
 	struct device *dev = acpi_desc->dev;
+	struct nd_cmd_pkg *call_pkg = NULL;
 	const char *cmd_name, *dimm_name;
-	unsigned long dsm_mask;
+	unsigned long cmd_mask, dsm_mask;
 	acpi_handle handle;
+	unsigned int func;
 	const u8 *uuid;
 	u32 offset;
 	int rc, i;
 
+	func = cmd;
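+	/* ND_CMD_CALL passes a raw DSM function number inside an nd_cmd_pkg */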
+	if (cmd == ND_CMD_CALL) {
+		call_pkg = buf;
+		func = call_pkg->nd_command;
+	}
+
 	if (nvdimm) {
 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 		struct acpi_device *adev = nfit_mem->adev;
 
 		if (!adev)
 			return -ENOTTY;
+		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
+			return -ENOTTY;
+
 		dimm_name = nvdimm_name(nvdimm);
 		cmd_name = nvdimm_cmd_name(cmd);
+		cmd_mask = nvdimm_cmd_mask(nvdimm);
 		dsm_mask = nfit_mem->dsm_mask;
 		desc = nd_cmd_dimm_desc(cmd);
-		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
+		uuid = to_nfit_uuid(nfit_mem->family);
 		handle = adev->handle;
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
 		cmd_name = nvdimm_bus_cmd_name(cmd);
-		dsm_mask = nd_desc->dsm_mask;
+		cmd_mask = nd_desc->cmd_mask;
+		dsm_mask = cmd_mask;
 		desc = nd_cmd_bus_desc(cmd);
 		uuid = to_nfit_uuid(NFIT_DEV_BUS);
 		handle = adev->handle;
@@ -207,7 +225,7 @@
 	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
 		return -ENOTTY;
 
-	if (!test_bit(cmd, &dsm_mask))
+	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
 		return -ENOTTY;
 
 	in_obj.type = ACPI_TYPE_PACKAGE;
@@ -222,21 +240,44 @@
 		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
 				i, buf);
 
-	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
-		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
-				dimm_name, cmd_name, in_buf.buffer.length);
-		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
-				4, in_buf.buffer.pointer, min_t(u32, 128,
-					in_buf.buffer.length), true);
+	if (call_pkg) {
+		/* skip over package wrapper */
+		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
+		in_buf.buffer.length = call_pkg->nd_size_in;
 	}
 
-	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
+	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
+		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
+				__func__, dimm_name, cmd, func,
+				in_buf.buffer.length);
+		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
+			in_buf.buffer.pointer,
+			min_t(u32, 256, in_buf.buffer.length), true);
+	}
+
+	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
 	if (!out_obj) {
 		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
 				cmd_name);
 		return -EINVAL;
 	}
 
+	if (call_pkg) {
+		call_pkg->nd_fw_size = out_obj->buffer.length;
+		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
+			out_obj->buffer.pointer,
+			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));
+
+		ACPI_FREE(out_obj);
+		/*
+		 * We need to support FW functions whose output size is not
+		 * known in advance. The caller can determine the required
+		 * size based upon nd_fw_size. If we returned an error here
+		 * (as elsewhere), the caller would not be able to rely upon
+		 * the returned data to make that calculation.
+		 */
+		return 0;
+	}
+
 	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
 		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
 				__func__, dimm_name, cmd_name, out_obj->type);
@@ -658,6 +699,7 @@
 			if (!nfit_mem)
 				return -ENOMEM;
 			INIT_LIST_HEAD(&nfit_mem->list);
+			nfit_mem->acpi_desc = acpi_desc;
 			list_add(&nfit_mem->list, &acpi_desc->dimms);
 		}
 
@@ -819,7 +861,7 @@
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "%#x\n", dcr->vendor_id);
+	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
 }
 static DEVICE_ATTR_RO(vendor);
 
@@ -828,7 +870,7 @@
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "%#x\n", dcr->revision_id);
+	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
 }
 static DEVICE_ATTR_RO(rev_id);
 
@@ -837,28 +879,142 @@
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "%#x\n", dcr->device_id);
+	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
 }
 static DEVICE_ATTR_RO(device);
 
+static ssize_t subsystem_vendor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
+}
+static DEVICE_ATTR_RO(subsystem_vendor);
+
+static ssize_t subsystem_rev_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+	return sprintf(buf, "0x%04x\n",
+			be16_to_cpu(dcr->subsystem_revision_id));
+}
+static DEVICE_ATTR_RO(subsystem_rev_id);
+
+static ssize_t subsystem_device_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
+}
+static DEVICE_ATTR_RO(subsystem_device);
+
+static int num_nvdimm_formats(struct nvdimm *nvdimm)
+{
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+	int formats = 0;
+
+	if (nfit_mem->memdev_pmem)
+		formats++;
+	if (nfit_mem->memdev_bdw)
+		formats++;
+	return formats;
+}
+
 static ssize_t format_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "%#x\n", dcr->code);
+	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
 }
 static DEVICE_ATTR_RO(format);
 
+static ssize_t format1_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u32 handle;
+	ssize_t rc = -ENXIO;
+	struct nfit_mem *nfit_mem;
+	struct nfit_memdev *nfit_memdev;
+	struct acpi_nfit_desc *acpi_desc;
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+	nfit_mem = nvdimm_provider_data(nvdimm);
+	acpi_desc = nfit_mem->acpi_desc;
+	handle = to_nfit_memdev(dev)->device_handle;
+
+	/* assumes DIMMs have at most 2 published interface codes */
+	mutex_lock(&acpi_desc->init_mutex);
+	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
+		struct nfit_dcr *nfit_dcr;
+
+		if (memdev->device_handle != handle)
+			continue;
+
+		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+			if (nfit_dcr->dcr->region_index != memdev->region_index)
+				continue;
+			if (nfit_dcr->dcr->code == dcr->code)
+				continue;
+			rc = sprintf(buf, "%#x\n",
+					be16_to_cpu(nfit_dcr->dcr->code));
+			break;
+		}
+		if (rc != -ENXIO)
+			break;
+	}
+	mutex_unlock(&acpi_desc->init_mutex);
+	return rc;
+}
+static DEVICE_ATTR_RO(format1);
+
+static ssize_t formats_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+
+	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
+}
+static DEVICE_ATTR_RO(formats);
+
 static ssize_t serial_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "%#x\n", dcr->serial_number);
+	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
 }
 static DEVICE_ATTR_RO(serial);
 
+static ssize_t family_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+	if (nfit_mem->family < 0)
+		return -ENXIO;
+	return sprintf(buf, "%d\n", nfit_mem->family);
+}
+static DEVICE_ATTR_RO(family);
+
+static ssize_t dsm_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm *nvdimm = to_nvdimm(dev);
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+	if (nfit_mem->family < 0)
+		return -ENXIO;
+	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
+}
+static DEVICE_ATTR_RO(dsm_mask);
+
 static ssize_t flags_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -873,15 +1029,41 @@
 }
 static DEVICE_ATTR_RO(flags);
 
+static ssize_t id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
+		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
+				be16_to_cpu(dcr->vendor_id),
+				dcr->manufacturing_location,
+				be16_to_cpu(dcr->manufacturing_date),
+				be32_to_cpu(dcr->serial_number));
+	else
+		return sprintf(buf, "%04x-%08x\n",
+				be16_to_cpu(dcr->vendor_id),
+				be32_to_cpu(dcr->serial_number));
+}
+static DEVICE_ATTR_RO(id);
+
 static struct attribute *acpi_nfit_dimm_attributes[] = {
 	&dev_attr_handle.attr,
 	&dev_attr_phys_id.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_device.attr,
-	&dev_attr_format.attr,
-	&dev_attr_serial.attr,
 	&dev_attr_rev_id.attr,
+	&dev_attr_subsystem_vendor.attr,
+	&dev_attr_subsystem_device.attr,
+	&dev_attr_subsystem_rev_id.attr,
+	&dev_attr_format.attr,
+	&dev_attr_formats.attr,
+	&dev_attr_format1.attr,
+	&dev_attr_serial.attr,
 	&dev_attr_flags.attr,
+	&dev_attr_id.attr,
+	&dev_attr_family.attr,
+	&dev_attr_dsm_mask.attr,
 	NULL,
 };
 
@@ -889,11 +1071,13 @@
 		struct attribute *a, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
+	struct nvdimm *nvdimm = to_nvdimm(dev);
 
-	if (to_nfit_dcr(dev))
-		return a->mode;
-	else
+	if (!to_nfit_dcr(dev))
 		return 0;
+	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
+		return 0;
+	return a->mode;
 }
 
 static struct attribute_group acpi_nfit_dimm_attribute_group = {
@@ -926,10 +1110,13 @@
 {
 	struct acpi_device *adev, *adev_dimm;
 	struct device *dev = acpi_desc->dev;
-	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
+	unsigned long dsm_mask;
+	const u8 *uuid;
 	int i;
 
-	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
+	/* nfit test assumes 1:1 relationship between commands and dsms */
+	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
+	nfit_mem->family = NVDIMM_FAMILY_INTEL;
 	adev = to_acpi_dev(acpi_desc);
 	if (!adev)
 		return 0;
@@ -942,7 +1129,35 @@
 		return force_enable_dimms ? 0 : -ENODEV;
 	}
 
-	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
+	/*
+	 * Until standardization materializes we need to consider up to 3
+	 * different command sets.  Note that checking for function0 (bit0)
+	 * tells us if any commands are reachable through this uuid.
+	 */
+	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
+		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+			break;
+
+	/* limit the supported commands to those that are publicly documented */
+	nfit_mem->family = i;
+	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
+		dsm_mask = 0x3fe;
+		if (disable_vendor_specific)
+			dsm_mask &= ~(1 << ND_CMD_VENDOR);
+	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1)
+		dsm_mask = 0x1c3c76;
+	else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
+		dsm_mask = 0x1fe;
+		if (disable_vendor_specific)
+			dsm_mask &= ~(1 << 8);
+	} else {
+		dev_err(dev, "unknown dimm command family\n");
+		nfit_mem->family = -1;
+		return force_enable_dimms ? 0 : -ENODEV;
+	}
+
+	uuid = to_nfit_uuid(nfit_mem->family);
+	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
 		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
 			set_bit(i, &nfit_mem->dsm_mask);
 
@@ -955,8 +1170,8 @@
 	int dimm_count = 0;
 
 	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+		unsigned long flags = 0, cmd_mask;
 		struct nvdimm *nvdimm;
-		unsigned long flags = 0;
 		u32 device_handle;
 		u16 mem_flags;
 		int rc;
@@ -979,9 +1194,18 @@
 		if (rc)
 			continue;
 
+		/*
+		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
+		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
+		 * userspace interface.
+		 */
+		cmd_mask = 1UL << ND_CMD_CALL;
+		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+			cmd_mask |= nfit_mem->dsm_mask;
+
 		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
 				acpi_nfit_dimm_attribute_groups,
-				flags, &nfit_mem->dsm_mask);
+				flags, cmd_mask);
 		if (!nvdimm)
 			return -ENOMEM;
 
@@ -1010,14 +1234,14 @@
 	struct acpi_device *adev;
 	int i;
 
-	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
+	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
 	adev = to_acpi_dev(acpi_desc);
 	if (!adev)
 		return;
 
 	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
 		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
-			set_bit(i, &nd_desc->dsm_mask);
+			set_bit(i, &nd_desc->cmd_mask);
 }
 
 static ssize_t range_index_show(struct device *dev,
@@ -2309,7 +2533,7 @@
 	acpi_size sz;
 	int rc;
 
-	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
+	status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
 	if (ACPI_FAILURE(status)) {
 		/* This is ok, we could have an nvdimm hotplugged later */
 		dev_dbg(dev, "failed to find NFIT at startup\n");
@@ -2466,6 +2690,8 @@
 	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
 	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
 	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
+	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
+	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
 
 	nfit_wq = create_singlethread_workqueue("nfit");
 	if (!nfit_wq)
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index c75576b..11cb383 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -21,13 +21,25 @@
 #include <linux/acpi.h>
 #include <acpi/acuuid.h>
 
+/* ACPI 6.1 */
 #define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
+
+/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */
 #define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+
+/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
+#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
+#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e"
+
 #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
 		| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
 		| ACPI_NFIT_MEM_NOT_ARMED)
 
 enum nfit_uuids {
+	/* for simplicity alias the uuid index with the family id */
+	NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL,
+	NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
+	NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
 	NFIT_SPA_VOLATILE,
 	NFIT_SPA_PM,
 	NFIT_SPA_DCR,
@@ -37,15 +49,16 @@
 	NFIT_SPA_PDISK,
 	NFIT_SPA_PCD,
 	NFIT_DEV_BUS,
-	NFIT_DEV_DIMM,
 	NFIT_UUID_MAX,
 };
 
-enum nfit_fic {
-	NFIT_FIC_BYTE = 0x101, /* byte-addressable energy backed */
-	NFIT_FIC_BLK = 0x201, /* block-addressable non-energy backed */
-	NFIT_FIC_BYTEN = 0x301, /* byte-addressable non-energy backed */
-};
+/*
+ * Region format interface codes are stored as an array of bytes in the
+ * NFIT DIMM Control Region structure
+ */
+#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
 
 enum {
 	NFIT_BLK_READ_FLUSH = 1,
@@ -109,7 +122,9 @@
 	struct nfit_flush *nfit_flush;
 	struct list_head list;
 	struct acpi_device *adev;
+	struct acpi_nfit_desc *acpi_desc;
 	unsigned long dsm_mask;
+	int family;
 };
 
 struct acpi_nfit_desc {
@@ -132,8 +147,8 @@
 	size_t ars_status_size;
 	struct work_struct work;
 	unsigned int cancel:1;
-	unsigned long dimm_dsm_force_en;
-	unsigned long bus_dsm_force_en;
+	unsigned long dimm_cmd_force_en;
+	unsigned long bus_cmd_force_en;
 	int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
 			void *iobuf, u64 len, int rw);
 };
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d74..c72e648 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -676,6 +676,15 @@
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
+	/*
+	 * We don't care about error returns - we just try to mark
+	 * these reserved so that nobody else is confused into thinking
+	 * that this region might be unused..
+	 *
+	 * (In particular, allocating the IO range for Cardbus)
+	 */
+	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
 	pr->throttling.state = 0;
 
 	duty_mask = pr->throttling.state_count - 1;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index ac832bf..22c0995 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -625,7 +625,7 @@
  * some old BIOSes do expect a buffer or an integer etc.
  */
 union acpi_object *
-acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, int rev, int func,
+acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
 		  union acpi_object *argv4)
 {
 	acpi_status ret;
@@ -674,7 +674,7 @@
  * functions. Currently only support 64 functions at maximum, should be
  * enough for now.
  */
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
+bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 {
 	int i;
 	u64 mask = 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cfa936a..e2dc4c0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -313,14 +313,23 @@
 
 config SATA_DWC
 	tristate "DesignWare Cores SATA support"
-	depends on 460EX
-	select DW_DMAC
+	depends on DMADEVICES
+	select GENERIC_PHY
 	help
 	  This option enables support for the on-chip SATA controller of the
 	  AppliedMicro processor 460EX.
 
 	  If unsure, say N.
 
+config SATA_DWC_OLD_DMA
+	bool "Support old device trees"
+	depends on SATA_DWC
+	select DW_DMAC_CORE
+	default y if 460EX
+	help
+	  This option enables support for old device trees without the
+	  "dmas" property.
+
 config SATA_DWC_DEBUG
 	bool "Debugging driver version"
 	depends on SATA_DWC
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6f33ace..6be7770 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -66,6 +66,7 @@
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
 #include <asm/byteorder.h>
+#include <asm/unaligned.h>
 #include <linux/cdrom.h>
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
@@ -695,7 +696,7 @@
  *	RETURNS:
  *	Block address read from @tf.
  */
-u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 {
 	u64 block = 0;
 
@@ -720,7 +721,7 @@
 		if (!sect) {
 			ata_dev_warn(dev,
 				     "device reported invalid CHS sector 0\n");
-			sect = 1; /* oh well */
+			return U64_MAX;
 		}
 
 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
@@ -2079,6 +2080,81 @@
 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
 }
 
+static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+	int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
+	u16 log_pages;
+
+	err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get Log Directory Emask 0x%x\n",
+			    err_mask);
+		return;
+	}
+	log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
+	if (!log_pages) {
+		ata_dev_warn(dev,
+			     "NCQ Send/Recv Log not supported\n");
+		return;
+	}
+	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
+			    err_mask);
+	} else {
+		u8 *cmds = dev->ncq_send_recv_cmds;
+
+		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
+		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+			ata_dev_dbg(dev, "disabling queued TRIM support\n");
+			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+		}
+	}
+}
+
+static void ata_dev_config_ncq_non_data(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+	int log_index = ATA_LOG_NCQ_NON_DATA * 2;
+	u16 log_pages;
+
+	err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get Log Directory Emask 0x%x\n",
+			    err_mask);
+		return;
+	}
+	log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
+	if (!log_pages) {
+		ata_dev_warn(dev,
+			     "NCQ Non-Data Log not supported\n");
+		return;
+	}
+	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
+			    err_mask);
+	} else {
+		u8 *cmds = dev->ncq_non_data_cmds;
+
+		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
+	}
+}
+
 static int ata_dev_config_ncq(struct ata_device *dev,
 			       char *desc, size_t desc_sz)
 {
@@ -2123,31 +2199,127 @@
 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
 			ddepth, aa_desc);
 
-	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
-	    ata_id_has_ncq_send_and_recv(dev->id)) {
-		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
-					     0, ap->sector_buf, 1);
-		if (err_mask) {
-			ata_dev_dbg(dev,
-				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
-				    err_mask);
-		} else {
-			u8 *cmds = dev->ncq_send_recv_cmds;
-
-			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
-			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
-
-			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
-				ata_dev_dbg(dev, "disabling queued TRIM support\n");
-				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
-					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
-			}
-		}
+	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
+		if (ata_id_has_ncq_send_and_recv(dev->id))
+			ata_dev_config_ncq_send_recv(dev);
+		if (ata_id_has_ncq_non_data(dev->id))
+			ata_dev_config_ncq_non_data(dev);
 	}
 
 	return 0;
 }
 
+static void ata_dev_config_sense_reporting(struct ata_device *dev)
+{
+	unsigned int err_mask;
+
+	if (!ata_id_has_sense_reporting(dev->id))
+		return;
+
+	if (ata_id_sense_reporting_enabled(dev->id))
+		return;
+
+	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
+			    err_mask);
+	}
+}
+
+static void ata_dev_config_zac(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+	u8 *identify_buf = ap->sector_buf;
+	int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
+	u16 log_pages;
+
+	dev->zac_zones_optimal_open = U32_MAX;
+	dev->zac_zones_optimal_nonseq = U32_MAX;
+	dev->zac_zones_max_open = U32_MAX;
+
+	/*
+	 * Always set the 'ZAC' flag for Host-managed devices.
+	 */
+	if (dev->class == ATA_DEV_ZAC)
+		dev->flags |= ATA_DFLAG_ZAC;
+	else if (ata_id_zoned_cap(dev->id) == 0x01)
+		/*
+		 * Check for host-aware devices.
+		 */
+		dev->flags |= ATA_DFLAG_ZAC;
+
+	if (!(dev->flags & ATA_DFLAG_ZAC))
+		return;
+
+	/*
+	 * Read Log Directory to figure out if IDENTIFY DEVICE log
+	 * is supported.
+	 */
+	err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_info(dev,
+			     "failed to get Log Directory Emask 0x%x\n",
+			     err_mask);
+		return;
+	}
+	log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
+	if (log_pages == 0) {
+		ata_dev_warn(dev,
+			     "ATA Identify Device Log not supported\n");
+		return;
+	}
+	/*
+	 * Read IDENTIFY DEVICE data log, page 0, to figure out
+	 * if page 9 is supported.
+	 */
+	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
+				     identify_buf, 1);
+	if (err_mask) {
+		ata_dev_info(dev,
+			     "failed to get Device Identify Log Emask 0x%x\n",
+			     err_mask);
+		return;
+	}
+	log_pages = identify_buf[8];
+	for (i = 0; i < log_pages; i++) {
+		if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
+			found++;
+			break;
+		}
+	}
+	if (!found) {
+		ata_dev_warn(dev,
+			     "ATA Zoned Information Log not supported\n");
+		return;
+	}
+
+	/*
+	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
+	 */
+	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
+				     ATA_LOG_ZONED_INFORMATION,
+				     identify_buf, 1);
+	if (!err_mask) {
+		u64 zoned_cap, opt_open, opt_nonseq, max_open;
+
+		zoned_cap = get_unaligned_le64(&identify_buf[8]);
+		if ((zoned_cap >> 63))
+			dev->zac_zoned_cap = (zoned_cap & 1);
+		opt_open = get_unaligned_le64(&identify_buf[24]);
+		if ((opt_open >> 63))
+			dev->zac_zones_optimal_open = (u32)opt_open;
+		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
+		if ((opt_nonseq >> 63))
+			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
+		max_open = get_unaligned_le64(&identify_buf[40]);
+		if ((max_open >> 63))
+			dev->zac_zones_max_open = (u32)max_open;
+	}
+}
+
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure
@@ -2370,7 +2542,8 @@
 					dev->devslp_timing[i] = sata_setting[j];
 				}
 		}
-
+		ata_dev_config_sense_reporting(dev);
+		ata_dev_config_zac(dev);
 		dev->cdb_len = 16;
 	}
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 961acc7..61dc7a9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1600,6 +1600,8 @@
 	tf->hob_lbah = buf[10];
 	tf->nsect = buf[12];
 	tf->hob_nsect = buf[13];
+	if (ata_id_has_ncq_autosense(dev->id))
+		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
 	return 0;
 }
@@ -1636,6 +1638,56 @@
 }
 
 /**
+ *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
+ *	@qc: qc to perform REQUEST_SENSE_DATA_EXT to
+ *	@cmd: scsi command for which the sense code should be set
+ *
+ *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
+ *	SENSE.  This function is an EH helper.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_request_sense(struct ata_queued_cmd *qc,
+				 struct scsi_cmnd *cmd)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
+		ata_dev_warn(dev, "sense data available but port frozen\n");
+		return;
+	}
+
+	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
+		return;
+
+	if (!ata_id_sense_reporting_enabled(dev->id)) {
+		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
+		return;
+	}
+
+	DPRINTK("ATA request sense\n");
+
+	ata_tf_init(dev, &tf);
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+	tf.command = ATA_CMD_REQ_SENSE_DATA;
+	tf.protocol = ATA_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	/* Ignore err_mask; ATA_ERR might be set */
+	if (tf.command & ATA_SENSE) {
+		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+	} else {
+		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
+			     tf.command, err_mask);
+	}
+}
+
+/**
  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *	@dev: device to perform REQUEST_SENSE to
  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1797,6 +1849,18 @@
 	memcpy(&qc->result_tf, &tf, sizeof(tf));
 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+	if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+		char sense_key, asc, ascq;
+
+		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+		ascq = qc->result_tf.auxiliary & 0xff;
+		ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
+		ata_scsi_set_sense_information(dev, qc->scsicmd,
+					       &qc->result_tf);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+	}
+
 	ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1826,14 +1890,23 @@
 		return ATA_EH_RESET;
 	}
 
-	if (stat & (ATA_ERR | ATA_DF))
+	if (stat & (ATA_ERR | ATA_DF)) {
 		qc->err_mask |= AC_ERR_DEV;
-	else
+		/*
+		 * Sense data reporting does not work if the
+		 * device fault bit is set.
+		 */
+		if (stat & ATA_DF)
+			stat &= ~ATA_SENSE;
+	} else {
 		return 0;
+	}
 
 	switch (qc->dev->class) {
 	case ATA_DEV_ATA:
 	case ATA_DEV_ZAC:
+		if (stat & ATA_SENSE)
+			ata_eh_request_sense(qc, qc->scsicmd);
 		if (err & ATA_ICRC)
 			qc->err_mask |= AC_ERR_ATA_BUS;
 		if (err & (ATA_UNC | ATA_AMNF))
@@ -1847,20 +1920,31 @@
 			tmp = atapi_eh_request_sense(qc->dev,
 						qc->scsicmd->sense_buffer,
 						qc->result_tf.feature >> 4);
-			if (!tmp) {
-				/* ATA_QCFLAG_SENSE_VALID is used to
-				 * tell atapi_qc_complete() that sense
-				 * data is already valid.
-				 *
-				 * TODO: interpret sense data and set
-				 * appropriate err_mask.
-				 */
+			if (!tmp)
 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
-			} else
+			else
 				qc->err_mask |= tmp;
 		}
 	}
 
+	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+		int ret = scsi_check_sense(qc->scsicmd);
+		/*
+		 * SUCCESS here means that the sense code could be
+		 * evaluated and should be passed to the upper layers
+		 * for correct evaluation.
+		 * FAILED means the sense code could not be interpreted
+		 * and the device would need to be reset.
+		 * NEEDS_RETRY and ADD_TO_MLQUEUE mean that the
+		 * command would need to be retried.
+		 */
+		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
+			qc->flags |= ATA_QCFLAG_RETRY;
+			qc->err_mask |= AC_ERR_OTHER;
+		} else if (ret != SUCCESS) {
+			qc->err_mask |= AC_ERR_HSM;
+		}
+	}
 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
 		action |= ATA_EH_RESET;
 
@@ -2398,6 +2482,8 @@
 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
+		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
+		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
@@ -2569,14 +2655,15 @@
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-				    ATA_ERR)) {
+				    ATA_SENSE | ATA_ERR)) {
 			if (res->command & ATA_BUSY)
 				ata_dev_err(qc->dev, "status: { Busy }\n");
 			else
-				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
+				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
 				  res->command & ATA_DRDY ? "DRDY " : "",
 				  res->command & ATA_DF ? "DF " : "",
 				  res->command & ATA_DRQ ? "DRQ " : "",
+				  res->command & ATA_SENSE ? "SENSE " : "",
 				  res->command & ATA_ERR ? "ERR " : "");
 		}
 
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7bcc870..bfec66f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,11 +270,52 @@
 	    ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
+			u8 sk, u8 asc, u8 ascq)
 {
+	bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+	if (!cmd)
+		return;
+
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
+	scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
+}
+
+void ata_scsi_set_sense_information(struct ata_device *dev,
+				    struct scsi_cmnd *cmd,
+				    const struct ata_taskfile *tf)
+{
+	u64 information;
+
+	if (!cmd)
+		return;
+
+	information = ata_tf_read_block(tf, dev);
+	if (information == U64_MAX)
+		return;
+
+	scsi_set_sense_information(cmd->sense_buffer,
+				   SCSI_SENSE_BUFFERSIZE, information);
+}
+
+static void ata_scsi_set_invalid_field(struct ata_device *dev,
+				       struct scsi_cmnd *cmd, u16 field, u8 bit)
+{
+	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in cdb" */
+	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				     field, bit, 1);
+}
+
+static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
+					   struct scsi_cmnd *cmd, u16 field)
+{
+	/* "Invalid field in parameter list" */
+	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
+	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				     field, 0xff, 0);
 }
 
 static ssize_t
@@ -364,10 +405,10 @@
 };
 EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
 
-static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
+static void ata_scsi_invalid_field(struct ata_device *dev,
+				   struct scsi_cmnd *cmd, u16 field)
 {
-	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
-	/* "Invalid field in cbd" */
+	ata_scsi_set_invalid_field(dev, cmd, field, 0xff);
 	cmd->scsi_done(cmd);
 }
 
@@ -980,6 +1021,7 @@
 	unsigned char *sb = cmd->sense_buffer;
 	unsigned char *desc = sb + 8;
 	int verbose = qc->ap->ops->error_handler == NULL;
+	u8 sense_key, asc, ascq;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
@@ -992,47 +1034,71 @@
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
-				   &sb[1], &sb[2], &sb[3], verbose);
-		sb[1] &= 0x0f;
+				   &sense_key, &asc, &ascq, verbose);
+		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
 	} else {
-		sb[1] = RECOVERED_ERROR;
-		sb[2] = 0;
-		sb[3] = 0x1D;
+		/*
+		 * ATA PASS-THROUGH INFORMATION AVAILABLE
+		 * Always in descriptor format sense.
+		 */
+		scsi_build_sense_buffer(1, cmd->sense_buffer,
+					RECOVERED_ERROR, 0, 0x1D);
 	}
 
-	/*
-	 * Sense data is current and format is descriptor.
-	 */
-	sb[0] = 0x72;
+	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
+		u8 len;
 
-	desc[0] = 0x09;
+		/* descriptor format */
+		len = sb[7];
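+		/* Reuse an existing ATA Status Return descriptor (type 9) if present */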
+		desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+		if (!desc) {
+			if (SCSI_SENSE_BUFFERSIZE < len + 14)
+				return;
+			sb[7] = len + 14;
+			desc = sb + 8 + len;
+		}
+		desc[0] = 9;
+		desc[1] = 12;
+		/*
+		 * Copy registers into sense buffer.
+		 */
+		desc[2] = 0x00;
+		desc[3] = tf->feature;	/* == error reg */
+		desc[5] = tf->nsect;
+		desc[7] = tf->lbal;
+		desc[9] = tf->lbam;
+		desc[11] = tf->lbah;
+		desc[12] = tf->device;
+		desc[13] = tf->command; /* == status reg */
 
-	/* set length of additional sense data */
-	sb[7] = 14;
-	desc[1] = 12;
-
-	/*
-	 * Copy registers into sense buffer.
-	 */
-	desc[2] = 0x00;
-	desc[3] = tf->feature;	/* == error reg */
-	desc[5] = tf->nsect;
-	desc[7] = tf->lbal;
-	desc[9] = tf->lbam;
-	desc[11] = tf->lbah;
-	desc[12] = tf->device;
-	desc[13] = tf->command; /* == status reg */
-
-	/*
-	 * Fill in Extend bit, and the high order bytes
-	 * if applicable.
-	 */
-	if (tf->flags & ATA_TFLAG_LBA48) {
-		desc[2] |= 0x01;
-		desc[4] = tf->hob_nsect;
-		desc[6] = tf->hob_lbal;
-		desc[8] = tf->hob_lbam;
-		desc[10] = tf->hob_lbah;
+		/*
+		 * Fill in Extend bit, and the high order bytes
+		 * if applicable.
+		 */
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			desc[2] |= 0x01;
+			desc[4] = tf->hob_nsect;
+			desc[6] = tf->hob_lbal;
+			desc[8] = tf->hob_lbam;
+			desc[10] = tf->hob_lbah;
+		}
+	} else {
+		/* Fixed sense format */
+		desc[0] = tf->feature;
+		desc[1] = tf->command; /* status */
+		desc[2] = tf->device;
+		desc[3] = tf->nsect;
+		desc[0] = 0;
+		if (tf->flags & ATA_TFLAG_LBA48)  {
+			desc[8] |= 0x80;
+			if (tf->hob_nsect)
+				desc[8] |= 0x40;
+			if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+				desc[8] |= 0x20;
+		}
+		desc[9] = tf->lbal;
+		desc[10] = tf->lbam;
+		desc[11] = tf->lbah;
 	}
 }
 
@@ -1052,41 +1118,41 @@
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
-	unsigned char *desc = sb + 8;
 	int verbose = qc->ap->ops->error_handler == NULL;
 	u64 block;
+	u8 sense_key, asc, ascq;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/* sense data is current and format is descriptor */
-	sb[0] = 0x72;
-
+	if (ata_dev_disabled(dev)) {
+		/* Device disabled after error recovery */
+		/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
+		ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
+		return;
+	}
 	/* Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
-				   &sb[1], &sb[2], &sb[3], verbose);
-		sb[1] &= 0x0f;
+				   &sense_key, &asc, &ascq, verbose);
+		ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
+	} else {
+		/* Could not decode error */
+		ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
+			     tf->command, qc->err_mask);
+		ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+		return;
 	}
 
 	block = ata_tf_read_block(&qc->result_tf, dev);
+	if (block == U64_MAX)
+		return;
 
-	/* information sense data descriptor */
-	sb[7] = 12;
-	desc[0] = 0x00;
-	desc[1] = 10;
-
-	desc[2] |= 0x80;	/* valid */
-	desc[6] = block >> 40;
-	desc[7] = block >> 32;
-	desc[8] = block >> 24;
-	desc[9] = block >> 16;
-	desc[10] = block >> 8;
-	desc[11] = block;
+	scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
 }
 
 static void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1343,19 +1409,29 @@
 	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->tf;
 	const u8 *cdb = scmd->cmnd;
+	u16 fp;
+	u8 bp = 0xff;
 
-	if (scmd->cmd_len < 5)
+	if (scmd->cmd_len < 5) {
+		fp = 4;
 		goto invalid_fld;
+	}
 
 	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 	tf->protocol = ATA_PROT_NODATA;
 	if (cdb[1] & 0x1) {
 		;	/* ignore IMMED bit, violates sat-r05 */
 	}
-	if (cdb[4] & 0x2)
+	if (cdb[4] & 0x2) {
+		fp = 4;
+		bp = 1;
 		goto invalid_fld;       /* LOEJ bit set not supported */
-	if (((cdb[4] >> 4) & 0xf) != 0)
+	}
+	if (((cdb[4] >> 4) & 0xf) != 0) {
+		fp = 4;
+		bp = 3;
 		goto invalid_fld;       /* power conditions not supported */
+	}
 
 	if (cdb[4] & 0x1) {
 		tf->nsect = 1;	/* 1 sector, lba=0 */
@@ -1401,8 +1477,7 @@
 	return 0;
 
  invalid_fld:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
-	/* "Invalid field in cbd" */
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
 	return 1;
  skip:
 	scmd->result = SAM_STAT_GOOD;
@@ -1553,20 +1628,27 @@
 	const u8 *cdb = scmd->cmnd;
 	u64 block;
 	u32 n_block;
+	u16 fp;
 
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->protocol = ATA_PROT_NODATA;
 
 	if (cdb[0] == VERIFY) {
-		if (scmd->cmd_len < 10)
+		if (scmd->cmd_len < 10) {
+			fp = 9;
 			goto invalid_fld;
+		}
 		scsi_10_lba_len(cdb, &block, &n_block);
 	} else if (cdb[0] == VERIFY_16) {
-		if (scmd->cmd_len < 16)
+		if (scmd->cmd_len < 16) {
+			fp = 15;
 			goto invalid_fld;
+		}
 		scsi_16_lba_len(cdb, &block, &n_block);
-	} else
+	} else {
+		fp = 0;
 		goto invalid_fld;
+	}
 
 	if (!n_block)
 		goto nothing_to_do;
@@ -1640,12 +1722,11 @@
 	return 0;
 
 invalid_fld:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
-	/* "Invalid field in cbd" */
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
 	return 1;
 
 out_of_range:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
 	/* "Logical Block Address out of range" */
 	return 1;
 
@@ -1680,6 +1761,7 @@
 	u64 block;
 	u32 n_block;
 	int rc;
+	u16 fp = 0;
 
 	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
 		tf_flags |= ATA_TFLAG_WRITE;
@@ -1688,16 +1770,20 @@
 	switch (cdb[0]) {
 	case READ_10:
 	case WRITE_10:
-		if (unlikely(scmd->cmd_len < 10))
+		if (unlikely(scmd->cmd_len < 10)) {
+			fp = 9;
 			goto invalid_fld;
+		}
 		scsi_10_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
 		break;
 	case READ_6:
 	case WRITE_6:
-		if (unlikely(scmd->cmd_len < 6))
+		if (unlikely(scmd->cmd_len < 6)) {
+			fp = 5;
 			goto invalid_fld;
+		}
 		scsi_6_lba_len(cdb, &block, &n_block);
 
 		/* for 6-byte r/w commands, transfer length 0
@@ -1708,14 +1794,17 @@
 		break;
 	case READ_16:
 	case WRITE_16:
-		if (unlikely(scmd->cmd_len < 16))
+		if (unlikely(scmd->cmd_len < 16)) {
+			fp = 15;
 			goto invalid_fld;
+		}
 		scsi_16_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
 		break;
 	default:
 		DPRINTK("no-byte command\n");
+		fp = 0;
 		goto invalid_fld;
 	}
 
@@ -1742,12 +1831,11 @@
 		goto out_of_range;
 	/* treat all other errors as -EINVAL, fall through */
 invalid_fld:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
-	/* "Invalid field in cbd" */
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
 	return 1;
 
 out_of_range:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
 	/* "Logical Block Address out of range" */
 	return 1;
 
@@ -1784,6 +1872,8 @@
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
 	    ((cdb[2] & 0x20) || need_sense))
 		ata_gen_passthru_sense(qc);
+	else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+		cmd->result = SAM_STAT_CHECK_CONDITION;
 	else if (need_sense)
 		ata_gen_ata_sense(qc);
 	else
@@ -1992,14 +2082,14 @@
 		0x00,
 		0xA0,	/* SAM-5 (no version claimed) */
 
-		0x04,
-		0xC0,	/* SBC-3 (no version claimed) */
+		0x06,
+		0x00,	/* SBC-4 (no version claimed) */
 
-		0x04,
-		0x60,	/* SPC-4 (no version claimed) */
+		0x05,
+		0xC0,	/* SPC-5 (no version claimed) */
 
 		0x60,
-		0x20,   /* ZBC (no version claimed) */
+		0x24,   /* ZBC r05 */
 	};
 
 	u8 hdr[] = {
@@ -2019,10 +2109,8 @@
 	    (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
 		hdr[1] |= (1 << 7);
 
-	if (args->dev->class == ATA_DEV_ZAC) {
+	if (args->dev->class == ATA_DEV_ZAC)
 		hdr[0] = TYPE_ZBC;
-		hdr[2] = 0x6; /* ZBC is defined in SPC-4 */
-	}
 
 	memcpy(rbuf, hdr, sizeof(hdr));
 	memcpy(&rbuf[8], "ATA     ", 8);
@@ -2036,7 +2124,7 @@
 	if (rbuf[32] == 0 || rbuf[32] == ' ')
 		memcpy(&rbuf[32], "n/a ", 4);
 
-	if (args->dev->class == ATA_DEV_ZAC)
+	if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
 		memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
 	else
 		memcpy(rbuf + 58, versions, sizeof(versions));
@@ -2056,6 +2144,7 @@
  */
 static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
 {
+	int num_pages;
 	const u8 pages[] = {
 		0x00,	/* page 0x00, this page */
 		0x80,	/* page 0x80, unit serial no page */
@@ -2064,10 +2153,14 @@
 		0xb0,	/* page 0xb0, block limits page */
 		0xb1,	/* page 0xb1, block device characteristics page */
 		0xb2,	/* page 0xb2, thin provisioning page */
+		0xb6,	/* page 0xb6, zoned block device characteristics */
 	};
 
-	rbuf[3] = sizeof(pages);	/* number of supported VPD pages */
-	memcpy(rbuf + 4, pages, sizeof(pages));
+	num_pages = sizeof(pages);
+	if (!(args->dev->flags & ATA_DFLAG_ZAC))
+		num_pages--;
+	rbuf[3] = num_pages;	/* number of supported VPD pages */
+	memcpy(rbuf + 4, pages, num_pages);
 	return 0;
 }
 
@@ -2232,12 +2325,15 @@
 {
 	int form_factor = ata_id_form_factor(args->id);
 	int media_rotation_rate = ata_id_rotation_rate(args->id);
+	u8 zoned = ata_id_zoned_cap(args->id);
 
 	rbuf[1] = 0xb1;
 	rbuf[3] = 0x3c;
 	rbuf[4] = media_rotation_rate >> 8;
 	rbuf[5] = media_rotation_rate;
 	rbuf[7] = form_factor;
+	if (zoned)
+		rbuf[8] = (zoned << 4);
 
 	return 0;
 }
@@ -2252,6 +2348,26 @@
 	return 0;
 }
 
+static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+{
+	/*
+	 * zbc-r05 SCSI Zoned Block device characteristics VPD page
+	 */
+	rbuf[1] = 0xb6;
+	rbuf[3] = 0x3C;
+
+	/*
+	 * URSWRZ bit is only meaningful for host-managed ZAC drives
+	 */
+	if (args->dev->zac_zoned_cap & 1)
+		rbuf[4] |= 1;
+	put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
+	put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
+	put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+
+	return 0;
+}
+
 /**
  *	ata_scsiop_noop - Command handler that simply returns success.
  *	@args: device IDENTIFY data / SCSI command of interest.
@@ -2317,6 +2433,7 @@
 
 /**
  *	ata_msense_ctl_mode - Simulate MODE SENSE control mode page
+ *	@dev: ATA device of interest
  *	@buf: output buffer
  *	@changeable: whether changeable parameters are requested
  *
@@ -2325,9 +2442,12 @@
  *	LOCKING:
  *	None.
  */
-static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable)
+static unsigned int ata_msense_ctl_mode(struct ata_device *dev, u8 *buf,
+					bool changeable)
 {
 	modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
+	if (changeable && (dev->flags & ATA_DFLAG_D_SENSE))
+		buf[2] |= (1 << 2);	/* Descriptor sense requested */
 	return sizeof(def_control_mpage);
 }
 
@@ -2395,7 +2515,8 @@
 	};
 	u8 pg, spg;
 	unsigned int ebd, page_control, six_byte;
-	u8 dpofua;
+	u8 dpofua, bp = 0xff;
+	u16 fp;
 
 	VPRINTK("ENTER\n");
 
@@ -2414,6 +2535,8 @@
 	case 3: /* saved */
 		goto saving_not_supp;
 	default:
+		fp = 2;
+		bp = 6;
 		goto invalid_fld;
 	}
 
@@ -2428,8 +2551,10 @@
 	 * No mode subpages supported (yet) but asking for _all_
 	 * subpages may be valid
 	 */
-	if (spg && (spg != ALL_SUB_MPAGES))
+	if (spg && (spg != ALL_SUB_MPAGES)) {
+		fp = 3;
 		goto invalid_fld;
+	}
 
 	switch(pg) {
 	case RW_RECOVERY_MPAGE:
@@ -2441,16 +2566,17 @@
 		break;
 
 	case CONTROL_MPAGE:
-		p += ata_msense_ctl_mode(p, page_control == 1);
+		p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
 		break;
 
 	case ALL_MPAGES:
 		p += ata_msense_rw_recovery(p, page_control == 1);
 		p += ata_msense_caching(args->id, p, page_control == 1);
-		p += ata_msense_ctl_mode(p, page_control == 1);
+		p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
 		break;
 
 	default:		/* invalid page code */
+		fp = 2;
 		goto invalid_fld;
 	}
 
@@ -2480,12 +2606,11 @@
 	return 0;
 
 invalid_fld:
-	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
-	/* "Invalid field in cbd" */
+	ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
 	return 1;
 
 saving_not_supp:
-	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+	ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
 	 /* "Saving parameters not supported" */
 	return 1;
 }
@@ -2561,6 +2686,9 @@
 				rbuf[14] |= 0x40; /* LBPRZ */
 			}
 		}
+		if (ata_id_zoned_cap(args->id) ||
+		    args->dev->class == ATA_DEV_ZAC)
+			rbuf[12] = (1 << 4); /* RC_BASIS */
 	}
 	return 0;
 }
@@ -2942,9 +3070,12 @@
 	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_device *dev = qc->dev;
 	const u8 *cdb = scmd->cmnd;
+	u16 fp;
 
-	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
+	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) {
+		fp = 1;
 		goto invalid_fld;
+	}
 
 	/* enable LBA */
 	tf->flags |= ATA_TFLAG_LBA;
@@ -3008,8 +3139,10 @@
 	case ATA_CMD_READ_LONG_ONCE:
 	case ATA_CMD_WRITE_LONG:
 	case ATA_CMD_WRITE_LONG_ONCE:
-		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) {
+			fp = 1;
 			goto invalid_fld;
+		}
 		qc->sect_size = scsi_bufflen(scmd);
 		break;
 
@@ -3072,12 +3205,16 @@
 	ata_qc_set_pc_nbytes(qc);
 
 	/* We may not issue DMA commands if no DMA mode is set */
-	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
+	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) {
+		fp = 1;
 		goto invalid_fld;
+	}
 
 	/* sanity check for pio multi commands */
-	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
+	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
+		fp = 1;
 		goto invalid_fld;
+	}
 
 	if (is_multi_taskfile(tf)) {
 		unsigned int multi_count = 1 << (cdb[1] >> 5);
@@ -3098,8 +3235,10 @@
 	 * ->set_dmamode(), and ->post_set_mode() hooks).
 	 */
 	if (tf->command == ATA_CMD_SET_FEATURES &&
-	    tf->feature == SETFEATURES_XFER)
+	    tf->feature == SETFEATURES_XFER) {
+		fp = (cdb[0] == ATA_16) ? 4 : 3;
 		goto invalid_fld;
+	}
 
 	/*
 	 * Filter TPM commands by default. These provide an
@@ -3116,14 +3255,15 @@
 	 * so that we comply with the TC consortium stated goal that the user
 	 * can turn off TC features of their system.
 	 */
-	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
+	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) {
+		fp = (cdb[0] == ATA_16) ? 14 : 9;
 		goto invalid_fld;
+	}
 
 	return 0;
 
  invalid_fld:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
-	/* "Invalid field in cdb" */
+	ata_scsi_set_invalid_field(dev, scmd, fp, 0xff);
 	return 1;
 }
 
@@ -3137,25 +3277,32 @@
 	u32 n_block;
 	u32 size;
 	void *buf;
+	u16 fp;
+	u8 bp = 0xff;
 
 	/* we may not issue DMA commands if no DMA mode is set */
 	if (unlikely(!dev->dma_mode))
-		goto invalid_fld;
+		goto invalid_opcode;
 
-	if (unlikely(scmd->cmd_len < 16))
+	if (unlikely(scmd->cmd_len < 16)) {
+		fp = 15;
 		goto invalid_fld;
+	}
 	scsi_16_lba_len(cdb, &block, &n_block);
 
 	/* for now we only support WRITE SAME with the unmap bit set */
-	if (unlikely(!(cdb[1] & 0x8)))
+	if (unlikely(!(cdb[1] & 0x8))) {
+		fp = 1;
+		bp = 3;
 		goto invalid_fld;
+	}
 
 	/*
 	 * WRITE SAME always has a sector sized buffer as payload, this
 	 * should never be a multiple entry S/G list.
 	 */
 	if (!scsi_sg_count(scmd))
-		goto invalid_fld;
+		goto invalid_param_len;
 
 	buf = page_address(sg_page(scsi_sglist(scmd)));
 	size = ata_set_lba_range_entries(buf, 512, block, n_block);
@@ -3186,9 +3333,242 @@
 
 	return 0;
 
+invalid_fld:
+	ata_scsi_set_invalid_field(dev, scmd, fp, bp);
+	return 1;
+invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	return 1;
+invalid_opcode:
+	/* "Invalid command operation code" */
+	ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0);
+	return 1;
+}
+
+/**
+ *	ata_scsi_report_zones_complete - convert ATA output
+ *	@qc: command structure returning the data
+ *
+ *	Convert T-13 little-endian field representation into
+ *	T-10 big-endian field representation.
+ *	What a mess.
+ */
+static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int bytes = 0;
+
+	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+
+	local_irq_save(flags);
+	while (sg_miter_next(&miter)) {
+		unsigned int offset = 0;
+
+		if (bytes == 0) {
+			char *hdr;
+			u32 list_length;
+			u64 max_lba, opt_lba;
+			u16 same;
+
+			/* Swizzle header */
+			hdr = miter.addr;
+			list_length = get_unaligned_le32(&hdr[0]);
+			same = get_unaligned_le16(&hdr[4]);
+			max_lba = get_unaligned_le64(&hdr[8]);
+			opt_lba = get_unaligned_le64(&hdr[16]);
+			put_unaligned_be32(list_length, &hdr[0]);
+			hdr[4] = same & 0xf;
+			put_unaligned_be64(max_lba, &hdr[8]);
+			put_unaligned_be64(opt_lba, &hdr[16]);
+			offset += 64;
+			bytes += 64;
+		}
+		while (offset < miter.length) {
+			char *rec;
+			u8 cond, type, non_seq, reset;
+			u64 size, start, wp;
+
+			/* Swizzle zone descriptor */
+			rec = miter.addr + offset;
+			type = rec[0] & 0xf;
+			cond = (rec[1] >> 4) & 0xf;
+			non_seq = (rec[1] & 2);
+			reset = (rec[1] & 1);
+			size = get_unaligned_le64(&rec[8]);
+			start = get_unaligned_le64(&rec[16]);
+			wp = get_unaligned_le64(&rec[24]);
+			rec[0] = type;
+			rec[1] = (cond << 4) | non_seq | reset;
+			put_unaligned_be64(size, &rec[8]);
+			put_unaligned_be64(start, &rec[16]);
+			put_unaligned_be64(wp, &rec[24]);
+			WARN_ON(offset + 64 > miter.length);
+			offset += 64;
+			bytes += 64;
+		}
+	}
+	sg_miter_stop(&miter);
+	local_irq_restore(flags);
+
+	ata_scsi_qc_complete(qc);
+}
+
+static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	u16 sect, fp = (u16)-1;
+	u8 sa, options, bp = 0xff;
+	u64 block;
+	u32 n_block;
+
+	if (unlikely(scmd->cmd_len < 16)) {
+		ata_dev_warn(qc->dev, "invalid cdb length %d\n",
+			     scmd->cmd_len);
+		fp = 15;
+		goto invalid_fld;
+	}
+	scsi_16_lba_len(cdb, &block, &n_block);
+	if (n_block != scsi_bufflen(scmd)) {
+		ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n",
+			     n_block, scsi_bufflen(scmd));
+		goto invalid_param_len;
+	}
+	sa = cdb[1] & 0x1f;
+	if (sa != ZI_REPORT_ZONES) {
+		ata_dev_warn(qc->dev, "invalid service action %d\n", sa);
+		fp = 1;
+		goto invalid_fld;
+	}
+	/*
+	 * ZAC only allows transfers in 512 byte blocks,
+	 * and uses a 16 bit value for the transfer count.
+	 */
+	if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) {
+		ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block);
+		goto invalid_param_len;
+	}
+	sect = n_block / 512;
+	options = cdb[14];
+
+	if (ata_ncq_enabled(qc->dev) &&
+	    ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
+		tf->protocol = ATA_PROT_NCQ;
+		tf->command = ATA_CMD_FPDMA_RECV;
+		tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
+		tf->nsect = qc->tag << 3;
+		tf->feature = sect & 0xff;
+		tf->hob_feature = (sect >> 8) & 0xff;
+		tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+	} else {
+		tf->command = ATA_CMD_ZAC_MGMT_IN;
+		tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+		tf->protocol = ATA_PROT_DMA;
+		tf->hob_feature = options;
+		tf->hob_nsect = (sect >> 8) & 0xff;
+		tf->nsect = sect & 0xff;
+	}
+	tf->device = ATA_LBA;
+	tf->lbah = (block >> 16) & 0xff;
+	tf->lbam = (block >> 8) & 0xff;
+	tf->lbal = block & 0xff;
+	tf->hob_lbah = (block >> 40) & 0xff;
+	tf->hob_lbam = (block >> 32) & 0xff;
+	tf->hob_lbal = (block >> 24) & 0xff;
+
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
+
+	ata_qc_set_pc_nbytes(qc);
+
+	qc->complete_fn = ata_scsi_report_zones_complete;
+
+	return 0;
+
+invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
+	return 1;
+
+invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	return 1;
+}
+
+static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct ata_device *dev = qc->dev;
+	const u8 *cdb = scmd->cmnd;
+	u8 reset_all, sa;
+	u64 block;
+	u32 n_block;
+	u16 fp = (u16)-1;
+
+	if (unlikely(scmd->cmd_len < 16)) {
+		fp = 15;
+		goto invalid_fld;
+	}
+
+	sa = cdb[1] & 0x1f;
+	if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) &&
+	    (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
+	scsi_16_lba_len(cdb, &block, &n_block);
+	if (n_block) {
+		/*
+		 * ZAC MANAGEMENT OUT doesn't define any length
+		 */
+		goto invalid_param_len;
+	}
+	if (block > dev->n_sectors)
+		goto out_of_range;
+
+	reset_all = cdb[14] & 0x1;
+
+	if (ata_ncq_enabled(qc->dev) &&
+	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
+		tf->protocol = ATA_PROT_NCQ;
+		tf->command = ATA_CMD_NCQ_NON_DATA;
+		tf->hob_nsect = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
+		tf->nsect = qc->tag << 3;
+		tf->auxiliary = sa | (reset_all & 0x1) << 8;
+	} else {
+		tf->protocol = ATA_PROT_NODATA;
+		tf->command = ATA_CMD_ZAC_MGMT_OUT;
+		tf->feature = sa;
+		tf->hob_feature = reset_all & 0x1;
+	}
+	tf->lbah = (block >> 16) & 0xff;
+	tf->lbam = (block >> 8) & 0xff;
+	tf->lbal = block & 0xff;
+	tf->hob_lbah = (block >> 40) & 0xff;
+	tf->hob_lbam = (block >> 32) & 0xff;
+	tf->hob_lbal = (block >> 24) & 0xff;
+	tf->device = ATA_LBA;
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+
+	return 0;
+
  invalid_fld:
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
-	/* "Invalid field in cdb" */
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
+	return 1;
+ out_of_range:
+	/* "Logical Block Address out of range" */
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
+	return 1;
+invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
 	return 1;
 }
 
@@ -3197,6 +3577,7 @@
  *	@qc: Storage for translated ATA taskfile
  *	@buf: input buffer
  *	@len: number of valid bytes in the input buffer
+ *	@fp: out parameter for the failed field on error
  *
  *	Prepare a taskfile to modify caching information for the device.
  *
@@ -3204,20 +3585,26 @@
  *	None.
  */
 static int ata_mselect_caching(struct ata_queued_cmd *qc,
-			       const u8 *buf, int len)
+			       const u8 *buf, int len, u16 *fp)
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_device *dev = qc->dev;
 	char mpage[CACHE_MPAGE_LEN];
 	u8 wce;
+	int i;
 
 	/*
 	 * The first two bytes of def_cache_mpage are a header, so offsets
 	 * in mpage are off by 2 compared to buf.  Same for len.
 	 */
 
-	if (len != CACHE_MPAGE_LEN - 2)
+	if (len != CACHE_MPAGE_LEN - 2) {
+		if (len < CACHE_MPAGE_LEN - 2)
+			*fp = len;
+		else
+			*fp = CACHE_MPAGE_LEN - 2;
 		return -EINVAL;
+	}
 
 	wce = buf[0] & (1 << 2);
 
@@ -3225,10 +3612,14 @@
 	 * Check that read-only bits are not modified.
 	 */
 	ata_msense_caching(dev->id, mpage, false);
-	mpage[2] &= ~(1 << 2);
-	mpage[2] |= wce;
-	if (memcmp(mpage + 2, buf, CACHE_MPAGE_LEN - 2) != 0)
-		return -EINVAL;
+	for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) {
+		if (i == 0)
+			continue;
+		if (mpage[i + 2] != buf[i]) {
+			*fp = i;
+			return -EINVAL;
+		}
+	}
 
 	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 	tf->protocol = ATA_PROT_NODATA;
@@ -3239,6 +3630,62 @@
 }
 
 /**
+ *	ata_mselect_control - Simulate MODE SELECT for control page
+ *	@qc: Storage for translated ATA taskfile
+ *	@buf: input buffer
+ *	@len: number of valid bytes in the input buffer
+ *	@fp: out parameter for the failed field on error
+ *
+ *	Apply the SCSI control mode page settings to the device.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static int ata_mselect_control(struct ata_queued_cmd *qc,
+			       const u8 *buf, int len, u16 *fp)
+{
+	struct ata_device *dev = qc->dev;
+	char mpage[CONTROL_MPAGE_LEN];
+	u8 d_sense;
+	int i;
+
+	/*
+	 * The first two bytes of def_control_mpage are a header, so offsets
+	 * in mpage are off by 2 compared to buf.  Same for len.
+	 */
+
+	if (len != CONTROL_MPAGE_LEN - 2) {
+		if (len < CONTROL_MPAGE_LEN - 2)
+			*fp = len;
+		else
+			*fp = CONTROL_MPAGE_LEN - 2;
+		return -EINVAL;
+	}
+
+	d_sense = buf[0] & (1 << 2);
+
+	/*
+	 * Check that read-only bits are not modified.
+	 */
+	ata_msense_ctl_mode(dev, mpage, false);
+	for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
+		if (i == 0)
+			continue;
+		if (mpage[2 + i] != buf[i]) {
+			*fp = i;
+			return -EINVAL;
+		}
+	}
+	if (d_sense & (1 << 2))
+		dev->flags |= ATA_DFLAG_D_SENSE;
+	else
+		dev->flags &= ~ATA_DFLAG_D_SENSE;
+	qc->scsicmd->result = SAM_STAT_GOOD;
+	qc->scsicmd->scsi_done(qc->scsicmd);
+	return 0;
+}
+
+/**
  *	ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands
  *	@qc: Storage for translated ATA taskfile
  *
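
    ata_mselect_control() above is handed the page body with its two header
    bytes already stripped, so the D_SENSE flag it tests (buf[0] & (1 << 2))
    is byte 2, bit 2 of the full SPC control mode page. A minimal stand-alone
    sketch of that buffer layout (illustrative only, not kernel code; the
    helper name is made up):

	#include <stdbool.h>
	#include <stdint.h>

	/* Toggle D_SENSE (control mode page byte 2, bit 2) in a page body
	 * whose two header bytes (page code, page length) have been removed,
	 * which is the layout ata_mselect_control() is given. */
	static void control_mpage_body_set_d_sense(uint8_t *body, bool enable)
	{
		if (enable)
			body[0] |= (1 << 2);	/* descriptor-format sense */
		else
			body[0] &= ~(1 << 2);	/* fixed-format sense */
	}
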
@@ -3257,27 +3704,36 @@
 	u8 pg, spg;
 	unsigned six_byte, pg_len, hdr_len, bd_len;
 	int len;
+	u16 fp = (u16)-1;
+	u8 bp = 0xff;
 
 	VPRINTK("ENTER\n");
 
 	six_byte = (cdb[0] == MODE_SELECT);
 	if (six_byte) {
-		if (scmd->cmd_len < 5)
+		if (scmd->cmd_len < 5) {
+			fp = 4;
 			goto invalid_fld;
+		}
 
 		len = cdb[4];
 		hdr_len = 4;
 	} else {
-		if (scmd->cmd_len < 9)
+		if (scmd->cmd_len < 9) {
+			fp = 8;
 			goto invalid_fld;
+		}
 
 		len = (cdb[7] << 8) + cdb[8];
 		hdr_len = 8;
 	}
 
 	/* We only support PF=1, SP=0.  */
-	if ((cdb[1] & 0x11) != 0x10)
+	if ((cdb[1] & 0x11) != 0x10) {
+		fp = 1;
+		bp = (cdb[1] & 0x01) ? 1 : 5;
 		goto invalid_fld;
+	}
 
 	/* Test early for possible overrun.  */
 	if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
@@ -3298,8 +3754,11 @@
 	p += hdr_len;
 	if (len < bd_len)
 		goto invalid_param_len;
-	if (bd_len != 0 && bd_len != 8)
+	if (bd_len != 0 && bd_len != 8) {
+		fp = (six_byte) ? 3 : 6;
+		fp += bd_len + hdr_len;
 		goto invalid_param;
+	}
 
 	len -= bd_len;
 	p += bd_len;
@@ -3330,18 +3789,29 @@
 	 * No mode subpages supported (yet) but asking for _all_
 	 * subpages may be valid
 	 */
-	if (spg && (spg != ALL_SUB_MPAGES))
+	if (spg && (spg != ALL_SUB_MPAGES)) {
+		fp = (p[0] & 0x40) ? 1 : 0;
+		fp += hdr_len + bd_len;
 		goto invalid_param;
+	}
 	if (pg_len > len)
 		goto invalid_param_len;
 
 	switch (pg) {
 	case CACHE_MPAGE:
-		if (ata_mselect_caching(qc, p, pg_len) < 0)
+		if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
+			fp += hdr_len + bd_len;
 			goto invalid_param;
+		}
 		break;
-
+	case CONTROL_MPAGE:
+		if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
+			fp += hdr_len + bd_len;
+			goto invalid_param;
+		}
+		break;
 	default:		/* invalid page code */
+		fp = bd_len + hdr_len;
 		goto invalid_param;
 	}
 
@@ -3355,18 +3825,16 @@
 	return 0;
 
  invalid_fld:
-	/* "Invalid field in CDB" */
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
 	return 1;
 
  invalid_param:
-	/* "Invalid field in parameter list" */
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x26, 0x0);
+	ata_scsi_set_invalid_parameter(qc->dev, scmd, fp);
 	return 1;
 
  invalid_param_len:
 	/* "Parameter list length error" */
-	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
 	return 1;
 
  skip:
@@ -3419,6 +3887,12 @@
 		return ata_scsi_mode_select_xlat;
 		break;
 
+	case ZBC_IN:
+		return ata_scsi_zbc_in_xlat;
+
+	case ZBC_OUT:
+		return ata_scsi_zbc_out_xlat;
+
 	case START_STOP:
 		return ata_scsi_start_stop_xlat;
 	}
@@ -3567,12 +4041,12 @@
 	switch(scsicmd[0]) {
 	/* TODO: worth improving? */
 	case FORMAT_UNIT:
-		ata_scsi_invalid_field(cmd);
+		ata_scsi_invalid_field(dev, cmd, 0);
 		break;
 
 	case INQUIRY:
-		if (scsicmd[1] & 2)	           /* is CmdDt set?  */
-			ata_scsi_invalid_field(cmd);
+		if (scsicmd[1] & 2)		   /* is CmdDt set?  */
+			ata_scsi_invalid_field(dev, cmd, 1);
 		else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
 		else switch (scsicmd[2]) {
@@ -3597,8 +4071,14 @@
 		case 0xb2:
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
 			break;
+		case 0xb6:
+			if (dev->flags & ATA_DFLAG_ZAC) {
+				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
+				break;
+			}
+			/* Fallthrough */
 		default:
-			ata_scsi_invalid_field(cmd);
+			ata_scsi_invalid_field(dev, cmd, 2);
 			break;
 		}
 		break;
@@ -3616,7 +4096,7 @@
 		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
 			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
 		else
-			ata_scsi_invalid_field(cmd);
+			ata_scsi_invalid_field(dev, cmd, 1);
 		break;
 
 	case REPORT_LUNS:
@@ -3624,7 +4104,7 @@
 		break;
 
 	case REQUEST_SENSE:
-		ata_scsi_set_sense(cmd, 0, 0, 0);
+		ata_scsi_set_sense(dev, cmd, 0, 0, 0);
 		cmd->result = (DRIVER_SENSE << 24);
 		cmd->scsi_done(cmd);
 		break;
@@ -3648,12 +4128,12 @@
 		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
 			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
 		else
-			ata_scsi_invalid_field(cmd);
+			ata_scsi_invalid_field(dev, cmd, 1);
 		break;
 
 	/* all other commands */
 	default:
-		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+		ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
 		/* "Invalid command operation code" */
 		cmd->scsi_done(cmd);
 		break;
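
    Throughout the libata-scsi.c hunks above, fp and bp are handed to
    ata_scsi_set_invalid_field() to name the offending CDB byte and bit; the
    helper's body lies outside this excerpt. As a rough, non-authoritative
    sketch of how SPC sense-key-specific data typically encodes such a
    pointer for ILLEGAL REQUEST (function and parameter names here are
    illustrative, not the kernel's):

	#include <stdint.h>

	/* Sketch only: fill a 3-byte SPC sense-key-specific descriptor.
	 * Byte 0: bit 7 SKSV, bit 6 C/D (set when the error is in the CDB),
	 * bit 3 BPV plus bits 2..0 bit pointer; bytes 1..2: field pointer. */
	static void sks_set_field_pointer(uint8_t sks[3], uint16_t fp, uint8_t bp,
					  int in_cdb)
	{
		sks[0] = 0x80;				/* SKSV */
		if (in_cdb)
			sks[0] |= 0x40;			/* C/D */
		if (bp != 0xff)
			sks[0] |= 0x08 | (bp & 0x07);	/* BPV + bit pointer */
		sks[1] = fp >> 8;			/* field pointer, big endian */
		sks[2] = fp & 0xff;
	}

    The bp == 0xff convention mirrors the hunks above, where 0xff means there
    is no specific bit to point at.
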
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
index fd30b8c..f8c550df 100644
--- a/drivers/ata/libata-trace.c
+++ b/drivers/ata/libata-trace.c
@@ -149,3 +149,75 @@
 
 	return ret;
 }
+
+const char *
+libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
+			  unsigned char feature, unsigned char hob_nsect)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	switch (cmd) {
+	case ATA_CMD_FPDMA_RECV:
+		switch (hob_nsect & 0x5f) {
+		case ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT:
+			trace_seq_printf(p, " READ_LOG_DMA_EXT");
+			break;
+		case ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN:
+			trace_seq_printf(p, " ZAC_MGMT_IN");
+			break;
+		}
+		break;
+	case ATA_CMD_FPDMA_SEND:
+		switch (hob_nsect & 0x5f) {
+		case ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT:
+			trace_seq_printf(p, " WRITE_LOG_DMA_EXT");
+			break;
+		case ATA_SUBCMD_FPDMA_SEND_DSM:
+			trace_seq_printf(p, " DATASET_MANAGEMENT");
+			break;
+		}
+		break;
+	case ATA_CMD_NCQ_NON_DATA:
+		switch (feature) {
+		case ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE:
+			trace_seq_printf(p, " ABORT_QUEUE");
+			break;
+		case ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES:
+			trace_seq_printf(p, " SET_FEATURES");
+			break;
+		case ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT:
+			trace_seq_printf(p, " ZERO_EXT");
+			break;
+		case ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT:
+			trace_seq_printf(p, " ZAC_MGMT_OUT");
+			break;
+		}
+		break;
+	case ATA_CMD_ZAC_MGMT_IN:
+		switch (feature) {
+		case ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES:
+			trace_seq_printf(p, " REPORT_ZONES");
+			break;
+		}
+		break;
+	case ATA_CMD_ZAC_MGMT_OUT:
+		switch (feature) {
+		case ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE:
+			trace_seq_printf(p, " CLOSE_ZONE");
+			break;
+		case ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE:
+			trace_seq_printf(p, " FINISH_ZONE");
+			break;
+		case ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE:
+			trace_seq_printf(p, " OPEN_ZONE");
+			break;
+		case ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER:
+			trace_seq_printf(p, " RESET_WRITE_POINTER");
+			break;
+		}
+		break;
+	}
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index f840ca1..3b301a4 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,7 +67,8 @@
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
 			   unsigned int tag);
-extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
+extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
+			     struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
 				  int dma_dir, void *buf, unsigned int buflen,
@@ -137,6 +138,11 @@
 			      struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern void ata_scsi_set_sense(struct ata_device *dev,
+			       struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
+extern void ata_scsi_set_sense_information(struct ata_device *dev,
+					   struct scsi_cmnd *cmd,
+					   const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 2cb6f7e..00c2af1 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -30,10 +30,12 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/dmaengine.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/phy/phy.h>
 #include <linux/libata.h>
 #include <linux/slab.h>
 
@@ -42,10 +44,6 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 
-/* Supported DMA engine drivers */
-#include <linux/platform_data/dma-dw.h>
-#include <linux/dma/dw.h>
-
 /* These two are defined in "libata.h" */
 #undef	DRV_NAME
 #undef	DRV_VERSION
@@ -53,19 +51,14 @@
 #define DRV_NAME        "sata-dwc"
 #define DRV_VERSION     "1.3"
 
-#ifndef out_le32
-#define out_le32(a, v)	__raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
-#endif
-
-#ifndef in_le32
-#define in_le32(a)	__le32_to_cpu(__raw_readl((void __iomem *)(a)))
-#endif
+#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
+#define sata_dwc_readl(a)	readl_relaxed(a)
 
 #ifndef NO_IRQ
 #define NO_IRQ		0
 #endif
 
-#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length*/
+#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */
 
 enum {
 	SATA_DWC_MAX_PORTS = 1,
@@ -102,7 +95,7 @@
 	u32 versionr;		/* Version Register */
 	u32 idr;		/* ID Register */
 	u32 unimpl[192];	/* Unimplemented */
-	u32 dmadr[256];	/* FIFO Locations in DMA Mode */
+	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
 };
 
 enum {
@@ -146,9 +139,14 @@
 	struct device		*dev;		/* generic device struct */
 	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
 	struct ata_host		*host;
-	u8 __iomem		*reg_base;
-	struct sata_dwc_regs	*sata_dwc_regs;	/* DW Synopsys SATA specific */
+	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
+	u32			sactive_issued;
+	u32			sactive_queued;
+	struct phy		*phy;
+	phys_addr_t		dmadr;
+#ifdef CONFIG_SATA_DWC_OLD_DMA
 	struct dw_dma_chip	*dma;
+#endif
 };
 
 #define SATA_DWC_QCMD_MAX	32
@@ -159,25 +157,19 @@
 	int			dma_pending[SATA_DWC_QCMD_MAX];
 
 	/* DMA info */
-	struct dw_dma_slave		*dws;
 	struct dma_chan			*chan;
 	struct dma_async_tx_descriptor	*desc[SATA_DWC_QCMD_MAX];
 	u32				dma_interrupt_count;
 };
 
 /*
- * Commonly used DWC SATA driver Macros
+ * Commonly used DWC SATA driver macros
  */
-#define HSDEV_FROM_HOST(host)  ((struct sata_dwc_device *)\
-					(host)->private_data)
-#define HSDEV_FROM_AP(ap)  ((struct sata_dwc_device *)\
-					(ap)->host->private_data)
-#define HSDEVP_FROM_AP(ap)   ((struct sata_dwc_device_port *)\
-					(ap)->private_data)
-#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)\
-					(qc)->ap->host->private_data)
-#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)\
-						(hsdevp)->hsdev)
+#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
+#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
+#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
 
 enum {
 	SATA_DWC_CMD_ISSUED_NOT		= 0,
@@ -190,21 +182,6 @@
 	SATA_DWC_DMA_PENDING_RX		= 2,
 };
 
-struct sata_dwc_host_priv {
-	void	__iomem	 *scr_addr_sstatus;
-	u32	sata_dwc_sactive_issued ;
-	u32	sata_dwc_sactive_queued ;
-};
-
-static struct sata_dwc_host_priv host_pvt;
-
-static struct dw_dma_slave sata_dwc_dma_dws = {
-	.src_id = 0,
-	.dst_id = 0,
-	.m_master = 1,
-	.p_master = 0,
-};
-
 /*
  * Prototypes
  */
@@ -215,6 +192,93 @@
 static void sata_dwc_port_stop(struct ata_port *ap);
 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
 
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
+
+static struct dw_dma_slave sata_dwc_dma_dws = {
+	.src_id = 0,
+	.dst_id = 0,
+	.m_master = 1,
+	.p_master = 0,
+};
+
+static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+
+	if (dws->dma_dev != chan->device->dev)
+		return false;
+
+	chan->private = dws;
+	return true;
+}
+
+static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
+{
+	struct sata_dwc_device *hsdev = hsdevp->hsdev;
+	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+	dma_cap_mask_t mask;
+
+	dws->dma_dev = hsdev->dev;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Acquire DMA channel */
+	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
+	if (!hsdevp->chan) {
+		dev_err(hsdev->dev, "%s: dma channel unavailable\n",
+			 __func__);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int sata_dwc_dma_init_old(struct platform_device *pdev,
+				 struct sata_dwc_device *hsdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+
+	hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
+	if (!hsdev->dma)
+		return -ENOMEM;
+
+	hsdev->dma->dev = &pdev->dev;
+
+	/* Get SATA DMA interrupt number */
+	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
+	if (hsdev->dma->irq == NO_IRQ) {
+		dev_err(&pdev->dev, "no SATA DMA irq\n");
+		return -ENODEV;
+	}
+
+	/* Get physical SATA DMA register base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hsdev->dma->regs)) {
+		dev_err(&pdev->dev,
+			"ioremap failed for AHBDMA register address\n");
+		return PTR_ERR(hsdev->dma->regs);
+	}
+
+	/* Initialize AHB DMAC */
+	return dw_dma_probe(hsdev->dma);
+}
+
+static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
+{
+	if (!hsdev->dma)
+		return;
+
+	dw_dma_remove(hsdev->dma);
+}
+
+#endif
+
 static const char *get_prot_descript(u8 protocol)
 {
 	switch ((enum ata_tf_protocols)protocol) {
@@ -305,21 +369,20 @@
 	struct ata_port *ap = qc->ap;
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
-	dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr;
 	struct dma_slave_config sconf;
 	struct dma_async_tx_descriptor *desc;
 
 	if (qc->dma_dir == DMA_DEV_TO_MEM) {
-		sconf.src_addr = addr;
-		sconf.device_fc = true;
+		sconf.src_addr = hsdev->dmadr;
+		sconf.device_fc = false;
 	} else {	/* DMA_MEM_TO_DEV */
-		sconf.dst_addr = addr;
+		sconf.dst_addr = hsdev->dmadr;
 		sconf.device_fc = false;
 	}
 
 	sconf.direction = qc->dma_dir;
-	sconf.src_maxburst = AHB_DMA_BRST_DFLT;
-	sconf.dst_maxburst = AHB_DMA_BRST_DFLT;
+	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
+	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
 	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
@@ -336,8 +399,8 @@
 	desc->callback = dma_dwc_xfer_done;
 	desc->callback_param = hsdev;
 
-	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n",
-		__func__, qc->sg, qc->n_elem, &addr);
+	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
+		qc->sg, qc->n_elem, &hsdev->dmadr);
 
 	return desc;
 }
@@ -350,48 +413,38 @@
 		return -EINVAL;
 	}
 
-	*val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
-	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
-		__func__, link->ap->print_id, scr, *val);
+	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
+	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
+		link->ap->print_id, scr, *val);
 
 	return 0;
 }
 
 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
 {
-	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
-		__func__, link->ap->print_id, scr, val);
+	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
+		link->ap->print_id, scr, val);
 	if (scr > SCR_NOTIFICATION) {
 		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
 			 __func__, scr);
 		return -EINVAL;
 	}
-	out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
+	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
 
 	return 0;
 }
 
-static u32 core_scr_read(unsigned int scr)
-{
-	return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
-}
-
-static void core_scr_write(unsigned int scr, u32 val)
-{
-	out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
-}
-
-static void clear_serror(void)
+static void clear_serror(struct ata_port *ap)
 {
 	u32 val;
-	val = core_scr_read(SCR_ERROR);
-	core_scr_write(SCR_ERROR, val);
+	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
+	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
 }
 
 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
 {
-	out_le32(&hsdev->sata_dwc_regs->intpr,
-		 in_le32(&hsdev->sata_dwc_regs->intpr));
+	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
+			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
 }
 
 static u32 qcmd_tag_to_mask(u8 tag)
@@ -412,7 +465,7 @@
 
 	ata_ehi_clear_desc(ehi);
 
-	serror = core_scr_read(SCR_ERROR);
+	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
 	status = ap->ops->sff_check_status(ap);
 
 	tag = ap->link.active_tag;
@@ -423,7 +476,7 @@
 		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
 
 	/* Clear error register and interrupt bit */
-	clear_serror();
+	clear_serror(ap);
 	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
 
 	/* This is the only error happening now.  TODO check for exact error */
@@ -462,12 +515,12 @@
 	int handled, num_processed, port = 0;
 	uint intpr, sactive, sactive2, tag_mask;
 	struct sata_dwc_device_port *hsdevp;
-	host_pvt.sata_dwc_sactive_issued = 0;
+	hsdev->sactive_issued = 0;
 
 	spin_lock_irqsave(&host->lock, flags);
 
 	/* Read the interrupt register */
-	intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
 
 	ap = host->ports[port];
 	hsdevp = HSDEVP_FROM_AP(ap);
@@ -486,12 +539,12 @@
 	if (intpr & SATA_DWC_INTPR_NEWFP) {
 		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
 
-		tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
 		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
 		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
 			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
 
-		host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
+		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);
 
 		qc = ata_qc_from_tag(ap, tag);
 		/*
@@ -505,11 +558,11 @@
 		handled = 1;
 		goto DONE;
 	}
-	sactive = core_scr_read(SCR_ACTIVE);
-	tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
+	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
 
 	/* If no sactive issued and tag_mask is zero then this is not NCQ */
-	if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
+	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
 		if (ap->link.active_tag == ATA_TAG_POISON)
 			tag = 0;
 		else
@@ -579,22 +632,19 @@
 	 */
 
 	 /* process completed commands */
-	sactive = core_scr_read(SCR_ACTIVE);
-	tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
+	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
 
-	if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
-							tag_mask > 1) {
+	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
 		dev_dbg(ap->dev,
 			"%s NCQ:sactive=0x%08x  sactive_issued=0x%08x tag_mask=0x%08x\n",
-			__func__, sactive, host_pvt.sata_dwc_sactive_issued,
-			tag_mask);
+			__func__, sactive, hsdev->sactive_issued, tag_mask);
 	}
 
-	if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
-					(host_pvt.sata_dwc_sactive_issued)) {
+	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
 		dev_warn(ap->dev,
-			 "Bad tag mask?  sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x  tag_mask=0x%08x\n",
-			 sactive, host_pvt.sata_dwc_sactive_issued, tag_mask);
+			 "Bad tag mask?  sactive=0x%08x sactive_issued=0x%08x  tag_mask=0x%08x\n",
+			 sactive, hsdev->sactive_issued, tag_mask);
 	}
 
 	/* read just to clear ... not bad if currently still busy */
@@ -656,7 +706,7 @@
 	 * we were processing --we read status as part of processing a completed
 	 * command).
 	 */
-	sactive2 = core_scr_read(SCR_ACTIVE);
+	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
 	if (sactive2 != sactive) {
 		dev_dbg(ap->dev,
 			"More completed - sactive=0x%x sactive2=0x%x\n",
@@ -672,15 +722,14 @@
 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
 {
 	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
 
 	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
-		out_le32(&(hsdev->sata_dwc_regs->dmacr),
-			 SATA_DWC_DMACR_RX_CLEAR(
-				 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
 	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
-		out_le32(&(hsdev->sata_dwc_regs->dmacr),
-			 SATA_DWC_DMACR_TX_CLEAR(
-				 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
 	} else {
 		/*
 		 * This should not happen, it indicates the driver is out of
@@ -688,10 +737,9 @@
 		 */
 		dev_err(hsdev->dev,
 			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
-			__func__, tag, hsdevp->dma_pending[tag],
-			in_le32(&hsdev->sata_dwc_regs->dmacr));
-		out_le32(&(hsdev->sata_dwc_regs->dmacr),
-			SATA_DWC_DMACR_TXRXCH_CLEAR);
+			__func__, tag, hsdevp->dma_pending[tag], dmacr);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+				SATA_DWC_DMACR_TXRXCH_CLEAR);
 	}
 }
 
@@ -716,7 +764,7 @@
 			 __func__, qc->tag, qc->tf.command,
 			 get_dma_dir_descript(qc->dma_dir),
 			 get_prot_descript(qc->tf.protocol),
-			 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+			 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
 	}
 #endif
 
@@ -725,7 +773,7 @@
 			dev_err(ap->dev,
 				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
 				__func__,
-				in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
 		}
 
 		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
@@ -742,8 +790,9 @@
 	u8 status = 0;
 	u32 mask = 0x0;
 	u8 tag = qc->tag;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
-	host_pvt.sata_dwc_sactive_queued = 0;
+	hsdev->sactive_queued = 0;
 	dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
 
 	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
@@ -756,10 +805,8 @@
 
 	/* clear active bit */
 	mask = (~(qcmd_tag_to_mask(tag)));
-	host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
-						& mask;
-	host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
-						& mask;
+	hsdev->sactive_queued = hsdev->sactive_queued & mask;
+	hsdev->sactive_issued = hsdev->sactive_issued & mask;
 	ata_qc_complete(qc);
 	return 0;
 }
@@ -767,54 +814,62 @@
 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
 {
 	/* Enable selective interrupts by setting the interrupt maskregister*/
-	out_le32(&hsdev->sata_dwc_regs->intmr,
-		 SATA_DWC_INTMR_ERRM |
-		 SATA_DWC_INTMR_NEWFPM |
-		 SATA_DWC_INTMR_PMABRTM |
-		 SATA_DWC_INTMR_DMATM);
+	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
+			SATA_DWC_INTMR_ERRM |
+			SATA_DWC_INTMR_NEWFPM |
+			SATA_DWC_INTMR_PMABRTM |
+			SATA_DWC_INTMR_DMATM);
 	/*
 	 * Unmask the error bits that should trigger an error interrupt by
 	 * setting the error mask register.
 	 */
-	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
+	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
 
 	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
-		 __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
-		in_le32(&hsdev->sata_dwc_regs->errmr));
+		 __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
+		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
 }
 
-static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
+static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
 {
-	struct sata_dwc_device_port *hsdevp = param;
-	struct dw_dma_slave *dws = hsdevp->dws;
+	port->cmd_addr		= base + 0x00;
+	port->data_addr		= base + 0x00;
 
-	if (dws->dma_dev != chan->device->dev)
-		return false;
+	port->error_addr	= base + 0x04;
+	port->feature_addr	= base + 0x04;
 
-	chan->private = dws;
-	return true;
+	port->nsect_addr	= base + 0x08;
+
+	port->lbal_addr		= base + 0x0c;
+	port->lbam_addr		= base + 0x10;
+	port->lbah_addr		= base + 0x14;
+
+	port->device_addr	= base + 0x18;
+	port->command_addr	= base + 0x1c;
+	port->status_addr	= base + 0x1c;
+
+	port->altstatus_addr	= base + 0x20;
+	port->ctl_addr		= base + 0x20;
 }
 
-static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
+static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
 {
-	port->cmd_addr = (void __iomem *)base + 0x00;
-	port->data_addr = (void __iomem *)base + 0x00;
+	struct sata_dwc_device *hsdev = hsdevp->hsdev;
+	struct device *dev = hsdev->dev;
 
-	port->error_addr = (void __iomem *)base + 0x04;
-	port->feature_addr = (void __iomem *)base + 0x04;
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	if (!of_find_property(dev->of_node, "dmas", NULL))
+		return sata_dwc_dma_get_channel_old(hsdevp);
+#endif
 
-	port->nsect_addr = (void __iomem *)base + 0x08;
+	hsdevp->chan = dma_request_chan(dev, "sata-dma");
+	if (IS_ERR(hsdevp->chan)) {
+		dev_err(dev, "failed to allocate dma channel: %ld\n",
+			PTR_ERR(hsdevp->chan));
+		return PTR_ERR(hsdevp->chan);
+	}
 
-	port->lbal_addr = (void __iomem *)base + 0x0c;
-	port->lbam_addr = (void __iomem *)base + 0x10;
-	port->lbah_addr = (void __iomem *)base + 0x14;
-
-	port->device_addr = (void __iomem *)base + 0x18;
-	port->command_addr = (void __iomem *)base + 0x1c;
-	port->status_addr = (void __iomem *)base + 0x1c;
-
-	port->altstatus_addr = (void __iomem *)base + 0x20;
-	port->ctl_addr = (void __iomem *)base + 0x20;
+	return 0;
 }
 
 /*
@@ -829,7 +884,6 @@
 	struct sata_dwc_device *hsdev;
 	struct sata_dwc_device_port *hsdevp = NULL;
 	struct device *pdev;
-	dma_cap_mask_t mask;
 	int i;
 
 	hsdev = HSDEV_FROM_AP(ap);
@@ -853,20 +907,13 @@
 	}
 	hsdevp->hsdev = hsdev;
 
-	hsdevp->dws = &sata_dwc_dma_dws;
-	hsdevp->dws->dma_dev = hsdev->dev;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	/* Acquire DMA channel */
-	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
-	if (!hsdevp->chan) {
-		dev_err(hsdev->dev, "%s: dma channel unavailable\n",
-			 __func__);
-		err = -EAGAIN;
+	err = sata_dwc_dma_get_channel(hsdevp);
+	if (err)
 		goto CLEANUP_ALLOC;
-	}
+
+	err = phy_power_on(hsdev->phy);
+	if (err)
+		goto CLEANUP_ALLOC;
 
 	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
 		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
@@ -877,18 +924,18 @@
 	if (ap->port_no == 0)  {
 		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
 			__func__);
-		out_le32(&hsdev->sata_dwc_regs->dmacr,
-			 SATA_DWC_DMACR_TXRXCH_CLEAR);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+				SATA_DWC_DMACR_TXRXCH_CLEAR);
 
 		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
 			 __func__);
-		out_le32(&hsdev->sata_dwc_regs->dbtsr,
-			 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
-			  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
+				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
 	}
 
 	/* Clear any error bits before libata starts issuing commands */
-	clear_serror();
+	clear_serror(ap);
 	ap->private_data = hsdevp;
 	dev_dbg(ap->dev, "%s: done\n", __func__);
 	return 0;
@@ -903,11 +950,13 @@
 static void sata_dwc_port_stop(struct ata_port *ap)
 {
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 
 	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
 
-	dmaengine_terminate_all(hsdevp->chan);
+	dmaengine_terminate_sync(hsdevp->chan);
 	dma_release_channel(hsdevp->chan);
+	phy_power_off(hsdev->phy);
 
 	kfree(hsdevp);
 	ap->private_data = NULL;
@@ -924,22 +973,20 @@
 					 struct ata_taskfile *tf,
 					 u8 tag, u32 cmd_issued)
 {
-	unsigned long flags;
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 
 	dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
 		ata_get_cmd_descript(tf->command), tag);
 
-	spin_lock_irqsave(&ap->host->lock, flags);
 	hsdevp->cmd_issued[tag] = cmd_issued;
-	spin_unlock_irqrestore(&ap->host->lock, flags);
+
 	/*
 	 * Clear SError before executing a new command.
 	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
 	 * managed SError register for the disk needs to be done before the
 	 * task file is loaded.
 	 */
-	clear_serror();
+	clear_serror(ap);
 	ata_sff_exec_command(ap, tf);
 }
 
@@ -992,18 +1039,18 @@
 	sata_dwc_tf_dump(ap, &qc->tf);
 
 	if (start_dma) {
-		reg = core_scr_read(SCR_ERROR);
+		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
 		if (reg & SATA_DWC_SERROR_ERR_BITS) {
 			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
 				__func__, reg);
 		}
 
 		if (dir == DMA_TO_DEVICE)
-			out_le32(&hsdev->sata_dwc_regs->dmacr,
-				SATA_DWC_DMACR_TXCHEN);
+			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+					SATA_DWC_DMACR_TXCHEN);
 		else
-			out_le32(&hsdev->sata_dwc_regs->dmacr,
-				SATA_DWC_DMACR_RXCHEN);
+			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+					SATA_DWC_DMACR_RXCHEN);
 
 		/* Enable AHB DMA transfer on the specified channel */
 		dmaengine_submit(desc);
@@ -1025,36 +1072,12 @@
 	sata_dwc_bmdma_start_by_tag(qc, tag);
 }
 
-/*
- * Function : sata_dwc_qc_prep_by_tag
- * arguments : ata_queued_cmd *qc, u8 tag
- * Return value : None
- * qc_prep for a particular queued command based on tag
- */
-static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
-{
-	struct dma_async_tx_descriptor *desc;
-	struct ata_port *ap = qc->ap;
-	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
-
-	dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
-		__func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
-		 qc->n_elem);
-
-	desc = dma_dwc_xfer_setup(qc);
-	if (!desc) {
-		dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n",
-			__func__);
-		return;
-	}
-	hsdevp->desc[tag] = desc;
-}
-
 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
 {
 	u32 sactive;
 	u8 tag = qc->tag;
 	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 
 #ifdef DEBUG_NCQ
 	if (qc->tag > 0 || ap->link.sactive > 1)
@@ -1068,47 +1091,33 @@
 
 	if (!ata_is_ncq(qc->tf.protocol))
 		tag = 0;
-	sata_dwc_qc_prep_by_tag(qc, tag);
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
+		if (!hsdevp->desc[tag])
+			return AC_ERR_SYSTEM;
+	} else {
+		hsdevp->desc[tag] = NULL;
+	}
 
 	if (ata_is_ncq(qc->tf.protocol)) {
-		sactive = core_scr_read(SCR_ACTIVE);
+		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
 		sactive |= (0x00000001 << tag);
-		core_scr_write(SCR_ACTIVE, sactive);
+		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
 
 		dev_dbg(qc->ap->dev,
 			"%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
 			__func__, tag, qc->ap->link.sactive, sactive);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);
-		sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
 					     SATA_DWC_CMD_ISSUED_PEND);
 	} else {
-		ata_sff_qc_issue(qc);
+		return ata_bmdma_qc_issue(qc);
 	}
 	return 0;
 }
 
-/*
- * Function : sata_dwc_qc_prep
- * arguments : ata_queued_cmd *qc
- * Return value : None
- * qc_prep for a particular queued command
- */
-
-static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
-{
-	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
-		return;
-
-#ifdef DEBUG_NCQ
-	if (qc->tag > 0)
-		dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
-			 __func__, qc->tag, qc->ap->link.active_tag);
-
-	return ;
-#endif
-}
-
 static void sata_dwc_error_handler(struct ata_port *ap)
 {
 	ata_sff_error_handler(ap);
@@ -1125,17 +1134,22 @@
 	sata_dwc_enable_interrupts(hsdev);
 
 	/* Reconfigure the DMA control register */
-	out_le32(&hsdev->sata_dwc_regs->dmacr,
-		 SATA_DWC_DMACR_TXRXCH_CLEAR);
+	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+			SATA_DWC_DMACR_TXRXCH_CLEAR);
 
 	/* Reconfigure the DMA Burst Transaction Size register */
-	out_le32(&hsdev->sata_dwc_regs->dbtsr,
-		 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
-		 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
+	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
+			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
 
 	return ret;
 }
 
+static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
+{
+	/* SATA DWC is master only */
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1148,7 +1162,13 @@
 	 */
 	.sg_tablesize		= LIBATA_MAX_PRD,
 	/* .can_queue		= ATA_MAX_QUEUE, */
-	.dma_boundary		= ATA_DMA_BOUNDARY,
+	/*
+	 * Make sure an LLI block is not created that will span an 8K max FIS
+	 * boundary. If the block spans such a FIS boundary, there is a chance
+	 * that a DMA burst will cross that boundary -- this results in an
+	 * error in the host controller.
+	 */
+	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
 };
 
 static struct ata_port_operations sata_dwc_ops = {
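
    The dma_boundary value set above is a mask: segments whose first and last
    bytes fall in different (mask + 1)-aligned windows must not be merged,
    which with 0x1fff keeps every LLI block inside one 8K, FIS-sized window.
    A minimal sketch of that boundary test, assuming the usual block-layer
    interpretation of the mask (not code from this driver):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if [addr, addr + len) crosses a (mask + 1)-aligned boundary;
	 * with mask = 0x1fff a segment must stay inside a single 8K window. */
	static bool crosses_dma_boundary(uint64_t addr, uint64_t len, uint64_t mask)
	{
		return (addr & ~mask) != ((addr + len - 1) & ~mask);
	}
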
@@ -1157,7 +1177,6 @@
 	.error_handler		= sata_dwc_error_handler,
 	.hardreset		= sata_dwc_hardreset,
 
-	.qc_prep		= sata_dwc_qc_prep,
 	.qc_issue		= sata_dwc_qc_issue,
 
 	.scr_read		= sata_dwc_scr_read,
@@ -1166,6 +1185,8 @@
 	.port_start		= sata_dwc_port_start,
 	.port_stop		= sata_dwc_port_stop,
 
+	.sff_dev_select		= sata_dwc_dev_select,
+
 	.bmdma_setup		= sata_dwc_bmdma_setup,
 	.bmdma_start		= sata_dwc_bmdma_start,
 };
@@ -1184,13 +1205,14 @@
 	struct sata_dwc_device *hsdev;
 	u32 idr, versionr;
 	char *ver = (char *)&versionr;
-	u8 __iomem *base;
+	void __iomem *base;
 	int err = 0;
 	int irq;
 	struct ata_host *host;
 	struct ata_port_info pi = sata_dwc_port_info[0];
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct device_node *np = ofdev->dev.of_node;
+	struct resource *res;
 
 	/* Allocate DWC SATA device */
 	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
@@ -1201,57 +1223,33 @@
 	host->private_data = hsdev;
 
 	/* Ioremap SATA registers */
-	base = of_iomap(np, 0);
-	if (!base) {
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(base)) {
 		dev_err(&ofdev->dev,
 			"ioremap failed for SATA register address\n");
-		return -ENODEV;
+		return PTR_ERR(base);
 	}
-	hsdev->reg_base = base;
 	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
 
 	/* Synopsys DWC SATA specific Registers */
-	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
+	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
 
 	/* Setup port */
 	host->ports[0]->ioaddr.cmd_addr = base;
 	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
-	host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
-	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
 
 	/* Read the ID and Version Registers */
-	idr = in_le32(&hsdev->sata_dwc_regs->idr);
-	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
+	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
 	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
 		   idr, ver[0], ver[1], ver[2]);
 
-	/* Get SATA DMA interrupt number */
-	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
-	if (hsdev->dma->irq == NO_IRQ) {
-		dev_err(&ofdev->dev, "no SATA DMA irq\n");
-		err = -ENODEV;
-		goto error_iomap;
-	}
-
-	/* Get physical SATA DMA register base address */
-	hsdev->dma->regs = of_iomap(np, 1);
-	if (!hsdev->dma->regs) {
-		dev_err(&ofdev->dev,
-			"ioremap failed for AHBDMA register address\n");
-		err = -ENODEV;
-		goto error_iomap;
-	}
-
 	/* Save dev for later use in dev_xxx() routines */
 	hsdev->dev = &ofdev->dev;
 
-	hsdev->dma->dev = &ofdev->dev;
-
-	/* Initialize AHB DMAC */
-	err = dw_dma_probe(hsdev->dma);
-	if (err)
-		goto error_dma_iomap;
-
 	/* Enable SATA Interrupts */
 	sata_dwc_enable_interrupts(hsdev);
 
@@ -1263,6 +1261,25 @@
 		goto error_out;
 	}
 
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	if (!of_find_property(np, "dmas", NULL)) {
+		err = sata_dwc_dma_init_old(ofdev, hsdev);
+		if (err)
+			goto error_out;
+	}
+#endif
+
+	hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+	if (IS_ERR(hsdev->phy)) {
+		err = PTR_ERR(hsdev->phy);
+		hsdev->phy = NULL;
+		goto error_out;
+	}
+
+	err = phy_init(hsdev->phy);
+	if (err)
+		goto error_out;
+
 	/*
 	 * Now, register with libATA core, this will also initiate the
 	 * device discovery process, invoking our port_start() handler &
@@ -1276,12 +1293,7 @@
 	return 0;
 
 error_out:
-	/* Free SATA DMA resources */
-	dw_dma_remove(hsdev->dma);
-error_dma_iomap:
-	iounmap(hsdev->dma->regs);
-error_iomap:
-	iounmap(base);
+	phy_exit(hsdev->phy);
 	return err;
 }
 
@@ -1293,11 +1305,13 @@
 
 	ata_host_detach(host);
 
-	/* Free SATA DMA resources */
-	dw_dma_remove(hsdev->dma);
+	phy_exit(hsdev->phy);
 
-	iounmap(hsdev->dma->regs);
-	iounmap(hsdev->reg_base);
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	/* Free SATA DMA resources */
+	sata_dwc_dma_exit_old(hsdev);
+#endif
+
 	dev_dbg(&ofdev->dev, "done\n");
 	return 0;
 }
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8638d57..aafb8cc 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -197,7 +197,7 @@
 
 	for (i = 0; i < SGPIO_PINS; i++) {
 		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
-		if (IS_ERR_VALUE(err))
+		if (err < 0)
 			return;
 
 		pdata->sgpio_gpio[i] = err;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e..85aaf22 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@
 	"reserved 27", 
 	"reserved 28", 
 	"reserved 29", 
-	"reserved 30", 
+	"reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
 	"reassembly abort: no buffers", 
 	"receive buffer overflow", 
 	"change in GFC", 
 	"receive buffer full", 
 	"low priority discard - no receive descriptor", 
 	"low priority discard - missing end of packet", 
+	"reserved 37",
+	"reserved 38",
+	"reserved 39",
+	"reserved 40",
 	"reserved 41", 
 	"reserved 42", 
 	"reserved 43", 
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f29..809dd1e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@
 	/* make the ptr point to the corresponding buffer desc entry */  
 	buf_desc_ptr += desc;	  
         if (!desc || (desc > iadev->num_rx_desc) || 
-                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
+                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
             free_desc(dev, desc);
             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
             return -1;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c81667d..e44944f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1267,14 +1267,15 @@
 		error = device_suspend_late(dev);
 
 		mutex_lock(&dpm_list_mtx);
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &dpm_late_early_list);
+
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
 			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
 		}
-		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &dpm_late_early_list);
 		put_device(dev);
 
 		if (async_error)
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 04d706c..35b13a0 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -146,7 +146,6 @@
 		return -ENOTSUPP;
 	}
 
-	sflash->window = BCMA_SOC_FLASH2;
 	sflash->blocksize = e->blocksize;
 	sflash->numblocks = e->numblocks;
 	sflash->size = sflash->blocksize * sflash->numblocks;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 51a071e..c04bd9b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -381,7 +381,7 @@
 
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-			void __pmem **kaddr, pfn_t *pfn)
+			void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	struct page *page;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0ede6d7..81666a5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -350,12 +350,12 @@
 	struct rbd_spec		*spec;
 	struct rbd_options	*opts;
 
-	char			*header_name;
+	struct ceph_object_id	header_oid;
+	struct ceph_object_locator header_oloc;
 
 	struct ceph_file_layout	layout;
 
-	struct ceph_osd_event   *watch_event;
-	struct rbd_obj_request	*watch_request;
+	struct ceph_osd_linger_request *watch_handle;
 
 	struct rbd_spec		*parent_spec;
 	u64			parent_overlap;
@@ -1596,12 +1596,6 @@
 	return __rbd_obj_request_wait(obj_request, 0);
 }
 
-static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
-					unsigned long timeout)
-{
-	return __rbd_obj_request_wait(obj_request, timeout);
-}
-
 static void rbd_img_request_complete(struct rbd_img_request *img_request)
 {
 
@@ -1751,12 +1745,6 @@
 		complete_all(&obj_request->completion);
 }
 
-static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
-{
-	dout("%s: obj %p\n", __func__, obj_request);
-	obj_request_done_set(obj_request);
-}
-
 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request = NULL;
@@ -1828,13 +1816,12 @@
 		obj_request_done_set(obj_request);
 }
 
-static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
-				struct ceph_msg *msg)
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
 {
 	struct rbd_obj_request *obj_request = osd_req->r_priv;
 	u16 opcode;
 
-	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+	dout("%s: osd_req %p\n", __func__, osd_req);
 	rbd_assert(osd_req == obj_request->osd_req);
 	if (obj_request_img_data_test(obj_request)) {
 		rbd_assert(obj_request->img_request);
@@ -1878,10 +1865,6 @@
 	case CEPH_OSD_OP_CALL:
 		rbd_osd_call_callback(obj_request);
 		break;
-	case CEPH_OSD_OP_NOTIFY_ACK:
-	case CEPH_OSD_OP_WATCH:
-		rbd_osd_trivial_callback(obj_request);
-		break;
 	default:
 		rbd_warn(NULL, "%s: unsupported op %hu",
 			obj_request->object_name, (unsigned short) opcode);
@@ -1896,27 +1879,17 @@
 {
 	struct rbd_img_request *img_request = obj_request->img_request;
 	struct ceph_osd_request *osd_req = obj_request->osd_req;
-	u64 snap_id;
 
-	rbd_assert(osd_req != NULL);
-
-	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
-	ceph_osdc_build_request(osd_req, obj_request->offset,
-			NULL, snap_id, NULL);
+	if (img_request)
+		osd_req->r_snapid = img_request->snap_id;
 }
 
 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
 {
-	struct rbd_img_request *img_request = obj_request->img_request;
 	struct ceph_osd_request *osd_req = obj_request->osd_req;
-	struct ceph_snap_context *snapc;
-	struct timespec mtime = CURRENT_TIME;
 
-	rbd_assert(osd_req != NULL);
-
-	snapc = img_request ? img_request->snapc : NULL;
-	ceph_osdc_build_request(osd_req, obj_request->offset,
-			snapc, CEPH_NOSNAP, &mtime);
+	osd_req->r_mtime = CURRENT_TIME;
+	osd_req->r_data_offset = obj_request->offset;
 }
 
 /*
@@ -1954,7 +1927,7 @@
 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
 					  GFP_NOIO);
 	if (!osd_req)
-		return NULL;	/* ENOMEM */
+		goto fail;
 
 	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
 		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
@@ -1965,9 +1938,18 @@
 	osd_req->r_priv = obj_request;
 
 	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
-	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+			     obj_request->object_name))
+		goto fail;
+
+	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+		goto fail;
 
 	return osd_req;
+
+fail:
+	ceph_osdc_put_request(osd_req);
+	return NULL;
 }
 
 /*
@@ -2003,16 +1985,25 @@
 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
 						false, GFP_NOIO);
 	if (!osd_req)
-		return NULL;	/* ENOMEM */
+		goto fail;
 
 	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
 	osd_req->r_callback = rbd_osd_req_callback;
 	osd_req->r_priv = obj_request;
 
 	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
-	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+			     obj_request->object_name))
+		goto fail;
+
+	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+		goto fail;
 
 	return osd_req;
+
+fail:
+	ceph_osdc_put_request(osd_req);
+	return NULL;
 }
 
 
@@ -2973,17 +2964,20 @@
 {
 	struct rbd_obj_request *obj_request;
 	struct rbd_obj_request *next_obj_request;
+	int ret = 0;
 
 	dout("%s: img %p\n", __func__, img_request);
-	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
-		int ret;
 
+	rbd_img_request_get(img_request);
+	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
 		ret = rbd_img_obj_request_submit(obj_request);
 		if (ret)
-			return ret;
+			goto out_put_ireq;
 	}
 
-	return 0;
+out_put_ireq:
+	rbd_img_request_put(img_request);
+	return ret;
 }
 
 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
@@ -3090,45 +3084,18 @@
 	obj_request_done_set(obj_request);
 }
 
-static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
+
+static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
+			 u64 notifier_id, void *data, size_t data_len)
 {
-	struct rbd_obj_request *obj_request;
+	struct rbd_device *rbd_dev = arg;
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	int ret;
 
-	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
-							OBJ_REQUEST_NODATA);
-	if (!obj_request)
-		return -ENOMEM;
-
-	ret = -ENOMEM;
-	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
-						  obj_request);
-	if (!obj_request->osd_req)
-		goto out;
-
-	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
-					notify_id, 0, 0);
-	rbd_osd_req_format_read(obj_request);
-
-	ret = rbd_obj_request_submit(osdc, obj_request);
-	if (ret)
-		goto out;
-	ret = rbd_obj_request_wait(obj_request);
-out:
-	rbd_obj_request_put(obj_request);
-
-	return ret;
-}
-
-static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
-{
-	struct rbd_device *rbd_dev = (struct rbd_device *)data;
-	int ret;
-
-	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
-		rbd_dev->header_name, (unsigned long long)notify_id,
-		(unsigned int)opcode);
+	dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
+	     cookie, notify_id);
 
 	/*
 	 * Until adequate refresh error handling is in place, there is
@@ -3140,63 +3107,31 @@
 	if (ret)
 		rbd_warn(rbd_dev, "refresh failed: %d", ret);
 
-	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
+	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
+				   &rbd_dev->header_oloc, notify_id, cookie,
+				   NULL, 0);
 	if (ret)
 		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
 }
 
-/*
- * Send a (un)watch request and wait for the ack.  Return a request
- * with a ref held on success or error.
- */
-static struct rbd_obj_request *rbd_obj_watch_request_helper(
-						struct rbd_device *rbd_dev,
-						bool watch)
+static void rbd_watch_errcb(void *arg, u64 cookie, int err)
 {
-	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct ceph_options *opts = osdc->client->options;
-	struct rbd_obj_request *obj_request;
+	struct rbd_device *rbd_dev = arg;
 	int ret;
 
-	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
-					     OBJ_REQUEST_NODATA);
-	if (!obj_request)
-		return ERR_PTR(-ENOMEM);
+	rbd_warn(rbd_dev, "encountered watch error: %d", err);
 
-	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
-						  obj_request);
-	if (!obj_request->osd_req) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	__rbd_dev_header_unwatch_sync(rbd_dev);
 
-	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
-			      rbd_dev->watch_event->cookie, 0, watch);
-	rbd_osd_req_format_write(obj_request);
-
-	if (watch)
-		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
-
-	ret = rbd_obj_request_submit(osdc, obj_request);
-	if (ret)
-		goto out;
-
-	ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
-	if (ret)
-		goto out;
-
-	ret = obj_request->result;
+	ret = rbd_dev_header_watch_sync(rbd_dev);
 	if (ret) {
-		if (watch)
-			rbd_obj_request_end(obj_request);
-		goto out;
+		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
+		return;
 	}
 
-	return obj_request;
-
-out:
-	rbd_obj_request_put(obj_request);
-	return ERR_PTR(ret);
+	ret = rbd_dev_refresh(rbd_dev);
+	if (ret)
+		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
 }
 
 /*
@@ -3205,35 +3140,33 @@
 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct rbd_obj_request *obj_request;
+	struct ceph_osd_linger_request *handle;
+
+	rbd_assert(!rbd_dev->watch_handle);
+
+	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
+				 &rbd_dev->header_oloc, rbd_watch_cb,
+				 rbd_watch_errcb, rbd_dev);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	rbd_dev->watch_handle = handle;
+	return 0;
+}
+
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	int ret;
 
-	rbd_assert(!rbd_dev->watch_event);
-	rbd_assert(!rbd_dev->watch_request);
+	if (!rbd_dev->watch_handle)
+		return;
 
-	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
-				     &rbd_dev->watch_event);
-	if (ret < 0)
-		return ret;
+	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
+	if (ret)
+		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
 
-	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
-	if (IS_ERR(obj_request)) {
-		ceph_osdc_cancel_event(rbd_dev->watch_event);
-		rbd_dev->watch_event = NULL;
-		return PTR_ERR(obj_request);
-	}
-
-	/*
-	 * A watch request is set to linger, so the underlying osd
-	 * request won't go away until we unregister it.  We retain
-	 * a pointer to the object request during that time (in
-	 * rbd_dev->watch_request), so we'll keep a reference to it.
-	 * We'll drop that reference after we've unregistered it in
-	 * rbd_dev_header_unwatch_sync().
-	 */
-	rbd_dev->watch_request = obj_request;
-
-	return 0;
+	rbd_dev->watch_handle = NULL;
 }
 
 /*
@@ -3241,24 +3174,7 @@
  */
 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
 {
-	struct rbd_obj_request *obj_request;
-
-	rbd_assert(rbd_dev->watch_event);
-	rbd_assert(rbd_dev->watch_request);
-
-	rbd_obj_request_end(rbd_dev->watch_request);
-	rbd_obj_request_put(rbd_dev->watch_request);
-	rbd_dev->watch_request = NULL;
-
-	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
-	if (!IS_ERR(obj_request))
-		rbd_obj_request_put(obj_request);
-	else
-		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
-			 PTR_ERR(obj_request));
-
-	ceph_osdc_cancel_event(rbd_dev->watch_event);
-	rbd_dev->watch_event = NULL;
+	__rbd_dev_header_unwatch_sync(rbd_dev);
 
 	dout("%s flushing notifies\n", __func__);
 	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
@@ -3591,7 +3507,7 @@
 		if (!ondisk)
 			return -ENOMEM;
 
-		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
+		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
 				       0, size, ondisk);
 		if (ret < 0)
 			goto out;
@@ -4033,6 +3949,8 @@
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 	bool need_put = !!rbd_dev->opts;
 
+	ceph_oid_destroy(&rbd_dev->header_oid);
+
 	rbd_put_client(rbd_dev->rbd_client);
 	rbd_spec_put(rbd_dev->spec);
 	kfree(rbd_dev->opts);
@@ -4063,6 +3981,9 @@
 	INIT_LIST_HEAD(&rbd_dev->node);
 	init_rwsem(&rbd_dev->header_rwsem);
 
+	ceph_oid_init(&rbd_dev->header_oid);
+	ceph_oloc_init(&rbd_dev->header_oloc);
+
 	rbd_dev->dev.bus = &rbd_bus_type;
 	rbd_dev->dev.type = &rbd_device_type;
 	rbd_dev->dev.parent = &rbd_root_dev;
@@ -4111,7 +4032,7 @@
 		__le64 size;
 	} __attribute__ ((packed)) size_buf = { 0 };
 
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_size",
 				&snapid, sizeof (snapid),
 				&size_buf, sizeof (size_buf));
@@ -4151,7 +4072,7 @@
 	if (!reply_buf)
 		return -ENOMEM;
 
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_object_prefix", NULL, 0,
 				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4186,7 +4107,7 @@
 	u64 unsup;
 	int ret;
 
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_features",
 				&snapid, sizeof (snapid),
 				&features_buf, sizeof (features_buf));
@@ -4248,7 +4169,7 @@
 	}
 
 	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_parent",
 				&snapid, sizeof (snapid),
 				reply_buf, size);
@@ -4351,7 +4272,7 @@
 	u64 stripe_count;
 	int ret;
 
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_stripe_unit_count", NULL, 0,
 				(char *)&striping_info_buf, size);
 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4599,7 +4520,7 @@
 	if (!reply_buf)
 		return -ENOMEM;
 
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_snapcontext", NULL, 0,
 				reply_buf, size);
 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4664,7 +4585,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	snapid = cpu_to_le64(snap_id);
-	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 				"rbd", "get_snapshot_name",
 				&snapid, sizeof (snapid),
 				reply_buf, size);
@@ -4975,13 +4896,13 @@
 again:
 	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
 	if (ret == -ENOENT && tries++ < 1) {
-		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
-					       &newest_epoch);
+		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
+					    &newest_epoch);
 		if (ret < 0)
 			return ret;
 
 		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
-			ceph_monc_request_next_osdmap(&rbdc->client->monc);
+			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
 			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
 						     newest_epoch,
 						     opts->mount_timeout);
@@ -5260,35 +5181,26 @@
 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
 {
 	struct rbd_spec *spec = rbd_dev->spec;
-	size_t size;
+	int ret;
 
 	/* Record the header object name for this rbd image. */
 
 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
 
+	rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
 	if (rbd_dev->image_format == 1)
-		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
+		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+				       spec->image_name, RBD_SUFFIX);
 	else
-		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
+		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+				       RBD_HEADER_PREFIX, spec->image_id);
 
-	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
-	if (!rbd_dev->header_name)
-		return -ENOMEM;
-
-	if (rbd_dev->image_format == 1)
-		sprintf(rbd_dev->header_name, "%s%s",
-			spec->image_name, RBD_SUFFIX);
-	else
-		sprintf(rbd_dev->header_name, "%s%s",
-			RBD_HEADER_PREFIX, spec->image_id);
-	return 0;
+	return ret;
 }
 
 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
 {
 	rbd_dev_unprobe(rbd_dev);
-	kfree(rbd_dev->header_name);
-	rbd_dev->header_name = NULL;
 	rbd_dev->image_format = 0;
 	kfree(rbd_dev->spec->image_id);
 	rbd_dev->spec->image_id = NULL;
@@ -5327,7 +5239,7 @@
 				pr_info("image %s/%s does not exist\n",
 					rbd_dev->spec->pool_name,
 					rbd_dev->spec->image_name);
-			goto out_header_name;
+			goto err_out_format;
 		}
 	}
 
@@ -5373,7 +5285,7 @@
 		goto err_out_probe;
 
 	dout("discovered format %u image, header name is %s\n",
-		rbd_dev->image_format, rbd_dev->header_name);
+		rbd_dev->image_format, rbd_dev->header_oid.name);
 	return 0;
 
 err_out_probe:
@@ -5381,9 +5293,6 @@
 err_out_watch:
 	if (!depth)
 		rbd_dev_header_unwatch_sync(rbd_dev);
-out_header_name:
-	kfree(rbd_dev->header_name);
-	rbd_dev->header_name = NULL;
 err_out_format:
 	rbd_dev->image_format = 0;
 	kfree(rbd_dev->spec->image_id);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 53ddba2..98efbfc 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -175,6 +175,7 @@
 config COMMON_CLK_NXP
 	def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
 	select REGMAP_MMIO if ARCH_LPC32XX
+	select MFD_SYSCON if ARCH_LPC18XX
 	---help---
 	  Support for clock providers on NXP platforms.
 
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 8830458..1630a1f 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -59,6 +59,7 @@
 	struct clk_init_data init;
 	struct clk_pwm *clk_pwm;
 	struct pwm_device *pwm;
+	struct pwm_args pargs;
 	const char *clk_name;
 	struct clk *clk;
 	int ret;
@@ -71,22 +72,28 @@
 	if (IS_ERR(pwm))
 		return PTR_ERR(pwm);
 
-	if (!pwm->period) {
+	pwm_get_args(pwm, &pargs);
+	if (!pargs.period) {
 		dev_err(&pdev->dev, "invalid PWM period\n");
 		return -EINVAL;
 	}
 
 	if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
-		clk_pwm->fixed_rate = NSEC_PER_SEC / pwm->period;
+		clk_pwm->fixed_rate = NSEC_PER_SEC / pargs.period;
 
-	if (pwm->period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
-	    pwm->period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
+	if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
+	    pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
 		dev_err(&pdev->dev,
 			"clock-frequency does not match PWM period\n");
 		return -EINVAL;
 	}
 
-	ret = pwm_config(pwm, (pwm->period + 1) >> 1, pwm->period);
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to the
+	 * atomic PWM API.
+	 */
+	pwm_apply_args(pwm);
+	ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 227e356..10c9860 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -61,7 +61,6 @@
 	FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
 	FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
 
-	FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "tvdpll_445p5m", 1, 3),
 	FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
 	FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
 
@@ -558,7 +557,11 @@
 	MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23),
 	MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents, 0x0090, 24, 4, 31),
 	/* CLK_CFG_6 */
-	MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7),
+	/*
+	 * The dpi0_sel clock should not propagate rate changes to its parent
+	 * clock so the dpi driver can have full control over PLL and divider.
+	 */
+	MUX_GATE_FLAGS(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7, 0),
 	MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
 	MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x00a0, 16, 3, 23),
 	MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31),
@@ -1091,6 +1094,11 @@
 		clk_data->clks[cku->id] = clk;
 	}
 
+	clk = clk_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
+				   base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO,
+				   NULL);
+	clk_data->clks[CLK_APMIXED_HDMI_REF] = clk;
+
 	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 	if (r)
 		pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 32d2e45..9f24fcf 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -83,7 +83,11 @@
 	signed char num_parents;
 };
 
-#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) {	\
+/*
+ * In case the rate change propagation to parent clocks is undesirable,
+ * this macro allows to specify the clock flags manually.
+ */
+#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) {	\
 		.id = _id,						\
 		.name = _name,						\
 		.mux_reg = _reg,					\
@@ -94,9 +98,16 @@
 		.divider_shift = -1,					\
 		.parent_names = _parents,				\
 		.num_parents = ARRAY_SIZE(_parents),			\
-		.flags = CLK_SET_RATE_PARENT,				\
+		.flags = _flags,					\
 	}
 
+/*
+ * Unless necessary, all MUX_GATE clocks propagate rate changes to their
+ * parent clock by default.
+ */
+#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate)	\
+	MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, CLK_SET_RATE_PARENT)
+
 #define MUX(_id, _name, _parents, _reg, _shift, _width) {		\
 		.id = _id,						\
 		.name = _name,						\
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 020a29a..51f5438 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -180,15 +180,15 @@
 
 	/* register fixed rate clocks */
 	clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-						CLK_IS_ROOT, 24000000);
+						0, 24000000);
 	clks[FRCCLK] =  clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-						CLK_IS_ROOT, 8000000);
+						0, 8000000);
 	clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-						CLK_IS_ROOT, 8000000);
+						0, 8000000);
 	clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-						CLK_IS_ROOT, 32000);
+						0, 32000);
 	clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-						CLK_IS_ROOT, 24000000);
+						0, 24000000);
 	/* fixed rate (optional) clock */
 	if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
 		pr_info("pic32-clk: dt requests SOSC.\n");
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b855181..456cf58 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -1221,7 +1221,7 @@
 		p = rate >= params->vco_min ? 1 : -EINVAL;
 	}
 
-	if (IS_ERR_VALUE(p))
+	if (p < 0)
 		return -EINVAL;
 
 	cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 035513b..9009295 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -78,9 +78,14 @@
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
 
-static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
 {
-	return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+	(void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
+static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+	(void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 }
 
 /**
@@ -1026,13 +1031,8 @@
 		return 0;
 
 	down_write(&policy->rwsem);
-	if (has_target()) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		if (ret) {
-			pr_err("%s: Failed to stop governor\n", __func__);
-			goto unlock;
-		}
-	}
+	if (has_target())
+		cpufreq_stop_governor(policy);
 
 	cpumask_set_cpu(cpu, policy->cpus);
 
@@ -1041,8 +1041,6 @@
 		if (ret)
 			pr_err("%s: Failed to start governor\n", __func__);
 	}
-
-unlock:
 	up_write(&policy->rwsem);
 	return ret;
 }
@@ -1354,11 +1352,8 @@
 	}
 
 	down_write(&policy->rwsem);
-	if (has_target()) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		if (ret)
-			pr_err("%s: Failed to stop governor\n", __func__);
-	}
+	if (has_target())
+		cpufreq_stop_governor(policy);
 
 	cpumask_clear_cpu(cpu, policy->cpus);
 
@@ -1387,12 +1382,8 @@
 	if (cpufreq_driver->stop_cpu)
 		cpufreq_driver->stop_cpu(policy);
 
-	/* If cpu is last user of policy, free policy */
-	if (has_target()) {
-		ret = cpufreq_exit_governor(policy);
-		if (ret)
-			pr_err("%s: Failed to exit governor\n", __func__);
-	}
+	if (has_target())
+		cpufreq_exit_governor(policy);
 
 	/*
 	 * Perform the ->exit() even during light-weight tear-down,
@@ -1626,7 +1617,6 @@
 void cpufreq_suspend(void)
 {
 	struct cpufreq_policy *policy;
-	int ret;
 
 	if (!cpufreq_driver)
 		return;
@@ -1639,14 +1629,8 @@
 	for_each_active_policy(policy) {
 		if (has_target()) {
 			down_write(&policy->rwsem);
-			ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+			cpufreq_stop_governor(policy);
 			up_write(&policy->rwsem);
-
-			if (ret) {
-				pr_err("%s: Failed to stop governor for policy: %p\n",
-					__func__, policy);
-				continue;
-			}
 		}
 
 		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -1848,7 +1832,7 @@
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
-	clamp_val(target_freq, policy->min, policy->max);
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
 	return cpufreq_driver->fast_switch(policy, target_freq);
 }
@@ -2049,16 +2033,15 @@
 
 	ret = policy->governor->governor(policy, event);
 
-	if (!ret) {
-		if (event == CPUFREQ_GOV_POLICY_INIT)
+	if (event == CPUFREQ_GOV_POLICY_INIT) {
+		if (ret)
+			module_put(policy->governor->owner);
+		else
 			policy->governor->initialized++;
-		else if (event == CPUFREQ_GOV_POLICY_EXIT)
-			policy->governor->initialized--;
-	}
-
-	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
-			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
+	} else if (event == CPUFREQ_GOV_POLICY_EXIT) {
+		policy->governor->initialized--;
 		module_put(policy->governor->owner);
+	}
 
 	return ret;
 }
@@ -2221,20 +2204,8 @@
 	old_gov = policy->governor;
 	/* end old governor */
 	if (old_gov) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		if (ret) {
-			/* This can happen due to race with other operations */
-			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
-				 __func__, old_gov->name, ret);
-			return ret;
-		}
-
-		ret = cpufreq_exit_governor(policy);
-		if (ret) {
-			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
-			       __func__, old_gov->name, ret);
-			return ret;
-		}
+		cpufreq_stop_governor(policy);
+		cpufreq_exit_governor(policy);
 	}
 
 	/* start new governor */
@@ -2495,10 +2466,7 @@
 
 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
 	pr_debug("driver %s up and running\n", driver_data->name);
-
-out:
-	put_online_cpus();
-	return ret;
+	goto out;
 
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
@@ -2508,7 +2476,9 @@
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	goto out;
+out:
+	put_online_cpus();
+	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b76a98d..ee367e9 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -449,7 +449,7 @@
 		cpu->acpi_perf_data.states[0].core_frequency =
 					policy->cpuinfo.max_freq / 1000;
 	cpu->valid_pss_table = true;
-	pr_info("_PPC limits will be enforced\n");
+	pr_debug("_PPC limits will be enforced\n");
 
 	return;
 
@@ -1460,13 +1460,15 @@
 
 	intel_pstate_clear_update_util_hook(policy->cpu);
 
+	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+		 policy->cpuinfo.max_freq, policy->max);
+
 	cpu = all_cpu_data[0];
-	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
-		if (policy->max < policy->cpuinfo.max_freq &&
-		    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
-			pr_debug("policy->max > max non turbo frequency\n");
-			policy->max = policy->cpuinfo.max_freq;
-		}
+	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+	    policy->max < policy->cpuinfo.max_freq &&
+	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+		pr_debug("policy->max > max non turbo frequency\n");
+		policy->max = policy->cpuinfo.max_freq;
 	}
 
 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -1496,13 +1498,13 @@
 				   limits->max_sysfs_pct);
 	limits->max_perf_pct = max(limits->min_policy_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
 	/* Make sure min_perf_pct <= max_perf_pct */
 	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
 	limits->min_perf = div_fp(limits->min_perf_pct, 100);
 	limits->max_perf = div_fp(limits->max_perf_pct, 100);
+	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
 	intel_pstate_set_update_util_hook(policy->cpu);
@@ -1559,8 +1561,11 @@
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-	policy->cpuinfo.max_freq =
-		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	update_turbo_state();
+	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
 	intel_pstate_init_acpi_perf_limits(policy);
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 6f602c7..643f431 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -307,17 +307,24 @@
 	return 0;
 }
 
+#define DYNAMIC_POWER "dynamic-power-coefficient"
+
 static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
 {
 	struct mtk_cpu_dvfs_info *info = policy->driver_data;
 	struct device_node *np = of_node_get(info->cpu_dev->of_node);
+	u32 capacitance = 0;
 
 	if (WARN_ON(!np))
 		return;
 
 	if (of_find_property(np, "#cooling-cells", NULL)) {
-		info->cdev = of_cpufreq_cooling_register(np,
-							 policy->related_cpus);
+		of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
+
+		info->cdev = of_cpufreq_power_cooling_register(np,
+						policy->related_cpus,
+						capacitance,
+						NULL);
 
 		if (IS_ERR(info->cdev)) {
 			dev_err(info->cpu_dev,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index cead9be..376e63c 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -54,7 +54,7 @@
 
 	freq = new_freq * 1000;
 	ret = clk_round_rate(policy->clk, freq);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_warn(mpu_dev,
 			 "CPUfreq: Cannot find matching frequency for %lu\n",
 			 freq);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2b8e6ce..a4d0059 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -214,7 +214,7 @@
 		tick_broadcast_exit();
 	}
 
-	if (!cpuidle_state_is_coupled(drv, entered_state))
+	if (!cpuidle_state_is_coupled(drv, index))
 		local_irq_enable();
 
 	/*
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 44d30b4..5ad5f30 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -402,7 +402,7 @@
 	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
 	of_node_put(caam_node);
 
-	return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
+	return ret ? -ENOTSUPP : prop;
 }
 EXPORT_SYMBOL(caam_get_era);
 
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395..0d0d452 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@
 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 	unsigned int unit;
+	u32 unit_size;
 	int ret;
 
 	if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@
 	if (!req->info)
 		return -EINVAL;
 
-	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
-		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
-			break;
+	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+	if (req->nbytes <= unit_size_map[0].size) {
+		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+				unit_size = unit_size_map[unit].value;
+				break;
+			}
+		}
+	}
 
-	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
 	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
 		/* Use the fallback to process the request for any
 		 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@
 	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
 	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
 					   : CCP_AES_ACTION_DECRYPT;
-	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+	rctx->cmd.u.xts.unit_size = unit_size;
 	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
 	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
 	rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2..63464e8 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@
 					&dd->pdata->algs_info[i].algs_list[j]);
 err_pm:
 	pm_runtime_disable(dev);
-	if (dd->polling_mode)
+	if (!dd->polling_mode)
 		dma_release_channel(dd->dma_lch);
 data_err:
 	dev_err(dev, "initialization failed.\n");
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
new file mode 100644
index 0000000..cedab75
--- /dev/null
+++ b/drivers/dax/Kconfig
@@ -0,0 +1,26 @@
+menuconfig DEV_DAX
+	tristate "DAX: direct access to differentiated memory"
+	default m if NVDIMM_DAX
+	depends on TRANSPARENT_HUGEPAGE
+	help
+	  Support raw access to differentiated (persistence, bandwidth,
+	  latency...) memory via an mmap(2) capable character
+	  device.  Platform firmware or a device driver may identify a
+	  platform memory resource that is differentiated from the
+	  baseline memory pool.  Mappings of a /dev/daxX.Y device impose
+	  restrictions that make the mapping behavior deterministic.
+
+if DEV_DAX
+
+config DEV_DAX_PMEM
+	tristate "PMEM DAX: direct access to persistent memory"
+	depends on NVDIMM_DAX
+	default DEV_DAX
+	help
+	  Support raw access to persistent memory.  Note that this
+	  driver consumes memory ranges allocated and exported by the
+	  libnvdimm sub-system.
+
+	  Say Y if unsure
+
+endif
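
The DEV_DAX help text above describes access to a /dev/daxX.Y node purely through mmap(2). A minimal userspace sketch of that model follows, assuming a hypothetical /dev/dax0.0 node and a 2 MiB region alignment; the device name and length are illustrative and not created by this patch. Writable mappings have to be MAP_SHARED and aligned to the region alignment, as check_vma() in the dax.c driver added below enforces.

/* Hedged sketch, not part of this patch: map a hypothetical /dev/dax0.0. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;			/* assumes a 2 MiB aligned region */
	int fd = open("/dev/dax0.0", O_RDWR);	/* illustrative device name */
	void *p;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Private writable mappings are rejected by the driver; use MAP_SHARED. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	memset(p, 0, len);	/* faults are served by dax_dev_fault()/pmd_fault() */
	munmap(p, len);
	close(fd);
	return EXIT_SUCCESS;
}
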
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
new file mode 100644
index 0000000..27c54e3
--- /dev/null
+++ b/drivers/dax/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_DEV_DAX) += dax.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+
+dax_pmem-y := pmem.o
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
new file mode 100644
index 0000000..b891a12
--- /dev/null
+++ b/drivers/dax/dax.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pfn_t.h>
+#include <linux/slab.h>
+#include <linux/dax.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+static int dax_major;
+static struct class *dax_class;
+static DEFINE_IDA(dax_minor_ida);
+
+/**
+ * struct dax_region - mapping infrastructure for dax devices
+ * @id: kernel-wide unique region for a memory range
+ * @base: linear address corresponding to @res
+ * @kref: to pin while other agents have a need to do lookups
+ * @dev: parent device backing this region
+ * @align: allocation and mapping alignment for child dax devices
+ * @res: physical address range of the region
+ * @pfn_flags: identify whether the pfns are page backed or not
+ */
+struct dax_region {
+	int id;
+	struct ida ida;
+	void *base;
+	struct kref kref;
+	struct device *dev;
+	unsigned int align;
+	struct resource res;
+	unsigned long pfn_flags;
+};
+
+/**
+ * struct dax_dev - subdivision of a dax region
+ * @region: parent region
+ * @dev: device backing the character device
+ * @kref: enable this data to be tracked in filp->private_data
+ * @alive: !alive + rcu grace period == no new mappings can be established
+ * @id: child id in the region
+ * @num_resources: number of physical address extents in this device
+ * @res: array of physical address ranges
+ */
+struct dax_dev {
+	struct dax_region *region;
+	struct device *dev;
+	struct kref kref;
+	bool alive;
+	int id;
+	int num_resources;
+	struct resource res[0];
+};
+
+static void dax_region_free(struct kref *kref)
+{
+	struct dax_region *dax_region;
+
+	dax_region = container_of(kref, struct dax_region, kref);
+	kfree(dax_region);
+}
+
+void dax_region_put(struct dax_region *dax_region)
+{
+	kref_put(&dax_region->kref, dax_region_free);
+}
+EXPORT_SYMBOL_GPL(dax_region_put);
+
+static void dax_dev_free(struct kref *kref)
+{
+	struct dax_dev *dax_dev;
+
+	dax_dev = container_of(kref, struct dax_dev, kref);
+	dax_region_put(dax_dev->region);
+	kfree(dax_dev);
+}
+
+static void dax_dev_put(struct dax_dev *dax_dev)
+{
+	kref_put(&dax_dev->kref, dax_dev_free);
+}
+
+struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+		struct resource *res, unsigned int align, void *addr,
+		unsigned long pfn_flags)
+{
+	struct dax_region *dax_region;
+
+	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
+
+	if (!dax_region)
+		return NULL;
+
+	memcpy(&dax_region->res, res, sizeof(*res));
+	dax_region->pfn_flags = pfn_flags;
+	kref_init(&dax_region->kref);
+	dax_region->id = region_id;
+	ida_init(&dax_region->ida);
+	dax_region->align = align;
+	dax_region->dev = parent;
+	dax_region->base = addr;
+
+	return dax_region;
+}
+EXPORT_SYMBOL_GPL(alloc_dax_region);
+
+static ssize_t size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dax_dev *dax_dev = dev_get_drvdata(dev);
+	unsigned long long size = 0;
+	int i;
+
+	for (i = 0; i < dax_dev->num_resources; i++)
+		size += resource_size(&dax_dev->res[i]);
+
+	return sprintf(buf, "%llu\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static struct attribute *dax_device_attributes[] = {
+	&dev_attr_size.attr,
+	NULL,
+};
+
+static const struct attribute_group dax_device_attribute_group = {
+	.attrs = dax_device_attributes,
+};
+
+static const struct attribute_group *dax_attribute_groups[] = {
+	&dax_device_attribute_group,
+	NULL,
+};
+
+static void unregister_dax_dev(void *_dev)
+{
+	struct device *dev = _dev;
+	struct dax_dev *dax_dev = dev_get_drvdata(dev);
+	struct dax_region *dax_region = dax_dev->region;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/*
+	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
+	 * ensuring that any fault handlers that might have seen
+	 * dax_dev->alive == true, have completed.  Any fault handlers
+	 * that start after synchronize_rcu() has started will abort
+	 * upon seeing dax_dev->alive == false.
+	 */
+	dax_dev->alive = false;
+	synchronize_rcu();
+
+	get_device(dev);
+	device_unregister(dev);
+	ida_simple_remove(&dax_region->ida, dax_dev->id);
+	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
+	put_device(dev);
+	dax_dev_put(dax_dev);
+}
+
+int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
+		int count)
+{
+	struct device *parent = dax_region->dev;
+	struct dax_dev *dax_dev;
+	struct device *dev;
+	int rc, minor;
+	dev_t dev_t;
+
+	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
+	if (!dax_dev)
+		return -ENOMEM;
+	memcpy(dax_dev->res, res, sizeof(*res) * count);
+	dax_dev->num_resources = count;
+	kref_init(&dax_dev->kref);
+	dax_dev->alive = true;
+	dax_dev->region = dax_region;
+	kref_get(&dax_region->kref);
+
+	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+	if (dax_dev->id < 0) {
+		rc = dax_dev->id;
+		goto err_id;
+	}
+
+	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
+	if (minor < 0) {
+		rc = minor;
+		goto err_minor;
+	}
+
+	dev_t = MKDEV(dax_major, minor);
+	dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
+			dax_attribute_groups, "dax%d.%d", dax_region->id,
+			dax_dev->id);
+	if (IS_ERR(dev)) {
+		rc = PTR_ERR(dev);
+		goto err_create;
+	}
+	dax_dev->dev = dev;
+
+	rc = devm_add_action(dax_region->dev, unregister_dax_dev, dev);
+	if (rc) {
+		unregister_dax_dev(dev);
+		return rc;
+	}
+
+	return 0;
+
+ err_create:
+	ida_simple_remove(&dax_minor_ida, minor);
+ err_minor:
+	ida_simple_remove(&dax_region->ida, dax_dev->id);
+ err_id:
+	dax_dev_put(dax_dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(devm_create_dax_dev);
+
+/* return an unmapped area aligned to the dax region specified alignment */
+static unsigned long dax_dev_get_unmapped_area(struct file *filp,
+		unsigned long addr, unsigned long len, unsigned long pgoff,
+		unsigned long flags)
+{
+	unsigned long off, off_end, off_align, len_align, addr_align, align;
+	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
+	struct dax_region *dax_region;
+
+	if (!dax_dev || addr)
+		goto out;
+
+	dax_region = dax_dev->region;
+	align = dax_region->align;
+	off = pgoff << PAGE_SHIFT;
+	off_end = off + len;
+	off_align = round_up(off, align);
+
+	if ((off_end <= off_align) || ((off_end - off_align) < align))
+		goto out;
+
+	len_align = len + align;
+	if ((off + len_align) < off)
+		goto out;
+
+	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+			pgoff, flags);
+	if (!IS_ERR_VALUE(addr_align)) {
+		addr_align += (off - addr_align) & (align - 1);
+		return addr_align;
+	}
+ out:
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+static int __match_devt(struct device *dev, const void *data)
+{
+	const dev_t *devt = data;
+
+	return dev->devt == *devt;
+}
+
+static struct device *dax_dev_find(dev_t dev_t)
+{
+	return class_find_device(dax_class, NULL, &dev_t, __match_devt);
+}
+
+static int dax_dev_open(struct inode *inode, struct file *filp)
+{
+	struct dax_dev *dax_dev = NULL;
+	struct device *dev;
+
+	dev = dax_dev_find(inode->i_rdev);
+	if (!dev)
+		return -ENXIO;
+
+	device_lock(dev);
+	dax_dev = dev_get_drvdata(dev);
+	if (dax_dev) {
+		dev_dbg(dev, "%s\n", __func__);
+		filp->private_data = dax_dev;
+		kref_get(&dax_dev->kref);
+		inode->i_flags = S_DAX;
+	}
+	device_unlock(dev);
+
+	if (!dax_dev) {
+		put_device(dev);
+		return -ENXIO;
+	}
+	return 0;
+}
+
+static int dax_dev_release(struct inode *inode, struct file *filp)
+{
+	struct dax_dev *dax_dev = filp->private_data;
+	struct device *dev = dax_dev->dev;
+
+	dev_dbg(dax_dev->dev, "%s\n", __func__);
+	dax_dev_put(dax_dev);
+	put_device(dev);
+
+	return 0;
+}
+
+static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
+		const char *func)
+{
+	struct dax_region *dax_region = dax_dev->region;
+	struct device *dev = dax_dev->dev;
+	unsigned long mask;
+
+	if (!dax_dev->alive)
+		return -ENXIO;
+
+	/* prevent private / writable mappings from being established */
+	if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
+		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+				current->comm, func);
+		return -EINVAL;
+	}
+
+	mask = dax_region->align - 1;
+	if (vma->vm_start & mask || vma->vm_end & mask) {
+		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+				current->comm, func, vma->vm_start, vma->vm_end,
+				mask);
+		return -EINVAL;
+	}
+
+	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
+			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
+		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+				current->comm, func);
+		return -EINVAL;
+	}
+
+	if (!vma_is_dax(vma)) {
+		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+				current->comm, func);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
+		unsigned long size)
+{
+	struct resource *res;
+	phys_addr_t phys;
+	int i;
+
+	for (i = 0; i < dax_dev->num_resources; i++) {
+		res = &dax_dev->res[i];
+		phys = pgoff * PAGE_SIZE + res->start;
+		if (phys >= res->start && phys <= res->end)
+			break;
+		pgoff -= PHYS_PFN(resource_size(res));
+	}
+
+	if (i < dax_dev->num_resources) {
+		res = &dax_dev->res[i];
+		if (phys + size - 1 <= res->end)
+			return phys;
+	}
+
+	return -1;
+}
+
+static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
+		struct vm_fault *vmf)
+{
+	unsigned long vaddr = (unsigned long) vmf->virtual_address;
+	struct device *dev = dax_dev->dev;
+	struct dax_region *dax_region;
+	int rc = VM_FAULT_SIGBUS;
+	phys_addr_t phys;
+	pfn_t pfn;
+
+	if (check_vma(dax_dev, vma, __func__))
+		return VM_FAULT_SIGBUS;
+
+	dax_region = dax_dev->region;
+	if (dax_region->align > PAGE_SIZE) {
+		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
+	if (phys == -1) {
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+				vmf->pgoff);
+		return VM_FAULT_SIGBUS;
+	}
+
+	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+	rc = vm_insert_mixed(vma, vaddr, pfn);
+
+	if (rc == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (rc < 0 && rc != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	int rc;
+	struct file *filp = vma->vm_file;
+	struct dax_dev *dax_dev = filp->private_data;
+
+	dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
+			? "write" : "read", vma->vm_start, vma->vm_end);
+	rcu_read_lock();
+	rc = __dax_dev_fault(dax_dev, vma, vmf);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
+		struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
+		unsigned int flags)
+{
+	unsigned long pmd_addr = addr & PMD_MASK;
+	struct device *dev = dax_dev->dev;
+	struct dax_region *dax_region;
+	phys_addr_t phys;
+	pgoff_t pgoff;
+	pfn_t pfn;
+
+	if (check_vma(dax_dev, vma, __func__))
+		return VM_FAULT_SIGBUS;
+
+	dax_region = dax_dev->region;
+	if (dax_region->align > PMD_SIZE) {
+		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	/* dax pmd mappings require pfn_t_devmap() */
+	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+		dev_dbg(dev, "%s: dax region lacks devmap pfn_flags\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	pgoff = linear_page_index(vma, pmd_addr);
+	phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+	if (phys == -1) {
+		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+				pgoff);
+		return VM_FAULT_SIGBUS;
+	}
+
+	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
+			flags & FAULT_FLAG_WRITE);
+}
+
+static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+		pmd_t *pmd, unsigned int flags)
+{
+	int rc;
+	struct file *filp = vma->vm_file;
+	struct dax_dev *dax_dev = filp->private_data;
+
+	dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+			current->comm, (flags & FAULT_FLAG_WRITE)
+			? "write" : "read", vma->vm_start, vma->vm_end);
+
+	rcu_read_lock();
+	rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static void dax_dev_vm_open(struct vm_area_struct *vma)
+{
+	struct file *filp = vma->vm_file;
+	struct dax_dev *dax_dev = filp->private_data;
+
+	dev_dbg(dax_dev->dev, "%s\n", __func__);
+	kref_get(&dax_dev->kref);
+}
+
+static void dax_dev_vm_close(struct vm_area_struct *vma)
+{
+	struct file *filp = vma->vm_file;
+	struct dax_dev *dax_dev = filp->private_data;
+
+	dev_dbg(dax_dev->dev, "%s\n", __func__);
+	dax_dev_put(dax_dev);
+}
+
+static const struct vm_operations_struct dax_dev_vm_ops = {
+	.fault = dax_dev_fault,
+	.pmd_fault = dax_dev_pmd_fault,
+	.open = dax_dev_vm_open,
+	.close = dax_dev_vm_close,
+};
+
+static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct dax_dev *dax_dev = filp->private_data;
+	int rc;
+
+	dev_dbg(dax_dev->dev, "%s\n", __func__);
+
+	rc = check_vma(dax_dev, vma, __func__);
+	if (rc)
+		return rc;
+
+	kref_get(&dax_dev->kref);
+	vma->vm_ops = &dax_dev_vm_ops;
+	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+	return 0;
+
+}
+
+static const struct file_operations dax_fops = {
+	.llseek = noop_llseek,
+	.owner = THIS_MODULE,
+	.open = dax_dev_open,
+	.release = dax_dev_release,
+	.get_unmapped_area = dax_dev_get_unmapped_area,
+	.mmap = dax_dev_mmap,
+};
+
+static int __init dax_init(void)
+{
+	int rc;
+
+	rc = register_chrdev(0, "dax", &dax_fops);
+	if (rc < 0)
+		return rc;
+	dax_major = rc;
+
+	dax_class = class_create(THIS_MODULE, "dax");
+	if (IS_ERR(dax_class)) {
+		unregister_chrdev(dax_major, "dax");
+		return PTR_ERR(dax_class);
+	}
+
+	return 0;
+}
+
+static void __exit dax_exit(void)
+{
+	class_destroy(dax_class);
+	unregister_chrdev(dax_major, "dax");
+	ida_destroy(&dax_minor_ida);
+}
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+subsys_initcall(dax_init);
+module_exit(dax_exit);
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
new file mode 100644
index 0000000..d8b8f1f
--- /dev/null
+++ b/drivers/dax/dax.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __DAX_H__
+#define __DAX_H__
+struct device;
+struct resource;
+struct dax_region;
+void dax_region_put(struct dax_region *dax_region);
+struct dax_region *alloc_dax_region(struct device *parent,
+		int region_id, struct resource *res, unsigned int align,
+		void *addr, unsigned long flags);
+int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
+		int count);
+#endif /* __DAX_H__ */
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
new file mode 100644
index 0000000..55d510e
--- /dev/null
+++ b/drivers/dax/pmem.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/percpu-refcount.h>
+#include <linux/memremap.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include "../nvdimm/pfn.h"
+#include "../nvdimm/nd.h"
+#include "dax.h"
+
+struct dax_pmem {
+	struct device *dev;
+	struct percpu_ref ref;
+	struct completion cmp;
+};
+
+struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
+{
+	return container_of(ref, struct dax_pmem, ref);
+}
+
+static void dax_pmem_percpu_release(struct percpu_ref *ref)
+{
+	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+
+	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	complete(&dax_pmem->cmp);
+}
+
+static void dax_pmem_percpu_exit(void *data)
+{
+	struct percpu_ref *ref = data;
+	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+
+	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	percpu_ref_exit(ref);
+	wait_for_completion(&dax_pmem->cmp);
+}
+
+static void dax_pmem_percpu_kill(void *data)
+{
+	struct percpu_ref *ref = data;
+	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+
+	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	percpu_ref_kill(ref);
+}
+
+static int dax_pmem_probe(struct device *dev)
+{
+	int rc;
+	void *addr;
+	struct resource res;
+	struct nd_pfn_sb *pfn_sb;
+	struct dax_pmem *dax_pmem;
+	struct nd_region *nd_region;
+	struct nd_namespace_io *nsio;
+	struct dax_region *dax_region;
+	struct nd_namespace_common *ndns;
+	struct nd_dax *nd_dax = to_nd_dax(dev);
+	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+	struct vmem_altmap __altmap, *altmap = NULL;
+
+	ndns = nvdimm_namespace_common_probe(dev);
+	if (IS_ERR(ndns))
+		return PTR_ERR(ndns);
+	nsio = to_nd_namespace_io(&ndns->dev);
+
+	/* parse the 'pfn' info block via ->rw_bytes */
+	devm_nsio_enable(dev, nsio);
+	altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
+	if (IS_ERR(altmap))
+		return PTR_ERR(altmap);
+	devm_nsio_disable(dev, nsio);
+
+	pfn_sb = nd_pfn->pfn_sb;
+
+	if (!devm_request_mem_region(dev, nsio->res.start,
+				resource_size(&nsio->res), dev_name(dev))) {
+		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
+		return -EBUSY;
+	}
+
+	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
+	if (!dax_pmem)
+		return -ENOMEM;
+
+	dax_pmem->dev = dev;
+	init_completion(&dax_pmem->cmp);
+	rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
+			GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+	if (rc) {
+		dax_pmem_percpu_exit(&dax_pmem->ref);
+		return rc;
+	}
+
+	addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+
+	rc = devm_add_action(dev, dax_pmem_percpu_kill, &dax_pmem->ref);
+	if (rc) {
+		dax_pmem_percpu_kill(&dax_pmem->ref);
+		return rc;
+	}
+
+	nd_region = to_nd_region(dev->parent);
+	dax_region = alloc_dax_region(dev, nd_region->id, &res,
+			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
+	if (!dax_region)
+		return -ENOMEM;
+
+	/* TODO: support for subdividing a dax region... */
+	rc = devm_create_dax_dev(dax_region, &res, 1);
+
+	/* child dax_dev instances now own the lifetime of the dax_region */
+	dax_region_put(dax_region);
+
+	return rc;
+}
+
+static struct nd_device_driver dax_pmem_driver = {
+	.probe = dax_pmem_probe,
+	.drv = {
+		.name = "dax_pmem",
+	},
+	.type = ND_DRIVER_DAX_PMEM,
+};
+
+static int __init dax_pmem_init(void)
+{
+	return nd_driver_register(&dax_pmem_driver);
+}
+module_init(dax_pmem_init);
+
+static void __exit dax_pmem_exit(void)
+{
+	driver_unregister(&dax_pmem_driver.drv);
+}
+module_exit(dax_pmem_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
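
The probe path above ties the lifetime of the device-DAX mapping to the
device via a percpu_ref whose release callback completes a completion,
with devm actions registered so that the reference is first killed and
then drained when the driver detaches. Below is a minimal, self-contained
sketch of that same pattern; it is illustrative only, not part of this
patch, and the demo_* names are hypothetical:

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/gfp.h>
	#include <linux/percpu-refcount.h>

	struct demo_ref {
		struct percpu_ref ref;
		struct completion cmp;
	};

	static void demo_ref_release(struct percpu_ref *ref)
	{
		struct demo_ref *d = container_of(ref, struct demo_ref, ref);

		complete(&d->cmp);		/* last reference dropped */
	}

	static void demo_ref_kill(void *data)
	{
		percpu_ref_kill(data);		/* stop new refs, start draining */
	}

	static void demo_ref_exit(void *data)
	{
		struct percpu_ref *ref = data;
		struct demo_ref *d = container_of(ref, struct demo_ref, ref);

		percpu_ref_exit(ref);
		wait_for_completion(&d->cmp);	/* wait for the final put */
	}

	static int demo_setup(struct device *dev, struct demo_ref *d)
	{
		int rc;

		init_completion(&d->cmp);
		rc = percpu_ref_init(&d->ref, demo_ref_release, 0, GFP_KERNEL);
		if (rc)
			return rc;

		/* devm actions run in reverse order on teardown: kill, then exit */
		rc = devm_add_action(dev, demo_ref_exit, &d->ref);
		if (rc) {
			demo_ref_exit(&d->ref);
			return rc;
		}
		return devm_add_action(dev, demo_ref_kill, &d->ref);
	}
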
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07e..6355ab3 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/reservation.h>
+#include <linux/mm.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -90,7 +91,7 @@
 	dmabuf = file->private_data;
 
 	/* check for overflowing the buffer's size */
-	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	if (vma->vm_pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
@@ -723,11 +724,11 @@
 		return -EINVAL;
 
 	/* check for offset overflow */
-	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+	if (pgoff + vma_pages(vma) < pgoff)
 		return -EOVERFLOW;
 
 	/* check for overflowing the buffer's size */
-	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+	if (pgoff + vma_pages(vma) >
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
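
The hunks above replace the open-coded page count with vma_pages(). This
is a cosmetic cleanup: vma_pages() in include/linux/mm.h computes the same
value, so the bounds checks are unchanged. Roughly:

	/* include/linux/mm.h, shown for reference */
	static inline unsigned long vma_pages(struct vm_area_struct *vma)
	{
		return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	}

	/* so the check above is equivalent to the old form */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;
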
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd572..9566a62 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
 #include <linux/reservation.h>
 #include <linux/export.h>
 
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have one exclusive fence attached (normally associated with
+ * write operations) or N shared fences (read operations).  RCU is
+ * used to protect read access to the fences against concurrent,
+ * locked write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
@@ -43,9 +54,17 @@
 
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
  */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@
 		fence_put(old_fence);
 }
 
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
  * Add a fence to a shared slot.  obj->lock must be held, and
  * reservation_object_reserve_shared() must have been called.
  */
@@ -200,6 +223,13 @@
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence)
 {
@@ -233,6 +263,18 @@
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without the update-side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by the caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      struct fence **pfence_excl,
 				      unsigned *pshared_count,
@@ -319,6 +361,18 @@
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 
+/**
+ * reservation_object_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait only on the exclusive fence
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
@@ -416,6 +470,16 @@
 	return ret;
 }
 
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * True if all fences are signaled, else false.
+ */
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
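
The kernel-doc added above spells out the expected calling convention:
shared-slot space is reserved and fences are added with obj->lock (a
ww_mutex) held, while the *_rcu() helpers may be used without it. A short
consumer sketch, assuming a fence the caller already owns (illustrative
only, not part of this patch):

	#include <linux/fence.h>
	#include <linux/jiffies.h>
	#include <linux/reservation.h>

	/* Writer: publish an exclusive fence under the ww_mutex. */
	static void attach_write_fence(struct reservation_object *resv,
				       struct fence *fence)
	{
		ww_mutex_lock(&resv->lock, NULL);
		reservation_object_add_excl_fence(resv, fence);
		ww_mutex_unlock(&resv->lock);
	}

	/* Reader: reserve a shared slot first, then add the fence. */
	static int attach_read_fence(struct reservation_object *resv,
				     struct fence *fence)
	{
		int ret;

		ww_mutex_lock(&resv->lock, NULL);
		ret = reservation_object_reserve_shared(resv);
		if (!ret)
			reservation_object_add_shared_fence(resv, fence);
		ww_mutex_unlock(&resv->lock);
		return ret;
	}

	/* Lockless side: wait up to one second for all attached fences. */
	static long wait_for_buffer_idle(struct reservation_object *resv)
	{
		return reservation_object_wait_timeout_rcu(resv, true, false, HZ);
	}
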
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e0df233..57aa227 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -461,25 +461,25 @@
 
 	/* Source burst */
 	ret = convert_burst(sconfig->src_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
 
 	/* Destination burst */
 	ret = convert_burst(sconfig->dst_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
 
 	/* Source bus width */
 	ret = convert_buswidth(sconfig->src_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
 
 	/* Destination bus width */
 	ret = convert_buswidth(sconfig->dst_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
 
@@ -518,25 +518,25 @@
 
 	/* Source burst */
 	ret = convert_burst(sconfig->src_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
 
 	/* Destination burst */
 	ret = convert_burst(sconfig->dst_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
 
 	/* Source bus width */
 	ret = convert_buswidth(sconfig->src_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
 
 	/* Destination bus width */
 	ret = convert_buswidth(sconfig->dst_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
 
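
The conversions above work because convert_burst() and convert_buswidth()
return a plain int: a small register value on success or a negative errno
on failure. A direct "ret < 0" test is the natural check for that
convention; IS_ERR_VALUE() is meant for unsigned long values in the
error-pointer range, so using it on these ints was misleading. A
simplified sketch of the convention (illustrative, not the driver code):

	#include <linux/errno.h>
	#include <linux/types.h>

	/* returns the register encoding for a burst length, or -EINVAL */
	static int convert_burst(u32 maxburst)
	{
		switch (maxburst) {
		case 1:
			return 0;
		case 8:
			return 2;
		default:
			return -EINVAL;	/* plain negative errno in an int */
		}
	}

	static int demo_set_burst(u32 maxburst, u32 *cfg)
	{
		int ret = convert_burst(maxburst);

		if (ret < 0)		/* simple sign check, no IS_ERR_VALUE() */
			return ret;
		*cfg |= ret;		/* driver shifts this into the burst field */
		return 0;
	}
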
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6aa256b0..c3ee3ad 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -565,7 +565,8 @@
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
 
-		edac_mod_work(&mci->work, value);
+		if (mci->op_state == OP_RUNNING_POLL)
+			edac_mod_work(&mci->work, value);
 	}
 	mutex_unlock(&mem_ctls_mutex);
 }
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index b4d0bf6..6744d88 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -239,8 +239,11 @@
 	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };
 
-#define RIR_RNK_TGT(reg)		GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg)		GET_BITFIELD(reg,  2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 
 /* Device 16, functions 2-7 */
 
@@ -326,6 +329,7 @@
 struct pci_id_table {
 	const struct pci_id_descr	*descr;
 	int				n_devs;
+	enum type			type;
 };
 
 struct sbridge_dev {
@@ -394,9 +398,14 @@
 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)		},
 };
 
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) {	\
+	.descr = A,			\
+	.n_devs = ARRAY_SIZE(A),	\
+	.type = T			\
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -463,7 +472,7 @@
 };
 
 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -536,7 +545,7 @@
 };
 
 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -580,7 +589,7 @@
 };
 
 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
 	{0,}
 };
 
@@ -648,7 +657,7 @@
 };
 
 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -1894,14 +1903,14 @@
 				pci_read_config_dword(pvt->pci_tad[i],
 						      rir_offset[j][k],
 						      &reg);
-				tmp_mb = RIR_OFFSET(reg) << 6;
+				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
 
 				gb = div_u64_rem(tmp_mb, 1024, &mb);
 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
 					 i, j, k,
 					 gb, (mb*1000)/1024,
 					 ((u64)tmp_mb) << 20L,
-					 (u32)RIR_RNK_TGT(reg),
+					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
 					 reg);
 			}
 		}
@@ -2234,7 +2243,7 @@
 	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
 			      rir_offset[n_rir][idx],
 			      &reg);
-	*rank = RIR_RNK_TGT(reg);
+	*rank = RIR_RNK_TGT(pvt->info.type, reg);
 
 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
 		 n_rir,
@@ -3357,12 +3366,12 @@
 #define ICPU(model, table) \
 	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
 
-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
 	ICPU(0x2d, pci_dev_descr_sbridge_table),	/* SANDY_BRIDGE */
 	ICPU(0x3e, pci_dev_descr_ibridge_table),	/* IVY_BRIDGE */
 	ICPU(0x3f, pci_dev_descr_haswell_table),	/* HASWELL */
 	ICPU(0x4f, pci_dev_descr_broadwell_table),	/* BROADWELL */
+	ICPU(0x56, pci_dev_descr_broadwell_table),	/* BROADWELL-DE */
 	ICPU(0x57, pci_dev_descr_knl_table),		/* KNIGHTS_LANDING */
 	{ }
 };
@@ -3398,7 +3407,7 @@
 			 mc, mc + 1, num_mc);
 
 		sbridge_dev->mc = mc++;
-		rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+		rc = sbridge_register_mci(sbridge_dev, ptable->type);
 		if (unlikely(rc < 0))
 			goto fail1;
 	}
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014d..fc5f197 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
-#include <mach/irqs.h>
 
 #define LPC32XX_GPIO_P3_INP_STATE		_GPREG(0x000)
 #define LPC32XX_GPIO_P3_OUTP_SET		_GPREG(0x004)
@@ -371,61 +370,16 @@
 
 static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
 {
-	return IRQ_LPC32XX_P0_P1_IRQ;
-}
-
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
-	IRQ_LPC32XX_GPIO_00,
-	IRQ_LPC32XX_GPIO_01,
-	IRQ_LPC32XX_GPIO_02,
-	IRQ_LPC32XX_GPIO_03,
-	IRQ_LPC32XX_GPIO_04,
-	IRQ_LPC32XX_GPIO_05,
-};
-
-static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
-{
-	if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
-		return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
 	return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
-	IRQ_LPC32XX_GPI_00,
-	IRQ_LPC32XX_GPI_01,
-	IRQ_LPC32XX_GPI_02,
-	IRQ_LPC32XX_GPI_03,
-	IRQ_LPC32XX_GPI_04,
-	IRQ_LPC32XX_GPI_05,
-	IRQ_LPC32XX_GPI_06,
-	IRQ_LPC32XX_GPI_07,
-	IRQ_LPC32XX_GPI_08,
-	IRQ_LPC32XX_GPI_09,
-	-ENXIO, /* 10 */
-	-ENXIO, /* 11 */
-	-ENXIO, /* 12 */
-	-ENXIO, /* 13 */
-	-ENXIO, /* 14 */
-	-ENXIO, /* 15 */
-	-ENXIO, /* 16 */
-	-ENXIO, /* 17 */
-	-ENXIO, /* 18 */
-	IRQ_LPC32XX_GPI_19,
-	-ENXIO, /* 20 */
-	-ENXIO, /* 21 */
-	-ENXIO, /* 22 */
-	-ENXIO, /* 23 */
-	-ENXIO, /* 24 */
-	-ENXIO, /* 25 */
-	-ENXIO, /* 26 */
-	-ENXIO, /* 27 */
-	IRQ_LPC32XX_GPI_28,
-};
+static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
+{
+	return -ENXIO;
+}
 
 static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
 {
-	if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
-		return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
 	return -ENXIO;
 }
 
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 08897dc..1a33a19 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -393,7 +393,7 @@
 		irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
 	else
 		irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
-	if (IS_ERR_VALUE(irq_base)) {
+	if (irq_base < 0) {
 		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
 		return irq_base;
 	}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904..24f60d2 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <uapi/linux/gpio.h>
 
 #include "gpiolib.h"
@@ -316,7 +317,7 @@
 {
 	struct gpio_device *gdev = filp->private_data;
 	struct gpio_chip *chip = gdev->chip;
-	int __user *ip = (int __user *)arg;
+	void __user *ip = (void __user *)arg;
 
 	/* We fail any subsequent ioctl():s when the chip is gone */
 	if (!chip)
@@ -388,6 +389,14 @@
 	return -EINVAL;
 }
 
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 /**
  * gpio_chrdev_open() - open the chardev for ioctl operations
  * @inode: inode for this chardev
@@ -431,7 +440,9 @@
 	.owner = THIS_MODULE,
 	.llseek = noop_llseek,
 	.unlocked_ioctl = gpio_ioctl,
-	.compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = gpio_ioctl_compat,
+#endif
 };
 
 static void gpiodevice_release(struct device *dev)
@@ -618,6 +629,8 @@
 		goto err_free_label;
 	}
 
+	spin_unlock_irqrestore(&gpio_lock, flags);
+
 	for (i = 0; i < chip->ngpio; i++) {
 		struct gpio_desc *desc = &gdev->descs[i];
 
@@ -649,8 +662,6 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&gpio_lock, flags);
-
 #ifdef CONFIG_PINCTRL
 	INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
@@ -1356,10 +1367,13 @@
 /*
  * This descriptor validation needs to be inserted verbatim into each
  * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
  */
 #define VALIDATE_DESC(desc) do { \
-	if (!desc || !desc->gdev) { \
+	if (!desc) \
+		return 0; \
+	if (!desc->gdev) { \
 		pr_warn("%s: invalid GPIO\n", __func__); \
 		return -EINVAL; \
 	} \
@@ -1370,7 +1384,9 @@
 	} } while (0)
 
 #define VALIDATE_DESC_VOID(desc) do { \
-	if (!desc || !desc->gdev) { \
+	if (!desc) \
+		return; \
+	if (!desc->gdev) { \
 		pr_warn("%s: invalid GPIO\n", __func__); \
 		return; \
 	} \
@@ -2066,17 +2082,30 @@
  */
 int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
 {
-	if (offset >= chip->ngpio)
-		return -EINVAL;
+	struct gpio_desc *desc;
 
-	if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+	desc = gpiochip_get_desc(chip, offset);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	/* Flush direction if something changed behind our back */
+	if (chip->get_direction) {
+		int dir = chip->get_direction(chip, offset);
+
+		if (dir)
+			clear_bit(FLAG_IS_OUT, &desc->flags);
+		else
+			set_bit(FLAG_IS_OUT, &desc->flags);
+	}
+
+	if (test_bit(FLAG_IS_OUT, &desc->flags)) {
 		chip_err(chip,
 			  "%s: tried to flag a GPIO set as output for IRQ\n",
 			  __func__);
 		return -EIO;
 	}
 
-	set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+	set_bit(FLAG_USED_AS_IRQ, &desc->flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
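
Two of the gpiolib changes above are related: VALIDATE_DESC() and
VALIDATE_DESC_VOID() now treat a NULL descriptor as an optional GPIO and
return early without warning, which is what gpiod_get_optional() and
friends hand back when the line is not described. A hypothetical consumer
relying on this behaviour might look like the sketch below (illustrative
only; the demo_* names and the "reset" line are assumptions):

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int demo_hw_reset(struct device *dev)
	{
		struct gpio_desc *reset;

		/* NULL if the "reset" GPIO is simply absent */
		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);	/* e.g. -EPROBE_DEFER */

		/* safe even when reset == NULL: the calls just return */
		gpiod_set_value_cansleep(reset, 1);
		usleep_range(1000, 2000);
		gpiod_set_value_cansleep(reset, 0);
		return 0;
	}
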
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f2a74d0..fc35731 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -52,6 +52,7 @@
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_DEFERRED_IO
 	help
 	  FBDEV helpers for KMS drivers.
 
@@ -252,6 +253,8 @@
 
 source "drivers/gpu/drm/shmobile/Kconfig"
 
+source "drivers/gpu/drm/sun4i/Kconfig"
+
 source "drivers/gpu/drm/omapdrm/Kconfig"
 
 source "drivers/gpu/drm/tilcdc/Kconfig"
@@ -281,3 +284,9 @@
 source "drivers/gpu/drm/vc4/Kconfig"
 
 source "drivers/gpu/drm/etnaviv/Kconfig"
+
+source "drivers/gpu/drm/arc/Kconfig"
+
+source "drivers/gpu/drm/hisilicon/Kconfig"
+
+source "drivers/gpu/drm/mediatek/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6eb94fc..be43afb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -1,4 +1,4 @@
-#
+
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
@@ -23,7 +23,7 @@
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
-		drm_kms_helper_common.o
+		drm_kms_helper_common.o drm_dp_dual_mode_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -65,6 +65,7 @@
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-y			+= omapdrm/
+obj-$(CONFIG_DRM_SUN4I) += sun4i/
 obj-y			+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_BOCHS) += bochs/
@@ -73,8 +74,11 @@
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STI) += sti/
 obj-$(CONFIG_DRM_IMX) += imx/
+obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
 obj-y			+= i2c/
 obj-y			+= panel/
 obj-y			+= bridge/
 obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
 obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
+obj-$(CONFIG_DRM_ARCPGU)+= arc/
+obj-y			+= hisilicon/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index ca77ec1..e503e3d 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,6 +2,7 @@
 
 config DRM_AMD_ACP
        bool "Enable AMD Audio CoProcessor IP support"
+       depends on DRM_AMDGPU
        select MFD_CORE
        select PM_GENERIC_DOMAINS if PM
        help
diff --git a/drivers/gpu/drm/amd/acp/acp_hw.c b/drivers/gpu/drm/amd/acp/acp_hw.c
index 7af83f1..c7d7205 100644
--- a/drivers/gpu/drm/amd/acp/acp_hw.c
+++ b/drivers/gpu/drm/amd/acp/acp_hw.c
@@ -34,7 +34,7 @@
 
 #define mmACP_AZALIA_I2S_SELECT 0x51d4
 
-int amd_acp_hw_init(void *cgs_device,
+int amd_acp_hw_init(struct cgs_device *cgs_device,
 		    unsigned acp_version_major, unsigned acp_version_minor)
 {
 	unsigned int acp_mode = ACP_MODE_I2S;
diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
index bccf47b..a72ddb2f 100644
--- a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -28,7 +28,7 @@
 #include "cgs_linux.h"
 #include "cgs_common.h"
 
-int amd_acp_hw_init(void *cgs_device,
+int amd_acp_hw_init(struct cgs_device *cgs_device,
 		    unsigned acp_version_major, unsigned acp_version_minor);
 
 #endif /* _ACP_GFX_IF_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index b30fcfa..7335c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -15,3 +15,13 @@
 	help
 	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
 	  selected, to enable full userptr support.
+
+config DRM_AMDGPU_GART_DEBUGFS
+	bool "Allow GART access through debugfs"
+	depends on DRM_AMDGPU
+	depends on DEBUG_FS
+	default n
+	help
+	  Selecting this option creates a debugfs file to inspect the mapped
+	  pages. This uses more memory for housekeeping; enable it only for
+	  debugging.
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1bcbade..01c36b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -283,7 +283,8 @@
 	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
-			struct amdgpu_ib *ib);
+			struct amdgpu_ib *ib,
+			unsigned vm_id, bool ctx_switch);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
 	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -302,6 +303,8 @@
 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 	/* pad the indirect buffer to the necessary number of dw */
 	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
 };
 
 /*
@@ -365,13 +368,6 @@
 #define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 
-struct amdgpu_user_fence {
-	/* write-back bo */
-	struct amdgpu_bo 	*bo;
-	/* write-back address offset to bo start */
-	uint32_t                offset;
-};
-
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
@@ -391,6 +387,14 @@
 /*
  * TTM.
  */
+
+#define AMDGPU_TTM_LRU_SIZE	20
+
+struct amdgpu_mman_lru {
+	struct list_head		*lru[TTM_NUM_MEM_TYPES];
+	struct list_head		*swap_lru;
+};
+
 struct amdgpu_mman {
 	struct ttm_bo_global_ref        bo_global_ref;
 	struct drm_global_reference	mem_global_ref;
@@ -408,6 +412,9 @@
 	struct amdgpu_ring			*buffer_funcs_ring;
 	/* Scheduler entity for buffer moves */
 	struct amd_sched_entity			entity;
+
+	/* custom LRU management */
+	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -494,9 +501,10 @@
 				struct drm_file *file_priv);
 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
-							struct dma_buf_attachment *attach,
-							struct sg_table *sg);
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+				 struct dma_buf_attachment *attach,
+				 struct sg_table *sg);
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags);
@@ -586,11 +594,16 @@
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
+int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
+			     struct fence *fence);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
 
 /*
  * GART structures, functions & helpers
@@ -609,8 +622,9 @@
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	struct page			**pages;
-	dma_addr_t			*pages_addr;
+#endif
 	bool				ready;
 	const struct amdgpu_gart_funcs *gart_funcs;
 };
@@ -709,6 +723,7 @@
 	unsigned			shared_count;
 	struct fence			**shared;
 	struct fence_cb			cb;
+	bool				async;
 };
 
 
@@ -721,17 +736,7 @@
 	uint32_t			length_dw;
 	uint64_t			gpu_addr;
 	uint32_t			*ptr;
-	struct amdgpu_user_fence        *user;
-	struct amdgpu_vm		*vm;
-	unsigned			vm_id;
-	uint64_t			vm_pd_addr;
-	struct amdgpu_ctx		*ctx;
-	uint32_t			gds_base, gds_size;
-	uint32_t			gws_base, gws_size;
-	uint32_t			oa_base, oa_size;
 	uint32_t			flags;
-	/* resulting sequence number */
-	uint64_t			sequence;
 };
 
 enum amdgpu_ring_type {
@@ -742,22 +747,25 @@
 	AMDGPU_RING_TYPE_VCE
 };
 
-extern struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job);
+		     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 			     struct amdgpu_job **job);
+
 void amdgpu_job_free(struct amdgpu_job *job);
+void amdgpu_job_free_func(struct kref *refcount);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f);
+void amdgpu_job_timeout_func(struct work_struct *work);
 
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler 	sched;
+	struct amd_gpu_scheduler	sched;
 
 	spinlock_t              fence_lock;
 	struct amdgpu_bo	*ring_obj;
@@ -785,9 +793,13 @@
 	unsigned		wptr_offs;
 	unsigned		next_rptr_offs;
 	unsigned		fence_offs;
-	struct amdgpu_ctx	*current_ctx;
+	uint64_t		current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	unsigned		cond_exe_offs;
+	u64				cond_exe_gpu_addr;
+	volatile u32		*cond_exe_cpu_addr;
+	int			vmid;
 };
 
 /*
@@ -830,13 +842,6 @@
 	uint64_t			addr;
 };
 
-struct amdgpu_vm_id {
-	struct amdgpu_vm_manager_id	*mgr_id;
-	uint64_t			pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence			*flushed_updates;
-};
-
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root		va;
@@ -862,19 +867,29 @@
 	struct amdgpu_vm_pt	*page_tables;
 
 	/* for id and flush management per ring */
-	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
 
 	/* protecting freed */
 	spinlock_t		freed_lock;
 
 	/* Scheduler entity for page table updates */
 	struct amd_sched_entity	entity;
+
+	/* client id */
+	u64                     client_id;
 };
 
-struct amdgpu_vm_manager_id {
+struct amdgpu_vm_id {
 	struct list_head	list;
-	struct fence		*active;
-	atomic_long_t		owner;
+	struct fence		*first;
+	struct amdgpu_sync	active;
+	struct fence		*last_flush;
+	struct amdgpu_ring      *last_user;
+	atomic64_t		owner;
+
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct fence		*flushed_updates;
 
 	uint32_t		gds_base;
 	uint32_t		gds_size;
@@ -889,7 +904,7 @@
 	struct mutex				lock;
 	unsigned				num_ids;
 	struct list_head			ids_lru;
-	struct amdgpu_vm_manager_id		ids[AMDGPU_NUM_VM];
+	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
 
 	uint32_t				max_pfn;
 	/* vram base address for page table entry  */
@@ -901,6 +916,8 @@
 	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
 	unsigned				vm_pte_num_rings;
 	atomic_t				vm_pte_next_ring;
+	/* client id counter */
+	atomic64_t				client_counter;
 };
 
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
@@ -916,11 +933,12 @@
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
 		      unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     unsigned vm_id, uint64_t pd_addr,
-		     uint32_t gds_base, uint32_t gds_size,
-		     uint32_t gws_base, uint32_t gws_size,
-		     uint32_t oa_base, uint32_t oa_size);
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+		    unsigned vm_id, uint64_t pd_addr,
+		    uint32_t gds_base, uint32_t gds_size,
+		    uint32_t gws_base, uint32_t gws_size,
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool vmid_switch);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1026,6 +1044,11 @@
  */
 #include "clearstate_defs.h"
 
+struct amdgpu_rlc_funcs {
+	void (*enter_safe_mode)(struct amdgpu_device *adev);
+	void (*exit_safe_mode)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_rlc {
 	/* for power gating */
 	struct amdgpu_bo	*save_restore_obj;
@@ -1044,6 +1067,24 @@
 	uint64_t		cp_table_gpu_addr;
 	volatile uint32_t	*cp_table_ptr;
 	u32                     cp_table_size;
+
+	/* safe mode for updating CG/PG state */
+	bool in_safe_mode;
+	const struct amdgpu_rlc_funcs *funcs;
+
+	/* for firmware data */
+	u32 save_and_restore_offset;
+	u32 clear_state_descriptor_offset;
+	u32 avail_scratch_ram_locations;
+	u32 reg_restore_list_size;
+	u32 reg_list_format_start;
+	u32 reg_list_format_separate_start;
+	u32 starting_offsets_start;
+	u32 reg_list_format_size_bytes;
+	u32 reg_list_size_bytes;
+
+	u32 *register_list_format;
+	u32 *register_restore;
 };
 
 struct amdgpu_mec {
@@ -1097,6 +1138,12 @@
 	uint32_t macrotile_mode_array[16];
 };
 
+struct amdgpu_cu_info {
+	uint32_t number; /* total active CU number */
+	uint32_t ao_cu_mask;
+	uint32_t bitmap[4][4];
+};
+
 struct amdgpu_gfx {
 	struct mutex			gpu_clock_mutex;
 	struct amdgpu_gca_config	config;
@@ -1129,17 +1176,19 @@
 	struct amdgpu_irq_src		priv_reg_irq;
 	struct amdgpu_irq_src		priv_inst_irq;
 	/* gfx status */
-	uint32_t gfx_current_status;
+	uint32_t			gfx_current_status;
 	/* ce ram size*/
-	unsigned ce_ram_size;
+	unsigned			ce_ram_size;
+	struct amdgpu_cu_info		cu_info;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+		    struct fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ib, struct fence *last_vm_update,
-		       struct fence **f);
+		       struct amdgpu_job *job, struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -1164,7 +1213,7 @@
 struct amdgpu_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
-	uint32_t		*kdata;
+	void			*kdata;
 };
 
 struct amdgpu_cs_parser {
@@ -1195,13 +1244,25 @@
 struct amdgpu_job {
 	struct amd_sched_job    base;
 	struct amdgpu_device	*adev;
+	struct amdgpu_vm	*vm;
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
 	struct fence		*fence; /* the hw fence */
 	uint32_t		num_ibs;
 	void			*owner;
-	struct amdgpu_user_fence uf;
+	uint64_t		ctx;
+	unsigned		vm_id;
+	uint64_t		vm_pd_addr;
+	uint32_t		gds_base, gds_size;
+	uint32_t		gws_base, gws_size;
+	uint32_t		oa_base, oa_size;
+
+	/* user fence handling */
+	struct amdgpu_bo	*uf_bo;
+	uint32_t		uf_offset;
+	uint64_t		uf_sequence;
+
 };
 #define to_amdgpu_job(sched_job)		\
 		container_of((sched_job), struct amdgpu_job, base)
@@ -1582,10 +1643,12 @@
 /*
  * UVD
  */
-#define AMDGPU_MAX_UVD_HANDLES	10
-#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES	10
+#define AMDGPU_MAX_UVD_HANDLES		40
+#define AMDGPU_UVD_STACK_SIZE		(200*1024)
+#define AMDGPU_UVD_HEAP_SIZE		(256*1024)
+#define AMDGPU_UVD_SESSION_SIZE		(50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET	256
 
 struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
@@ -1593,6 +1656,7 @@
 	uint64_t		gpu_addr;
 	unsigned		fw_version;
 	void			*saved_bo;
+	unsigned		max_handles;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1645,7 +1709,7 @@
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
-	int 			num_instances;
+	int			num_instances;
 };
 
 /*
@@ -1691,12 +1755,12 @@
  * Debugfs
  */
 struct amdgpu_debugfs {
-	struct drm_info_list	*files;
+	const struct drm_info_list	*files;
 	unsigned		num_files;
 };
 
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
-			     struct drm_info_list *files,
+			     const struct drm_info_list *files,
 			     unsigned nfiles);
 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 
@@ -1738,13 +1802,6 @@
 	bool grbm_indexed;
 };
 
-struct amdgpu_cu_info {
-	uint32_t number; /* total active CU number */
-	uint32_t ao_cu_mask;
-	uint32_t bitmap[4][4];
-};
-
-
 /*
  * ASIC specific functions.
  */
@@ -1762,7 +1819,6 @@
 	u32 (*get_xclk)(struct amdgpu_device *adev);
 	/* get the gpu clock counter */
 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
-	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
 	/* MM block clocks */
 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@@ -1855,15 +1911,8 @@
 /*
  * CGS
  */
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
-
-
-/*
- * CGS
- */
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
+struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
 
 /* GPU virtualization */
@@ -1904,16 +1953,15 @@
 	int				usec_timeout;
 	const struct amdgpu_asic_funcs	*asic_funcs;
 	bool				shutdown;
-	bool				suspend;
 	bool				need_dma32;
 	bool				accel_working;
-	struct work_struct 		reset_work;
+	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
-	unsigned 			debugfs_count;
+	unsigned			debugfs_count;
 #if defined(CONFIG_DEBUG_FS)
-	struct dentry			*debugfs_regs;
+	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
 	struct amdgpu_atif		atif;
 	struct amdgpu_atcs		atcs;
@@ -1926,7 +1974,6 @@
 	/* BIOS */
 	uint8_t				*bios;
 	bool				is_atom_bios;
-	uint16_t			bios_header_start;
 	struct amdgpu_bo		*stollen_vga_memory;
 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
@@ -2163,7 +2210,6 @@
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
-#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -2175,7 +2221,7 @@
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -2183,6 +2229,8 @@
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
+#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2196,7 +2244,7 @@
 #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
 #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
 #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
-#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
+#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
 #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
 #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
@@ -2339,7 +2387,7 @@
  * KMS
  */
 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
-extern int amdgpu_max_kms_ioctl;
+extern const int amdgpu_max_kms_ioctl;
 
 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int amdgpu_driver_unload_kms(struct drm_device *dev);
@@ -2398,5 +2446,4 @@
 		       uint64_t addr, struct amdgpu_bo **bo);
 
 #include "amdgpu_object.h"
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index b7b583c..252edba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -467,13 +467,6 @@
 	return 0;
 }
 
-static void acp_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "ACP STATUS\n");
-}
-
 static int acp_set_clockgating_state(void *handle,
 				     enum amd_clockgating_state state)
 {
@@ -487,6 +480,7 @@
 }
 
 const struct amd_ip_funcs acp_ip_funcs = {
+	.name = "acp_ip",
 	.early_init = acp_early_init,
 	.late_init = NULL,
 	.sw_init = acp_sw_init,
@@ -498,7 +492,6 @@
 	.is_idle = acp_is_idle,
 	.wait_for_idle = acp_wait_for_idle,
 	.soft_reset = acp_soft_reset,
-	.print_status = acp_print_status,
 	.set_clockgating_state = acp_set_clockgating_state,
 	.set_powergating_state = acp_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index f6e32a6..8a39631 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -30,7 +30,7 @@
 
 struct amdgpu_acp {
 	struct device *parent;
-	void *cgs_device;
+	struct cgs_device *cgs_device;
 	struct amd_acp_private *private;
 	struct mfd_cell *acp_cell;
 	struct resource *acp_res;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 84b0ce3..9df1bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -234,16 +234,6 @@
 	return hpd;
 }
 
-static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
-					 uint32_t supported_device,
-					 int *connector_type,
-					 struct amdgpu_i2c_bus_rec *i2c_bus,
-					 uint16_t *line_mux,
-					 struct amdgpu_hpd *hpd)
-{
-	return true;
-}
-
 static const int object_connector_convert[] = {
 	DRM_MODE_CONNECTOR_Unknown,
 	DRM_MODE_CONNECTOR_DVII,
@@ -514,11 +504,6 @@
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
-			if (!amdgpu_atombios_apply_quirks
-			    (adev, le16_to_cpu(path->usDeviceTag), &connector_type,
-			     &ddc_bus, &conn_id, &hpd))
-				continue;
-
 			amdgpu_display_add_connector(adev,
 						      conn_id,
 						      le16_to_cpu(path->usDeviceTag),
@@ -699,6 +684,36 @@
 	return ret;
 }
 
+union gfx_info {
+	ATOM_GFX_INFO_V2_1 info;
+};
+
+int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, GFX_Info);
+	uint8_t frev, crev;
+	uint16_t data_offset;
+	int ret = -EINVAL;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		union gfx_info *gfx_info = (union gfx_info *)
+			(mode_info->atom_context->bios + data_offset);
+
+		adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
+		adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
+		adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
+		adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
+		adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
+		adev->gfx.config.max_texture_channel_caches =
+			gfx_info->info.max_texture_channel_caches;
+
+		ret = 0;
+	}
+	return ret;
+}
+
 union igp_info {
 	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 9e14420..8c2e696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -144,6 +144,8 @@
 
 int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
 
+int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
+
 bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
 				      struct amdgpu_atom_ss *ss,
 				      int id, u32 clock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index cd639c3..33e47a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -141,7 +141,7 @@
 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
 {
 	int i;
-	int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
+	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
 		640 * 480 * 4,
 		720 * 480 * 4,
 		800 * 600 * 4,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 80add22..99ca75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -349,7 +349,7 @@
 bool amdgpu_get_bios(struct amdgpu_device *adev)
 {
 	bool r;
-	uint16_t tmp;
+	uint16_t tmp, bios_header_start;
 
 	r = amdgpu_atrm_get_bios(adev);
 	if (r == false)
@@ -383,11 +383,11 @@
 		goto free_bios;
 	}
 
-	adev->bios_header_start = RBIOS16(0x48);
-	if (!adev->bios_header_start) {
+	bios_header_start = RBIOS16(0x48);
+	if (!bios_header_start) {
 		goto free_bios;
 	}
-	tmp = adev->bios_header_start + 4;
+	tmp = bios_header_start + 4;
 	if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
 	    !memcmp(adev->bios + tmp, "MOTA", 4)) {
 		adev->is_atom_bios = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index eacd810..823bf5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -106,7 +106,7 @@
 		struct amdgpu_bo *bo;
 		struct mm_struct *usermm;
 
-		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
+		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
 		if (!gobj) {
 			r = -ENOENT;
 			goto error_free;
@@ -263,7 +263,7 @@
 		for (i = 0; i < args->in.bo_number; ++i) {
 			if (copy_from_user(&info[i], uptr, bytes))
 				goto error_free;
-			
+
 			uptr += args->in.bo_info_size;
 		}
 	}
@@ -271,7 +271,7 @@
 	switch (args->in.operation) {
 	case AMDGPU_BO_LIST_OP_CREATE:
 		r = amdgpu_bo_list_create(fpriv, &list, &handle);
-		if (r) 
+		if (r)
 			goto error_free;
 
 		r = amdgpu_bo_list_set(adev, filp, list, info,
@@ -281,7 +281,7 @@
 			goto error_free;
 
 		break;
-		
+
 	case AMDGPU_BO_LIST_OP_DESTROY:
 		amdgpu_bo_list_destroy(fpriv, handle);
 		handle = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6043dc7..8943099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,7 +42,7 @@
 	struct amdgpu_device *adev =					\
 		((struct amdgpu_cgs_device *)cgs_device)->adev
 
-static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
 				   uint64_t *mc_start, uint64_t *mc_size,
 				   uint64_t *mem_size)
 {
@@ -73,7 +73,7 @@
 	return 0;
 }
 
-static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
 				uint64_t size,
 				uint64_t min_offset, uint64_t max_offset,
 				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
@@ -102,7 +102,7 @@
 	return ret;
 }
 
-static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
 {
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
 
@@ -118,7 +118,7 @@
 	return 0;
 }
 
-static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 				    enum cgs_gpu_mem_type type,
 				    uint64_t size, uint64_t align,
 				    uint64_t min_offset, uint64_t max_offset,
@@ -208,7 +208,7 @@
 	return ret;
 }
 
-static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 {
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 
@@ -225,7 +225,7 @@
 	return 0;
 }
 
-static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
 				   uint64_t *mcaddr)
 {
 	int r;
@@ -246,7 +246,7 @@
 	return r;
 }
 
-static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 {
 	int r;
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -258,7 +258,7 @@
 	return r;
 }
 
-static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
 				   void **map)
 {
 	int r;
@@ -271,7 +271,7 @@
 	return r;
 }
 
-static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
 {
 	int r;
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -283,20 +283,20 @@
 	return r;
 }
 
-static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
+static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
 {
 	CGS_FUNC_ADEV;
 	return RREG32(offset);
 }
 
-static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
+static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
 				      uint32_t value)
 {
 	CGS_FUNC_ADEV;
 	WREG32(offset, value);
 }
 
-static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
+static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
 					     enum cgs_ind_reg space,
 					     unsigned index)
 {
@@ -320,7 +320,7 @@
 	return 0;
 }
 
-static void amdgpu_cgs_write_ind_register(void *cgs_device,
+static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 					  enum cgs_ind_reg space,
 					  unsigned index, uint32_t value)
 {
@@ -343,7 +343,7 @@
 	WARN(1, "Invalid indirect register space");
 }
 
-static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
+static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
 {
 	CGS_FUNC_ADEV;
 	uint8_t val;
@@ -353,7 +353,7 @@
 	return val;
 }
 
-static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
+static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
 {
 	CGS_FUNC_ADEV;
 	uint16_t val;
@@ -363,7 +363,7 @@
 	return val;
 }
 
-static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
+static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
 						 unsigned addr)
 {
 	CGS_FUNC_ADEV;
@@ -374,7 +374,7 @@
 	return val;
 }
 
-static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
+static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
 					     uint8_t value)
 {
 	CGS_FUNC_ADEV;
@@ -382,7 +382,7 @@
 	WARN(ret, "pci_write_config_byte error");
 }
 
-static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
+static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
 					     uint16_t value)
 {
 	CGS_FUNC_ADEV;
@@ -390,7 +390,7 @@
 	WARN(ret, "pci_write_config_word error");
 }
 
-static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
+static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
 					      uint32_t value)
 {
 	CGS_FUNC_ADEV;
@@ -399,7 +399,7 @@
 }
 
 
-static int amdgpu_cgs_get_pci_resource(void *cgs_device,
+static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
 				       enum cgs_resource_type resource_type,
 				       uint64_t size,
 				       uint64_t offset,
@@ -433,7 +433,7 @@
 	}
 }
 
-static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
+static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
 						  unsigned table, uint16_t *size,
 						  uint8_t *frev, uint8_t *crev)
 {
@@ -449,7 +449,7 @@
 	return NULL;
 }
 
-static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
+static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
 					      uint8_t *frev, uint8_t *crev)
 {
 	CGS_FUNC_ADEV;
@@ -462,7 +462,7 @@
 	return -EINVAL;
 }
 
-static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
+static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
 					  void *args)
 {
 	CGS_FUNC_ADEV;
@@ -471,33 +471,33 @@
 		adev->mode_info.atom_context, table, args);
 }
 
-static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
+static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
 {
 	/* TODO */
 	return 0;
 }
 
-static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
+static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
 {
 	/* TODO */
 	return 0;
 }
 
-static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
+static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
 				     int active)
 {
 	/* TODO */
 	return 0;
 }
 
-static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
+static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
 				       enum cgs_clock clock, unsigned freq)
 {
 	/* TODO */
 	return 0;
 }
 
-static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
+static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
 					enum cgs_engine engine, int powered)
 {
 	/* TODO */
@@ -506,7 +506,7 @@
 
 
 
-static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
+static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
 					    enum cgs_clock clock,
 					    struct cgs_clock_limits *limits)
 {
@@ -514,7 +514,7 @@
 	return 0;
 }
 
-static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
+static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
 					  const uint32_t *voltages)
 {
 	DRM_ERROR("not implemented");
@@ -565,7 +565,7 @@
 	.process = cgs_process_irq,
 };
 
-static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
+static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
 				     unsigned num_types,
 				     cgs_irq_source_set_func_t set,
 				     cgs_irq_handler_func_t handler,
@@ -600,19 +600,19 @@
 	return ret;
 }
 
-static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
 {
 	CGS_FUNC_ADEV;
 	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
 }
 
-static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
 {
 	CGS_FUNC_ADEV;
 	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 }
 
-int amdgpu_cgs_set_clockgating_state(void *cgs_device,
+int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_clockgating_state state)
 {
@@ -633,7 +633,7 @@
 	return r;
 }
 
-int amdgpu_cgs_set_powergating_state(void *cgs_device,
+int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_powergating_state state)
 {
@@ -655,7 +655,7 @@
 }
 
 
-static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
+static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 {
 	CGS_FUNC_ADEV;
 	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
@@ -681,9 +681,10 @@
 		result = AMDGPU_UCODE_ID_CP_MEC1;
 		break;
 	case CGS_UCODE_ID_CP_MEC_JT2:
-		if (adev->asic_type == CHIP_TONGA)
+		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
+		  || adev->asic_type == CHIP_POLARIS10)
 			result = AMDGPU_UCODE_ID_CP_MEC2;
-		else if (adev->asic_type == CHIP_CARRIZO)
+		else
 			result = AMDGPU_UCODE_ID_CP_MEC1;
 		break;
 	case CGS_UCODE_ID_RLC_G:
@@ -695,13 +696,24 @@
 	return result;
 }
 
-static int amdgpu_cgs_get_firmware_info(void *cgs_device,
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+	CGS_FUNC_ADEV;
+	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+		release_firmware(adev->pm.fw);
+		return 0;
+	}
+	/* cannot release other firmware images because they are not created by CGS */
+	return -EINVAL;
+}
+
+static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
 {
 	CGS_FUNC_ADEV;
 
-	if (CGS_UCODE_ID_SMU != type) {
+	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
 		uint64_t gpu_addr;
 		uint32_t data_size;
 		const struct gfx_firmware_header_v1_0 *header;
@@ -734,30 +746,44 @@
 		const uint8_t *src;
 		const struct smc_firmware_header_v1_0 *hdr;
 
-		switch (adev->asic_type) {
-		case CHIP_TONGA:
-			strcpy(fw_name, "amdgpu/tonga_smc.bin");
-			break;
-		case CHIP_FIJI:
-			strcpy(fw_name, "amdgpu/fiji_smc.bin");
-			break;
-		default:
-			DRM_ERROR("SMC firmware not supported\n");
-			return -EINVAL;
-		}
+		if (!adev->pm.fw) {
+			switch (adev->asic_type) {
+			case CHIP_TONGA:
+				strcpy(fw_name, "amdgpu/tonga_smc.bin");
+				break;
+			case CHIP_FIJI:
+				strcpy(fw_name, "amdgpu/fiji_smc.bin");
+				break;
+			case CHIP_POLARIS11:
+				if (type == CGS_UCODE_ID_SMU)
+					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+				else if (type == CGS_UCODE_ID_SMU_SK)
+					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+				break;
+			case CHIP_POLARIS10:
+				if (type == CGS_UCODE_ID_SMU)
+					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+				else if (type == CGS_UCODE_ID_SMU_SK)
+					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+				break;
+			default:
+				DRM_ERROR("SMC firmware not supported\n");
+				return -EINVAL;
+			}
 
-		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
-		if (err) {
-			DRM_ERROR("Failed to request firmware\n");
-			return err;
-		}
+			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+			if (err) {
+				DRM_ERROR("Failed to request firmware\n");
+				return err;
+			}
 
-		err = amdgpu_ucode_validate(adev->pm.fw);
-		if (err) {
-			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
-			release_firmware(adev->pm.fw);
-			adev->pm.fw = NULL;
-			return err;
+			err = amdgpu_ucode_validate(adev->pm.fw);
+			if (err) {
+				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+				release_firmware(adev->pm.fw);
+				adev->pm.fw = NULL;
+				return err;
+			}
 		}
 
 		hdr = (const struct smc_firmware_header_v1_0 *)	adev->pm.fw->data;
@@ -774,7 +800,7 @@
 	return 0;
 }
 
-static int amdgpu_cgs_query_system_info(void *cgs_device,
+static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 				struct cgs_system_info *sys_info)
 {
 	CGS_FUNC_ADEV;
@@ -801,6 +827,9 @@
 	case CGS_SYSTEM_INFO_PG_FLAGS:
 		sys_info->value = adev->pg_flags;
 		break;
+	case CGS_SYSTEM_INFO_GFX_CU_INFO:
+		sys_info->value = adev->gfx.cu_info.number;
+		break;
 	default:
 		return -ENODEV;
 	}
@@ -808,7 +837,7 @@
 	return 0;
 }
 
-static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
 					  struct cgs_display_info *info)
 {
 	CGS_FUNC_ADEV;
@@ -851,7 +880,7 @@
 }
 
 
-static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
 {
 	CGS_FUNC_ADEV;
 
@@ -867,7 +896,7 @@
  */
 
 #if defined(CONFIG_ACPI)
-static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 				    struct cgs_acpi_method_info *info)
 {
 	CGS_FUNC_ADEV;
@@ -1030,14 +1059,14 @@
 	return result;
 }
 #else
-static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 				struct cgs_acpi_method_info *info)
 {
 	return -EIO;
 }
 #endif
 
-int amdgpu_cgs_call_acpi_method(void *cgs_device,
+int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
 					uint32_t acpi_method,
 					uint32_t acpi_function,
 					void *pinput, void *poutput,
@@ -1107,6 +1136,7 @@
 	amdgpu_cgs_pm_query_clock_limits,
 	amdgpu_cgs_set_camera_voltages,
 	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_rel_firmware,
 	amdgpu_cgs_set_powergating_state,
 	amdgpu_cgs_set_clockgating_state,
 	amdgpu_cgs_get_active_displays_info,
@@ -1121,7 +1151,7 @@
 	amdgpu_cgs_irq_put
 };
 
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
 {
 	struct amdgpu_cgs_device *cgs_device =
 		kmalloc(sizeof(*cgs_device), GFP_KERNEL);
@@ -1135,10 +1165,10 @@
 	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
 	cgs_device->adev = adev;
 
-	return cgs_device;
+	return (struct cgs_device *)cgs_device;
 }
 
-void amdgpu_cgs_destroy_device(void *cgs_device)
+void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
 {
 	kfree(cgs_device);
 }
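
The hunks above convert the CGS callbacks from a bare void * handle to a typed struct cgs_device *, so handle mix-ups are caught at compile time while amdgpu keeps wrapping its own device pointer behind the opaque type (amdgpu_cgs_create_device() now returns the wrapper cast to struct cgs_device *). A minimal sketch of that wrapper pattern follows; the example_* names and the placeholder cgs_device layout are assumptions, the real pieces being struct amdgpu_cgs_device and the CGS_FUNC_ADEV macro used throughout this file.

    /* Sketch only: an opaque handle type wrapping the driver's device pointer.
     * The real struct cgs_device carries the ops tables installed above. */
    struct amdgpu_device;                          /* opaque in this sketch */

    struct cgs_device {
            const void *ops;                       /* placeholder member */
    };

    struct example_cgs_device {                    /* stand-in for amdgpu_cgs_device */
            struct cgs_device base;                /* first member, so the wrapper
                                                    * pointer doubles as the handle */
            struct amdgpu_device *adev;
    };

    /* Roughly what CGS_FUNC_ADEV does: recover adev from the opaque handle. */
    static inline struct amdgpu_device *
    example_cgs_to_adev(struct cgs_device *cgs_device)
    {
            return ((struct example_cgs_device *)cgs_device)->adev;
    }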
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 119cdc2..cb07da4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -194,12 +194,12 @@
 				bpc = 8;
 				DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
 					  connector->name, bpc);
-			} else if (bpc > 8) {
-				/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
-				DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
-					  connector->name);
-				bpc = 8;
 			}
+		} else if (bpc > 8) {
+			/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+			DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+				  connector->name);
+			bpc = 8;
 		}
 	}
 
@@ -439,7 +439,7 @@
 	struct drm_display_mode *mode = NULL;
 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
 	int i;
-	struct mode_size {
+	static const struct mode_size {
 		int w;
 		int h;
 	} common_modes[17] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9392e50..9bc8f1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -24,7 +24,6 @@
  * Authors:
  *    Jerome Glisse <glisse@freedesktop.org>
  */
-#include <linux/list_sort.h>
 #include <linux/pagemap.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
@@ -88,44 +87,41 @@
 }
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
-				      struct amdgpu_user_fence *uf,
-				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+				      struct drm_amdgpu_cs_chunk_fence *data,
+				      uint32_t *offset)
 {
 	struct drm_gem_object *gobj;
-	uint32_t handle;
 
-	handle = fence_data->handle;
-	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
-				     fence_data->handle);
+	gobj = drm_gem_object_lookup(p->filp, data->handle);
 	if (gobj == NULL)
 		return -EINVAL;
 
-	uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-	uf->offset = fence_data->offset;
-
-	if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
-		drm_gem_object_unreference_unlocked(gobj);
-		return -EINVAL;
-	}
-
-	p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
+	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 	p->uf_entry.tv.shared = true;
 	p->uf_entry.user_pages = NULL;
+	*offset = data->offset;
 
 	drm_gem_object_unreference_unlocked(gobj);
+
+	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+		amdgpu_bo_unref(&p->uf_entry.robj);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
-	struct amdgpu_user_fence uf = {};
 	unsigned size, num_ibs = 0;
+	uint32_t uf_offset = 0;
 	int i;
 	int ret;
 
@@ -200,7 +196,8 @@
 				goto free_partial_kdata;
 			}
 
-			ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
+			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
+							 &uf_offset);
 			if (ret)
 				goto free_partial_kdata;
 
@@ -215,11 +212,14 @@
 		}
 	}
 
-	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
+	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 	if (ret)
 		goto free_all_kdata;
 
-	p->job->uf = uf;
+	if (p->uf_entry.robj) {
+		p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
+		p->job->uf_offset = uf_offset;
+	}
 
 	kfree(chunk_array);
 	return 0;
@@ -377,7 +377,7 @@
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-	if (p->job->uf.bo)
+	if (p->uf_entry.robj)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
 	if (need_mmap_lock)
@@ -473,6 +473,9 @@
 		goto error_validate;
 
 	if (p->bo_list) {
+		struct amdgpu_bo *gds = p->bo_list->gds_obj;
+		struct amdgpu_bo *gws = p->bo_list->gws_obj;
+		struct amdgpu_bo *oa = p->bo_list->oa_obj;
 		struct amdgpu_vm *vm = &fpriv->vm;
 		unsigned i;
 
@@ -481,6 +484,19 @@
 
 			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
 		}
+
+		if (gds) {
+			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+			p->job->gds_size = amdgpu_bo_size(gds);
+		}
+		if (gws) {
+			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
+			p->job->gws_size = amdgpu_bo_size(gws);
+		}
+		if (oa) {
+			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
+			p->job->oa_size = amdgpu_bo_size(oa);
+		}
 	}
 
 error_validate:
@@ -527,16 +543,6 @@
 	return 0;
 }
 
-static int cmp_size_smaller_first(void *priv, struct list_head *a,
-				  struct list_head *b)
-{
-	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
-	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
-
-	/* Sort A before B if A is smaller. */
-	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
-}
-
 /**
  * cs_parser_fini() - clean parser states
  * @parser:	parser structure holding parsing context.
@@ -553,18 +559,6 @@
 	if (!error) {
 		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
 
-		/* Sort the buffer list from the smallest to largest buffer,
-		 * which affects the order of buffers in the LRU list.
-		 * This assures that the smallest buffers are added first
-		 * to the LRU list, so they are likely to be later evicted
-		 * first, instead of large buffers whose eviction is more
-		 * expensive.
-		 *
-		 * This slightly lowers the number of bytes moved by TTM
-		 * per frame under memory pressure.
-		 */
-		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
 					    parser->fence);
@@ -763,41 +757,14 @@
 
 		ib->length_dw = chunk_ib->ib_bytes / 4;
 		ib->flags = chunk_ib->flags;
-		ib->ctx = parser->ctx;
 		j++;
 	}
 
-	/* add GDS resources to first IB */
-	if (parser->bo_list) {
-		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
-		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
-		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
-		struct amdgpu_ib *ib = &parser->job->ibs[0];
-
-		if (gds) {
-			ib->gds_base = amdgpu_bo_gpu_offset(gds);
-			ib->gds_size = amdgpu_bo_size(gds);
-		}
-		if (gws) {
-			ib->gws_base = amdgpu_bo_gpu_offset(gws);
-			ib->gws_size = amdgpu_bo_size(gws);
-		}
-		if (oa) {
-			ib->oa_base = amdgpu_bo_gpu_offset(oa);
-			ib->oa_size = amdgpu_bo_size(oa);
-		}
-	}
-	/* wrap the last IB with user fence */
-	if (parser->job->uf.bo) {
-		struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
-
-		/* UVD & VCE fw doesn't support user fences */
-		if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-		    parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
-			return -EINVAL;
-
-		ib->user = &parser->job->uf;
-	}
+	/* UVD & VCE fw doesn't support user fences */
+	if (parser->job->uf_bo && (
+	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+		return -EINVAL;
 
 	return 0;
 }
@@ -862,28 +829,28 @@
 			    union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_ring *ring = p->job->ring;
-	struct amd_sched_fence *fence;
+	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+	struct fence *fence;
 	struct amdgpu_job *job;
+	int r;
 
 	job = p->job;
 	p->job = NULL;
 
-	job->base.sched = &ring->sched;
-	job->base.s_entity = &p->ctx->rings[ring->idx].entity;
-	job->owner = p->filp;
-
-	fence = amd_sched_fence_create(job->base.s_entity, p->filp);
-	if (!fence) {
+	r = amd_sched_job_init(&job->base, &ring->sched,
+			       entity, amdgpu_job_timeout_func,
+			       amdgpu_job_free_func,
+			       p->filp, &fence);
+	if (r) {
 		amdgpu_job_free(job);
-		return -ENOMEM;
+		return r;
 	}
 
-	job->base.s_fence = fence;
-	p->fence = fence_get(&fence->base);
-
-	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
-					      &fence->base);
-	job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+	job->owner = p->filp;
+	job->ctx = entity->fence_context;
+	p->fence = fence_get(fence);
+	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+	job->uf_sequence = cs->out.handle;
 
 	trace_amdgpu_cs_ioctl(job);
 	amd_sched_entity_push_job(&job->base);
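
The user-fence rework above moves the fence BO and byte offset from the per-IB amdgpu_user_fence onto the job (uf_bo/uf_offset, with uf_sequence taken from cs->out.handle); the matching emit in amdgpu_ib.c later in this patch writes that sequence number as a 64-bit value into the BO when the submission completes. A hedged sketch of how a user-space consumer could poll such a fence location; the function and its wait policy are illustrative, not the UMD code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: the kernel writes the submission's sequence number at
     * uf_bo + uf_offset, so a CPU mapping of that location can be polled.
     * Real UMDs also handle wrap-around and use the proper wait ioctls. */
    static bool example_user_fence_signaled(const volatile uint64_t *fence_ptr,
                                            uint64_t wanted_seq)
    {
            return *fence_ptr >= wanted_seq;
    }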
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2139da7..964f314 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
 	"FIJI",
 	"CARRIZO",
 	"STONEY",
+	"POLARIS10",
+	"POLARIS11",
 	"LAST",
 };
 
@@ -346,7 +348,7 @@
 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 
-	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), 
+	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
 					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
 	if (adev->doorbell.num_doorbells == 0)
 		return -EINVAL;
@@ -825,8 +827,10 @@
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.atom_context)
+	if (adev->mode_info.atom_context) {
 		kfree(adev->mode_info.atom_context->scratch);
+		kfree(adev->mode_info.atom_context->iio);
+	}
 	kfree(adev->mode_info.atom_context);
 	adev->mode_info.atom_context = NULL;
 	kfree(adev->mode_info.atom_card_info);
@@ -936,15 +940,11 @@
 	}
 
 	if (amdgpu_gart_size != -1) {
-		/* gtt size must be power of two and greater or equal to 32M */
+		/* gtt size must be greater than or equal to 32M */
 		if (amdgpu_gart_size < 32) {
 			dev_warn(adev->dev, "gart size (%d) too small\n",
 				 amdgpu_gart_size);
 			amdgpu_gart_size = -1;
-		} else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
-			dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
-				 amdgpu_gart_size);
-			amdgpu_gart_size = -1;
 		}
 	}
 
@@ -1144,6 +1144,8 @@
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1196,7 +1198,7 @@
 				if (r == -ENOENT) {
 					adev->ip_block_status[i].valid = false;
 				} else if (r) {
-					DRM_ERROR("early_init %d failed %d\n", i, r);
+					DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 					return r;
 				} else {
 					adev->ip_block_status[i].valid = true;
@@ -1219,7 +1221,7 @@
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("sw_init %d failed %d\n", i, r);
+			DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 			return r;
 		}
 		adev->ip_block_status[i].sw = true;
@@ -1252,7 +1254,7 @@
 			continue;
 		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("hw_init %d failed %d\n", i, r);
+			DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 			return r;
 		}
 		adev->ip_block_status[i].hw = true;
@@ -1272,13 +1274,13 @@
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 								    AMD_CG_STATE_GATE);
 		if (r) {
-			DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
+			DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 			return r;
 		}
 		if (adev->ip_blocks[i].funcs->late_init) {
 			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
 			if (r) {
-				DRM_ERROR("late_init %d failed %d\n", i, r);
+				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 				return r;
 			}
 		}
@@ -1302,13 +1304,13 @@
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 								    AMD_CG_STATE_UNGATE);
 		if (r) {
-			DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
+			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 			return r;
 		}
 		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("hw_fini %d failed %d\n", i, r);
+			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 		}
 		adev->ip_block_status[i].hw = false;
 	}
@@ -1319,12 +1321,17 @@
 		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("sw_fini %d failed %d\n", i, r);
+			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 		}
 		adev->ip_block_status[i].sw = false;
 		adev->ip_block_status[i].valid = false;
 	}
 
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (adev->ip_blocks[i].funcs->late_fini)
+			adev->ip_blocks[i].funcs->late_fini((void *)adev);
+	}
+
 	return 0;
 }
 
@@ -1332,20 +1339,29 @@
 {
 	int i, r;
 
+	/* ungate SMC block first */
+	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+					 AMD_CG_STATE_UNGATE);
+	if (r) {
+		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
+	}
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		/* ungate blocks so that suspend can properly shut them down */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_UNGATE);
-		if (r) {
-			DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
+		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_SMC) {
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_UNGATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			}
 		}
 		/* XXX handle errors */
 		r = adev->ip_blocks[i].funcs->suspend(adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_ERROR("suspend %d failed %d\n", i, r);
+			DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 		}
 	}
 
@@ -1361,7 +1377,7 @@
 			continue;
 		r = adev->ip_blocks[i].funcs->resume(adev);
 		if (r) {
-			DRM_ERROR("resume %d failed %d\n", i, r);
+			DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
 			return r;
 		}
 	}
@@ -1504,8 +1520,7 @@
 		amdgpu_atombios_has_gpu_virtualization_table(adev);
 
 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev) ||
-	    adev->virtualization.supports_sr_iov) {
+	if (!amdgpu_card_posted(adev)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -2007,7 +2022,7 @@
  * Debugfs
  */
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
-			     struct drm_info_list *files,
+			     const struct drm_info_list *files,
 			     unsigned nfiles)
 {
 	unsigned i;
@@ -2119,32 +2134,246 @@
 	return result;
 }
 
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		value = RREG32_PCIE(*pos >> 2);
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		WREG32_PCIE(*pos >> 2, value);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		value = RREG32_DIDT(*pos >> 2);
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		WREG32_DIDT(*pos >> 2, value);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		value = RREG32_SMC(*pos >> 2);
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		WREG32_SMC(*pos >> 2, value);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
 	.write = amdgpu_debugfs_regs_write,
 	.llseek = default_llseek
 };
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_regs_didt_read,
+	.write = amdgpu_debugfs_regs_didt_write,
+	.llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_regs_pcie_read,
+	.write = amdgpu_debugfs_regs_pcie_write,
+	.llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_regs_smc_read,
+	.write = amdgpu_debugfs_regs_smc_write,
+	.llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+	&amdgpu_debugfs_regs_fops,
+	&amdgpu_debugfs_regs_didt_fops,
+	&amdgpu_debugfs_regs_pcie_fops,
+	&amdgpu_debugfs_regs_smc_fops,
+};
+
+static const char *debugfs_regs_names[] = {
+	"amdgpu_regs",
+	"amdgpu_regs_didt",
+	"amdgpu_regs_pcie",
+	"amdgpu_regs_smc",
+};
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	struct drm_minor *minor = adev->ddev->primary;
 	struct dentry *ent, *root = minor->debugfs_root;
+	unsigned i, j;
 
-	ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
-				  adev, &amdgpu_debugfs_regs_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->rmmio_size);
-	adev->debugfs_regs = ent;
+	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+		ent = debugfs_create_file(debugfs_regs_names[i],
+					  S_IFREG | S_IRUGO, root,
+					  adev, debugfs_regs[i]);
+		if (IS_ERR(ent)) {
+			for (j = 0; j < i; j++) {
+				debugfs_remove(adev->debugfs_regs[j]);
+				adev->debugfs_regs[j] = NULL;
+			}
+			return PTR_ERR(ent);
+		}
+
+		if (!i)
+			i_size_write(ent->d_inode, adev->rmmio_size);
+		adev->debugfs_regs[i] = ent;
+	}
 
 	return 0;
 }
 
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
 {
-	debugfs_remove(adev->debugfs_regs);
-	adev->debugfs_regs = NULL;
+	unsigned i;
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+		if (adev->debugfs_regs[i]) {
+			debugfs_remove(adev->debugfs_regs[i]);
+			adev->debugfs_regs[i] = NULL;
+		}
+	}
 }
 
 int amdgpu_debugfs_init(struct drm_minor *minor)
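
The three new debugfs files registered above alongside the existing amdgpu_regs (amdgpu_regs_didt, amdgpu_regs_pcie, amdgpu_regs_smc) follow the same convention: both the transfer size and the file position must be 4-byte aligned or the handler returns -EINVAL, and the position shifted right by two selects the register passed to the corresponding RREG32_*/WREG32_* accessor. A small user-space sketch under the usual assumptions (debugfs mounted, root, DRI minor 0; the register offset is only an example):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/dri/0/amdgpu_regs_pcie";
            off_t reg_offset = 0x100;              /* must be 4-byte aligned */
            uint32_t value;
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return 1;
            /* one aligned 32-bit read; unaligned size/offset would get -EINVAL */
            if (pread(fd, &value, sizeof(value), reg_offset) != sizeof(value)) {
                    close(fd);
                    return 1;
            }
            printf("PCIE reg index 0x%lx = 0x%08x\n",
                   (unsigned long)(reg_offset >> 2), (unsigned)value);
            close(fd);
            return 0;
    }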
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 3fb405b..b0832da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -131,12 +131,17 @@
 				 vblank->framedur_ns / 1000,
 				 vblank->linedur_ns / 1000, stat, vpos, hpos);
 
-	/* set the flip status */
+	/* Do the flip (mmio) */
+	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
+
+	/* Set the flip status */
 	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
-	/* Do the flip (mmio) */
-	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
+
+	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
+					 amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+
 }
 
 /*
@@ -192,6 +197,7 @@
 	work->event = event;
 	work->adev = adev;
 	work->crtc_id = amdgpu_crtc->crtc_id;
+	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 
 	/* schedule unpin of the old buffer */
 	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
@@ -252,6 +258,9 @@
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
 	amdgpu_crtc->pflip_works = work;
 
+
+	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -554,7 +563,7 @@
 	struct amdgpu_framebuffer *amdgpu_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (obj ==  NULL) {
 		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
 			"can't create framebuffer\n", mode_cmd->handles[0]);
@@ -588,20 +597,20 @@
 	.output_poll_changed = amdgpu_output_poll_changed
 };
 
-static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
+static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
 {	{ UNDERSCAN_OFF, "off" },
 	{ UNDERSCAN_ON, "on" },
 	{ UNDERSCAN_AUTO, "auto" },
 };
 
-static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
+static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
 {	{ AMDGPU_AUDIO_DISABLE, "off" },
 	{ AMDGPU_AUDIO_ENABLE, "on" },
 	{ AMDGPU_AUDIO_AUTO, "auto" },
 };
 
 /* XXX support different dither options? spatial, temporal, both, etc. */
-static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
+static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
 {	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
 	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 7b7f4ab..fe36caf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -150,7 +150,7 @@
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			amdgpu_crtc = to_amdgpu_crtc(crtc);
 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-				vrefresh = amdgpu_crtc->hw_mode.vrefresh;
+				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
 				break;
 			}
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f1e17d6..f888c01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -50,9 +50,11 @@
  * KMS wrapper.
  * - 3.0.0 - initial driver
  * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
+ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
+ *           at the end of IBs.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	1
+#define KMS_DRIVER_MINOR	2
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -166,7 +168,7 @@
 MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
 module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
 
-static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
 	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -277,6 +279,28 @@
 	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 	/* stoney */
 	{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
+	/* Polaris11 */
+	{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	/* Polaris10 */
+	{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
 
 	{0, 0, 0}
 };
@@ -514,7 +538,7 @@
 	.irq_uninstall = amdgpu_irq_uninstall,
 	.irq_handler = amdgpu_irq_handler,
 	.ioctls = amdgpu_ioctls_kms,
-	.gem_free_object = amdgpu_gem_object_free,
+	.gem_free_object_unlocked = amdgpu_gem_object_free,
 	.gem_open_object = amdgpu_gem_object_open,
 	.gem_close_object = amdgpu_gem_object_close,
 	.dumb_create = amdgpu_mode_dumb_create,
@@ -553,22 +577,22 @@
 	.driver.pm = &amdgpu_pm_ops,
 };
 
+
+
 static int __init amdgpu_init(void)
 {
 	amdgpu_sync_init();
-#ifdef CONFIG_VGA_CONSOLE
+	amdgpu_fence_slab_init();
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
 	}
-#endif
 	DRM_INFO("amdgpu kernel modesetting enabled.\n");
 	driver = &kms_driver;
 	pdriver = &amdgpu_kms_pci_driver;
 	driver->driver_features |= DRIVER_MODESET;
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
-
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
@@ -579,6 +603,7 @@
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
+	amdgpu_fence_slab_fini();
 }
 
 module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d81f1f4..d155876 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -55,8 +55,21 @@
 };
 
 static struct kmem_cache *amdgpu_fence_slab;
-static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
 
+int amdgpu_fence_slab_init(void)
+{
+	amdgpu_fence_slab = kmem_cache_create(
+		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!amdgpu_fence_slab)
+		return -ENOMEM;
+	return 0;
+}
+
+void amdgpu_fence_slab_fini(void)
+{
+	kmem_cache_destroy(amdgpu_fence_slab);
+}
 /*
  * Cast helper
  */
@@ -198,7 +211,7 @@
 
 		/* There is always exactly one thread signaling this fence slot */
 		fence = rcu_dereference_protected(*ptr, 1);
-		rcu_assign_pointer(*ptr, NULL);
+		RCU_INIT_POINTER(*ptr, NULL);
 
 		BUG_ON(!fence);
 
@@ -352,9 +365,9 @@
 	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
 		    (unsigned long)ring);
 
-	ring->fence_drv.num_fences_mask = num_hw_submission - 1;
+	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
 	spin_lock_init(&ring->fence_drv.lock);
-	ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
+	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
 					 GFP_KERNEL);
 	if (!ring->fence_drv.fences)
 		return -ENOMEM;
@@ -396,13 +409,6 @@
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
-		amdgpu_fence_slab = kmem_cache_create(
-			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
-			SLAB_HWCACHE_ALIGN, NULL);
-		if (!amdgpu_fence_slab)
-			return -ENOMEM;
-	}
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -437,13 +443,10 @@
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[i]);
+			fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.initialized = false;
 	}
-
-	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
-		kmem_cache_destroy(amdgpu_fence_slab);
 }
 
 /**
@@ -639,7 +642,7 @@
 	return 0;
 }
 
-static struct drm_info_list amdgpu_debugfs_fence_list[] = {
+static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
 	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
 	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
 };
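
Besides moving the fence slab to module lifetime (amdgpu_fence_slab_init()/fini() are now called from amdgpu_init()/amdgpu_exit() in amdgpu_drv.c earlier in this patch), the hunks double the per-ring fence slot array to num_hw_submission * 2 and keep num_fences_mask as that count minus one, so a sequence number maps to its slot with a single AND. A self-contained sketch of the slot arithmetic, with illustrative names and an assumed queue depth:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_HW_SUBMISSION 16u                    /* assumed power of two */
    #define EXAMPLE_NUM_FENCES    (EXAMPLE_HW_SUBMISSION * 2u)
    #define EXAMPLE_FENCES_MASK   (EXAMPLE_NUM_FENCES - 1u)

    static unsigned int example_fence_slot(uint32_t seq)
    {
            return seq & EXAMPLE_FENCES_MASK;            /* seq % EXAMPLE_NUM_FENCES */
    }

    int main(void)
    {
            /* a slot is only reused after EXAMPLE_NUM_FENCES newer submissions,
             * i.e. twice the hardware queue depth */
            printf("%u %u %u\n", example_fence_slot(5),
                   example_fence_slot(6), example_fence_slot(5 + EXAMPLE_NUM_FENCES));
            return 0;
    }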
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 7312d72..921bce2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -238,18 +238,17 @@
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
-		if (adev->gart.pages[p]) {
-			adev->gart.pages[p] = NULL;
-			adev->gart.pages_addr[p] = adev->dummy_page.addr;
-			page_base = adev->gart.pages_addr[p];
-			if (!adev->gart.ptr)
-				continue;
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+		adev->gart.pages[p] = NULL;
+#endif
+		page_base = adev->dummy_page.addr;
+		if (!adev->gart.ptr)
+			continue;
 
-			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-							t, page_base, flags);
-				page_base += AMDGPU_GPU_PAGE_SIZE;
-			}
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
+						t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
 	mb();
@@ -287,10 +286,11 @@
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		adev->gart.pages_addr[p] = dma_addr[i];
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = pagelist[i];
+#endif
 		if (adev->gart.ptr) {
-			page_base = adev->gart.pages_addr[p];
+			page_base = dma_addr[i];
 			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
 				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
 				page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -312,11 +312,11 @@
  */
 int amdgpu_gart_init(struct amdgpu_device *adev)
 {
-	int r, i;
+	int r;
 
-	if (adev->gart.pages) {
+	if (adev->dummy_page.page)
 		return 0;
-	}
+
 	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
 	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
 		DRM_ERROR("Page size is smaller than GPU page size!\n");
@@ -330,22 +330,16 @@
 	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
+
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
 	/* Allocate pages table */
 	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
 	if (adev->gart.pages == NULL) {
 		amdgpu_gart_fini(adev);
 		return -ENOMEM;
 	}
-	adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-					adev->gart.num_cpu_pages);
-	if (adev->gart.pages_addr == NULL) {
-		amdgpu_gart_fini(adev);
-		return -ENOMEM;
-	}
-	/* set GART entry to point to the dummy page by default */
-	for (i = 0; i < adev->gart.num_cpu_pages; i++) {
-		adev->gart.pages_addr[i] = adev->dummy_page.addr;
-	}
+#endif
+
 	return 0;
 }
 
@@ -358,15 +352,14 @@
  */
 void amdgpu_gart_fini(struct amdgpu_device *adev)
 {
-	if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
+	if (adev->gart.ready) {
 		/* unbind pages */
 		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
 	}
 	adev->gart.ready = false;
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
 	vfree(adev->gart.pages);
-	vfree(adev->gart.pages_addr);
 	adev->gart.pages = NULL;
-	adev->gart.pages_addr = NULL;
-
+#endif
 	amdgpu_dummy_page_fini(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index c3f4e85..503d540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -43,7 +43,7 @@
 struct amdgpu_bo;
 
 struct amdgpu_gds_asic_info {
-	uint32_t 	total_size;
+	uint32_t	total_size;
 	uint32_t	gfx_partition_size;
 	uint32_t	cs_partition_size;
 };
@@ -52,8 +52,8 @@
 	struct amdgpu_gds_asic_info	mem;
 	struct amdgpu_gds_asic_info	gws;
 	struct amdgpu_gds_asic_info	oa;
-	/* At present, GDS, GWS and OA resources for gfx (graphics) 
-	 * is always pre-allocated and available for graphics operation. 
+	/* At present, GDS, GWS and OA resources for gfx (graphics)
+	 * are always pre-allocated and available for graphics operation.
 	 * Such resource is shared between all gfx clients.
 	 * TODO: move this operation to user space
 	 * */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index fa6a27b..8fab648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -93,7 +93,7 @@
 	struct drm_device *ddev = adev->ddev;
 	struct drm_file *file;
 
-	mutex_lock(&ddev->struct_mutex);
+	mutex_lock(&ddev->filelist_mutex);
 
 	list_for_each_entry(file, &ddev->filelist, lhead) {
 		struct drm_gem_object *gobj;
@@ -103,13 +103,13 @@
 		spin_lock(&file->table_lock);
 		idr_for_each_entry(&file->object_idr, gobj, handle) {
 			WARN_ONCE(1, "And also active allocations!\n");
-			drm_gem_object_unreference(gobj);
+			drm_gem_object_unreference_unlocked(gobj);
 		}
 		idr_destroy(&file->object_idr);
 		spin_unlock(&file->table_lock);
 	}
 
-	mutex_unlock(&ddev->struct_mutex);
+	mutex_unlock(&ddev->filelist_mutex);
 }
 
 /*
@@ -338,7 +338,7 @@
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *robj;
 
-	gobj = drm_gem_object_lookup(dev, filp, handle);
+	gobj = drm_gem_object_lookup(filp, handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
@@ -402,7 +402,7 @@
 	int r = 0;
 	long ret;
 
-	gobj = drm_gem_object_lookup(dev, filp, handle);
+	gobj = drm_gem_object_lookup(filp, handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
@@ -436,7 +436,7 @@
 	int r = -1;
 
 	DRM_DEBUG("%d \n", args->handle);
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
 	robj = gem_to_amdgpu_bo(gobj);
@@ -584,7 +584,7 @@
 		return -EINVAL;
 	}
 
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
 	rbo = gem_to_amdgpu_bo(gobj);
@@ -646,7 +646,7 @@
 	struct amdgpu_bo *robj;
 	int r;
 
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
@@ -769,7 +769,7 @@
 	struct drm_file *file;
 	int r;
 
-	r = mutex_lock_interruptible(&dev->struct_mutex);
+	r = mutex_lock_interruptible(&dev->filelist_mutex);
 	if (r)
 		return r;
 
@@ -793,11 +793,11 @@
 		spin_unlock(&file->table_lock);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 	return 0;
 }
 
-static struct drm_info_list amdgpu_debugfs_gem_list[] = {
+static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
 	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
 };
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8443cea..7a0b1e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -74,9 +74,6 @@
 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 	}
 
-	ib->vm = vm;
-	ib->vm_id = 0;
-
 	return 0;
 }
 
@@ -89,7 +86,8 @@
  *
  * Free an IB (all asics).
  */
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+		    struct fence *f)
 {
 	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
@@ -117,28 +115,40 @@
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
-		       struct fence **f)
+		       struct amdgpu_job *job, struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
-	struct amdgpu_ctx *ctx, *old_ctx;
+	bool skip_preamble, need_ctx_switch;
+	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
+	int vmid = 0, old_vmid = ring->vmid;
 	struct fence *hwf;
+	uint64_t ctx;
+
 	unsigned i;
 	int r = 0;
 
 	if (num_ibs == 0)
 		return -EINVAL;
 
-	ctx = ibs->ctx;
-	vm = ibs->vm;
+	/* ring tests don't use a job */
+	if (job) {
+		vm = job->vm;
+		ctx = job->ctx;
+		vmid = job->vm_id;
+	} else {
+		vm = NULL;
+		ctx = 0;
+		vmid = 0;
+	}
 
 	if (!ring->ready) {
 		dev_err(adev->dev, "couldn't schedule ib\n");
 		return -EINVAL;
 	}
 
-	if (vm && !ibs->vm_id) {
+	if (vm && !job->vm_id) {
 		dev_err(adev->dev, "VM IB without ID\n");
 		return -EINVAL;
 	}
@@ -149,58 +159,70 @@
 		return r;
 	}
 
-	if (vm) {
-		/* do context switch */
-		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
-				ib->gds_base, ib->gds_size,
-				ib->gws_base, ib->gws_size,
-				ib->oa_base, ib->oa_size);
+	if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-		if (ring->funcs->emit_hdp_flush)
-			amdgpu_ring_emit_hdp_flush(ring);
+	if (vm) {
+		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
+				    job->gds_base, job->gds_size,
+				    job->gws_base, job->gws_size,
+				    job->oa_base, job->oa_size,
+				    (ring->current_ctx == ctx) && (old_vmid != vmid));
+		if (r) {
+			amdgpu_ring_undo(ring);
+			return r;
+		}
 	}
 
-	old_ctx = ring->current_ctx;
+	if (ring->funcs->emit_hdp_flush)
+		amdgpu_ring_emit_hdp_flush(ring);
+
+	/* always set cond_exec_polling to CONTINUE */
+	*ring->cond_exe_cpu_addr = 1;
+
+	skip_preamble = ring->current_ctx == ctx;
+	need_ctx_switch = ring->current_ctx != ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
+		/* drop preamble IBs if we don't have a context switch */
+		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+			continue;
 
-		if (ib->ctx != ctx || ib->vm != vm) {
-			ring->current_ctx = old_ctx;
-			if (ib->vm_id)
-				amdgpu_vm_reset_id(adev, ib->vm_id);
-			amdgpu_ring_undo(ring);
-			return -EINVAL;
-		}
-		amdgpu_ring_emit_ib(ring, ib);
-		ring->current_ctx = ctx;
+		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+				    need_ctx_switch);
+		need_ctx_switch = false;
+		ring->vmid = vmid;
 	}
 
-	if (vm) {
-		if (ring->funcs->emit_hdp_invalidate)
-			amdgpu_ring_emit_hdp_invalidate(ring);
-	}
+	if (ring->funcs->emit_hdp_invalidate)
+		amdgpu_ring_emit_hdp_invalidate(ring);
 
 	r = amdgpu_fence_emit(ring, &hwf);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-		ring->current_ctx = old_ctx;
-		if (ib->vm_id)
-			amdgpu_vm_reset_id(adev, ib->vm_id);
+		if (job && job->vm_id)
+			amdgpu_vm_reset_id(adev, job->vm_id);
+		ring->vmid = old_vmid;
 		amdgpu_ring_undo(ring);
 		return r;
 	}
 
 	/* wrap the last IB with fence */
-	if (ib->user) {
-		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
-		addr += ib->user->offset;
-		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
+	if (job && job->uf_bo) {
+		uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
+
+		addr += job->uf_offset;
+		amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
 				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
 	if (f)
 		*f = fence_get(hwf);
 
+	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
+		amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+	ring->current_ctx = ctx;
 	amdgpu_ring_commit(ring);
 	return 0;
 }
@@ -315,7 +337,7 @@
 
 }
 
-static struct drm_info_list amdgpu_debugfs_sa_list[] = {
+static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
 	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
 };
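
In the amdgpu_ib_schedule() rework above, per-IB context and VM state moves onto the job, and preamble IBs are dropped whenever the ring is still on the same context, since the state they replay is only needed after a context switch. The decision reduces to a small predicate; a sketch with the flag value assumed to match the amdgpu UAPI header:

    #include <stdbool.h>
    #include <stdint.h>

    #define EXAMPLE_IB_FLAG_PREAMBLE (1u << 1)   /* assumed AMDGPU_IB_FLAG_PREAMBLE */

    /* Sketch of the skip_preamble test: drop a preamble IB when the ring's
     * current context is already the job's context. */
    static bool example_skip_ib(uint32_t ib_flags,
                                uint64_t ring_current_ctx, uint64_t job_ctx)
    {
            return (ib_flags & EXAMPLE_IB_FLAG_PREAMBLE) &&
                   ring_current_ctx == job_ctx;
    }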
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 762cfdb..835a3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,7 +219,6 @@
 	if (r) {
 		return r;
 	}
-	adev->ddev->vblank_disable_allowed = true;
 
 	/* enable msi */
 	adev->irq.msi_enabled = false;
@@ -498,7 +497,7 @@
 	return 0;
 }
 
-static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
+static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
 	.map = amdgpu_irqdomain_map,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9c9b19e..f0dafa5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,8 +28,25 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+static void amdgpu_job_free_handler(struct work_struct *ws)
+{
+	struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
+	amd_sched_job_put(&job->base);
+}
+
+void amdgpu_job_timeout_func(struct work_struct *work)
+{
+	struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
+	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
+				job->base.sched->name,
+				(uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
+				job->ring->fence_drv.sync_seq);
+
+	amd_sched_job_put(&job->base);
+}
+
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job)
+		     struct amdgpu_job **job, struct amdgpu_vm *vm)
 {
 	size_t size = sizeof(struct amdgpu_job);
 
@@ -43,8 +60,10 @@
 		return -ENOMEM;
 
 	(*job)->adev = adev;
+	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
+	INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
 
 	amdgpu_sync_create(&(*job)->sync);
 
@@ -56,7 +75,7 @@
 {
 	int r;
 
-	r = amdgpu_job_alloc(adev, 1, job);
+	r = amdgpu_job_alloc(adev, 1, job, NULL);
 	if (r)
 		return r;
 
@@ -78,8 +97,16 @@
 		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
 	fence_put(job->fence);
 
-	amdgpu_bo_unref(&job->uf.bo);
+	amdgpu_bo_unref(&job->uf_bo);
 	amdgpu_sync_free(&job->sync);
+
+	if (!job->base.use_sched)
+		kfree(job);
+}
+
+void amdgpu_job_free_func(struct kref *refcount)
+{
+	struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
 	kfree(job);
 }
 
@@ -87,16 +114,22 @@
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f)
 {
+	struct fence *fence;
+	int r;
 	job->ring = ring;
-	job->base.sched = &ring->sched;
-	job->base.s_entity = entity;
-	job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
-	if (!job->base.s_fence)
-		return -ENOMEM;
 
-	*f = fence_get(&job->base.s_fence->base);
+	if (!f)
+		return -EINVAL;
+
+	r = amd_sched_job_init(&job->base, &ring->sched,
+			       entity, amdgpu_job_timeout_func,
+			       amdgpu_job_free_func, owner, &fence);
+	if (r)
+		return r;
 
 	job->owner = owner;
+	job->ctx = entity->fence_context;
+	*f = fence_get(fence);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
@@ -105,27 +138,19 @@
 static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	struct amdgpu_vm *vm = job->ibs->vm;
+	struct amdgpu_vm *vm = job->vm;
 
 	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 
-	if (fence == NULL && vm && !job->ibs->vm_id) {
+	if (fence == NULL && vm && !job->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
-		unsigned i, vm_id;
-		uint64_t vm_pd_addr;
 		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
 				      &job->base.s_fence->base,
-				      &vm_id, &vm_pd_addr);
+				      &job->vm_id, &job->vm_pd_addr);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		else {
-			for (i = 0; i < job->num_ibs; ++i) {
-				job->ibs[i].vm_id = vm_id;
-				job->ibs[i].vm_pd_addr = vm_pd_addr;
-			}
-		}
 
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
@@ -153,7 +178,7 @@
 
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
-			       job->sync.last_vm_update, &fence);
+			       job->sync.last_vm_update, job, &fence);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
@@ -165,7 +190,9 @@
 	return fence;
 }
 
-struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_job_dependency,
 	.run_job = amdgpu_job_run,
+	.begin_job = amd_sched_job_begin,
+	.finish_job = amd_sched_job_finish,
 };
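
The new free and timeout handlers above all rely on the same pattern: the scheduler hands back a pointer to an embedded member (base.work_free_job, base.work_tdr.work, base.refcount) and container_of() recovers the enclosing amdgpu_job. A stripped-down sketch of that pattern with illustrative types:

    #include <stddef.h>

    struct example_sched_job { int pending; };     /* stands in for amd_sched_job */

    struct example_job {
            struct example_sched_job base;         /* embedded scheduler state */
            int ring_id;
    };

    /* What container_of() expands to: subtract the member offset to get back
     * to the enclosing structure. */
    #define example_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct example_job *to_example_job(struct example_sched_job *sched_job)
    {
            return example_container_of(sched_job, struct example_job, base);
    }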
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b04337d..40a2370 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -427,7 +427,6 @@
 	}
 	case AMDGPU_INFO_DEV_INFO: {
 		struct drm_amdgpu_info_device dev_info = {};
-		struct amdgpu_cu_info cu_info;
 
 		dev_info.device_id = dev->pdev->device;
 		dev_info.chip_rev = adev->rev_id;
@@ -461,11 +460,11 @@
 					     AMDGPU_GPU_PAGE_SIZE;
 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
 
-		amdgpu_asic_get_cu_info(adev, &cu_info);
-		dev_info.cu_active_number = cu_info.number;
-		dev_info.cu_ao_mask = cu_info.ao_cu_mask;
+		dev_info.cu_active_number = adev->gfx.cu_info.number;
+		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
 		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
-		memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
+		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
+		       sizeof(adev->gfx.cu_info.bitmap));
 		dev_info.vram_type = adev->mc.vram_type;
 		dev_info.vram_bit_width = adev->mc.vram_width;
 		dev_info.vce_harvest_config = adev->vce.harvest_config;
@@ -755,4 +754,4 @@
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
-int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 9f4a45c..32fa7b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -232,7 +232,10 @@
 	int r;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem)) {
+		mutex_unlock(&adev->mn_lock);
+		return ERR_PTR(-EINTR);
+	}
 
 	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
 		if (rmn->mm == mm)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 81bd964..6b1d7d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -283,7 +283,7 @@
 	u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
 	/* pageflipping */
 	void (*page_flip)(struct amdgpu_device *adev,
-			 int crtc_id, u64 crtc_base);
+			  int crtc_id, u64 crtc_base, bool async);
 	int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
 					u32 *vbl, u32 *position);
 	/* display topology setup */
@@ -530,7 +530,7 @@
 				((em) == ATOM_ENCODER_MODE_DP_MST))
 
 /* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
-#define USE_REAL_VBLANKSTART 		(1 << 30)
+#define USE_REAL_VBLANKSTART		(1 << 30)
 #define GET_DISTANCE_TO_VBLANKSTART	(1 << 31)
 
 void amdgpu_link_encoder_connector(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index acc0801..bdb01d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,7 +71,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ff9597c..589b36e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -362,16 +362,23 @@
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret;
 	long level;
+	uint32_t i, mask = 0;
+	char sub_str[2];
 
-	ret = kstrtol(buf, 0, &level);
+	for (i = 0; i < strlen(buf) - 1; i++) {
+		sub_str[0] = *(buf + i);
+		sub_str[1] = '\0';
+		ret = kstrtol(sub_str, 0, &level);
 
-	if (ret) {
-		count = -EINVAL;
-		goto fail;
+		if (ret) {
+			count = -EINVAL;
+			goto fail;
+		}
+		mask |= 1 << level;
 	}
 
 	if (adev->pp_enabled)
-		amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
+		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 fail:
 	return count;
 }
@@ -399,16 +406,23 @@
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret;
 	long level;
+	uint32_t i, mask = 0;
+	char sub_str[2];
 
-	ret = kstrtol(buf, 0, &level);
+	for (i = 0; i < strlen(buf) - 1; i++) {
+		sub_str[0] = *(buf + i);
+		sub_str[1] = '\0';
+		ret = kstrtol(sub_str, 0, &level);
 
-	if (ret) {
-		count = -EINVAL;
-		goto fail;
+		if (ret) {
+			count = -EINVAL;
+			goto fail;
+		}
+		mask |= 1 << level;
 	}
 
 	if (adev->pp_enabled)
-		amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
+		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 fail:
 	return count;
 }
@@ -436,16 +450,23 @@
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret;
 	long level;
+	uint32_t i, mask = 0;
+	char sub_str[2];
 
-	ret = kstrtol(buf, 0, &level);
+	for (i = 0; i < strlen(buf) - 1; i++) {
+		sub_str[0] = *(buf + i);
+		sub_str[1] = '\0';
+		ret = kstrtol(sub_str, 0, &level);
 
-	if (ret) {
-		count = -EINVAL;
-		goto fail;
+		if (ret) {
+			count = -EINVAL;
+			goto fail;
+		}
+		mask |= 1 << level;
 	}
 
 	if (adev->pp_enabled)
-		amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
+		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 fail:
 	return count;
 }
@@ -1212,7 +1233,7 @@
 	return 0;
 }
 
-static struct drm_info_list amdgpu_pm_info_list[] = {
+static const struct drm_info_list amdgpu_pm_info_list[] = {
 	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
 };
 #endif
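
The reworked pp_dpm_sclk/mclk/pcie store handlers above change the sysfs contract from a single forced level to a set of levels: each character of the written string (the trailing newline is excluded by the strlen(buf) - 1 bound) is parsed as one level number and sets the matching bit of the mask passed to amdgpu_dpm_force_clock_level(). The same parsing in isolation, as a user-space sketch with illustrative names:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Turn a string of single-digit levels, e.g. "023", into a bitmask
     * (bits 0, 2 and 3 -> 0x0d), mirroring the per-character loop above. */
    static int example_levels_to_mask(const char *buf, uint32_t *mask)
    {
            size_t i;

            *mask = 0;
            for (i = 0; i < strlen(buf); i++) {
                    if (buf[i] == '\n')
                            continue;              /* the kernel loop stops before it */
                    if (buf[i] < '0' || buf[i] > '9')
                            return -1;             /* kstrtol() would reject this too */
                    *mask |= 1u << (buf[i] - '0');
            }
            return 0;
    }

    int main(void)
    {
            uint32_t mask;

            if (example_levels_to_mask("023\n", &mask) == 0)
                    printf("mask = 0x%02x\n", mask);   /* prints mask = 0x0d */
            return 0;
    }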
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index e9c6ae6..8225655 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -99,6 +99,10 @@
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
 	switch (adev->asic_type) {
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		adev->pp_enabled = true;
+		break;
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 		adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
@@ -179,13 +183,6 @@
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
-		amdgpu_pm_sysfs_fini(adev);
-		amd_powerplay_fini(adev->powerplay.pp_handle);
-	}
-#endif
-
 	return ret;
 }
 
@@ -219,6 +216,22 @@
 	return ret;
 }
 
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pp_enabled) {
+		amdgpu_pm_sysfs_fini(adev);
+		amd_powerplay_fini(adev->powerplay.pp_handle);
+	}
+
+	if (adev->powerplay.ip_funcs->late_fini)
+		adev->powerplay.ip_funcs->late_fini(
+			  adev->powerplay.pp_handle);
+#endif
+}
+
 static int amdgpu_pp_suspend(void *handle)
 {
 	int ret = 0;
@@ -299,28 +312,20 @@
 	return ret;
 }
 
-static void amdgpu_pp_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->powerplay.ip_funcs->print_status)
-		adev->powerplay.ip_funcs->print_status(
-					adev->powerplay.pp_handle);
-}
-
 const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+	.name = "amdgpu_powerplay",
 	.early_init = amdgpu_pp_early_init,
 	.late_init = amdgpu_pp_late_init,
 	.sw_init = amdgpu_pp_sw_init,
 	.sw_fini = amdgpu_pp_sw_fini,
 	.hw_init = amdgpu_pp_hw_init,
 	.hw_fini = amdgpu_pp_hw_fini,
+	.late_fini = amdgpu_pp_late_fini,
 	.suspend = amdgpu_pp_suspend,
 	.resume = amdgpu_pp_resume,
 	.is_idle = amdgpu_pp_is_idle,
 	.wait_for_idle = amdgpu_pp_wait_for_idle,
 	.soft_reset = amdgpu_pp_soft_reset,
-	.print_status = amdgpu_pp_print_status,
 	.set_clockgating_state = amdgpu_pp_set_clockgating_state,
 	.set_powergating_state = amdgpu_pp_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index be6388f..7700dc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -57,9 +57,10 @@
 	ttm_bo_kunmap(&bo->dma_buf_vmap);
 }
 
-struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
-							struct dma_buf_attachment *attach,
-							struct sg_table *sg)
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+				 struct dma_buf_attachment *attach,
+				 struct sg_table *sg)
 {
 	struct reservation_object *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 972eed2..870f949 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -46,7 +46,8 @@
  * wptr.  The GPU then starts fetching commands and executes
  * them until the pointers are equal again.
  */
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+				    struct amdgpu_ring *ring);
 
 /**
  * amdgpu_ring_alloc - allocate space on the ring buffer
@@ -215,18 +216,17 @@
  *
  * @adev: amdgpu_device pointer
  * @ring: amdgpu_ring structure holding ring information
- * @ring_size: size of the ring
+ * @max_dw: maximum number of dw for ring alloc
  * @nop: nop packet for this ring
  *
  * Initialize the driver information for the selected ring (all asics).
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, u32 nop, u32 align_mask,
+		     unsigned max_dw, u32 nop, u32 align_mask,
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
 		     enum amdgpu_ring_type ring_type)
 {
-	u32 rb_bufsz;
 	int r;
 
 	if (ring->adev == NULL) {
@@ -265,8 +265,17 @@
 		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
 		return r;
 	}
-	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
+	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
+
+	r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+	if (r) {
+		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+		return r;
+	}
+	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
+	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
+
 	spin_lock_init(&ring->fence_lock);
 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 	if (r) {
@@ -274,10 +283,8 @@
 		return r;
 	}
 
-	/* Align ring size */
-	rb_bufsz = order_base_2(ring_size / 8);
-	ring_size = (1 << (rb_bufsz + 1)) * 4;
-	ring->ring_size = ring_size;
+	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
+					     amdgpu_sched_hw_submission);
 	ring->align_mask = align_mask;
 	ring->nop = nop;
 	ring->type = ring_type;
@@ -310,8 +317,7 @@
 		}
 	}
 	ring->ptr_mask = (ring->ring_size / 4) - 1;
-	ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
-				    amdgpu_sched_hw_submission);
+	ring->max_dw = max_dw;
 
 	if (amdgpu_debugfs_ring_init(adev, ring)) {
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
@@ -337,6 +343,7 @@
 	ring->ring = NULL;
 	ring->ring_obj = NULL;
 
+	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
 	amdgpu_wb_free(ring->adev, ring->fence_offs);
 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
 	amdgpu_wb_free(ring->adev, ring->wptr_offs);
@@ -363,9 +370,8 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	int roffset = *(int*)node->info_ent->data;
+	int roffset = (unsigned long)node->info_ent->data;
 	struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
-
 	uint32_t rptr, wptr, rptr_next;
 	unsigned i;
 
@@ -408,46 +414,37 @@
 	return 0;
 }
 
-/* TODO: clean this up !*/
-static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
-static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
-static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
-static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
-static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
-static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
-
-static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
-	{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
-	{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
-	{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
-	{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
-	{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
-	{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
-	{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
-	{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
-};
+static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
+static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
 
 #endif
 
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+				    struct amdgpu_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
+	unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
 	unsigned i;
+	struct drm_info_list *info;
+	char *name;
+
 	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
-		struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
-		int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
-		struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
-		unsigned r;
-
-		if (other != ring)
-			continue;
-
-		r = amdgpu_debugfs_add_files(adev, info, 1);
-		if (r)
-			return r;
+		info = &amdgpu_debugfs_ring_info_list[i];
+		if (!info->data)
+			break;
 	}
+
+	if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
+		return -ENOSPC;
+
+	name = &amdgpu_debugfs_ring_names[i][0];
+	sprintf(name, "amdgpu_ring_%s", ring->name);
+	info->name = name;
+	info->show = amdgpu_debugfs_ring_info;
+	info->driver_features = 0;
+	info->data = (void*)(uintptr_t)offset;
+
+	return amdgpu_debugfs_add_files(adev, info, 1);
 #endif
 	return 0;
 }
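
A minimal sketch of the ring sizing rule the hunk above introduces: the ring is now sized as the next power of two of max_dw dwords * 4 bytes * the scheduler's hardware submission count, and max_dw itself is stored directly instead of being derived from the ring size. The open-coded round-up helper, the example max_dw value and the hw_submission value of 2 below are illustrative assumptions, not values taken from this patch.

/* Illustrative userspace sketch of the new ring sizing rule:
 * ring bytes = next power of two of (max_dw dwords * 4 bytes * hw submissions).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t roundup_pow_of_two_u32(uint32_t x)
{
	uint32_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned max_dw = 1024;		/* example per-submission dword budget */
	unsigned hw_submission = 2;	/* assumed scheduler submission count */
	uint32_t ring_size = roundup_pow_of_two_u32(max_dw * 4 * hw_submission);

	printf("ring_size = %u bytes, ptr_mask = 0x%x\n",
	       (unsigned)ring_size, (unsigned)(ring_size / 4 - 1));
	return 0;
}
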
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84ef..48618ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -115,6 +115,7 @@
 		return r;
 	}
 	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
 	amdgpu_bo_unreserve(sa_manager->bo);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c48b4fc..34a9280 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -109,6 +109,29 @@
 }
 
 /**
+ * amdgpu_sync_add_later - add the fence to the hash
+ *
+ * @sync: sync object to add the fence to
+ * @f: fence to add
+ *
+ * Tries to add the fence to an existing hash entry. Returns true when an entry
+ * was found, false otherwise.
+ */
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+{
+	struct amdgpu_sync_entry *e;
+
+	hash_for_each_possible(sync->fences, e, node, f->context) {
+		if (unlikely(e->fence->context != f->context))
+			continue;
+
+		amdgpu_sync_keep_later(&e->fence, f);
+		return true;
+	}
+	return false;
+}
+
+/**
  * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
@@ -127,13 +150,8 @@
 	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
 		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
-	hash_for_each_possible(sync->fences, e, node, f->context) {
-		if (unlikely(e->fence->context != f->context))
-			continue;
-
-		amdgpu_sync_keep_later(&e->fence, f);
+	if (amdgpu_sync_add_later(sync, f))
 		return 0;
-	}
 
 	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
 	if (!e)
@@ -204,6 +222,81 @@
 	return r;
 }
 
+/**
+ * amdgpu_sync_is_idle - test if all fences are signaled
+ *
+ * @sync: the sync object
+ *
+ * Returns true if all fences in the sync object are signaled.
+ */
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	int i;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		struct fence *f = e->fence;
+
+		if (fence_is_signaled(f)) {
+			hash_del(&e->node);
+			fence_put(f);
+			kmem_cache_free(amdgpu_sync_slab, e);
+			continue;
+		}
+
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ *
+ * @dst: the destination sync object
+ * @src: the source sync object
+ * @fence: fence to add to source
+ *
+ * Remove all fences from source, put them into destination, and add
+ * @fence as a new entry into source.
+ */
+int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
+			     struct fence *fence)
+{
+	struct amdgpu_sync_entry *e, *newone;
+	struct hlist_node *tmp;
+	int i;
+
+	/* Allocate the new entry before moving the old ones */
+	newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+	if (!newone)
+		return -ENOMEM;
+
+	hash_for_each_safe(src->fences, i, tmp, e, node) {
+		struct fence *f = e->fence;
+
+		hash_del(&e->node);
+		if (fence_is_signaled(f)) {
+			fence_put(f);
+			kmem_cache_free(amdgpu_sync_slab, e);
+			continue;
+		}
+
+		if (amdgpu_sync_add_later(dst, f)) {
+			kmem_cache_free(amdgpu_sync_slab, e);
+			continue;
+		}
+
+		hash_add(dst->fences, &e->node, f->context);
+	}
+
+	hash_add(src->fences, &newone->node, fence->context);
+	newone->fence = fence_get(fence);
+
+	return 0;
+}
+
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
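
A simplified, self-contained sketch of the bookkeeping the amdgpu_sync hunks above add: the container keeps at most one (the latest) fence per context and counts as idle once every remembered fence has signaled. The toy_* types and the plain array standing in for the kernel hashtable are assumptions made purely for illustration; this is not kernel code.

/* Simplified userspace analogue of the amdgpu_sync bookkeeping. */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct toy_fence {
	uint64_t context;	/* submission context of the fence */
	uint64_t seqno;		/* later fences in a context carry larger seqno */
	bool signaled;
};

#define TOY_SYNC_MAX 16

struct toy_sync {
	struct toy_fence *fences[TOY_SYNC_MAX];
	unsigned count;
};

/* counterpart of amdgpu_sync_add_later(): keep only the later fence */
static bool toy_sync_add_later(struct toy_sync *sync, struct toy_fence *f)
{
	for (unsigned i = 0; i < sync->count; ++i) {
		if (sync->fences[i]->context != f->context)
			continue;
		if (f->seqno > sync->fences[i]->seqno)
			sync->fences[i] = f;
		return true;
	}
	return false;
}

/* counterpart of amdgpu_sync_is_idle(): idle once everything signaled */
static bool toy_sync_is_idle(const struct toy_sync *sync)
{
	for (unsigned i = 0; i < sync->count; ++i)
		if (!sync->fences[i]->signaled)
			return false;
	return true;
}

int main(void)
{
	struct toy_fence a = { .context = 1, .seqno = 1, .signaled = true };
	struct toy_fence b = { .context = 1, .seqno = 2, .signaled = false };
	struct toy_sync sync = { .fences = { &a }, .count = 1 };

	toy_sync_add_later(&sync, &b);			/* replaces a with the later b */
	printf("idle: %d\n", toy_sync_is_idle(&sync));	/* 0: b has not signaled */
	return 0;
}
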
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 11af449..3b9053a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -911,6 +911,52 @@
 	return flags;
 }
 
+static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
+		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
+
+		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+			if (&tbo->lru == lru->lru[j])
+				lru->lru[j] = tbo->lru.prev;
+
+		if (&tbo->swap == lru->swap_lru)
+			lru->swap_lru = tbo->swap.prev;
+	}
+}
+
+static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	unsigned log2_size = min(ilog2(tbo->num_pages),
+				 AMDGPU_TTM_LRU_SIZE - 1);
+
+	return &adev->mman.log2_size[log2_size];
+}
+
+static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
+	struct list_head *res = lru->lru[tbo->mem.mem_type];
+
+	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+
+	return res;
+}
+
+static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
+	struct list_head *res = lru->swap_lru;
+
+	lru->swap_lru = &tbo->swap;
+
+	return res;
+}
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@@ -924,10 +970,14 @@
 	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_free = &amdgpu_ttm_io_mem_free,
+	.lru_removal = &amdgpu_ttm_lru_removal,
+	.lru_tail = &amdgpu_ttm_lru_tail,
+	.swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
 };
 
 int amdgpu_ttm_init(struct amdgpu_device *adev)
 {
+	unsigned i, j;
 	int r;
 
 	r = amdgpu_ttm_global_init(adev);
@@ -945,6 +995,15 @@
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
 	}
+
+	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
+		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
+
+		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+			lru->lru[j] = &adev->mman.bdev.man[j].lru;
+		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
+	}
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 				adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1167,7 +1226,7 @@
 static int ttm_pl_vram = TTM_PL_VRAM;
 static int ttm_pl_tt = TTM_PL_TT;
 
-static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
+static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
 	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
 	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
@@ -1218,6 +1277,8 @@
 	.llseek = default_llseek
 };
 
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+
 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 				   size_t size, loff_t *pos)
 {
@@ -1265,6 +1326,8 @@
 
 #endif
 
+#endif
+
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
@@ -1280,6 +1343,7 @@
 	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
 	adev->mman.vram = ent;
 
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
 				  adev, &amdgpu_ttm_gtt_fops);
 	if (IS_ERR(ent))
@@ -1287,6 +1351,7 @@
 	i_size_write(ent->d_inode, adev->mc.gtt_size);
 	adev->mman.gtt = ent;
 
+#endif
 	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
 
 #ifdef CONFIG_SWIOTLB
@@ -1308,7 +1373,10 @@
 	debugfs_remove(adev->mman.vram);
 	adev->mman.vram = NULL;
 
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	debugfs_remove(adev->mman.gtt);
 	adev->mman.gtt = NULL;
 #endif
+
+#endif
 }
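
A small userspace sketch of the per-size LRU bucket selection the TTM hunks above introduce: the bucket index is ilog2 of the BO's page count, clamped to the last bucket. AMDGPU_TTM_LRU_SIZE is defined outside this diff, so TOY_TTM_LRU_SIZE below is an assumed stand-in chosen only for illustration.

/* Sketch of the per-size LRU bucket selection added for TTM BOs:
 * bucket = min(ilog2(num_pages), AMDGPU_TTM_LRU_SIZE - 1).
 */
#include <stdio.h>

#define TOY_TTM_LRU_SIZE 8	/* assumed stand-in for AMDGPU_TTM_LRU_SIZE */

static unsigned toy_ilog2(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		++r;
	return r;
}

static unsigned toy_lru_bucket(unsigned long num_pages)
{
	unsigned log2_size = toy_ilog2(num_pages);

	return log2_size < TOY_TTM_LRU_SIZE - 1 ? log2_size
						: TOY_TTM_LRU_SIZE - 1;
}

int main(void)
{
	printf("1 page -> bucket %u, 4096 pages -> bucket %u\n",
	       toy_lru_bucket(1), toy_lru_bucket(4096));
	return 0;
}
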
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 871018c..e19520c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -41,19 +41,23 @@
 
 /* 1 second timeout */
 #define UVD_IDLE_TIMEOUT_MS	1000
+/* Polaris10/11 firmware version */
+#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI 	"radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI 	"radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII 	"radeon/hawaii_uvd.bin"
+#define FIRMWARE_KABINI	"radeon/kabini_uvd.bin"
+#define FIRMWARE_KAVERI	"radeon/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII	"radeon/hawaii_uvd.bin"
 #define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
 #define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
 #define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
+#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
+#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -85,6 +89,8 @@
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
+MODULE_FIRMWARE(FIRMWARE_POLARIS10);
+MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 
 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -131,6 +137,12 @@
 	case CHIP_STONEY:
 		fw_name = FIRMWARE_STONEY;
 		break;
+	case CHIP_POLARIS10:
+		fw_name = FIRMWARE_POLARIS10;
+		break;
+	case CHIP_POLARIS11:
+		fw_name = FIRMWARE_POLARIS11;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -151,6 +163,9 @@
 		return r;
 	}
 
+	/* Set the default UVD handles that the firmware can handle */
+	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
 	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@@ -158,11 +173,28 @@
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	/*
+	 * Limit the number of UVD handles depending on microcode major
+	 * and minor versions. The firmware version which has 40 UVD
+	 * instances support is 1.80. So all subsequent versions should
+	 * also have the same support.
+	 */
+	if ((version_major > 0x01) ||
+	    ((version_major == 0x01) && (version_minor >= 0x50)))
+		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
 	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
 				(family_id << 8));
 
+	if ((adev->asic_type == CHIP_POLARIS10 ||
+	     adev->asic_type == CHIP_POLARIS11) &&
+	    (adev->uvd.fw_version < FW_1_66_16))
+		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+			  version_major, version_minor);
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+		  +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+		  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -205,7 +237,7 @@
 		return r;
 	}
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
 	}
@@ -221,19 +253,20 @@
 {
 	int r;
 
-	if (adev->uvd.vcpu_bo == NULL)
-		return 0;
+	kfree(adev->uvd.saved_bo);
 
 	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-	if (!r) {
-		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-	}
+	if (adev->uvd.vcpu_bo) {
+		r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+		if (!r) {
+			amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+			amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+			amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+		}
 
-	amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+	}
 
 	amdgpu_ring_fini(&adev->uvd.ring);
 
@@ -251,7 +284,7 @@
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+	for (i = 0; i < adev->uvd.max_handles; ++i)
 		if (atomic_read(&adev->uvd.handles[i]))
 			break;
 
@@ -308,7 +341,7 @@
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	int i, r;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
 			struct fence *fence;
@@ -390,7 +423,8 @@
  *
  * Peek into the decode message and calculate the necessary buffer sizes.
  */
-static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
+	unsigned buf_sizes[])
 {
 	unsigned stream_type = msg[4];
 	unsigned width = msg[6];
@@ -412,7 +446,6 @@
 
 	switch (stream_type) {
 	case 0: /* H264 */
-	case 7: /* H264 Perf */
 		switch(level) {
 		case 30:
 			num_dpb_buffer = 8100 / fs_in_mb;
@@ -490,6 +523,54 @@
 		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
 		break;
 
+	case 7: /* H264 Perf */
+		switch(level) {
+		case 30:
+			num_dpb_buffer = 8100 / fs_in_mb;
+			break;
+		case 31:
+			num_dpb_buffer = 18000 / fs_in_mb;
+			break;
+		case 32:
+			num_dpb_buffer = 20480 / fs_in_mb;
+			break;
+		case 41:
+			num_dpb_buffer = 32768 / fs_in_mb;
+			break;
+		case 42:
+			num_dpb_buffer = 34816 / fs_in_mb;
+			break;
+		case 50:
+			num_dpb_buffer = 110400 / fs_in_mb;
+			break;
+		case 51:
+			num_dpb_buffer = 184320 / fs_in_mb;
+			break;
+		default:
+			num_dpb_buffer = 184320 / fs_in_mb;
+			break;
+		}
+		num_dpb_buffer++;
+		if (num_dpb_buffer > 17)
+			num_dpb_buffer = 17;
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * num_dpb_buffer;
+
+		if (adev->asic_type < CHIP_POLARIS10) {
+			/* macroblock context buffer */
+			min_dpb_size +=
+				width_in_mb * height_in_mb * num_dpb_buffer * 192;
+
+			/* IT surface buffer */
+			min_dpb_size += width_in_mb * height_in_mb * 32;
+		} else {
+			/* macroblock context buffer */
+			min_ctx_size =
+				width_in_mb * height_in_mb * num_dpb_buffer * 192;
+		}
+		break;
+
 	case 16: /* H265 */
 		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
 		image_size = ALIGN(image_size, 256);
@@ -568,7 +649,7 @@
 		amdgpu_bo_kunmap(bo);
 
 		/* try to alloc a new handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
 				DRM_ERROR("Handle 0x%x already in use!\n", handle);
 				return -EINVAL;
@@ -585,13 +666,13 @@
 
 	case 1:
 		/* it's a decode msg, calc buffer sizes */
-		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
+		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
 		amdgpu_bo_kunmap(bo);
 		if (r)
 			return r;
 
 		/* validate the handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
 				if (adev->uvd.filp[i] != ctx->parser->filp) {
 					DRM_ERROR("UVD handle collision detected!\n");
@@ -606,7 +687,7 @@
 
 	case 2:
 		/* it's a destroy msg, free the handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		for (i = 0; i < adev->uvd.max_handles; ++i)
 			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
@@ -886,7 +967,7 @@
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
 		job->fence = f;
 		if (r)
 			goto err_free;
@@ -1018,7 +1099,7 @@
 
 	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+	for (i = 0; i < adev->uvd.max_handles; ++i)
 		if (atomic_read(&adev->uvd.handles[i]))
 			++handles;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 481a64f..875626a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -41,15 +41,17 @@
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI 	"radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI 	"radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII 	"radeon/hawaii_vce.bin"
+#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
+#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
+#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
 #define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
 #define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
 #define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
+#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
+#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -62,6 +64,8 @@
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
+MODULE_FIRMWARE(FIRMWARE_POLARIS10);
+MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -113,6 +117,12 @@
 	case CHIP_STONEY:
 		fw_name = FIRMWARE_STONEY;
 		break;
+	case CHIP_POLARIS10:
+		fw_name = FIRMWARE_POLARIS10;
+		break;
+	case CHIP_POLARIS11:
+		fw_name = FIRMWARE_POLARIS11;
+		break;
 
 	default:
 		return -EINVAL;
@@ -426,7 +436,7 @@
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
 	job->fence = f;
 	if (r)
 		goto err;
@@ -488,7 +498,7 @@
 		ib->ptr[i] = 0x0;
 
 	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
 		job->fence = f;
 		if (r)
 			goto err;
@@ -752,7 +762,8 @@
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
+			     unsigned vm_id, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, VCE_CMD_IB);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index ef99d23..f40cf76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -34,7 +34,8 @@
 			       bool direct, struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
+			     unsigned vm_id, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 				unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b6c011b..62a4c12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
 /* Special value that no flush is necessary */
 #define AMDGPU_VM_NO_FLUSH (~0ll)
 
+/* Local structure. Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ */
+struct amdgpu_vm_update_params {
+	/* address where to copy page table entries from */
+	uint64_t src;
+	/* DMA addresses to use for mapping */
+	dma_addr_t *pages_addr;
+	/* indirect buffer to fill with commands */
+	struct amdgpu_ib *ib;
+};
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
@@ -166,74 +178,109 @@
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
 	struct fence *updates = sync->last_vm_update;
+	struct amdgpu_vm_id *id;
+	unsigned i = ring->idx;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
-	/* check if the id is still valid */
-	if (id->mgr_id) {
-		struct fence *flushed = id->flushed_updates;
-		bool is_later;
-		long owner;
+	/* Check if we can use a VMID already assigned to this VM */
+	do {
+		struct fence *flushed;
 
-		if (!flushed)
-			is_later = true;
-		else if (!updates)
-			is_later = false;
-		else
-			is_later = fence_is_later(updates, flushed);
+		id = vm->ids[i++];
+		if (i == AMDGPU_MAX_RINGS)
+			i = 0;
 
-		owner = atomic_long_read(&id->mgr_id->owner);
-		if (!is_later && owner == (long)id &&
-		    pd_addr == id->pd_gpu_addr) {
+		/* Check all the prerequisites to using this VMID */
+		if (!id)
+			continue;
 
+		if (atomic64_read(&id->owner) != vm->client_id)
+			continue;
+
+		if (pd_addr != id->pd_gpu_addr)
+			continue;
+
+		if (id->last_user != ring &&
+		    (!id->last_flush || !fence_is_signaled(id->last_flush)))
+			continue;
+
+		flushed  = id->flushed_updates;
+		if (updates && (!flushed || fence_is_later(updates, flushed)))
+			continue;
+
+		/* Good we can use this VMID */
+		if (id->last_user == ring) {
 			r = amdgpu_sync_fence(ring->adev, sync,
-					      id->mgr_id->active);
-			if (r) {
-				mutex_unlock(&adev->vm_manager.lock);
-				return r;
-			}
-
-			fence_put(id->mgr_id->active);
-			id->mgr_id->active = fence_get(fence);
-
-			list_move_tail(&id->mgr_id->list,
-				       &adev->vm_manager.ids_lru);
-
-			*vm_id = id->mgr_id - adev->vm_manager.ids;
-			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
-						*vm_pd_addr);
-
-			mutex_unlock(&adev->vm_manager.lock);
-			return 0;
+					      id->first);
+			if (r)
+				goto error;
 		}
-	}
 
-	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
-				      struct amdgpu_vm_manager_id,
-				      list);
+		/* And remember this submission as user of the VMID */
+		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+		if (r)
+			goto error;
 
-	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
-	if (!r) {
-		fence_put(id->mgr_id->active);
-		id->mgr_id->active = fence_get(fence);
+		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		vm->ids[ring->idx] = id;
 
-		fence_put(id->flushed_updates);
-		id->flushed_updates = fence_get(updates);
-
-		id->pd_gpu_addr = pd_addr;
-
-		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
-		atomic_long_set(&id->mgr_id->owner, (long)id);
-
-		*vm_id = id->mgr_id - adev->vm_manager.ids;
-		*vm_pd_addr = pd_addr;
+		*vm_id = id - adev->vm_manager.ids;
+		*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
 		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+
+		mutex_unlock(&adev->vm_manager.lock);
+		return 0;
+
+	} while (i != ring->idx);
+
+	id = list_first_entry(&adev->vm_manager.ids_lru,
+			      struct amdgpu_vm_id,
+			      list);
+
+	if (!amdgpu_sync_is_idle(&id->active)) {
+		struct list_head *head = &adev->vm_manager.ids_lru;
+		struct amdgpu_vm_id *tmp;
+
+		list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
+					 list) {
+			if (amdgpu_sync_is_idle(&id->active)) {
+				list_move(&id->list, head);
+				head = &id->list;
+			}
+		}
+		id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_id,
+				      list);
 	}
 
+	r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+	if (r)
+		goto error;
+
+	fence_put(id->first);
+	id->first = fence_get(fence);
+
+	fence_put(id->last_flush);
+	id->last_flush = NULL;
+
+	fence_put(id->flushed_updates);
+	id->flushed_updates = fence_get(updates);
+
+	id->pd_gpu_addr = pd_addr;
+
+	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+	id->last_user = ring;
+	atomic64_set(&id->owner, vm->client_id);
+	vm->ids[ring->idx] = id;
+
+	*vm_id = id - adev->vm_manager.ids;
+	*vm_pd_addr = pd_addr;
+	trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+
+error:
 	mutex_unlock(&adev->vm_manager.lock);
 	return r;
 }
@@ -247,43 +294,62 @@
  *
  * Emit a VM flush when it is necessary.
  */
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     unsigned vm_id, uint64_t pd_addr,
-		     uint32_t gds_base, uint32_t gds_size,
-		     uint32_t gws_base, uint32_t gws_size,
-		     uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+		    unsigned vm_id, uint64_t pd_addr,
+		    uint32_t gds_base, uint32_t gds_size,
+		    uint32_t gws_base, uint32_t gws_size,
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool vmid_switch)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-		mgr_id->gds_base != gds_base ||
-		mgr_id->gds_size != gds_size ||
-		mgr_id->gws_base != gws_base ||
-		mgr_id->gws_size != gws_size ||
-		mgr_id->oa_base != oa_base ||
-		mgr_id->oa_size != oa_size);
+		id->gds_base != gds_base ||
+		id->gds_size != gds_size ||
+		id->gws_base != gws_base ||
+		id->gws_size != gws_size ||
+		id->oa_base != oa_base ||
+		id->oa_size != oa_size);
+	int r;
 
 	if (ring->funcs->emit_pipeline_sync && (
-	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
+	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
 		amdgpu_ring_emit_pipeline_sync(ring);
 
-	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+	if (ring->funcs->emit_vm_flush &&
+	    pd_addr != AMDGPU_VM_NO_FLUSH) {
+		struct fence *fence;
+
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
 		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+
+		mutex_lock(&adev->vm_manager.lock);
+		if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
+			r = amdgpu_fence_emit(ring, &fence);
+			if (r) {
+				mutex_unlock(&adev->vm_manager.lock);
+				return r;
+			}
+			fence_put(id->last_flush);
+			id->last_flush = fence;
+		}
+		mutex_unlock(&adev->vm_manager.lock);
 	}
 
 	if (gds_switch_needed) {
-		mgr_id->gds_base = gds_base;
-		mgr_id->gds_size = gds_size;
-		mgr_id->gws_base = gws_base;
-		mgr_id->gws_size = gws_size;
-		mgr_id->oa_base = oa_base;
-		mgr_id->oa_size = oa_size;
+		id->gds_base = gds_base;
+		id->gds_size = gds_size;
+		id->gws_base = gws_base;
+		id->gws_size = gws_size;
+		id->oa_base = oa_base;
+		id->oa_size = oa_size;
 		amdgpu_ring_emit_gds_switch(ring, vm_id,
 					    gds_base, gds_size,
 					    gws_base, gws_size,
 					    oa_base, oa_size);
 	}
+
+	return 0;
 }
 
 /**
@@ -296,14 +362,14 @@
  */
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
 {
-	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
 
-	mgr_id->gds_base = 0;
-	mgr_id->gds_size = 0;
-	mgr_id->gws_base = 0;
-	mgr_id->gws_size = 0;
-	mgr_id->oa_base = 0;
-	mgr_id->oa_size = 0;
+	id->gds_base = 0;
+	id->gds_size = 0;
+	id->gws_base = 0;
+	id->gws_size = 0;
+	id->oa_base = 0;
+	id->oa_size = 0;
 }
 
 /**
@@ -335,9 +401,7 @@
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw access flags
- * @ib: indirect buffer to fill with commands
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -348,30 +412,29 @@
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_gart *gtt,
-				   uint32_t gtt_flags,
-				   struct amdgpu_ib *ib,
+				   struct amdgpu_vm_update_params
+					*vm_update_params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
-		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
-		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+	if (vm_update_params->src) {
+		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
+			pe, (vm_update_params->src + (addr >> 12) * 8), count);
 
-	} else if (gtt) {
-		dma_addr_t *pages_addr = gtt->pages_addr;
-		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
-				    count, incr, flags);
+	} else if (vm_update_params->pages_addr) {
+		amdgpu_vm_write_pte(adev, vm_update_params->ib,
+			vm_update_params->pages_addr,
+			pe, addr, count, incr, flags);
 
 	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
 				    count, incr, flags);
 
 	} else {
-		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
 				      count, incr, flags);
 	}
 }
@@ -391,10 +454,12 @@
 	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
 	struct amdgpu_job *job;
+	struct amdgpu_vm_update_params vm_update_params;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -412,7 +477,8 @@
 	if (r)
 		goto error;
 
-	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+	vm_update_params.ib = &job->ibs[0];
+	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
 			       0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
@@ -485,11 +551,12 @@
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
+	struct amdgpu_vm_update_params vm_update_params;
 	struct fence *fence = NULL;
 
 	int r;
 
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -502,7 +569,7 @@
 	if (r)
 		return r;
 
-	ib = &job->ibs[0];
+	vm_update_params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -522,7 +589,7 @@
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, NULL, 0, ib,
+				amdgpu_vm_update_pages(adev, &vm_update_params,
 						       last_pde, last_pt,
 						       count, incr,
 						       AMDGPU_PTE_VALID);
@@ -537,14 +604,15 @@
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
-				       count, incr, AMDGPU_PTE_VALID);
+		amdgpu_vm_update_pages(adev, &vm_update_params,
+					last_pde, last_pt,
+					count, incr, AMDGPU_PTE_VALID);
 
-	if (ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, ib);
+	if (vm_update_params.ib->length_dw != 0) {
+		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib->length_dw > ndw);
+		WARN_ON(vm_update_params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
 				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -570,18 +638,15 @@
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
- * @ib: IB for the update
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
  */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_gart *gtt,
-				uint32_t gtt_flags,
-				struct amdgpu_ib *ib,
+				struct amdgpu_vm_update_params
+					*vm_update_params,
 				uint64_t pe_start, uint64_t pe_end,
 				uint64_t addr, uint32_t flags)
 {
@@ -618,10 +683,11 @@
 		return;
 
 	/* system pages are non continuously */
-	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+	if (vm_update_params->src || vm_update_params->pages_addr ||
+		!(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
 				       addr, count, AMDGPU_GPU_PAGE_SIZE,
 				       flags);
 		return;
@@ -630,21 +696,21 @@
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
@@ -653,8 +719,7 @@
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
@@ -664,10 +729,9 @@
  * Update the page tables in the range @start - @end.
  */
 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_gart *gtt,
-				  uint32_t gtt_flags,
+				  struct amdgpu_vm_update_params
+					*vm_update_params,
 				  struct amdgpu_vm *vm,
-				  struct amdgpu_ib *ib,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint32_t flags)
 {
@@ -693,7 +757,7 @@
 
 		if (last_pe_end != pe_start) {
 
-			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+			amdgpu_vm_frag_ptes(adev, vm_update_params,
 					    last_pe_start, last_pe_end,
 					    last_dst, flags);
 
@@ -708,17 +772,16 @@
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
-			    last_pe_start, last_pe_end,
-			    last_dst, flags);
+	amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
+			    last_pe_end, last_dst, flags);
 }
 
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: flags as they are used for GTT
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @start: start of mapped range
  * @last: last mapped entry
@@ -730,8 +793,8 @@
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct amdgpu_gart *gtt,
-				       uint32_t gtt_flags,
+				       uint64_t src,
+				       dma_addr_t *pages_addr,
 				       struct amdgpu_vm *vm,
 				       uint64_t start, uint64_t last,
 				       uint32_t flags, uint64_t addr,
@@ -741,11 +804,14 @@
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
+	struct amdgpu_vm_update_params vm_update_params;
 	struct fence *f = NULL;
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	vm_update_params.src = src;
+	vm_update_params.pages_addr = pages_addr;
 
 	/* sync to everything on unmapping */
 	if (!(flags & AMDGPU_PTE_VALID))
@@ -762,11 +828,11 @@
 	/* padding, etc. */
 	ndw = 64;
 
-	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+	if (vm_update_params.src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (gtt) {
+	} else if (vm_update_params.pages_addr) {
 		/* header for write data commands */
 		ndw += ncmds * 4;
 
@@ -785,7 +851,7 @@
 	if (r)
 		return r;
 
-	ib = &job->ibs[0];
+	vm_update_params.ib = &job->ibs[0];
 
 	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 			     owner);
@@ -796,11 +862,11 @@
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
-			      addr, flags);
+	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+			      last + 1, addr, flags);
 
-	amdgpu_ring_pad_ib(ring, ib);
-	WARN_ON(ib->length_dw > ndw);
+	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+	WARN_ON(vm_update_params.ib->length_dw > ndw);
 	r = amdgpu_job_submit(job, ring, &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
@@ -823,11 +889,12 @@
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
+ * @gtt_flags: flags as they are used for GTT
+ * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
  * @addr: addr to set the area to
- * @gtt_flags: flags as they are used for GTT
+ * @flags: HW flags for the mapping
  * @fence: optional resulting fence
  *
  * Split the mapping into smaller chunks so that each update fits
@@ -835,16 +902,16 @@
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-				      struct amdgpu_gart *gtt,
 				      uint32_t gtt_flags,
+				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint64_t addr, struct fence **fence)
+				      uint32_t flags, uint64_t addr,
+				      struct fence **fence)
 {
 	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
 
-	uint64_t start = mapping->it.start;
-	uint32_t flags = gtt_flags;
+	uint64_t src = 0, start = mapping->it.start;
 	int r;
 
 	/* normally, bo_va->flags only contains READABLE and WRITEABLE bit go here
@@ -857,10 +924,15 @@
 
 	trace_amdgpu_vm_bo_update(mapping);
 
+	if (pages_addr) {
+		if (flags == gtt_flags)
+			src = adev->gart.table_addr + (addr >> 12) * 8;
+		addr = 0;
+	}
 	addr += mapping->offset;
 
-	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
-		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+	if (!pages_addr || src)
+		return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
 						   start, mapping->it.last,
 						   flags, addr, fence);
 
@@ -868,7 +940,7 @@
 		uint64_t last;
 
 		last = min((uint64_t)mapping->it.last, start + max_size - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+		r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
 						start, last, flags, addr,
 						fence);
 		if (r)
@@ -899,16 +971,20 @@
 {
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_gart *gtt = NULL;
-	uint32_t flags;
+	dma_addr_t *pages_addr = NULL;
+	uint32_t gtt_flags, flags;
 	uint64_t addr;
 	int r;
 
 	if (mem) {
+		struct ttm_dma_tt *ttm;
+
 		addr = (u64)mem->start << PAGE_SHIFT;
 		switch (mem->mem_type) {
 		case TTM_PL_TT:
-			gtt = &bo_va->bo->adev->gart;
+			ttm = container_of(bo_va->bo->tbo.ttm, struct
+					   ttm_dma_tt, ttm);
+			pages_addr = ttm->dma_address;
 			break;
 
 		case TTM_PL_VRAM:
@@ -923,6 +999,7 @@
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
+	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -930,7 +1007,8 @@
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
+		r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+					       mapping, flags, addr,
 					       &bo_va->last_pt_update);
 		if (r)
 			return r;
@@ -976,8 +1054,8 @@
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
 
-		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
-					       0, NULL);
+		r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+					       0, 0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
@@ -1320,11 +1398,10 @@
 	struct amd_sched_rq *rq;
 	int i, r;
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].mgr_id = NULL;
-		vm->ids[i].flushed_updates = NULL;
-	}
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		vm->ids[i] = NULL;
 	vm->va = RB_ROOT;
+	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
@@ -1416,15 +1493,6 @@
 
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_vm_id *id = &vm->ids[i];
-
-		if (id->mgr_id)
-			atomic_long_cmpxchg(&id->mgr_id->owner,
-					    (long)id, 0);
-		fence_put(id->flushed_updates);
-	}
 }
 
 /**
@@ -1443,11 +1511,13 @@
 	/* skip over VMID 0, since it is the system VM */
 	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
 		amdgpu_vm_reset_id(adev, i);
+		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
 		list_add_tail(&adev->vm_manager.ids[i].list,
 			      &adev->vm_manager.ids_lru);
 	}
 
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
+	atomic64_set(&adev->vm_manager.client_counter, 0);
 }
 
 /**
@@ -1461,6 +1531,11 @@
 {
 	unsigned i;
 
-	for (i = 0; i < AMDGPU_NUM_VM; ++i)
-		fence_put(adev->vm_manager.ids[i].active);
+	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
+		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
+
+		fence_put(adev->vm_manager.ids[i].first);
+		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
+		fence_put(id->flushed_updates);
+	}
 }
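
A hedged, self-contained restatement of the VMID-reuse test in the reworked amdgpu_vm_grab_id() above: a previously used id is taken over without a flush only when it still belongs to the same client, still maps the same page directory, any flush issued from another ring has signaled, and no page-table updates newer than the already-flushed ones are pending. The struct and sequence numbers below are simplified stand-ins, not the kernel types.

/* Simplified restatement of the VMID-reuse conditions checked in the loop. */
#include <stdbool.h>
#include <stdint.h>

struct toy_vm_id {
	uint64_t owner;			/* client_id that last owned this VMID */
	uint64_t pd_gpu_addr;		/* page directory the VMID was flushed for */
	bool     same_ring;		/* last user is the ring we submit on */
	bool     last_flush_signaled;	/* last flush fence has signaled */
	int64_t  flushed_seq;		/* seq of updates already flushed, -1 if none */
};

static bool toy_vmid_reusable(const struct toy_vm_id *id,
			      uint64_t client_id, uint64_t pd_addr,
			      int64_t pending_update_seq /* -1 if none */)
{
	if (id->owner != client_id)
		return false;
	if (id->pd_gpu_addr != pd_addr)
		return false;
	/* a flush issued from another ring must have completed first */
	if (!id->same_ring && !id->last_flush_signaled)
		return false;
	/* page-table updates newer than the last flushed ones force a flush */
	if (pending_update_seq >= 0 &&
	    (id->flushed_seq < 0 || pending_update_seq > id->flushed_seq))
		return false;
	return true;
}

int main(void)
{
	struct toy_vm_id id = { .owner = 1, .pd_gpu_addr = 0x1000,
				.same_ring = true, .last_flush_signaled = true,
				.flushed_seq = 5 };

	/* reusable: same client, same PD, updates (seq 4) already flushed */
	return toy_vmid_reusable(&id, 1, 0x1000, 4) ? 0 : 1;
}
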
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index fece8f4..49daf6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -92,7 +92,7 @@
 #define ATOM_WS_AND_MASK	0x45
 #define ATOM_WS_FB_WINDOW	0x46
 #define ATOM_WS_ATTRIBUTES	0x47
-#define ATOM_WS_REGPTR  	0x48
+#define ATOM_WS_REGPTR		0x48
 
 #define ATOM_IIO_NOP		0
 #define ATOM_IIO_START		1
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49aa350..49a39b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -461,13 +461,14 @@
 	PIXEL_CLOCK_PARAMETERS_V3 v3;
 	PIXEL_CLOCK_PARAMETERS_V5 v5;
 	PIXEL_CLOCK_PARAMETERS_V6 v6;
+	PIXEL_CLOCK_PARAMETERS_V7 v7;
 };
 
 /* on DCE5, make sure the voltage is high enough to support the
  * required disp clk.
  */
 void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
-				    u32 dispclk)
+					   u32 dispclk)
 {
 	u8 frev, crev;
 	int index;
@@ -510,6 +511,49 @@
 	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+union set_dce_clock {
+	SET_DCE_CLOCK_PS_ALLOCATION_V1_1 v1_1;
+	SET_DCE_CLOCK_PS_ALLOCATION_V2_1 v2_1;
+};
+
+u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
+				       u32 freq, u8 clk_type, u8 clk_src)
+{
+	u8 frev, crev;
+	int index;
+	union set_dce_clock args;
+	u32 ret_freq = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, SetDCEClock);
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return 0;
+
+	switch (frev) {
+	case 2:
+		switch (crev) {
+		case 1:
+			args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
+			args.v2_1.asParam.ucDCEClkType = clk_type;
+			args.v2_1.asParam.ucDCEClkSrc = clk_src;
+			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+			ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return 0;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return 0;
+	}
+
+	return ret_freq;
+}
+
 static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
 {
 	if (ENCODER_MODE_IS_DP(encoder_mode)) {
@@ -523,18 +567,18 @@
 }
 
 void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
-			       u32 crtc_id,
-			       int pll_id,
-			       u32 encoder_mode,
-			       u32 encoder_id,
-			       u32 clock,
-			       u32 ref_div,
-			       u32 fb_div,
-			       u32 frac_fb_div,
-			       u32 post_div,
-			       int bpc,
-			       bool ss_enabled,
-			       struct amdgpu_atom_ss *ss)
+				      u32 crtc_id,
+				      int pll_id,
+				      u32 encoder_mode,
+				      u32 encoder_id,
+				      u32 clock,
+				      u32 ref_div,
+				      u32 fb_div,
+				      u32 frac_fb_div,
+				      u32 post_div,
+				      int bpc,
+				      bool ss_enabled,
+				      struct amdgpu_atom_ss *ss)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -652,6 +696,34 @@
 			args.v6.ucEncoderMode = encoder_mode;
 			args.v6.ucPpll = pll_id;
 			break;
+		case 7:
+			args.v7.ulPixelClock = cpu_to_le32(clock * 10); /* 100 hz units */
+			args.v7.ucMiscInfo = 0;
+			if ((encoder_mode == ATOM_ENCODER_MODE_DVI) &&
+			    (clock > 165000))
+				args.v7.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+			args.v7.ucCRTC = crtc_id;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+					break;
+				case 10:
+					args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+					break;
+				case 12:
+					args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+					break;
+				case 16:
+					args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+					break;
+				}
+			}
+			args.v7.ucTransmitterID = encoder_id;
+			args.v7.ucEncoderMode = encoder_mode;
+			args.v7.ucPpll = pll_id;
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
 			return;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
index c670833..0eeda8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
@@ -37,6 +37,8 @@
 				  struct drm_display_mode *mode);
 void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
 				    u32 dispclk);
+u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
+				       u32 freq, u8 clk_type, u8 clk_src);
 void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
 			       u32 crtc_id,
 			       int pll_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1cd6de5..48b6bd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -567,6 +567,7 @@
 	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
 	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
 	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+	DIG_ENCODER_CONTROL_PARAMETERS_V5 v5;
 };
 
 void
@@ -694,6 +695,47 @@
 			else
 				args.v4.ucHPD_ID = hpd_id + 1;
 			break;
+		case 5:
+			switch (action) {
+			case ATOM_ENCODER_CMD_SETUP_PANEL_MODE:
+				args.v5.asDPPanelModeParam.ucAction = action;
+				args.v5.asDPPanelModeParam.ucPanelMode = panel_mode;
+				args.v5.asDPPanelModeParam.ucDigId = dig->dig_encoder;
+				break;
+			case ATOM_ENCODER_CMD_STREAM_SETUP:
+				args.v5.asStreamParam.ucAction = action;
+				args.v5.asStreamParam.ucDigId = dig->dig_encoder;
+				args.v5.asStreamParam.ucDigMode =
+					amdgpu_atombios_encoder_get_encoder_mode(encoder);
+				if (ENCODER_MODE_IS_DP(args.v5.asStreamParam.ucDigMode))
+					args.v5.asStreamParam.ucLaneNum = dp_lane_count;
+				else if (amdgpu_dig_monitor_is_duallink(encoder,
+									amdgpu_encoder->pixel_clock))
+					args.v5.asStreamParam.ucLaneNum = 8;
+				else
+					args.v5.asStreamParam.ucLaneNum = 4;
+				args.v5.asStreamParam.ulPixelClock =
+					cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
+				args.v5.asStreamParam.ucBitPerColor =
+					amdgpu_atombios_encoder_get_bpc(encoder);
+				args.v5.asStreamParam.ucLinkRateIn270Mhz = dp_clock / 27000;
+				break;
+			case ATOM_ENCODER_CMD_DP_LINK_TRAINING_START:
+			case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1:
+			case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2:
+			case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3:
+			case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4:
+			case ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE:
+			case ATOM_ENCODER_CMD_DP_VIDEO_OFF:
+			case ATOM_ENCODER_CMD_DP_VIDEO_ON:
+				args.v5.asCmdParam.ucAction = action;
+				args.v5.asCmdParam.ucDigId = dig->dig_encoder;
+				break;
+			default:
+				DRM_ERROR("Unsupported action 0x%x\n", action);
+				break;
+			}
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
 			break;
@@ -714,11 +756,12 @@
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 v6;
 };
 
 void
 amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
-				       uint8_t lane_num, uint8_t lane_set)
+					      uint8_t lane_num, uint8_t lane_set)
 {
 	struct drm_device *dev = encoder->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -1070,6 +1113,54 @@
 			args.v5.ucDigEncoderSel = 1 << dig_encoder;
 			args.v5.ucDPLaneSet = lane_set;
 			break;
+		case 6:
+			args.v6.ucAction = action;
+			if (is_dp)
+				args.v6.ulSymClock = cpu_to_le32(dp_clock / 10);
+			else
+				args.v6.ulSymClock = cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
+
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+				else
+					args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+				else
+					args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+				else
+					args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+				args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYG;
+				break;
+			}
+			if (is_dp)
+				args.v6.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v6.ucLaneNum = 8;
+			else
+				args.v6.ucLaneNum = 4;
+			args.v6.ucConnObjId = connector_object_id;
+			if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH)
+				args.v6.ucDPLaneSet = lane_set;
+			else
+				args.v6.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (hpd_id == AMDGPU_HPD_NONE)
+				args.v6.ucHPDSel = 0;
+			else
+				args.v6.ucHPDSel = hpd_id + 1;
+			args.v6.ucDigEncoderSel = 1 << dig_encoder;
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
 			break;
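
The v5 stream/command cases and the v6 transmitter case added above follow the driver's usual AtomBIOS convention: a union carries one parameter layout per table revision, the switch on the command-table revision fills only the matching member, and the filled union is handed to the table executor. A minimal stand-alone sketch of that dispatch shape follows; the struct and function names (encoder_args_v4/v5, setup_encoder) are hypothetical stand-ins, not the real ATOM parameter structures.

/*
 * Illustrative sketch only (not part of the patch): versioned-union
 * dispatch as used by the DIG encoder/transmitter control tables.
 */
#include <stdint.h>
#include <stdio.h>

struct encoder_args_v4 { uint8_t action; uint8_t lane_num; };
struct encoder_args_v5 { uint8_t action; uint8_t dig_id; };

union encoder_args {			/* one member per table revision */
	struct encoder_args_v4 v4;
	struct encoder_args_v5 v5;
};

static void setup_encoder(int crev, uint8_t action, uint8_t dig_id)
{
	union encoder_args args = { 0 };

	switch (crev) {			/* fill only the layout this BIOS expects */
	case 4:
		args.v4.action = action;
		args.v4.lane_num = 4;
		break;
	case 5:
		args.v5.action = action;
		args.v5.dig_id = dig_id;
		break;
	default:
		fprintf(stderr, "Unknown table revision %d\n", crev);
		return;
	}
	/* the real driver now passes &args to the AtomBIOS table executor */
}

int main(void)
{
	setup_encoder(5, 1, 0);
	return 0;
}
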
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1f9109d..5ec1f1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -2549,19 +2549,17 @@
 	return 0;
 }
 
-static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
-					     u32 sclk, u32 min_sclk_in_sr)
+static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
 {
 	u32 i;
 	u32 tmp;
-	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
-		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+	u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
 
 	if (sclk < min)
 		return 0;
 
 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
-		tmp = sclk / (1 << i);
+		tmp = sclk >> i;
 		if (tmp >= min || i == 0)
 			break;
 	}
@@ -3358,8 +3356,7 @@
 	graphic_level->PowerThrottle = 0;
 
 	if (pi->caps_sclk_ds)
-		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
-										   engine_clock,
+		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
 										   CISLAND_MINIMUM_ENGINE_CLOCK);
 
 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -6224,6 +6221,9 @@
 	ci_dpm_fini(adev);
 	mutex_unlock(&adev->pm.mutex);
 
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }
 
@@ -6309,215 +6309,6 @@
 	return 0;
 }
 
-static void ci_dpm_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "CIK DPM registers\n");
-	dev_info(adev->dev, "  BIOS_SCRATCH_4=0x%08X\n",
-		 RREG32(mmBIOS_SCRATCH_4));
-	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING=0x%08X\n",
-		 RREG32(mmMC_ARB_DRAM_TIMING));
-	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2=0x%08X\n",
-		 RREG32(mmMC_ARB_DRAM_TIMING2));
-	dev_info(adev->dev, "  MC_ARB_BURST_TIME=0x%08X\n",
-		 RREG32(mmMC_ARB_BURST_TIME));
-	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING_1=0x%08X\n",
-		 RREG32(mmMC_ARB_DRAM_TIMING_1));
-	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2_1=0x%08X\n",
-		 RREG32(mmMC_ARB_DRAM_TIMING2_1));
-	dev_info(adev->dev, "  MC_CG_CONFIG=0x%08X\n",
-		 RREG32(mmMC_CG_CONFIG));
-	dev_info(adev->dev, "  MC_ARB_CG=0x%08X\n",
-		 RREG32(mmMC_ARB_CG));
-	dev_info(adev->dev, "  DIDT_SQ_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_SQ_CTRL0));
-	dev_info(adev->dev, "  DIDT_DB_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_DB_CTRL0));
-	dev_info(adev->dev, "  DIDT_TD_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_TD_CTRL0));
-	dev_info(adev->dev, "  DIDT_TCP_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_TCP_CTRL0));
-	dev_info(adev->dev, "  CG_THERMAL_INT=0x%08X\n",
-		 RREG32_SMC(ixCG_THERMAL_INT));
-	dev_info(adev->dev, "  CG_THERMAL_CTRL=0x%08X\n",
-		 RREG32_SMC(ixCG_THERMAL_CTRL));
-	dev_info(adev->dev, "  GENERAL_PWRMGT=0x%08X\n",
-		 RREG32_SMC(ixGENERAL_PWRMGT));
-	dev_info(adev->dev, "  MC_SEQ_CNTL_3=0x%08X\n",
-		 RREG32(mmMC_SEQ_CNTL_3));
-	dev_info(adev->dev, "  LCAC_MC0_CNTL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC0_CNTL));
-	dev_info(adev->dev, "  LCAC_MC1_CNTL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC1_CNTL));
-	dev_info(adev->dev, "  LCAC_CPL_CNTL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_CPL_CNTL));
-	dev_info(adev->dev, "  SCLK_PWRMGT_CNTL=0x%08X\n",
-		 RREG32_SMC(ixSCLK_PWRMGT_CNTL));
-	dev_info(adev->dev, "  BIF_LNCNT_RESET=0x%08X\n",
-		 RREG32(mmBIF_LNCNT_RESET));
-	dev_info(adev->dev, "  FIRMWARE_FLAGS=0x%08X\n",
-		 RREG32_SMC(ixFIRMWARE_FLAGS));
-	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL=0x%08X\n",
-		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
-	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_2=0x%08X\n",
-		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
-	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_3=0x%08X\n",
-		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
-	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_4=0x%08X\n",
-		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
-	dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
-		 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
-	dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
-		 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
-	dev_info(adev->dev, "  DLL_CNTL=0x%08X\n",
-		 RREG32(mmDLL_CNTL));
-	dev_info(adev->dev, "  MCLK_PWRMGT_CNTL=0x%08X\n",
-		 RREG32(mmMCLK_PWRMGT_CNTL));
-	dev_info(adev->dev, "  MPLL_AD_FUNC_CNTL=0x%08X\n",
-		 RREG32(mmMPLL_AD_FUNC_CNTL));
-	dev_info(adev->dev, "  MPLL_DQ_FUNC_CNTL=0x%08X\n",
-		 RREG32(mmMPLL_DQ_FUNC_CNTL));
-	dev_info(adev->dev, "  MPLL_FUNC_CNTL=0x%08X\n",
-		 RREG32(mmMPLL_FUNC_CNTL));
-	dev_info(adev->dev, "  MPLL_FUNC_CNTL_1=0x%08X\n",
-		 RREG32(mmMPLL_FUNC_CNTL_1));
-	dev_info(adev->dev, "  MPLL_FUNC_CNTL_2=0x%08X\n",
-		 RREG32(mmMPLL_FUNC_CNTL_2));
-	dev_info(adev->dev, "  MPLL_SS1=0x%08X\n",
-		 RREG32(mmMPLL_SS1));
-	dev_info(adev->dev, "  MPLL_SS2=0x%08X\n",
-		 RREG32(mmMPLL_SS2));
-	dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL=0x%08X\n",
-		 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
-	dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL2=0x%08X\n",
-		 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
-	dev_info(adev->dev, "  CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
-		 RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_0=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_1=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_2=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_3=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_4=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_5=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_6=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_7=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
-	dev_info(adev->dev, "  RCU_UC_EVENTS=0x%08X\n",
-		 RREG32_SMC(ixRCU_UC_EVENTS));
-	dev_info(adev->dev, "  DPM_TABLE_475=0x%08X\n",
-		 RREG32_SMC(ixDPM_TABLE_475));
-	dev_info(adev->dev, "  MC_SEQ_RAS_TIMING_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_RAS_TIMING_LP));
-	dev_info(adev->dev, "  MC_SEQ_RAS_TIMING=0x%08X\n",
-		 RREG32(mmMC_SEQ_RAS_TIMING));
-	dev_info(adev->dev, "  MC_SEQ_CAS_TIMING_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_CAS_TIMING_LP));
-	dev_info(adev->dev, "  MC_SEQ_CAS_TIMING=0x%08X\n",
-		 RREG32(mmMC_SEQ_CAS_TIMING));
-	dev_info(adev->dev, "  MC_SEQ_DLL_STBY_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_DLL_STBY_LP));
-	dev_info(adev->dev, "  MC_SEQ_DLL_STBY=0x%08X\n",
-		 RREG32(mmMC_SEQ_DLL_STBY));
-	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
-	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0=0x%08X\n",
-		 RREG32(mmMC_SEQ_G5PDX_CMD0));
-	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
-	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1=0x%08X\n",
-		 RREG32(mmMC_SEQ_G5PDX_CMD1));
-	dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
-	dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL=0x%08X\n",
-		 RREG32(mmMC_SEQ_G5PDX_CTRL));
-	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
-	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_DVS_CMD));
-	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
-	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_DVS_CTL));
-	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_MISC_TIMING_LP));
-	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING=0x%08X\n",
-		 RREG32(mmMC_SEQ_MISC_TIMING));
-	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_MISC_TIMING2_LP));
-	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2=0x%08X\n",
-		 RREG32(mmMC_SEQ_MISC_TIMING2));
-	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
-	dev_info(adev->dev, "  MC_PMG_CMD_EMRS=0x%08X\n",
-		 RREG32(mmMC_PMG_CMD_EMRS));
-	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
-	dev_info(adev->dev, "  MC_PMG_CMD_MRS=0x%08X\n",
-		 RREG32(mmMC_PMG_CMD_MRS));
-	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
-	dev_info(adev->dev, "  MC_PMG_CMD_MRS1=0x%08X\n",
-		 RREG32(mmMC_PMG_CMD_MRS1));
-	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_WR_CTL_D0_LP));
-	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0=0x%08X\n",
-		 RREG32(mmMC_SEQ_WR_CTL_D0));
-	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_WR_CTL_D1_LP));
-	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1=0x%08X\n",
-		 RREG32(mmMC_SEQ_WR_CTL_D1));
-	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_RD_CTL_D0_LP));
-	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0=0x%08X\n",
-		 RREG32(mmMC_SEQ_RD_CTL_D0));
-	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_RD_CTL_D1_LP));
-	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1=0x%08X\n",
-		 RREG32(mmMC_SEQ_RD_CTL_D1));
-	dev_info(adev->dev, "  MC_SEQ_PMG_TIMING_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_TIMING_LP));
-	dev_info(adev->dev, "  MC_SEQ_PMG_TIMING=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_TIMING));
-	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
-	dev_info(adev->dev, "  MC_PMG_CMD_MRS2=0x%08X\n",
-		 RREG32(mmMC_PMG_CMD_MRS2));
-	dev_info(adev->dev, "  MC_SEQ_WR_CTL_2_LP=0x%08X\n",
-		 RREG32(mmMC_SEQ_WR_CTL_2_LP));
-	dev_info(adev->dev, "  MC_SEQ_WR_CTL_2=0x%08X\n",
-		 RREG32(mmMC_SEQ_WR_CTL_2));
-	dev_info(adev->dev, "  PCIE_LC_SPEED_CNTL=0x%08X\n",
-		 RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
-	dev_info(adev->dev, "  PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
-		 RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
-	dev_info(adev->dev, "  SMC_IND_INDEX_0=0x%08X\n",
-		 RREG32(mmSMC_IND_INDEX_0));
-	dev_info(adev->dev, "  SMC_IND_DATA_0=0x%08X\n",
-		 RREG32(mmSMC_IND_DATA_0));
-	dev_info(adev->dev, "  SMC_IND_ACCESS_CNTL=0x%08X\n",
-		 RREG32(mmSMC_IND_ACCESS_CNTL));
-	dev_info(adev->dev, "  SMC_RESP_0=0x%08X\n",
-		 RREG32(mmSMC_RESP_0));
-	dev_info(adev->dev, "  SMC_MESSAGE_0=0x%08X\n",
-		 RREG32(mmSMC_MESSAGE_0));
-	dev_info(adev->dev, "  SMC_SYSCON_RESET_CNTL=0x%08X\n",
-		 RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
-	dev_info(adev->dev, "  SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
-		 RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
-	dev_info(adev->dev, "  SMC_SYSCON_MISC_CNTL=0x%08X\n",
-		 RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
-	dev_info(adev->dev, "  SMC_PC_C=0x%08X\n",
-		 RREG32_SMC(ixSMC_PC_C));
-}
-
 static int ci_dpm_soft_reset(void *handle)
 {
 	return 0;
@@ -6572,7 +6363,7 @@
 }
 
 static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
-				    struct amdgpu_irq_src *source, 
+				    struct amdgpu_irq_src *source,
 				    struct amdgpu_iv_entry *entry)
 {
 	bool queue_thermal = false;
@@ -6614,6 +6405,7 @@
 }
 
 const struct amd_ip_funcs ci_dpm_ip_funcs = {
+	.name = "ci_dpm",
 	.early_init = ci_dpm_early_init,
 	.late_init = ci_dpm_late_init,
 	.sw_init = ci_dpm_sw_init,
@@ -6625,7 +6417,6 @@
 	.is_idle = ci_dpm_is_idle,
 	.wait_for_idle = ci_dpm_wait_for_idle,
 	.soft_reset = ci_dpm_soft_reset,
-	.print_status = ci_dpm_print_status,
 	.set_clockgating_state = ci_dpm_set_clockgating_state,
 	.set_powergating_state = ci_dpm_set_powergating_state,
 };
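
Two small cleanups above are worth noting: ci_get_sleep_divider_id_from_clock() drops its unused adev argument, the open-coded ternary becomes max(), and sclk / (1 << i) becomes the equivalent sclk >> i. A stand-alone sketch of the simplified helper, with hypothetical constants standing in for the CISLAND_* defines, shows the equivalence:

/*
 * Sketch only: MIN_ENGINE_CLOCK and MAX_DIVIDER_ID are stand-ins for
 * the CISLAND_* values, not the real numbers.
 */
#include <assert.h>
#include <stdint.h>

#define MIN_ENGINE_CLOCK	5000u
#define MAX_DIVIDER_ID		5u

static uint32_t sleep_divider_id(uint32_t sclk, uint32_t min_sclk_in_sr)
{
	/* same effect as the kernel's max() macro */
	uint32_t min = min_sclk_in_sr > MIN_ENGINE_CLOCK ?
		       min_sclk_in_sr : MIN_ENGINE_CLOCK;
	uint32_t i;

	if (sclk < min)
		return 0;

	for (i = MAX_DIVIDER_ID; ; i--) {
		/* sclk >> i divides by 2^i, identical to sclk / (1 << i) */
		if ((sclk >> i) >= min || i == 0)
			break;
	}
	return i;
}

int main(void)
{
	assert((40000u >> 3) == 40000u / (1u << 3));
	assert(sleep_divider_id(4000, 1000) == 0);	/* below the clamp */
	assert(sleep_divider_id(40000, 1000) <= MAX_DIVIDER_ID);
	return 0;
}
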
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index bddc9ba..07bc795 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -962,7 +962,7 @@
 	return true;
 }
 
-static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
 	{mmMC_ARB_RAMCFG, false},
@@ -2007,7 +2007,6 @@
 	.get_xclk = &cik_get_xclk,
 	.set_uvd_clocks = &cik_set_uvd_clocks,
 	.set_vce_clocks = &cik_set_vce_clocks,
-	.get_cu_info = &gfx_v7_0_get_cu_info,
 	/* these should be moved to their own ip modules */
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
@@ -2214,11 +2213,6 @@
 	return 0;
 }
 
-static void cik_common_print_status(void *handle)
-{
-
-}
-
 static int cik_common_soft_reset(void *handle)
 {
 	/* XXX hard reset?? */
@@ -2238,6 +2232,7 @@
 }
 
 const struct amd_ip_funcs cik_common_ip_funcs = {
+	.name = "cik_common",
 	.early_init = cik_common_early_init,
 	.late_init = NULL,
 	.sw_init = cik_common_sw_init,
@@ -2249,7 +2244,6 @@
 	.is_idle = cik_common_is_idle,
 	.wait_for_idle = cik_common_wait_for_idle,
 	.soft_reset = cik_common_soft_reset,
-	.print_status = cik_common_print_status,
 	.set_clockgating_state = cik_common_set_clockgating_state,
 	.set_powergating_state = cik_common_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 30c9b3be..be3d6f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,7 +103,6 @@
  */
 static int cik_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 	u64 wptr_off;
@@ -156,7 +155,7 @@
 	/* enable irqs */
 	cik_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -243,7 +242,7 @@
 	/* wptr/rptr are in bytes! */
 	u32 ring_index = adev->irq.ih.rptr >> 2;
 	uint32_t dw[4];
-	
+
 	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
 	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@@ -372,35 +371,6 @@
 	return -ETIMEDOUT;
 }
 
-static void cik_ih_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "CIK IH registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-	dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL));
-	dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL2));
-	dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
-		 RREG32(mmIH_CNTL));
-	dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
-		 RREG32(mmIH_RB_CNTL));
-	dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
-		 RREG32(mmIH_RB_BASE));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_LO));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_HI));
-	dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
-		 RREG32(mmIH_RB_RPTR));
-	dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR));
-}
-
 static int cik_ih_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -412,8 +382,6 @@
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
 
 	if (srbm_soft_reset) {
-		cik_ih_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -428,8 +396,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		cik_ih_print_status((void *)adev);
 	}
 
 	return 0;
@@ -448,6 +414,7 @@
 }
 
 const struct amd_ip_funcs cik_ih_ip_funcs = {
+	.name = "cik_ih",
 	.early_init = cik_ih_early_init,
 	.late_init = NULL,
 	.sw_init = cik_ih_sw_init,
@@ -459,7 +426,6 @@
 	.is_idle = cik_ih_is_idle,
 	.wait_for_idle = cik_ih_wait_for_idle,
 	.soft_reset = cik_ih_soft_reset,
-	.print_status = cik_ih_print_status,
 	.set_clockgating_state = cik_ih_set_clockgating_state,
 	.set_powergating_state = cik_ih_set_powergating_state,
 };
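
Across all of these IP blocks the series makes the same two ops-table changes: the register-dump .print_status callback is removed (along with its call sites in the soft-reset paths), and a .name string is added so common code can say which block it is handling. A reduced sketch of the resulting table shape, with made-up names rather than the real amdgpu definitions:

/*
 * Sketch of the amd_ip_funcs shape after this series: a .name string,
 * no .print_status.  Struct and callbacks are simplified stand-ins.
 */
#include <stdio.h>

struct ip_funcs {
	const char *name;		/* new: identifies the block in messages */
	int (*early_init)(void *handle);
	int (*soft_reset)(void *handle);
	/* no print_status callback any more */
};

static int example_early_init(void *handle) { (void)handle; return 0; }
static int example_soft_reset(void *handle) { (void)handle; return 0; }

static const struct ip_funcs example_ih_ip_funcs = {
	.name		= "example_ih",
	.early_init	= example_early_init,
	.soft_reset	= example_soft_reset,
};

int main(void)
{
	printf("soft reset of IP block %s\n", example_ih_ip_funcs.name);
	return example_ih_ip_funcs.soft_reset(NULL);
}
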
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d3ac329..9dc4e24 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -66,6 +66,16 @@
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+	int i;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		release_firmware(adev->sdma.instance[i].fw);
+		adev->sdma.instance[i].fw = NULL;
+	}
+}
+
 /*
  * sDMA - System DMA
  * Starting with CIK, the GPU has new asynchronous
@@ -210,9 +220,10 @@
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
-			   struct amdgpu_ib *ib)
+				  struct amdgpu_ib *ib,
+				  unsigned vm_id, bool ctx_switch)
 {
-	u32 extra_bits = ib->vm_id & 0xf;
+	u32 extra_bits = vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 4)
@@ -418,6 +429,8 @@
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
 		/* set the wb address whether it's enabled or not */
 		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -445,7 +458,12 @@
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
 		ring->ready = true;
+	}
 
+	cik_sdma_enable(adev, true);
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
 			ring->ready = false;
@@ -528,8 +546,8 @@
 	if (r)
 		return r;
 
-	/* unhalt the MEs */
-	cik_sdma_enable(adev, true);
+	/* halt the engine before programming */
+	cik_sdma_enable(adev, false);
 
 	/* start the gfx rings and rlc compute queues */
 	r = cik_sdma_gfx_resume(adev);
@@ -643,7 +661,7 @@
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err1;
 
@@ -976,7 +994,7 @@
 		ring = &adev->sdma.instance[i].ring;
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "sdma%d", i);
-		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
@@ -997,6 +1015,7 @@
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+	cik_sdma_free_microcode(adev);
 	return 0;
 }
 
@@ -1064,57 +1083,6 @@
 	return -ETIMEDOUT;
 }
 
-static void cik_sdma_print_status(void *handle)
-{
-	int i, j;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "CIK SDMA registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
-			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
-			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
-		mutex_lock(&adev->srbm_mutex);
-		for (j = 0; j < 16; j++) {
-			cik_srbm_select(adev, 0, 0, 0, j);
-			dev_info(adev->dev, "  VM %d:\n", j);
-			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
-				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
-			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
-				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
-		}
-		cik_srbm_select(adev, 0, 0, 0, 0);
-		mutex_unlock(&adev->srbm_mutex);
-	}
-}
-
 static int cik_sdma_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
@@ -1137,8 +1105,6 @@
 	}
 
 	if (srbm_soft_reset) {
-		cik_sdma_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1153,8 +1119,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		cik_sdma_print_status((void *)adev);
 	}
 
 	return 0;
@@ -1278,6 +1242,7 @@
 }
 
 const struct amd_ip_funcs cik_sdma_ip_funcs = {
+	.name = "cik_sdma",
 	.early_init = cik_sdma_early_init,
 	.late_init = NULL,
 	.sw_init = cik_sdma_sw_init,
@@ -1289,7 +1254,6 @@
 	.is_idle = cik_sdma_is_idle,
 	.wait_for_idle = cik_sdma_wait_for_idle,
 	.soft_reset = cik_sdma_soft_reset,
-	.print_status = cik_sdma_print_status,
 	.set_clockgating_state = cik_sdma_set_clockgating_state,
 	.set_powergating_state = cik_sdma_set_powergating_state,
 };
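
cik_sdma_free_microcode() above is part of a series-wide pattern: each firmware image obtained with request_firmware() is released in the block's sw_fini path and the pointer cleared, so later teardown or error paths cannot free it twice (the same change appears for ci_dpm, fiji_dpm and the gfx blocks). A minimal kernel-style sketch of the pattern, using a hypothetical device struct rather than the real amdgpu one:

/*
 * Sketch only.  release_firmware() is NULL-safe, and clearing the
 * pointer guards against a second release on another teardown path.
 */
#include <linux/firmware.h>

#define MY_NUM_INSTANCES 2

struct my_dev {
	const struct firmware *fw[MY_NUM_INSTANCES];
};

static void my_free_microcode(struct my_dev *dev)
{
	int i;

	for (i = 0; i < MY_NUM_INSTANCES; i++) {
		release_firmware(dev->fw[i]);
		dev->fw[i] = NULL;
	}
}
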
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 60d4493..c4f6f00 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -190,8 +190,8 @@
 #       define MACRO_TILE_ASPECT(x)				((x) << 4)
 #       define NUM_BANKS(x)					((x) << 6)
 
-#define		MSG_ENTER_RLC_SAFE_MODE      			1
-#define		MSG_EXIT_RLC_SAFE_MODE      			0
+#define		MSG_ENTER_RLC_SAFE_MODE			1
+#define		MSG_EXIT_RLC_SAFE_MODE			0
 
 /*
  * PM4
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index e7ef226..933e425 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1579,7 +1579,6 @@
 
 static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	if (pi->caps_sclk_ds) {
@@ -1588,20 +1587,19 @@
 				CZ_MIN_DEEP_SLEEP_SCLK);
 	}
 
-	return ret;
+	return 0;
 }
 
 /* ?? without dal support, is this still needed in setpowerstate list*/
 static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	cz_send_msg_to_smc_with_parameter(adev,
 			PPSMC_MSG_SetWatermarkFrequency,
 			pi->sclk_dpm.soft_max_clk);
 
-	return ret;
+	return 0;
 }
 
 static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
@@ -1636,7 +1634,6 @@
 
 static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	struct cz_power_info *pi = cz_get_pi(adev);
 	struct cz_ps *ps = &pi->requested_ps;
 
@@ -1647,21 +1644,19 @@
 			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
 	}
 
-	return ret;
+	return 0;
 }
 
 /* with dpm enabled */
 static int cz_dpm_set_power_state(struct amdgpu_device *adev)
 {
-	int ret = 0;
-
 	cz_dpm_update_sclk_limit(adev);
 	cz_dpm_set_deep_sleep_sclk_threshold(adev);
 	cz_dpm_set_watermark_threshold(adev);
 	cz_dpm_enable_nbdpm(adev);
 	cz_dpm_update_low_memory_pstate(adev);
 
-	return ret;
+	return 0;
 }
 
 static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
@@ -2230,6 +2225,7 @@
 }
 
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
+	.name = "cz_dpm",
 	.early_init = cz_dpm_early_init,
 	.late_init = cz_dpm_late_init,
 	.sw_init = cz_dpm_sw_init,
@@ -2241,7 +2237,6 @@
 	.is_idle = NULL,
 	.wait_for_idle = NULL,
 	.soft_reset = NULL,
-	.print_status = NULL,
 	.set_clockgating_state = cz_dpm_set_clockgating_state,
 	.set_powergating_state = cz_dpm_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index c79638f..3d23a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,7 +103,6 @@
  */
 static int cz_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 	u64 wptr_off;
@@ -157,7 +156,7 @@
 	/* enable interrupts */
 	cz_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -222,7 +221,7 @@
 	/* wptr/rptr are in bytes! */
 	u32 ring_index = adev->irq.ih.rptr >> 2;
 	uint32_t dw[4];
-	
+
 	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
 	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@@ -351,35 +350,6 @@
 	return -ETIMEDOUT;
 }
 
-static void cz_ih_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "CZ IH registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-	dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL));
-	dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL2));
-	dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
-		 RREG32(mmIH_CNTL));
-	dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
-		 RREG32(mmIH_RB_CNTL));
-	dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
-		 RREG32(mmIH_RB_BASE));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_LO));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_HI));
-	dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
-		 RREG32(mmIH_RB_RPTR));
-	dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR));
-}
-
 static int cz_ih_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
@@ -391,8 +361,6 @@
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
-		cz_ih_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -407,8 +375,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		cz_ih_print_status((void *)adev);
 	}
 
 	return 0;
@@ -429,6 +395,7 @@
 }
 
 const struct amd_ip_funcs cz_ih_ip_funcs = {
+	.name = "cz_ih",
 	.early_init = cz_ih_early_init,
 	.late_init = NULL,
 	.sw_init = cz_ih_sw_init,
@@ -440,7 +407,6 @@
 	.is_idle = cz_ih_is_idle,
 	.wait_for_idle = cz_ih_wait_for_idle,
 	.soft_reset = cz_ih_soft_reset,
-	.print_status = cz_ih_print_status,
 	.set_clockgating_state = cz_ih_set_clockgating_state,
 	.set_powergating_state = cz_ih_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
index 924d355..026342f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
@@ -77,7 +77,7 @@
 	uint8_t		driver_buffer_length;
 	uint8_t		scratch_buffer_length;
 	uint16_t	toc_entry_used_count;
-	uint16_t 	toc_entry_initialize_index;
+	uint16_t	toc_entry_initialize_index;
 	uint16_t	toc_entry_power_profiling_index;
 	uint16_t	toc_entry_aram;
 	uint16_t	toc_entry_ih_register_restore_task_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6de2ce53..8227344 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -284,10 +284,16 @@
  * surface base address.
  */
 static void dce_v10_0_page_flip(struct amdgpu_device *adev,
-			      int crtc_id, u64 crtc_base)
+				int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+	u32 tmp;
 
+	/* flip at hsync for async, default is vsync */
+	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* update the primary scanout address */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));
@@ -2211,6 +2217,14 @@
 
 	dce_v10_0_vga_enable(crtc, false);
 
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(fb_location));
 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2261,13 +2275,6 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* pageflip setup */
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
-			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
-	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
 	/* set pageflip to happen only at start of vblank interval (front porch) */
 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
 
@@ -2587,7 +2594,7 @@
 		return -EINVAL;
 	}
 
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
 		return -ENOENT;
@@ -2992,6 +2999,8 @@
 
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
 
+	adev->ddev->mode_config.async_page_flip = true;
+
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
 
@@ -3130,14 +3139,6 @@
 	return 0;
 }
 
-static void dce_v10_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "DCE 10.x registers\n");
-	/* XXX todo */
-}
-
 static int dce_v10_0_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0, tmp;
@@ -3147,8 +3148,6 @@
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
 	if (srbm_soft_reset) {
-		dce_v10_0_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3163,7 +3162,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-		dce_v10_0_print_status((void *)adev);
 	}
 	return 0;
 }
@@ -3370,7 +3368,7 @@
 
 	/* wakeup usersapce */
 	if (works->event)
-		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
@@ -3501,6 +3499,7 @@
 }
 
 const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+	.name = "dce_v10_0",
 	.early_init = dce_v10_0_early_init,
 	.late_init = NULL,
 	.sw_init = dce_v10_0_sw_init,
@@ -3512,7 +3511,6 @@
 	.is_idle = dce_v10_0_is_idle,
 	.wait_for_idle = dce_v10_0_wait_for_idle,
 	.soft_reset = dce_v10_0_soft_reset,
-	.print_status = dce_v10_0_print_status,
 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
 	.set_powergating_state = dce_v10_0_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e9ccc6b..af26ec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -132,6 +132,22 @@
 	mmFBC_MISC, 0x1f311fff, 0x14302000,
 };
 
+static const u32 polaris11_golden_settings_a11[] =
+{
+	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+	mmFBC_DEBUG1, 0xffffffff, 0x00000008,
+	mmFBC_MISC, 0x9f313fff, 0x14302008,
+	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
+};
+
+static const u32 polaris10_golden_settings_a11[] =
+{
+	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+	mmFBC_MISC, 0x9f313fff, 0x14302008,
+	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
+};
 
 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -149,6 +165,16 @@
 						 stoney_golden_settings_a11,
 						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
 		break;
+	case CHIP_POLARIS11:
+		amdgpu_program_register_sequence(adev,
+						 polaris11_golden_settings_a11,
+						 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
+		break;
+	case CHIP_POLARIS10:
+		amdgpu_program_register_sequence(adev,
+						 polaris10_golden_settings_a11,
+						 (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
+		break;
 	default:
 		break;
 	}
@@ -276,10 +302,17 @@
  * surface base address.
  */
 static void dce_v11_0_page_flip(struct amdgpu_device *adev,
-			      int crtc_id, u64 crtc_base)
+				int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+	u32 tmp;
 
+	/* flip at hsync for async, default is vsync */
+	/* use UPDATE_IMMEDIATE_EN instead for async? */
+	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* update the scanout addresses */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));
@@ -565,35 +598,14 @@
 		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
 					     CRTC_CONTROL, CRTC_MASTER_EN);
 		if (crtc_enabled) {
-#if 0
-			u32 frame_count;
-			int j;
-
+#if 1
 			save->crtc_enabled[i] = true;
 			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-				amdgpu_display_vblank_wait(adev, i);
-				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				/* it is correct only for RGB; black is 0 */
+				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
 				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
 				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			}
-			/* wait for the next frame */
-			frame_count = amdgpu_display_vblank_get_counter(adev, i);
-			for (j = 0; j < adev->usec_timeout; j++) {
-				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-					break;
-				udelay(1);
-			}
-			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
-				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
-				tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
-				WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
 			}
 #else
 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@@ -614,54 +626,20 @@
 static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
 				       struct amdgpu_mode_mc_save *save)
 {
-	u32 tmp, frame_count;
-	int i, j;
+	u32 tmp;
+	int i;
 
 	/* update crtc base addresses */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
 		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 		       (u32)adev->mc.vram_start);
-		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
 
 		if (save->crtc_enabled[i]) {
-			tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-				tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
-				WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
-				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
-				tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
-				WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-			}
-			for (j = 0; j < adev->usec_timeout; j++) {
-				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
-					break;
-				udelay(1);
-			}
 			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			/* wait for the next frame */
-			frame_count = amdgpu_display_vblank_get_counter(adev, i);
-			for (j = 0; j < adev->usec_timeout; j++) {
-				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-					break;
-				udelay(1);
-			}
 		}
 	}
 
@@ -1624,6 +1602,7 @@
 	AUD4_REGISTER_OFFSET,
 	AUD5_REGISTER_OFFSET,
 	AUD6_REGISTER_OFFSET,
+	AUD7_REGISTER_OFFSET,
 };
 
 static int dce_v11_0_audio_init(struct amdgpu_device *adev)
@@ -1635,7 +1614,20 @@
 
 	adev->mode_info.audio.enabled = true;
 
-	adev->mode_info.audio.num_pins = 7;
+	switch (adev->asic_type) {
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		adev->mode_info.audio.num_pins = 7;
+		break;
+	case CHIP_POLARIS10:
+		adev->mode_info.audio.num_pins = 8;
+		break;
+	case CHIP_POLARIS11:
+		adev->mode_info.audio.num_pins = 6;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
 		adev->mode_info.audio.pin[i].channels = -1;
@@ -2201,6 +2193,14 @@
 
 	dce_v11_0_vga_enable(crtc, false);
 
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(fb_location));
 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2251,13 +2251,6 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* pageflip setup */
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
-			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
-	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
 	/* set pageflip to happen only at start of vblank interval (front porch) */
 	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
 
@@ -2427,6 +2420,40 @@
 	u32 pll_in_use;
 	int pll;
 
+	if ((adev->asic_type == CHIP_POLARIS10) ||
+	    (adev->asic_type == CHIP_POLARIS11)) {
+		struct amdgpu_encoder *amdgpu_encoder =
+			to_amdgpu_encoder(amdgpu_crtc->encoder);
+		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+			return ATOM_DP_DTO;
+
+		switch (amdgpu_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			if (dig->linkb)
+				return ATOM_COMBOPHY_PLL1;
+			else
+				return ATOM_COMBOPHY_PLL0;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			if (dig->linkb)
+				return ATOM_COMBOPHY_PLL3;
+			else
+				return ATOM_COMBOPHY_PLL2;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->linkb)
+				return ATOM_COMBOPHY_PLL5;
+			else
+				return ATOM_COMBOPHY_PLL4;
+			break;
+		default:
+			DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+			return ATOM_PPLL_INVALID;
+		}
+	}
+
 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
 		if (adev->clock.dp_extclk)
 			/* skip PPLL programming if using ext clock */
@@ -2578,7 +2605,7 @@
 		return -EINVAL;
 	}
 
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
 		return -ENOENT;
@@ -2782,7 +2809,17 @@
 	case ATOM_PPLL2:
 		/* disable the ppll */
 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
-					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	case ATOM_COMBOPHY_PLL0:
+	case ATOM_COMBOPHY_PLL1:
+	case ATOM_COMBOPHY_PLL2:
+	case ATOM_COMBOPHY_PLL3:
+	case ATOM_COMBOPHY_PLL4:
+	case ATOM_COMBOPHY_PLL5:
+		/* disable the ppll */
+		amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
+						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
 		break;
 	default:
 		break;
@@ -2800,11 +2837,28 @@
 				  int x, int y, struct drm_framebuffer *old_fb)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
 
 	if (!amdgpu_crtc->adjusted_clock)
 		return -EINVAL;
 
-	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+	if ((adev->asic_type == CHIP_POLARIS10) ||
+	    (adev->asic_type == CHIP_POLARIS11)) {
+		struct amdgpu_encoder *amdgpu_encoder =
+			to_amdgpu_encoder(amdgpu_crtc->encoder);
+		int encoder_mode =
+			amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
+
+		/* SetPixelClock calculates the plls and ss values now */
+		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
+						 amdgpu_crtc->pll_id,
+						 encoder_mode, amdgpu_encoder->encoder_id,
+						 adjusted_mode->clock, 0, 0, 0, 0,
+						 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
+	} else {
+		amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+	}
 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
 	dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
@@ -2955,6 +3009,16 @@
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
+	case CHIP_POLARIS10:
+		adev->mode_info.num_crtc = 6;
+		adev->mode_info.num_hpd = 6;
+		adev->mode_info.num_dig = 6;
+		break;
+	case CHIP_POLARIS11:
+		adev->mode_info.num_crtc = 5;
+		adev->mode_info.num_hpd = 5;
+		adev->mode_info.num_dig = 5;
+		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
@@ -2987,6 +3051,8 @@
 
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
 
+	adev->ddev->mode_config.async_page_flip = true;
+
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
 
@@ -3057,7 +3123,15 @@
 	/* init dig PHYs, disp eng pll */
 	amdgpu_atombios_crtc_powergate_init(adev);
 	amdgpu_atombios_encoder_init_dig(adev);
-	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+	if ((adev->asic_type == CHIP_POLARIS10) ||
+	    (adev->asic_type == CHIP_POLARIS11)) {
+		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
+						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
+		amdgpu_atombios_crtc_set_dce_clock(adev, 0,
+						   DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
+	} else {
+		amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+	}
 
 	/* initialize hpd */
 	dce_v11_0_hpd_init(adev);
@@ -3126,14 +3200,6 @@
 	return 0;
 }
 
-static void dce_v11_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "DCE 10.x registers\n");
-	/* XXX todo */
-}
-
 static int dce_v11_0_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0, tmp;
@@ -3143,8 +3209,6 @@
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
 	if (srbm_soft_reset) {
-		dce_v11_0_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3159,7 +3223,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-		dce_v11_0_print_status((void *)adev);
 	}
 	return 0;
 }
@@ -3366,7 +3429,7 @@
 
 	/* wakeup usersapce */
 	if(works->event)
-		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
@@ -3497,6 +3560,7 @@
 }
 
 const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+	.name = "dce_v11_0",
 	.early_init = dce_v11_0_early_init,
 	.late_init = NULL,
 	.sw_init = dce_v11_0_sw_init,
@@ -3508,7 +3572,6 @@
 	.is_idle = dce_v11_0_is_idle,
 	.wait_for_idle = dce_v11_0_wait_for_idle,
 	.soft_reset = dce_v11_0_soft_reset,
-	.print_status = dce_v11_0_print_status,
 	.set_clockgating_state = dce_v11_0_set_clockgating_state,
 	.set_powergating_state = dce_v11_0_set_powergating_state,
 };
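
The display changes above wire up asynchronous page flips: the page_flip hook gains an async flag that sets GRPH_SURFACE_UPDATE_H_RETRACE_EN so the new surface address latches at horizontal retrace instead of waiting for vblank, while the set_base path clears the bit again so modesets stay vsynced. A self-contained sketch of that single-bit read-modify-write; reg_read()/reg_write() and the mask value are hypothetical stand-ins for the amdgpu MMIO macros and register field:

/*
 * Sketch of the flip-control toggle, not the driver code itself.
 */
#include <stdbool.h>
#include <stdint.h>

#define H_RETRACE_EN_MASK	0x1u

static uint32_t fake_flip_control;	/* stands in for GRPH_FLIP_CONTROL */

static uint32_t reg_read(void)		{ return fake_flip_control; }
static void reg_write(uint32_t v)	{ fake_flip_control = v; }

static void set_flip_mode(bool async)
{
	uint32_t tmp = reg_read();

	if (async)
		tmp |= H_RETRACE_EN_MASK;	/* latch new address at hsync */
	else
		tmp &= ~H_RETRACE_EN_MASK;	/* default: latch at vblank */
	reg_write(tmp);
}

int main(void)
{
	set_flip_mode(true);	/* async flip requested by userspace */
	set_flip_mode(false);	/* modeset path forces vsynced updates */
	return fake_flip_control == 0 ? 0 : 1;
}
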
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index e56b55d..3fb65e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -233,10 +233,13 @@
  * surface base address.
  */
 static void dce_v8_0_page_flip(struct amdgpu_device *adev,
-			      int crtc_id, u64 crtc_base)
+			       int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
+	/* flip at hsync for async, default is vsync */
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 	/* update the primary scanout addresses */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));
@@ -1999,7 +2002,7 @@
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
 	u32 pipe_config;
-	u32 tmp, viewport_w, viewport_h;
+	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
 
@@ -2135,6 +2138,11 @@
 
 	dce_v8_0_vga_enable(crtc, false);
 
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(fb_location));
 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2182,12 +2190,6 @@
 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* pageflip setup */
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
-	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
 	/* set pageflip to happen only at start of vblank interval (front porch) */
 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
 
@@ -2499,7 +2501,7 @@
 		return -EINVAL;
 	}
 
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
 		return -ENOENT;
@@ -2902,6 +2904,8 @@
 
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
 
+	adev->ddev->mode_config.async_page_flip = true;
+
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
 
@@ -3038,14 +3042,6 @@
 	return 0;
 }
 
-static void dce_v8_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "DCE 8.x registers\n");
-	/* XXX todo */
-}
-
 static int dce_v8_0_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0, tmp;
@@ -3055,8 +3051,6 @@
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
 	if (srbm_soft_reset) {
-		dce_v8_0_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3071,7 +3065,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-		dce_v8_0_print_status((void *)adev);
 	}
 	return 0;
 }
@@ -3379,7 +3372,7 @@
 
 	/* wakeup usersapce */
 	if (works->event)
-		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
@@ -3431,6 +3424,7 @@
 }
 
 const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+	.name = "dce_v8_0",
 	.early_init = dce_v8_0_early_init,
 	.late_init = NULL,
 	.sw_init = dce_v8_0_sw_init,
@@ -3442,7 +3436,6 @@
 	.is_idle = dce_v8_0_is_idle,
 	.wait_for_idle = dce_v8_0_wait_for_idle,
 	.soft_reset = dce_v8_0_soft_reset,
-	.print_status = dce_v8_0_print_status,
 	.set_clockgating_state = dce_v8_0_set_clockgating_state,
 	.set_powergating_state = dce_v8_0_set_powergating_state,
 };
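
Two DRM core API updates also run through these display files: drm_gem_object_lookup() no longer takes the drm_device argument, and pageflip completion events are delivered with drm_crtc_send_vblank_event() on the CRTC rather than drm_send_vblank_event() with a CRTC index. A rough kernel-style sketch of the updated call shapes; the wrapper function and the header choice are assumptions, not taken from the patch:

/*
 * Sketch only.  complete_flip() is a hypothetical wrapper; drmP.h is
 * used here as the catch-all DRM header of this kernel era.
 */
#include <drm/drmP.h>

static void complete_flip(struct drm_crtc *crtc, struct drm_file *file_priv,
			  u32 handle, struct drm_pending_vblank_event *event)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, handle);	/* no dev argument */
	if (!obj)
		return;
	/* ... use obj, then drop the reference taken by the lookup ... */

	drm_crtc_send_vblank_event(crtc, event);	/* per-CRTC, no index */
}
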
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
index 4b0e45a..ed03b75 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -72,6 +72,11 @@
 
 static int fiji_dpm_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }
 
@@ -143,6 +148,7 @@
 }
 
 const struct amd_ip_funcs fiji_dpm_ip_funcs = {
+	.name = "fiji_dpm",
 	.early_init = fiji_dpm_early_init,
 	.late_init = NULL,
 	.sw_init = fiji_dpm_sw_init,
@@ -154,7 +160,6 @@
 	.is_idle = NULL,
 	.wait_for_idle = NULL,
 	.soft_reset = NULL,
-	.print_status = NULL,
 	.set_clockgating_state = fiji_dpm_set_clockgating_state,
 	.set_powergating_state = fiji_dpm_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index bb8709066..8c6ad1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -53,7 +53,6 @@
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
-int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
 
 MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
 MODULE_FIRMWARE("radeon/bonaire_me.bin");
@@ -882,6 +881,7 @@
 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
 static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
+static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
 
 /*
  * Core functions
@@ -991,6 +991,22 @@
 	return err;
 }
 
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+	release_firmware(adev->gfx.pfp_fw);
+	adev->gfx.pfp_fw = NULL;
+	release_firmware(adev->gfx.me_fw);
+	adev->gfx.me_fw = NULL;
+	release_firmware(adev->gfx.ce_fw);
+	adev->gfx.ce_fw = NULL;
+	release_firmware(adev->gfx.mec_fw);
+	adev->gfx.mec_fw = NULL;
+	release_firmware(adev->gfx.mec2_fw);
+	adev->gfx.mec2_fw = NULL;
+	release_firmware(adev->gfx.rlc_fw);
+	adev->gfx.rlc_fw = NULL;
+}
+
 /**
  * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
  *
@@ -1718,6 +1734,7 @@
 	gfx_v7_0_tiling_mode_table_init(adev);
 
 	gfx_v7_0_setup_rb(adev);
+	gfx_v7_0_get_cu_info(adev);
 
 	/* set HW defaults for 3D engine */
 	WREG32(mmCP_MEQ_THRESHOLDS,
@@ -2029,17 +2046,13 @@
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+				      struct amdgpu_ib *ib,
+				      unsigned vm_id, bool ctx_switch)
 {
-	bool need_ctx_switch = ring->current_ctx != ib->ctx;
 	u32 header, control = 0;
 	u32 next_rptr = ring->wptr + 5;
 
-	/* drop the CE preamble IB for the same context */
-	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
-		return;
-
-	if (need_ctx_switch)
+	if (ctx_switch)
 		next_rptr += 2;
 
 	next_rptr += 4;
@@ -2050,7 +2063,7 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (need_ctx_switch) {
+	if (ctx_switch) {
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 	}
@@ -2060,7 +2073,7 @@
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw | (ib->vm_id << 24);
+	control |= ib->length_dw | (vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2073,7 +2086,8 @@
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+					  struct amdgpu_ib *ib,
+					  unsigned vm_id, bool ctx_switch)
 {
 	u32 header, control = 0;
 	u32 next_rptr = ring->wptr + 5;
@@ -2088,7 +2102,7 @@
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw | (ib->vm_id << 24);
+	control |= ib->length_dw | (vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2136,7 +2150,7 @@
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err2;
 
@@ -3053,6 +3067,19 @@
 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
+
 	if (usepfp) {
 		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -3080,18 +3107,6 @@
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t seq = ring->fence_drv.sync_seq;
-	uint64_t addr = ring->fence_drv.gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
-	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
-				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
-	amdgpu_ring_write(ring, addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-	amdgpu_ring_write(ring, seq);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, 4); /* poll interval */
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3869,18 +3884,13 @@
 
 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
 {
-	uint32_t tmp, active_cu_number;
-	struct amdgpu_cu_info cu_info;
+	u32 tmp;
 
-	gfx_v7_0_get_cu_info(adev, &cu_info);
-	tmp = cu_info.ao_cu_mask;
-	active_cu_number = cu_info.number;
-
-	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);
+	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
 
 	tmp = RREG32(mmRLC_MAX_PG_CU);
 	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
-	tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
 	WREG32(mmRLC_MAX_PG_CU, tmp);
 }
 
@@ -4414,7 +4424,7 @@
 		ring = &adev->gfx.gfx_ring[i];
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
-		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
 				     AMDGPU_RING_TYPE_GFX);
@@ -4438,10 +4448,10 @@
 		ring->me = 1; /* first MEC */
 		ring->pipe = i / 8;
 		ring->queue = i % 8;
-		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
-		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
 				     &adev->gfx.eop_irq, irq_type,
 				     AMDGPU_RING_TYPE_COMPUTE);
@@ -4495,6 +4505,7 @@
 	gfx_v7_0_cp_compute_fini(adev);
 	gfx_v7_0_rlc_fini(adev);
 	gfx_v7_0_mec_fini(adev);
+	gfx_v7_0_free_microcode(adev);
 
 	return 0;
 }
@@ -4572,256 +4583,6 @@
 	return -ETIMEDOUT;
 }
 
-static void gfx_v7_0_print_status(void *handle)
-{
-	int i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "GFX 7.x registers\n");
-	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
-		RREG32(mmGRBM_STATUS));
-	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
-		RREG32(mmGRBM_STATUS2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE0));
-	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE1));
-	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE3));
-	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
-	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT2));
-	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT3));
-	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
-		 RREG32(mmCP_CPF_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPF_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
-	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPC_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-
-	for (i = 0; i < 32; i++) {
-		dev_info(adev->dev, "  GB_TILE_MODE%d=0x%08X\n",
-			 i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
-	}
-	for (i = 0; i < 16; i++) {
-		dev_info(adev->dev, "  GB_MACROTILE_MODE%d=0x%08X\n",
-			 i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
-	}
-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
-		dev_info(adev->dev, "  se: %d\n", i);
-		gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
-		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG=0x%08X\n",
-			 RREG32(mmPA_SC_RASTER_CONFIG));
-		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG_1=0x%08X\n",
-			 RREG32(mmPA_SC_RASTER_CONFIG_1));
-	}
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
-
-	dev_info(adev->dev, "  GB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmGB_ADDR_CONFIG));
-	dev_info(adev->dev, "  HDP_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmHDP_ADDR_CONFIG));
-	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
-		 RREG32(mmDMIF_ADDR_CALC));
-
-	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
-		 RREG32(mmCP_MEQ_THRESHOLDS));
-	dev_info(adev->dev, "  SX_DEBUG_1=0x%08X\n",
-		 RREG32(mmSX_DEBUG_1));
-	dev_info(adev->dev, "  TA_CNTL_AUX=0x%08X\n",
-		 RREG32(mmTA_CNTL_AUX));
-	dev_info(adev->dev, "  SPI_CONFIG_CNTL=0x%08X\n",
-		 RREG32(mmSPI_CONFIG_CNTL));
-	dev_info(adev->dev, "  SQ_CONFIG=0x%08X\n",
-		 RREG32(mmSQ_CONFIG));
-	dev_info(adev->dev, "  DB_DEBUG=0x%08X\n",
-		 RREG32(mmDB_DEBUG));
-	dev_info(adev->dev, "  DB_DEBUG2=0x%08X\n",
-		 RREG32(mmDB_DEBUG2));
-	dev_info(adev->dev, "  DB_DEBUG3=0x%08X\n",
-		 RREG32(mmDB_DEBUG3));
-	dev_info(adev->dev, "  CB_HW_CONTROL=0x%08X\n",
-		 RREG32(mmCB_HW_CONTROL));
-	dev_info(adev->dev, "  SPI_CONFIG_CNTL_1=0x%08X\n",
-		 RREG32(mmSPI_CONFIG_CNTL_1));
-	dev_info(adev->dev, "  PA_SC_FIFO_SIZE=0x%08X\n",
-		 RREG32(mmPA_SC_FIFO_SIZE));
-	dev_info(adev->dev, "  VGT_NUM_INSTANCES=0x%08X\n",
-		 RREG32(mmVGT_NUM_INSTANCES));
-	dev_info(adev->dev, "  CP_PERFMON_CNTL=0x%08X\n",
-		 RREG32(mmCP_PERFMON_CNTL));
-	dev_info(adev->dev, "  PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
-		 RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
-	dev_info(adev->dev, "  VGT_CACHE_INVALIDATION=0x%08X\n",
-		 RREG32(mmVGT_CACHE_INVALIDATION));
-	dev_info(adev->dev, "  VGT_GS_VERTEX_REUSE=0x%08X\n",
-		 RREG32(mmVGT_GS_VERTEX_REUSE));
-	dev_info(adev->dev, "  PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
-		 RREG32(mmPA_SC_LINE_STIPPLE_STATE));
-	dev_info(adev->dev, "  PA_CL_ENHANCE=0x%08X\n",
-		 RREG32(mmPA_CL_ENHANCE));
-	dev_info(adev->dev, "  PA_SC_ENHANCE=0x%08X\n",
-		 RREG32(mmPA_SC_ENHANCE));
-
-	dev_info(adev->dev, "  CP_ME_CNTL=0x%08X\n",
-		 RREG32(mmCP_ME_CNTL));
-	dev_info(adev->dev, "  CP_MAX_CONTEXT=0x%08X\n",
-		 RREG32(mmCP_MAX_CONTEXT));
-	dev_info(adev->dev, "  CP_ENDIAN_SWAP=0x%08X\n",
-		 RREG32(mmCP_ENDIAN_SWAP));
-	dev_info(adev->dev, "  CP_DEVICE_ID=0x%08X\n",
-		 RREG32(mmCP_DEVICE_ID));
-
-	dev_info(adev->dev, "  CP_SEM_WAIT_TIMER=0x%08X\n",
-		 RREG32(mmCP_SEM_WAIT_TIMER));
-	if (adev->asic_type != CHIP_HAWAII)
-		dev_info(adev->dev, "  CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
-			 RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));
-
-	dev_info(adev->dev, "  CP_RB_WPTR_DELAY=0x%08X\n",
-		 RREG32(mmCP_RB_WPTR_DELAY));
-	dev_info(adev->dev, "  CP_RB_VMID=0x%08X\n",
-		 RREG32(mmCP_RB_VMID));
-	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
-		 RREG32(mmCP_RB0_CNTL));
-	dev_info(adev->dev, "  CP_RB0_WPTR=0x%08X\n",
-		 RREG32(mmCP_RB0_WPTR));
-	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR=0x%08X\n",
-		 RREG32(mmCP_RB0_RPTR_ADDR));
-	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR_HI=0x%08X\n",
-		 RREG32(mmCP_RB0_RPTR_ADDR_HI));
-	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
-		 RREG32(mmCP_RB0_CNTL));
-	dev_info(adev->dev, "  CP_RB0_BASE=0x%08X\n",
-		 RREG32(mmCP_RB0_BASE));
-	dev_info(adev->dev, "  CP_RB0_BASE_HI=0x%08X\n",
-		 RREG32(mmCP_RB0_BASE_HI));
-	dev_info(adev->dev, "  CP_MEC_CNTL=0x%08X\n",
-		 RREG32(mmCP_MEC_CNTL));
-	dev_info(adev->dev, "  CP_CPF_DEBUG=0x%08X\n",
-		 RREG32(mmCP_CPF_DEBUG));
-
-	dev_info(adev->dev, "  SCRATCH_ADDR=0x%08X\n",
-		 RREG32(mmSCRATCH_ADDR));
-	dev_info(adev->dev, "  SCRATCH_UMSK=0x%08X\n",
-		 RREG32(mmSCRATCH_UMSK));
-
-	/* init the pipes */
-	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
-		int me = (i < 4) ? 1 : 2;
-		int pipe = (i < 4) ? i : (i - 4);
-		int queue;
-
-		dev_info(adev->dev, "  me: %d, pipe: %d\n", me, pipe);
-		cik_srbm_select(adev, me, pipe, 0, 0);
-		dev_info(adev->dev, "  CP_HPD_EOP_BASE_ADDR=0x%08X\n",
-			 RREG32(mmCP_HPD_EOP_BASE_ADDR));
-		dev_info(adev->dev, "  CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
-			 RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
-		dev_info(adev->dev, "  CP_HPD_EOP_VMID=0x%08X\n",
-			 RREG32(mmCP_HPD_EOP_VMID));
-		dev_info(adev->dev, "  CP_HPD_EOP_CONTROL=0x%08X\n",
-			 RREG32(mmCP_HPD_EOP_CONTROL));
-
-		for (queue = 0; queue < 8; queue++) {
-			cik_srbm_select(adev, me, pipe, queue, 0);
-			dev_info(adev->dev, "  queue: %d\n", queue);
-			dev_info(adev->dev, "  CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
-				 RREG32(mmCP_PQ_WPTR_POLL_CNTL));
-			dev_info(adev->dev, "  CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
-			dev_info(adev->dev, "  CP_HQD_ACTIVE=0x%08X\n",
-				 RREG32(mmCP_HQD_ACTIVE));
-			dev_info(adev->dev, "  CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
-				 RREG32(mmCP_HQD_DEQUEUE_REQUEST));
-			dev_info(adev->dev, "  CP_HQD_PQ_RPTR=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_RPTR));
-			dev_info(adev->dev, "  CP_HQD_PQ_WPTR=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_WPTR));
-			dev_info(adev->dev, "  CP_HQD_PQ_BASE=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_BASE));
-			dev_info(adev->dev, "  CP_HQD_PQ_BASE_HI=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_BASE_HI));
-			dev_info(adev->dev, "  CP_HQD_PQ_CONTROL=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_CONTROL));
-			dev_info(adev->dev, "  CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
-			dev_info(adev->dev, "  CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
-			dev_info(adev->dev, "  CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
-			dev_info(adev->dev, "  CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
-			dev_info(adev->dev, "  CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
-			dev_info(adev->dev, "  CP_HQD_PQ_WPTR=0x%08X\n",
-				 RREG32(mmCP_HQD_PQ_WPTR));
-			dev_info(adev->dev, "  CP_HQD_VMID=0x%08X\n",
-				 RREG32(mmCP_HQD_VMID));
-			dev_info(adev->dev, "  CP_MQD_BASE_ADDR=0x%08X\n",
-				 RREG32(mmCP_MQD_BASE_ADDR));
-			dev_info(adev->dev, "  CP_MQD_BASE_ADDR_HI=0x%08X\n",
-				 RREG32(mmCP_MQD_BASE_ADDR_HI));
-			dev_info(adev->dev, "  CP_MQD_CONTROL=0x%08X\n",
-				 RREG32(mmCP_MQD_CONTROL));
-		}
-	}
-	cik_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-
-	dev_info(adev->dev, "  CP_INT_CNTL_RING0=0x%08X\n",
-		 RREG32(mmCP_INT_CNTL_RING0));
-	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTL));
-	dev_info(adev->dev, "  RLC_CNTL=0x%08X\n",
-		 RREG32(mmRLC_CNTL));
-	dev_info(adev->dev, "  RLC_CGCG_CGLS_CTRL=0x%08X\n",
-		 RREG32(mmRLC_CGCG_CGLS_CTRL));
-	dev_info(adev->dev, "  RLC_LB_CNTR_INIT=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTR_INIT));
-	dev_info(adev->dev, "  RLC_LB_CNTR_MAX=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTR_MAX));
-	dev_info(adev->dev, "  RLC_LB_INIT_CU_MASK=0x%08X\n",
-		 RREG32(mmRLC_LB_INIT_CU_MASK));
-	dev_info(adev->dev, "  RLC_LB_PARAMS=0x%08X\n",
-		 RREG32(mmRLC_LB_PARAMS));
-	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTL));
-	dev_info(adev->dev, "  RLC_MC_CNTL=0x%08X\n",
-		 RREG32(mmRLC_MC_CNTL));
-	dev_info(adev->dev, "  RLC_UCODE_CNTL=0x%08X\n",
-		 RREG32(mmRLC_UCODE_CNTL));
-
-	if (adev->asic_type == CHIP_BONAIRE)
-		dev_info(adev->dev, "  RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
-			 RREG32(mmRLC_DRIVER_CPDMA_STATUS));
-
-	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < 16; i++) {
-		cik_srbm_select(adev, 0, 0, 0, i);
-		dev_info(adev->dev, "  VM %d:\n", i);
-		dev_info(adev->dev, "  SH_MEM_CONFIG=0x%08X\n",
-			 RREG32(mmSH_MEM_CONFIG));
-		dev_info(adev->dev, "  SH_MEM_APE1_BASE=0x%08X\n",
-			 RREG32(mmSH_MEM_APE1_BASE));
-		dev_info(adev->dev, "  SH_MEM_APE1_LIMIT=0x%08X\n",
-			 RREG32(mmSH_MEM_APE1_LIMIT));
-		dev_info(adev->dev, "  SH_MEM_BASES=0x%08X\n",
-			 RREG32(mmSH_MEM_BASES));
-	}
-	cik_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-}
-
 static int gfx_v7_0_soft_reset(void *handle)
 {
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -4855,7 +4616,6 @@
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		gfx_v7_0_print_status((void *)adev);
 		/* disable CG/PG */
 		gfx_v7_0_fini_pg(adev);
 		gfx_v7_0_update_cg(adev, false);
@@ -4898,7 +4658,6 @@
 		}
 		/* Wait a little for things to settle down */
 		udelay(50);
-		gfx_v7_0_print_status((void *)adev);
 	}
 	return 0;
 }
@@ -5150,6 +4909,7 @@
 }
 
 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+	.name = "gfx_v7_0",
 	.early_init = gfx_v7_0_early_init,
 	.late_init = gfx_v7_0_late_init,
 	.sw_init = gfx_v7_0_sw_init,
@@ -5161,7 +4921,6 @@
 	.is_idle = gfx_v7_0_is_idle,
 	.wait_for_idle = gfx_v7_0_wait_for_idle,
 	.soft_reset = gfx_v7_0_soft_reset,
-	.print_status = gfx_v7_0_print_status,
 	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
 	.set_powergating_state = gfx_v7_0_set_powergating_state,
 };
@@ -5268,14 +5027,11 @@
 }
 
 
-int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
-			 struct amdgpu_cu_info *cu_info)
+static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 {
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
-
-	if (!adev || !cu_info)
-		return -EINVAL;
+	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
 
 	memset(cu_info, 0, sizeof(*cu_info));
 
@@ -5306,6 +5062,4 @@
 
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
-
-	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index c04bfba..e747aa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -32,6 +32,5 @@
 void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
 uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
 void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f0c7b35..9f6f866 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -27,6 +27,7 @@
 #include "vi.h"
 #include "vid.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_atombios.h"
 #include "clearstate_vi.h"
 
 #include "gmc/gmc_8_2_d.h"
@@ -51,6 +52,7 @@
 
 #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
 #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
+#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
 #define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
 
 #define ARRAY_MODE(x)					((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
@@ -84,6 +86,8 @@
 	BPM_REG_FGCG_MAX
 };
 
+#define RLC_FormatDirectRegListLength        14
+
 MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
@@ -117,6 +121,20 @@
 MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -247,6 +265,66 @@
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
+static const u32 golden_settings_polaris11_a11[] =
+{
+	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
+	mmSQ_CONFIG, 0x07f80000, 0x07180000,
+	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
+	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+};
+
+static const u32 polaris11_golden_common_all[] =
+{
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
+	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+};
+
+static const u32 golden_settings_polaris10_a11[] =
+{
+	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
+	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
+	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
+	mmSQ_CONFIG, 0x07f80000, 0x07180000,
+	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
+	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+};
+
+static const u32 polaris10_golden_common_all[] =
+{
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
+	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
+	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
+	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+};
+
 static const u32 fiji_golden_common_all[] =
 {
 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
@@ -527,7 +605,7 @@
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
-  	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
@@ -558,6 +636,9 @@
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
+static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
+static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
 
 static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -596,6 +677,22 @@
 						 tonga_golden_common_all,
 						 (const u32)ARRAY_SIZE(tonga_golden_common_all));
 		break;
+	case CHIP_POLARIS11:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_polaris11_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+		amdgpu_program_register_sequence(adev,
+						 polaris11_golden_common_all,
+						 (const u32)ARRAY_SIZE(polaris11_golden_common_all));
+		break;
+	case CHIP_POLARIS10:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_polaris10_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+		amdgpu_program_register_sequence(adev,
+						 polaris10_golden_common_all,
+						 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+		break;
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
@@ -706,7 +803,7 @@
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err2;
 
@@ -739,6 +836,26 @@
 	return r;
 }
 
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+	release_firmware(adev->gfx.pfp_fw);
+	adev->gfx.pfp_fw = NULL;
+	release_firmware(adev->gfx.me_fw);
+	adev->gfx.me_fw = NULL;
+	release_firmware(adev->gfx.ce_fw);
+	adev->gfx.ce_fw = NULL;
+	release_firmware(adev->gfx.rlc_fw);
+	adev->gfx.rlc_fw = NULL;
+	release_firmware(adev->gfx.mec_fw);
+	adev->gfx.mec_fw = NULL;
+	if ((adev->asic_type != CHIP_STONEY) &&
+	    (adev->asic_type != CHIP_TOPAZ))
+		release_firmware(adev->gfx.mec2_fw);
+	adev->gfx.mec2_fw = NULL;
+
+	kfree(adev->gfx.rlc.register_list_format);
+}
+
 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
@@ -747,6 +864,8 @@
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 	const struct gfx_firmware_header_v1_0 *cp_hdr;
+	const struct rlc_firmware_header_v2_0 *rlc_hdr;
+	unsigned int *tmp = NULL, i;
 
 	DRM_DEBUG("\n");
 
@@ -763,6 +882,12 @@
 	case CHIP_FIJI:
 		chip_name = "fiji";
 		break;
+	case CHIP_POLARIS11:
+		chip_name = "polaris11";
+		break;
+	case CHIP_POLARIS10:
+		chip_name = "polaris10";
+		break;
 	case CHIP_STONEY:
 		chip_name = "stoney";
 		break;
@@ -808,9 +933,49 @@
 	if (err)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
-	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
-	adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
-	adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+	adev->gfx.rlc.save_and_restore_offset =
+			le32_to_cpu(rlc_hdr->save_and_restore_offset);
+	adev->gfx.rlc.clear_state_descriptor_offset =
+			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+	adev->gfx.rlc.avail_scratch_ram_locations =
+			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+	adev->gfx.rlc.reg_restore_list_size =
+			le32_to_cpu(rlc_hdr->reg_restore_list_size);
+	adev->gfx.rlc.reg_list_format_start =
+			le32_to_cpu(rlc_hdr->reg_list_format_start);
+	adev->gfx.rlc.reg_list_format_separate_start =
+			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+	adev->gfx.rlc.starting_offsets_start =
+			le32_to_cpu(rlc_hdr->starting_offsets_start);
+	adev->gfx.rlc.reg_list_format_size_bytes =
+			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+	adev->gfx.rlc.reg_list_size_bytes =
+			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+
+	adev->gfx.rlc.register_list_format =
+			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+					adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+
+	if (!adev->gfx.rlc.register_list_format) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+		adev->gfx.rlc.register_list_format[i] =	le32_to_cpu(tmp[i]);
+
+	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -911,6 +1076,153 @@
 	return err;
 }
 
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
+				    volatile u32 *buffer)
+{
+	u32 count = 0, i;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (adev->gfx.rlc.cs_data == NULL)
+		return;
+	if (buffer == NULL)
+		return;
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index -
+						PACKET3_SET_CONTEXT_REG_START);
+				for (i = 0; i < ext->reg_count; i++)
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
+			} else {
+				return;
+			}
+		}
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
+			PACKET3_SET_CONTEXT_REG_START);
+	switch (adev->asic_type) {
+	case CHIP_TONGA:
+	case CHIP_POLARIS10:
+		buffer[count++] = cpu_to_le32(0x16000012);
+		buffer[count++] = cpu_to_le32(0x0000002A);
+		break;
+	case CHIP_POLARIS11:
+		buffer[count++] = cpu_to_le32(0x16000012);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_FIJI:
+		buffer[count++] = cpu_to_le32(0x3a00161a);
+		buffer[count++] = cpu_to_le32(0x0000002e);
+		break;
+	case CHIP_TOPAZ:
+	case CHIP_CARRIZO:
+		buffer[count++] = cpu_to_le32(0x00000002);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_STONEY:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	default:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* clear state block */
+	if (adev->gfx.rlc.clear_state_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+		adev->gfx.rlc.clear_state_obj = NULL;
+	}
+}
+
+static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+{
+	volatile u32 *dst_ptr;
+	u32 dws;
+	const struct cs_section_def *cs_data;
+	int r;
+
+	adev->gfx.rlc.cs_data = vi_cs_data;
+
+	cs_data = adev->gfx.rlc.cs_data;
+
+	if (cs_data) {
+		/* clear state block */
+		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
+
+		if (adev->gfx.rlc.clear_state_obj == NULL) {
+			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, NULL,
+					     &adev->gfx.rlc.clear_state_obj);
+			if (r) {
+				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+				gfx_v8_0_rlc_fini(adev);
+				return r;
+			}
+		}
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0)) {
+			gfx_v8_0_rlc_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+				  &adev->gfx.rlc.clear_state_gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			gfx_v8_0_rlc_fini(adev);
+			return r;
+		}
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+			gfx_v8_0_rlc_fini(adev);
+			return r;
+		}
+		/* set up the cs buffer */
+		dst_ptr = adev->gfx.rlc.cs_ptr;
+		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
+		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+	}
+
+	return 0;
+}
+
 static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 {
 	int r;
@@ -1262,7 +1574,7 @@
 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
 	/* schedule the ib on the ring */
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r) {
 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
 		goto fail;
@@ -1296,12 +1608,13 @@
 	return r;
 }
 
-static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
+static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
 	u32 mc_shared_chmap, mc_arb_ramcfg;
 	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
 	u32 tmp;
+	int ret;
 
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
@@ -1338,6 +1651,34 @@
 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
 		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
 		break;
+	case CHIP_POLARIS11:
+		ret = amdgpu_atombios_get_gfx_info(adev);
+		if (ret)
+			return ret;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_POLARIS10:
+		ret = amdgpu_atombios_get_gfx_info(adev);
+		if (ret)
+			return ret;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+		break;
 	case CHIP_TONGA:
 		adev->gfx.config.max_shader_engines = 4;
 		adev->gfx.config.max_tile_pipes = 8;
@@ -1520,6 +1861,8 @@
 		break;
 	}
 	adev->gfx.config.gb_addr_config = gb_addr_config;
+
+	return 0;
 }
 
 static int gfx_v8_0_sw_init(void *handle)
@@ -1553,6 +1896,12 @@
 		return r;
 	}
 
+	r = gfx_v8_0_rlc_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
 	r = gfx_v8_0_mec_init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init MEC BOs!\n");
@@ -1570,7 +1919,7 @@
 			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
 		}
 
-		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
 				     AMDGPU_RING_TYPE_GFX);
@@ -1594,10 +1943,10 @@
 		ring->me = 1; /* first MEC */
 		ring->pipe = i / 8;
 		ring->queue = i % 8;
-		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
-		r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
 				     &adev->gfx.eop_irq, irq_type,
 				     AMDGPU_RING_TYPE_COMPUTE);
@@ -1629,7 +1978,9 @@
 
 	adev->gfx.ce_ram_size = 0x8000;
 
-	gfx_v8_0_gpu_early_init(adev);
+	r = gfx_v8_0_gpu_early_init(adev);
+	if (r)
+		return r;
 
 	return 0;
 }
@@ -1650,6 +2001,10 @@
 
 	gfx_v8_0_mec_fini(adev);
 
+	gfx_v8_0_rlc_fini(adev);
+
+	gfx_v8_0_free_microcode(adev);
+
 	return 0;
 }
 
@@ -2219,6 +2574,410 @@
 				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
 
 		break;
+	case CHIP_POLARIS11:
+		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16));
+		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+
+		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+
+		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
+
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			if (reg_offset != 7)
+				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
+
+		break;
+	case CHIP_POLARIS10:
+		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+
+		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+
+		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+
+		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
+
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			if (reg_offset != 7)
+				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
+
+		break;
 	case CHIP_STONEY:
 		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
 				PIPE_CONFIG(ADDR_SURF_P2) |
@@ -2695,6 +3454,7 @@
 	gfx_v8_0_tiling_mode_table_init(adev);
 
 	gfx_v8_0_setup_rb(adev);
+	gfx_v8_0_get_cu_info(adev);
 
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2788,6 +3548,188 @@
 	WREG32(mmCP_INT_CNTL_RING0, tmp);
 }
 
+static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
+{
+	/* csib */
+	WREG32(mmRLC_CSIB_ADDR_HI,
+			adev->gfx.rlc.clear_state_gpu_addr >> 32);
+	WREG32(mmRLC_CSIB_ADDR_LO,
+			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+	WREG32(mmRLC_CSIB_LENGTH,
+			adev->gfx.rlc.clear_state_size);
+}
+
+static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
+				int ind_offset,
+				int list_size,
+				int *unique_indices,
+				int *indices_count,
+				int max_indices,
+				int *ind_start_offsets,
+				int *offset_count,
+				int max_offset)
+{
+	int indices;
+	bool new_entry = true;
+
+	for (; ind_offset < list_size; ind_offset++) {
+
+		if (new_entry) {
+			new_entry = false;
+			ind_start_offsets[*offset_count] = ind_offset;
+			*offset_count = *offset_count + 1;
+			BUG_ON(*offset_count >= max_offset);
+		}
+
+		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
+			new_entry = true;
+			continue;
+		}
+
+		ind_offset += 2;
+
+		/* look for the matching index */
+		for (indices = 0;
+			indices < *indices_count;
+			indices++) {
+			if (unique_indices[indices] ==
+				register_list_format[ind_offset])
+				break;
+		}
+
+		if (indices >= *indices_count) {
+			unique_indices[*indices_count] =
+				register_list_format[ind_offset];
+			indices = *indices_count;
+			*indices_count = *indices_count + 1;
+			BUG_ON(*indices_count >= max_indices);
+		}
+
+		register_list_format[ind_offset] = indices;
+	}
+}
+
+static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
+{
+	int i, temp, data;
+	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
+	int indices_count = 0;
+	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	int offset_count = 0;
+
+	int list_size;
+	unsigned int *register_list_format =
+		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
+	if (register_list_format == NULL)
+		return -ENOMEM;
+	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
+			adev->gfx.rlc.reg_list_format_size_bytes);
+
+	gfx_v8_0_parse_ind_reg_list(register_list_format,
+				RLC_FormatDirectRegListLength,
+				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+				unique_indices,
+				&indices_count,
+				sizeof(unique_indices) / sizeof(int),
+				indirect_start_offsets,
+				&offset_count,
+				sizeof(indirect_start_offsets)/sizeof(int));
+
+	/* save and restore list */
+	temp = RREG32(mmRLC_SRM_CNTL);
+	temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
+	WREG32(mmRLC_SRM_CNTL, temp);
+
+	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
+	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
+		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
+
+	/* indirect list */
+	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
+	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
+		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
+
+	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
+	list_size = list_size >> 1;
+	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
+	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
+
+	/* starting offsets starts */
+	WREG32(mmRLC_GPM_SCRATCH_ADDR,
+		adev->gfx.rlc.starting_offsets_start);
+	for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+		WREG32(mmRLC_GPM_SCRATCH_DATA,
+				indirect_start_offsets[i]);
+
+	/* unique indices */
+	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
+	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
+	for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+		amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
+		amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+	}
+	kfree(register_list_format);
+
+	return 0;
+}
+
+static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
+{
+	uint32_t data;
+
+	data = RREG32(mmRLC_SRM_CNTL);
+	data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
+	WREG32(mmRLC_SRM_CNTL, data);
+}
+
+static void polaris11_init_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t data;
+
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			AMD_PG_SUPPORT_GFX_SMG |
+			AMD_PG_SUPPORT_GFX_DMG)) {
+		data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
+		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
+		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+		WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
+
+		data = 0;
+		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
+		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
+		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
+		data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
+		WREG32(mmRLC_PG_DELAY, data);
+
+		data = RREG32(mmRLC_PG_DELAY_2);
+		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
+		data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
+		WREG32(mmRLC_PG_DELAY_2, data);
+
+		data = RREG32(mmRLC_AUTO_PG_CTRL);
+		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
+		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
+		WREG32(mmRLC_AUTO_PG_CTRL, data);
+	}
+}
+
+static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v8_0_init_csb(adev);
+		gfx_v8_0_init_save_restore_list(adev);
+		gfx_v8_0_enable_save_restore_machine(adev);
+
+		if (adev->asic_type == CHIP_POLARIS11)
+			polaris11_init_power_gating(adev);
+	}
+}
+
 void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32(mmRLC_CNTL);
@@ -2858,12 +3800,17 @@
 
 	/* disable CG */
 	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+	if (adev->asic_type == CHIP_POLARIS11 ||
+		adev->asic_type == CHIP_POLARIS10)
+		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
 
 	/* disable PG */
 	WREG32(mmRLC_PG_CNTL, 0);
 
 	gfx_v8_0_rlc_reset(adev);
 
+	gfx_v8_0_init_pg(adev);
+
 	if (!adev->pp_enabled) {
 		if (!adev->firmware.smu_load) {
 			/* legacy rlc firmware loading */
@@ -3035,18 +3982,27 @@
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
+	case CHIP_POLARIS10:
 		amdgpu_ring_write(ring, 0x16000012);
 		amdgpu_ring_write(ring, 0x0000002A);
 		break;
+	case CHIP_POLARIS11:
+		amdgpu_ring_write(ring, 0x16000012);
+		amdgpu_ring_write(ring, 0x00000000);
+		break;
 	case CHIP_FIJI:
 		amdgpu_ring_write(ring, 0x3a00161a);
 		amdgpu_ring_write(ring, 0x0000002e);
 		break;
-	case CHIP_TOPAZ:
 	case CHIP_CARRIZO:
 		amdgpu_ring_write(ring, 0x00000002);
 		amdgpu_ring_write(ring, 0x00000000);
 		break;
+	case CHIP_TOPAZ:
+		amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+				0x00000000 : 0x00000002);
+		amdgpu_ring_write(ring, 0x00000000);
+		break;
 	case CHIP_STONEY:
 		amdgpu_ring_write(ring, 0x00000000);
 		amdgpu_ring_write(ring, 0x00000000);
@@ -3122,6 +4078,8 @@
 			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
 					    DOORBELL_OFFSET, ring->doorbell_index);
 			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+					    DOORBELL_HIT, 0);
+			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
 					    DOORBELL_EN, 1);
 		} else {
 			tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
@@ -3679,7 +4637,9 @@
 		if (use_doorbell) {
 			if ((adev->asic_type == CHIP_CARRIZO) ||
 			    (adev->asic_type == CHIP_FIJI) ||
-			    (adev->asic_type == CHIP_STONEY)) {
+			    (adev->asic_type == CHIP_STONEY) ||
+			    (adev->asic_type == CHIP_POLARIS11) ||
+			    (adev->asic_type == CHIP_POLARIS10)) {
 				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
 				       AMDGPU_DOORBELL_KIQ << 2);
 				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3713,7 +4673,9 @@
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
 		WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
 		mqd->cp_hqd_persistent_state = tmp;
-		if (adev->asic_type == CHIP_STONEY) {
+		if (adev->asic_type == CHIP_STONEY ||
+			adev->asic_type == CHIP_POLARIS11 ||
+			adev->asic_type == CHIP_POLARIS10) {
 			tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
 			WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -3845,6 +4807,9 @@
 	gfx_v8_0_rlc_stop(adev);
 	gfx_v8_0_cp_compute_fini(adev);
 
+	amdgpu_set_powergating_state(adev,
+			AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
+
 	return 0;
 }
 
@@ -3889,185 +4854,6 @@
 	return -ETIMEDOUT;
 }
 
-static void gfx_v8_0_print_status(void *handle)
-{
-	int i;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "GFX 8.x registers\n");
-	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
-		 RREG32(mmGRBM_STATUS));
-	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
-		 RREG32(mmGRBM_STATUS2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
-		 RREG32(mmGRBM_STATUS_SE0));
-	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
-		 RREG32(mmGRBM_STATUS_SE1));
-	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
-		 RREG32(mmGRBM_STATUS_SE2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
-		 RREG32(mmGRBM_STATUS_SE3));
-	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
-	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT2));
-	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT3));
-	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
-		 RREG32(mmCP_CPF_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPF_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
-	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPC_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-
-	for (i = 0; i < 32; i++) {
-		dev_info(adev->dev, "  GB_TILE_MODE%d=0x%08X\n",
-			 i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
-	}
-	for (i = 0; i < 16; i++) {
-		dev_info(adev->dev, "  GB_MACROTILE_MODE%d=0x%08X\n",
-			 i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
-	}
-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
-		dev_info(adev->dev, "  se: %d\n", i);
-		gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
-		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG=0x%08X\n",
-			 RREG32(mmPA_SC_RASTER_CONFIG));
-		dev_info(adev->dev, "  PA_SC_RASTER_CONFIG_1=0x%08X\n",
-			 RREG32(mmPA_SC_RASTER_CONFIG_1));
-	}
-	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
-
-	dev_info(adev->dev, "  GB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmGB_ADDR_CONFIG));
-	dev_info(adev->dev, "  HDP_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmHDP_ADDR_CONFIG));
-	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
-		 RREG32(mmDMIF_ADDR_CALC));
-
-	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
-		 RREG32(mmCP_MEQ_THRESHOLDS));
-	dev_info(adev->dev, "  SX_DEBUG_1=0x%08X\n",
-		 RREG32(mmSX_DEBUG_1));
-	dev_info(adev->dev, "  TA_CNTL_AUX=0x%08X\n",
-		 RREG32(mmTA_CNTL_AUX));
-	dev_info(adev->dev, "  SPI_CONFIG_CNTL=0x%08X\n",
-		 RREG32(mmSPI_CONFIG_CNTL));
-	dev_info(adev->dev, "  SQ_CONFIG=0x%08X\n",
-		 RREG32(mmSQ_CONFIG));
-	dev_info(adev->dev, "  DB_DEBUG=0x%08X\n",
-		 RREG32(mmDB_DEBUG));
-	dev_info(adev->dev, "  DB_DEBUG2=0x%08X\n",
-		 RREG32(mmDB_DEBUG2));
-	dev_info(adev->dev, "  DB_DEBUG3=0x%08X\n",
-		 RREG32(mmDB_DEBUG3));
-	dev_info(adev->dev, "  CB_HW_CONTROL=0x%08X\n",
-		 RREG32(mmCB_HW_CONTROL));
-	dev_info(adev->dev, "  SPI_CONFIG_CNTL_1=0x%08X\n",
-		 RREG32(mmSPI_CONFIG_CNTL_1));
-	dev_info(adev->dev, "  PA_SC_FIFO_SIZE=0x%08X\n",
-		 RREG32(mmPA_SC_FIFO_SIZE));
-	dev_info(adev->dev, "  VGT_NUM_INSTANCES=0x%08X\n",
-		 RREG32(mmVGT_NUM_INSTANCES));
-	dev_info(adev->dev, "  CP_PERFMON_CNTL=0x%08X\n",
-		 RREG32(mmCP_PERFMON_CNTL));
-	dev_info(adev->dev, "  PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
-		 RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
-	dev_info(adev->dev, "  VGT_CACHE_INVALIDATION=0x%08X\n",
-		 RREG32(mmVGT_CACHE_INVALIDATION));
-	dev_info(adev->dev, "  VGT_GS_VERTEX_REUSE=0x%08X\n",
-		 RREG32(mmVGT_GS_VERTEX_REUSE));
-	dev_info(adev->dev, "  PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
-		 RREG32(mmPA_SC_LINE_STIPPLE_STATE));
-	dev_info(adev->dev, "  PA_CL_ENHANCE=0x%08X\n",
-		 RREG32(mmPA_CL_ENHANCE));
-	dev_info(adev->dev, "  PA_SC_ENHANCE=0x%08X\n",
-		 RREG32(mmPA_SC_ENHANCE));
-
-	dev_info(adev->dev, "  CP_ME_CNTL=0x%08X\n",
-		 RREG32(mmCP_ME_CNTL));
-	dev_info(adev->dev, "  CP_MAX_CONTEXT=0x%08X\n",
-		 RREG32(mmCP_MAX_CONTEXT));
-	dev_info(adev->dev, "  CP_ENDIAN_SWAP=0x%08X\n",
-		 RREG32(mmCP_ENDIAN_SWAP));
-	dev_info(adev->dev, "  CP_DEVICE_ID=0x%08X\n",
-		 RREG32(mmCP_DEVICE_ID));
-
-	dev_info(adev->dev, "  CP_SEM_WAIT_TIMER=0x%08X\n",
-		 RREG32(mmCP_SEM_WAIT_TIMER));
-
-	dev_info(adev->dev, "  CP_RB_WPTR_DELAY=0x%08X\n",
-		 RREG32(mmCP_RB_WPTR_DELAY));
-	dev_info(adev->dev, "  CP_RB_VMID=0x%08X\n",
-		 RREG32(mmCP_RB_VMID));
-	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
-		 RREG32(mmCP_RB0_CNTL));
-	dev_info(adev->dev, "  CP_RB0_WPTR=0x%08X\n",
-		 RREG32(mmCP_RB0_WPTR));
-	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR=0x%08X\n",
-		 RREG32(mmCP_RB0_RPTR_ADDR));
-	dev_info(adev->dev, "  CP_RB0_RPTR_ADDR_HI=0x%08X\n",
-		 RREG32(mmCP_RB0_RPTR_ADDR_HI));
-	dev_info(adev->dev, "  CP_RB0_CNTL=0x%08X\n",
-		 RREG32(mmCP_RB0_CNTL));
-	dev_info(adev->dev, "  CP_RB0_BASE=0x%08X\n",
-		 RREG32(mmCP_RB0_BASE));
-	dev_info(adev->dev, "  CP_RB0_BASE_HI=0x%08X\n",
-		 RREG32(mmCP_RB0_BASE_HI));
-	dev_info(adev->dev, "  CP_MEC_CNTL=0x%08X\n",
-		 RREG32(mmCP_MEC_CNTL));
-	dev_info(adev->dev, "  CP_CPF_DEBUG=0x%08X\n",
-		 RREG32(mmCP_CPF_DEBUG));
-
-	dev_info(adev->dev, "  SCRATCH_ADDR=0x%08X\n",
-		 RREG32(mmSCRATCH_ADDR));
-	dev_info(adev->dev, "  SCRATCH_UMSK=0x%08X\n",
-		 RREG32(mmSCRATCH_UMSK));
-
-	dev_info(adev->dev, "  CP_INT_CNTL_RING0=0x%08X\n",
-		 RREG32(mmCP_INT_CNTL_RING0));
-	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTL));
-	dev_info(adev->dev, "  RLC_CNTL=0x%08X\n",
-		 RREG32(mmRLC_CNTL));
-	dev_info(adev->dev, "  RLC_CGCG_CGLS_CTRL=0x%08X\n",
-		 RREG32(mmRLC_CGCG_CGLS_CTRL));
-	dev_info(adev->dev, "  RLC_LB_CNTR_INIT=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTR_INIT));
-	dev_info(adev->dev, "  RLC_LB_CNTR_MAX=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTR_MAX));
-	dev_info(adev->dev, "  RLC_LB_INIT_CU_MASK=0x%08X\n",
-		 RREG32(mmRLC_LB_INIT_CU_MASK));
-	dev_info(adev->dev, "  RLC_LB_PARAMS=0x%08X\n",
-		 RREG32(mmRLC_LB_PARAMS));
-	dev_info(adev->dev, "  RLC_LB_CNTL=0x%08X\n",
-		 RREG32(mmRLC_LB_CNTL));
-	dev_info(adev->dev, "  RLC_MC_CNTL=0x%08X\n",
-		 RREG32(mmRLC_MC_CNTL));
-	dev_info(adev->dev, "  RLC_UCODE_CNTL=0x%08X\n",
-		 RREG32(mmRLC_UCODE_CNTL));
-
-	mutex_lock(&adev->srbm_mutex);
-	for (i = 0; i < 16; i++) {
-		vi_srbm_select(adev, 0, 0, 0, i);
-		dev_info(adev->dev, "  VM %d:\n", i);
-		dev_info(adev->dev, "  SH_MEM_CONFIG=0x%08X\n",
-			 RREG32(mmSH_MEM_CONFIG));
-		dev_info(adev->dev, "  SH_MEM_APE1_BASE=0x%08X\n",
-			 RREG32(mmSH_MEM_APE1_BASE));
-		dev_info(adev->dev, "  SH_MEM_APE1_LIMIT=0x%08X\n",
-			 RREG32(mmSH_MEM_APE1_LIMIT));
-		dev_info(adev->dev, "  SH_MEM_BASES=0x%08X\n",
-			 RREG32(mmSH_MEM_BASES));
-	}
-	vi_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-}
-
 static int gfx_v8_0_soft_reset(void *handle)
 {
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -4108,7 +4894,6 @@
 						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		gfx_v8_0_print_status((void *)adev);
 		/* stop the rlc */
 		gfx_v8_0_rlc_stop(adev);
 
@@ -4168,7 +4953,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-		gfx_v8_0_print_status((void *)adev);
 	}
 	return 0;
 }
@@ -4250,6 +5034,7 @@
 	gfx_v8_0_set_ring_funcs(adev);
 	gfx_v8_0_set_irq_funcs(adev);
 	gfx_v8_0_set_gds_init(adev);
+	gfx_v8_0_set_rlc_funcs(adev);
 
 	return 0;
 }
@@ -4272,17 +5057,109 @@
 	if (r)
 		return r;
 
+	amdgpu_set_powergating_state(adev,
+			AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
+
 	return 0;
 }
 
+static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+		bool enable)
+{
+	uint32_t data, temp;
+
+	/* Send msg to SMU via Powerplay */
+	amdgpu_set_powergating_state(adev,
+			AMD_IP_BLOCK_TYPE_SMC,
+			enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+
+	if (enable) {
+		/* Enable static MGPG */
+		temp = data = RREG32(mmRLC_PG_CNTL);
+		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+
+		if (temp != data)
+			WREG32(mmRLC_PG_CNTL, data);
+	} else {
+		temp = data = RREG32(mmRLC_PG_CNTL);
+		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+
+		if (temp != data)
+			WREG32(mmRLC_PG_CNTL, data);
+	}
+}
+
+static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+		bool enable)
+{
+	uint32_t data, temp;
+
+	if (enable) {
+		/* Enable dynamic MGPG */
+		temp = data = RREG32(mmRLC_PG_CNTL);
+		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+
+		if (temp != data)
+			WREG32(mmRLC_PG_CNTL, data);
+	} else {
+		temp = data = RREG32(mmRLC_PG_CNTL);
+		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+
+		if (temp != data)
+			WREG32(mmRLC_PG_CNTL, data);
+	}
+}
+
+static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
+		bool enable)
+{
+	uint32_t data, temp;
+
+	if (enable) {
+		/* Enable quick PG */
+		temp = data = RREG32(mmRLC_PG_CNTL);
+		data |= 0x100000;
+
+		if (temp != data)
+			WREG32(mmRLC_PG_CNTL, data);
+	} else {
+		temp = data = RREG32(mmRLC_PG_CNTL);
+		data &= ~0x100000;
+
+		if (temp != data)
+			WREG32(mmRLC_PG_CNTL, data);
+	}
+}
+
 static int gfx_v8_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+		return 0;
+
+	switch (adev->asic_type) {
+	case CHIP_POLARIS11:
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
+			polaris11_enable_gfx_static_mg_power_gating(adev,
+					state == AMD_PG_STATE_GATE ? true : false);
+		else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
+			polaris11_enable_gfx_dynamic_mg_power_gating(adev,
+					state == AMD_PG_STATE_GATE ? true : false);
+		else
+			polaris11_enable_gfx_quick_mg_power_gating(adev,
+					state == AMD_PG_STATE_GATE ? true : false);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
-static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
-		uint32_t reg_addr, uint32_t cmd)
+static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
+				     uint32_t reg_addr, uint32_t cmd)
 {
 	uint32_t data;
 
@@ -4292,7 +5169,8 @@
 	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
 
 	data = RREG32(mmRLC_SERDES_WR_CTRL);
-	data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+	if (adev->asic_type == CHIP_STONEY)
+			data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
 			RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
 			RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
 			RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
@@ -4300,42 +5178,218 @@
 			RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
 			RLC_SERDES_WR_CTRL__POWER_UP_MASK |
 			RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
-			RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
-			RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
 			RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+	else
+		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
+			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
+			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
 	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
-			(cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
-			(reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
-			(0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
+		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
+		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
+		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
 
 	WREG32(mmRLC_SERDES_WR_CTRL, data);
 }
 
-static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
-		bool enable)
+#define MSG_ENTER_RLC_SAFE_MODE     1
+#define MSG_EXIT_RLC_SAFE_MODE      0
+
+#define RLC_GPR_REG2__REQ_MASK           0x00000001
+#define RLC_GPR_REG2__MESSAGE__SHIFT     0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK       0x0000001e
+
+static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+	u32 data = 0;
+	unsigned i;
+
+	data = RREG32(mmRLC_CNTL);
+	if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
+		return;
+
+	if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
+	    (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
+			       AMD_PG_SUPPORT_GFX_DMG))) {
+		data |= RLC_GPR_REG2__REQ_MASK;
+		data &= ~RLC_GPR_REG2__MESSAGE_MASK;
+		data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
+		WREG32(mmRLC_GPR_REG2, data);
+
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if ((RREG32(mmRLC_GPM_STAT) &
+			     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+			      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+			    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+			     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+				break;
+			udelay(1);
+		}
+
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+				break;
+			udelay(1);
+		}
+		adev->gfx.rlc.in_safe_mode = true;
+	}
+}
+
+static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+	u32 data;
+	unsigned i;
+
+	data = RREG32(mmRLC_CNTL);
+	if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
+		return;
+
+	if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
+	    (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
+			       AMD_PG_SUPPORT_GFX_DMG))) {
+		data |= RLC_GPR_REG2__REQ_MASK;
+		data &= ~RLC_GPR_REG2__MESSAGE_MASK;
+		data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
+		WREG32(mmRLC_GPR_REG2, data);
+		adev->gfx.rlc.in_safe_mode = false;
+	}
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+	u32 data;
+	unsigned i;
+
+	data = RREG32(mmRLC_CNTL);
+	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
+		return;
+
+	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+		data |= RLC_SAFE_MODE__CMD_MASK;
+		data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+		WREG32(mmRLC_SAFE_MODE, data);
+
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if ((RREG32(mmRLC_GPM_STAT) &
+			     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+			      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+			    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+			     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+				break;
+			udelay(1);
+		}
+
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+				break;
+			udelay(1);
+		}
+		adev->gfx.rlc.in_safe_mode = true;
+	}
+}
+
+static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+	u32 data = 0;
+	unsigned i;
+
+	data = RREG32(mmRLC_CNTL);
+	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
+		return;
+
+	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+		if (adev->gfx.rlc.in_safe_mode) {
+			data |= RLC_SAFE_MODE__CMD_MASK;
+			data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+			WREG32(mmRLC_SAFE_MODE, data);
+			adev->gfx.rlc.in_safe_mode = false;
+		}
+	}
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+	adev->gfx.rlc.in_safe_mode = true;
+}
+
+static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+	adev->gfx.rlc.in_safe_mode = false;
+}
+
+static const struct amdgpu_rlc_funcs cz_rlc_funcs = {
+	.enter_safe_mode = cz_enter_rlc_safe_mode,
+	.exit_safe_mode = cz_exit_rlc_safe_mode
+};
+
+static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
+	.enter_safe_mode = iceland_enter_rlc_safe_mode,
+	.exit_safe_mode = iceland_exit_rlc_safe_mode
+};
+
+static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = {
+	.enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode,
+	.exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode
+};
+
+static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						      bool enable)
 {
 	uint32_t temp, data;
 
-	/* It is disabled by HW by default */
-	if (enable) {
-		/* 1 - RLC memory Light sleep */
-		temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
-		data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
-		if (temp != data)
-			WREG32(mmRLC_MEM_SLP_CNTL, data);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
-		/* 2 - CP memory Light sleep */
-		temp = data = RREG32(mmCP_MEM_SLP_CNTL);
-		data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
-		if (temp != data)
-			WREG32(mmCP_MEM_SLP_CNTL, data);
+	/* It is disabled by HW by default */
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+				/* 1 - RLC memory Light sleep */
+				temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
+				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+				if (temp != data)
+					WREG32(mmRLC_MEM_SLP_CNTL, data);
+			}
+
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+				/* 2 - CP memory Light sleep */
+				temp = data = RREG32(mmCP_MEM_SLP_CNTL);
+				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+				if (temp != data)
+					WREG32(mmCP_MEM_SLP_CNTL, data);
+			}
+		}
 
 		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
 		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
-		data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
-				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
-				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
-				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
+		if (adev->flags & AMD_IS_APU)
+			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
+				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
+				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
+		else
+			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
+				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
+				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
+				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
 
 		if (temp != data)
 			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -4344,19 +5398,23 @@
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 
 		/* 5 - clear mgcg override */
-		fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
+		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
 
-		/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
-		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
-		data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
-		data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
-		data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
-		data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
-		data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
-		data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
-		data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
-		if (temp != data)
-			WREG32(mmCGTS_SM_CTRL_REG, data);
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
+			/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
+			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
+			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
+			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
+			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
+			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
+			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
+			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
+				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
+			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
+			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
+			if (temp != data)
+				WREG32(mmCGTS_SM_CTRL_REG, data);
+		}
 		udelay(50);
 
 		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
@@ -4396,23 +5454,27 @@
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 
 		/* 6 - set mgcg override */
-		fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
+		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
 
 		udelay(50);
 
 		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 	}
+
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 }
 
-static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
-		bool enable)
+static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+						      bool enable)
 {
 	uint32_t temp, temp1, data, data1;
 
 	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-	if (enable) {
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		/* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
 		 * Cmp_busy/GFX_Idle interrupts
 		 */
@@ -4427,25 +5489,29 @@
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 
 		/* 3 - clear cgcg override */
-		fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
+		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
 
 		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 
 		/* 4 - write cmd to set CGLS */
-		fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
+		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
 
 		/* 5 - enable cgcg */
 		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
 
-		/* enable cgls*/
-		data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+			/* enable cgls*/
+			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
 
-		temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
-		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
+			temp1 = data1 =	RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
 
-		if (temp1 != data1)
-			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
+			if (temp1 != data1)
+				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
+		} else {
+			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+		}
 
 		if (temp != data)
 			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
@@ -4470,36 +5536,38 @@
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 
 		/* write cmd to Set CGCG Override */
-		fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
+		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
 
 		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 
 		/* write cmd to Clear CGLS */
-		fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
+		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
 
 		/* disable cgcg, cgls should be disabled too. */
 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
-				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
 		if (temp != data)
 			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
 	}
+
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 }
-static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev,
-		bool enable)
+static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+					    bool enable)
 {
 	if (enable) {
 		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
 		 * ===  MGCG + MGLS + TS(CG/LS) ===
 		 */
-		fiji_update_medium_grain_clock_gating(adev, enable);
-		fiji_update_coarse_grain_clock_gating(adev, enable);
+		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
+		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
 	} else {
 		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
 		 * ===  CGCG + CGLS ===
 		 */
-		fiji_update_coarse_grain_clock_gating(adev, enable);
-		fiji_update_medium_grain_clock_gating(adev, enable);
+		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
+		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
 	}
 	return 0;
 }
@@ -4511,8 +5579,10 @@
 
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
-		fiji_update_gfx_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		gfx_v8_0_update_gfx_clock_gating(adev,
+						 state == AMD_CG_STATE_GATE ? true : false);
 		break;
 	default:
 		break;
@@ -4602,17 +5672,13 @@
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+				      struct amdgpu_ib *ib,
+				      unsigned vm_id, bool ctx_switch)
 {
-	bool need_ctx_switch = ring->current_ctx != ib->ctx;
 	u32 header, control = 0;
 	u32 next_rptr = ring->wptr + 5;
 
-	/* drop the CE preamble IB for the same context */
-	if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
-		return;
-
-	if (need_ctx_switch)
+	if (ctx_switch)
 		next_rptr += 2;
 
 	next_rptr += 4;
@@ -4623,7 +5689,7 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
-	if (need_ctx_switch) {
+	if (ctx_switch) {
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 	}
@@ -4633,7 +5699,7 @@
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw | (ib->vm_id << 24);
+	control |= ib->length_dw | (vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -4646,7 +5712,8 @@
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+					  struct amdgpu_ib *ib,
+					  unsigned vm_id, bool ctx_switch)
 {
 	u32 header, control = 0;
 	u32 next_rptr = ring->wptr + 5;
@@ -4662,7 +5729,7 @@
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw | (ib->vm_id << 24);
+	control |= ib->length_dw | (vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -4684,6 +5751,7 @@
 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
+				 EOP_TC_WB_ACTION_EN |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 				 EVENT_INDEX(5)));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
@@ -5022,6 +6090,7 @@
 }
 
 const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,
 	.late_init = gfx_v8_0_late_init,
 	.sw_init = gfx_v8_0_sw_init,
@@ -5033,7 +6102,6 @@
 	.is_idle = gfx_v8_0_is_idle,
 	.wait_for_idle = gfx_v8_0_wait_for_idle,
 	.soft_reset = gfx_v8_0_soft_reset,
-	.print_status = gfx_v8_0_print_status,
 	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
 	.set_powergating_state = gfx_v8_0_set_powergating_state,
 };
@@ -5112,6 +6180,22 @@
 	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
 }
 
+static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+	case CHIP_STONEY:
+		adev->gfx.rlc.funcs = &iceland_rlc_funcs;
+		break;
+	case CHIP_CARRIZO:
+		adev->gfx.rlc.funcs = &cz_rlc_funcs;
+		break;
+	default:
+		adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs;
+		break;
+	}
+}
+
 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
 {
 	/* init asic gds info */
@@ -5155,14 +6239,11 @@
 	return (~data) & mask;
 }
 
-int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
-			 struct amdgpu_cu_info *cu_info)
+static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
 {
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
-
-	if (!adev || !cu_info)
-		return -EINVAL;
+	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
 
 	memset(cu_info, 0, sizeof(*cu_info));
 
@@ -5193,6 +6274,4 @@
 
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
-
-	return 0;
 }
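
The gfx_v8_0.c hunks above bracket the clock- and power-gating updates with the new per-ASIC enter/exit safe-mode callbacks. The handshake those callbacks implement is: set a request bit together with a message code, then poll until the RLC clears the request bit, bounded by a timeout. Below is a minimal userspace sketch of that pattern; the register layout, bit positions and ack behaviour are illustrative stand-ins, not the real RLC_GPR_REG2/RLC_SAFE_MODE programming.

/*
 * Sketch of the RLC safe-mode request/ack handshake: write REQ plus a
 * message code, then poll for the hardware to clear REQ, with a timeout.
 * The fake register below "acks" after a few polls purely for the demo.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_MASK        0x00000001u	/* hypothetical bit layout */
#define MESSAGE_SHIFT   1
#define MESSAGE_MASK    0x0000001eu
#define MSG_ENTER_SAFE  1

#define USEC_TIMEOUT    100

static uint32_t fake_reg;		/* stands in for the RLC request register */
static int poll_count;

static uint32_t rreg32(void)
{
	/* pretend the firmware acknowledges the request after a few polls */
	if (poll_count++ > 3)
		fake_reg &= ~REQ_MASK;
	return fake_reg;
}

static void wreg32(uint32_t val) { fake_reg = val; }

static bool enter_safe_mode(void)
{
	uint32_t data = rreg32();
	unsigned i;

	data |= REQ_MASK;
	data &= ~MESSAGE_MASK;
	data |= (uint32_t)MSG_ENTER_SAFE << MESSAGE_SHIFT;
	wreg32(data);

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if ((rreg32() & REQ_MASK) == 0)
			return true;	/* request acknowledged */
		/* udelay(1) would go here in the kernel version */
	}
	return false;			/* timed out, hardware never acked */
}

int main(void)
{
	printf("safe mode %s\n", enter_safe_mode() ? "entered" : "timed out");
	return 0;
}
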
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 021e051..16a49f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -28,6 +28,5 @@
 
 uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
 void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
 
 #endif
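
gfx_v8_0_get_cu_info() is now static and fills adev->gfx.cu_info directly, which is why the exported prototype above goes away. Its core job is counting active compute units from per-SE/SH bitmaps; a compact sketch of that counting pattern follows, with made-up masks and limits rather than the real CU bitmap layout.

/* count active units in a bitmap, capped at units_per_sh (illustrative) */
#include <stdint.h>
#include <stdio.h>

static unsigned count_active(uint32_t bitmap, unsigned units_per_sh)
{
	unsigned count = 0, k;

	for (k = 0; k < units_per_sh; k++) {
		if (bitmap & (1u << k))
			count++;
	}
	return count;
}

int main(void)
{
	uint32_t cu_bitmap = 0x2ff;	/* hypothetical per-SH CU mask */

	printf("active CUs: %u\n", count_active(cu_bitmap, 10));
	return 0;
}
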
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a4a2e6c..1feb643 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1117,114 +1117,6 @@
 
 }
 
-static void gmc_v7_0_print_status(void *handle)
-{
-	int i, j;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "GMC 8.x registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
-	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
-		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
-	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL));
-	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL2));
-	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL3));
-	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_CNTL2));
-	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_CNTL));
-	dev_info(adev->dev, "  0x15D4=0x%08X\n",
-		 RREG32(0x575));
-	dev_info(adev->dev, "  0x15D8=0x%08X\n",
-		 RREG32(0x576));
-	dev_info(adev->dev, "  0x15DC=0x%08X\n",
-		 RREG32(0x577));
-	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_CNTL2));
-	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_CNTL));
-	for (i = 0; i < 16; i++) {
-		if (i < 8)
-			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
-				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
-		else
-			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
-				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
-	}
-	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
-		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
-	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
-		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
-	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
-		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
-	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
-		 RREG32(mmMC_VM_FB_LOCATION));
-	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
-		 RREG32(mmMC_VM_AGP_BASE));
-	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
-		 RREG32(mmMC_VM_AGP_TOP));
-	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
-		 RREG32(mmMC_VM_AGP_BOT));
-
-	if (adev->asic_type == CHIP_KAVERI) {
-		dev_info(adev->dev, "  CHUB_CONTROL=0x%08X\n",
-			 RREG32(mmCHUB_CONTROL));
-	}
-
-	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
-		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
-	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
-		 RREG32(mmHDP_NONSURFACE_BASE));
-	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
-		 RREG32(mmHDP_NONSURFACE_INFO));
-	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
-		 RREG32(mmHDP_NONSURFACE_SIZE));
-	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
-		 RREG32(mmHDP_MISC_CNTL));
-	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
-		 RREG32(mmHDP_HOST_PATH_CNTL));
-
-	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
-		dev_info(adev->dev, "  %d:\n", i);
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb05 + j, RREG32(0xb05 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb06 + j, RREG32(0xb06 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb07 + j, RREG32(0xb07 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb08 + j, RREG32(0xb08 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb09 + j, RREG32(0xb09 + j));
-	}
-
-	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
-		 RREG32(mmBIF_FB_EN));
-}
-
 static int gmc_v7_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1244,8 +1136,6 @@
 	}
 
 	if (srbm_soft_reset) {
-		gmc_v7_0_print_status((void *)adev);
-
 		gmc_v7_0_mc_stop(adev, &save);
 		if (gmc_v7_0_wait_for_idle(adev)) {
 			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@@ -1269,8 +1159,6 @@
 
 		gmc_v7_0_mc_resume(adev, &save);
 		udelay(50);
-
-		gmc_v7_0_print_status((void *)adev);
 	}
 
 	return 0;
@@ -1373,6 +1261,7 @@
 }
 
 const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+	.name = "gmc_v7_0",
 	.early_init = gmc_v7_0_early_init,
 	.late_init = gmc_v7_0_late_init,
 	.sw_init = gmc_v7_0_sw_init,
@@ -1384,7 +1273,6 @@
 	.is_idle = gmc_v7_0_is_idle,
 	.wait_for_idle = gmc_v7_0_wait_for_idle,
 	.soft_reset = gmc_v7_0_soft_reset,
-	.print_status = gmc_v7_0_print_status,
 	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
 	.set_powergating_state = gmc_v7_0_set_powergating_state,
 };
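
The gmc_v7_0 changes drop the per-block print_status register dump and add a .name field to the amd_ip_funcs table instead, so generic code can identify the block in messages. A minimal sketch of that "named ops table" idea is below; the structure and callback names are illustrative stand-ins, not the kernel's amd_ip_funcs layout.

/*
 * Sketch of a named IP-block ops table: the name lets shared code report
 * which block an operation ran on without each block keeping its own
 * register-dumping hook.
 */
#include <stdio.h>

struct ip_funcs {
	const char *name;
	int (*soft_reset)(void *handle);
};

static int dummy_reset(void *handle)
{
	(void)handle;
	return 0;
}

static const struct ip_funcs gmc_funcs = {
	.name = "gmc_v7_0",
	.soft_reset = dummy_reset,
};

static void run_reset(const struct ip_funcs *f, void *handle)
{
	int r = f->soft_reset(handle);

	/* generic caller logs by block name rather than dumping registers */
	printf("%s: soft reset %s\n", f->name, r ? "failed" : "done");
}

int main(void)
{
	run_reset(&gmc_funcs, NULL);
	return 0;
}
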
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 7a9db2c..9945d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -43,6 +43,8 @@
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -73,6 +75,23 @@
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_polaris11_a11[] =
+{
+	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 golden_settings_polaris10_a11[] =
+{
+	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
+	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
 static const u32 cz_mgcg_cgcg_init[] =
 {
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -103,6 +122,16 @@
 						 golden_settings_tonga_a11,
 						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
 		break;
+	case CHIP_POLARIS11:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_polaris11_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+		break;
+	case CHIP_POLARIS10:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_polaris10_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+		break;
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
@@ -209,6 +238,12 @@
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_POLARIS11:
+		chip_name = "polaris11";
+		break;
+	case CHIP_POLARIS10:
+		chip_name = "polaris10";
+		break;
 	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -1085,111 +1120,6 @@
 
 }
 
-static void gmc_v8_0_print_status(void *handle)
-{
-	int i, j;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "GMC 8.x registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
-	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
-		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
-	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL));
-	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL2));
-	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL3));
-	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
-		 RREG32(mmVM_L2_CNTL4));
-	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_CNTL2));
-	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
-		 RREG32(mmVM_CONTEXT0_CNTL));
-	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
-		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
-	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
-		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
-	dev_info(adev->dev, "  mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
-		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
-	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_CNTL2));
-	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_CNTL));
-	for (i = 0; i < 16; i++) {
-		if (i < 8)
-			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
-				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
-		else
-			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
-				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
-	}
-	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
-		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
-	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
-		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
-	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
-		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
-	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
-		 RREG32(mmMC_VM_FB_LOCATION));
-	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
-		 RREG32(mmMC_VM_AGP_BASE));
-	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
-		 RREG32(mmMC_VM_AGP_TOP));
-	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
-		 RREG32(mmMC_VM_AGP_BOT));
-
-	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
-		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
-	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
-		 RREG32(mmHDP_NONSURFACE_BASE));
-	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
-		 RREG32(mmHDP_NONSURFACE_INFO));
-	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
-		 RREG32(mmHDP_NONSURFACE_SIZE));
-	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
-		 RREG32(mmHDP_MISC_CNTL));
-	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
-		 RREG32(mmHDP_HOST_PATH_CNTL));
-
-	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
-		dev_info(adev->dev, "  %d:\n", i);
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb05 + j, RREG32(0xb05 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb06 + j, RREG32(0xb06 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb07 + j, RREG32(0xb07 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb08 + j, RREG32(0xb08 + j));
-		dev_info(adev->dev, "  0x%04X=0x%08X\n",
-			 0xb09 + j, RREG32(0xb09 + j));
-	}
-
-	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
-		 RREG32(mmBIF_FB_EN));
-}
-
 static int gmc_v8_0_soft_reset(void *handle)
 {
 	struct amdgpu_mode_mc_save save;
@@ -1209,8 +1139,6 @@
 	}
 
 	if (srbm_soft_reset) {
-		gmc_v8_0_print_status((void *)adev);
-
 		gmc_v8_0_mc_stop(adev, &save);
 		if (gmc_v8_0_wait_for_idle(adev)) {
 			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@@ -1234,8 +1162,6 @@
 
 		gmc_v8_0_mc_resume(adev, &save);
 		udelay(50);
-
-		gmc_v8_0_print_status((void *)adev);
 	}
 
 	return 0;
@@ -1313,11 +1239,11 @@
 }
 
 static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
-		bool enable)
+						     bool enable)
 {
 	uint32_t data;
 
-	if (enable) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
 		data = RREG32(mmMC_HUB_MISC_HUB_CG);
 		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
 		WREG32(mmMC_HUB_MISC_HUB_CG, data);
@@ -1393,11 +1319,11 @@
 }
 
 static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
-		bool enable)
+				       bool enable)
 {
 	uint32_t data;
 
-	if (enable) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
 		data = RREG32(mmMC_HUB_MISC_HUB_CG);
 		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
 		WREG32(mmMC_HUB_MISC_HUB_CG, data);
@@ -1497,6 +1423,7 @@
 }
 
 const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+	.name = "gmc_v8_0",
 	.early_init = gmc_v8_0_early_init,
 	.late_init = gmc_v8_0_late_init,
 	.sw_init = gmc_v8_0_sw_init,
@@ -1508,7 +1435,6 @@
 	.is_idle = gmc_v8_0_is_idle,
 	.wait_for_idle = gmc_v8_0_wait_for_idle,
 	.soft_reset = gmc_v8_0_soft_reset,
-	.print_status = gmc_v8_0_print_status,
 	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
 	.set_powergating_state = gmc_v8_0_set_powergating_state,
 };
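
The golden_settings_polaris* tables added above are flat arrays of (register, mask, value) triples applied through amdgpu_program_register_sequence(). That helper is not part of this diff, so the sketch below only illustrates how such a triple table can be consumed; the "clear the masked field, then OR in the value" interpretation and the register indices are assumptions for illustration.

/*
 * Apply a (reg, mask, value) table to a fake register file: for each
 * triple, clear the masked bits and install the golden value.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint32_t regs[16];			/* fake register file */
static uint32_t rreg32(uint32_t r) { return regs[r]; }
static void wreg32(uint32_t r, uint32_t v) { regs[r] = v; }

/* hypothetical indices standing in for mmVM_PRT_APERTUREx_LOW_ADDR */
static const uint32_t golden_settings[] = {
	2, 0x0fffffff, 0x0fffffff,
	3, 0x0fffffff, 0x0fffffff,
};

static void program_register_sequence(const uint32_t *tbl, size_t len)
{
	size_t i;

	for (i = 0; i + 2 < len; i += 3) {
		uint32_t reg = tbl[i], mask = tbl[i + 1], val = tbl[i + 2];
		uint32_t tmp = rreg32(reg);

		tmp &= ~mask;		/* drop the field being overridden */
		tmp |= (val & mask);	/* install the golden value */
		wreg32(reg, tmp);
	}
}

int main(void)
{
	program_register_sequence(golden_settings, ARRAY_SIZE(golden_settings));
	printf("reg2=0x%08x reg3=0x%08x\n", (unsigned)regs[2], (unsigned)regs[3]);
	return 0;
}
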
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 208d55f4..825ccd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -72,6 +72,11 @@
 
 static int iceland_dpm_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }
 
@@ -157,6 +162,7 @@
 }
 
 const struct amd_ip_funcs iceland_dpm_ip_funcs = {
+	.name = "iceland_dpm",
 	.early_init = iceland_dpm_early_init,
 	.late_init = NULL,
 	.sw_init = iceland_dpm_sw_init,
@@ -168,7 +174,6 @@
 	.is_idle = NULL,
 	.wait_for_idle = NULL,
 	.soft_reset = NULL,
-	.print_status = NULL,
 	.set_clockgating_state = iceland_dpm_set_clockgating_state,
 	.set_powergating_state = iceland_dpm_set_powergating_state,
 };
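
iceland_dpm_sw_fini() now releases the SMU firmware image and clears adev->pm.fw so a later teardown or error path cannot release it twice. A small sketch of that acquire-in-init / release-and-NULL-in-fini pattern follows; plain calloc/free stand in for request_firmware()/release_firmware().

/*
 * Teardown pattern: free the resource in fini and clear the pointer so a
 * second fini is harmless.
 */
#include <stdlib.h>

struct fw_blob { void *data; };

struct pm_ctx { struct fw_blob *fw; };

static int pm_sw_init(struct pm_ctx *pm)
{
	pm->fw = calloc(1, sizeof(*pm->fw));	/* stand-in for request_firmware() */
	return pm->fw ? 0 : -1;
}

static void pm_sw_fini(struct pm_ctx *pm)
{
	free(pm->fw);		/* stand-in for release_firmware() */
	pm->fw = NULL;		/* guard against a second fini */
}

int main(void)
{
	struct pm_ctx pm;

	if (pm_sw_init(&pm))
		return 1;
	pm_sw_fini(&pm);
	pm_sw_fini(&pm);	/* harmless: free(NULL) is a no-op */
	return 0;
}
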
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 679e739..3b8906c 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,7 +103,6 @@
  */
 static int iceland_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 	u64 wptr_off;
@@ -157,7 +156,7 @@
 	/* enable interrupts */
 	iceland_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -351,35 +350,6 @@
 	return -ETIMEDOUT;
 }
 
-static void iceland_ih_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "ICELAND IH registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-	dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL));
-	dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL2));
-	dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
-		 RREG32(mmIH_CNTL));
-	dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
-		 RREG32(mmIH_RB_CNTL));
-	dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
-		 RREG32(mmIH_RB_BASE));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_LO));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_HI));
-	dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
-		 RREG32(mmIH_RB_RPTR));
-	dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR));
-}
-
 static int iceland_ih_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
@@ -391,8 +361,6 @@
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
-		iceland_ih_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -407,8 +375,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		iceland_ih_print_status((void *)adev);
 	}
 
 	return 0;
@@ -427,6 +393,7 @@
 }
 
 const struct amd_ip_funcs iceland_ih_ip_funcs = {
+	.name = "iceland_ih",
 	.early_init = iceland_ih_early_init,
 	.late_init = NULL,
 	.sw_init = iceland_ih_sw_init,
@@ -438,7 +405,6 @@
 	.is_idle = iceland_ih_is_idle,
 	.wait_for_idle = iceland_ih_wait_for_idle,
 	.soft_reset = iceland_ih_soft_reset,
-	.print_status = iceland_ih_print_status,
 	.set_clockgating_state = iceland_ih_set_clockgating_state,
 	.set_powergating_state = iceland_ih_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 654d767..a789a86 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -135,11 +135,6 @@
 #endif
 }
 
-static u32 sumo_get_sleep_divider_from_id(u32 id)
-{
-	return 1 << id;
-}
-
 static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
 						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
 						      ATOM_AVAILABLE_SCLK_LIST *table)
@@ -2176,8 +2171,7 @@
 	struct kv_power_info *pi = kv_get_pi(adev);
 	u32 i;
 	u32 temp;
-	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
-		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
 
 	if (sclk < min)
 		return 0;
@@ -2186,7 +2180,7 @@
 		return 0;
 
 	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
-		temp = sclk / sumo_get_sleep_divider_from_id(i);
+		temp = sclk >> i;
 		if (temp >= min)
 			break;
 	}
@@ -2258,7 +2252,7 @@
 	if (pi->caps_stable_p_state) {
 		stable_p_state_sclk = (max_limits->sclk * 75) / 100;
 
-		for (i = table->count - 1; i >= 0; i++) {
+		for (i = table->count - 1; i >= 0; i--) {
 			if (stable_p_state_sclk >= table->entries[i].clk) {
 				stable_p_state_sclk = table->entries[i].clk;
 				break;
@@ -3147,62 +3141,6 @@
 	return 0;
 }
 
-static void kv_dpm_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "KV/KB DPM registers\n");
-	dev_info(adev->dev, "  DIDT_SQ_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_SQ_CTRL0));
-	dev_info(adev->dev, "  DIDT_DB_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_DB_CTRL0));
-	dev_info(adev->dev, "  DIDT_TD_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_TD_CTRL0));
-	dev_info(adev->dev, "  DIDT_TCP_CTRL0=0x%08X\n",
-		 RREG32_DIDT(ixDIDT_TCP_CTRL0));
-	dev_info(adev->dev, "  LCAC_SX0_OVR_SEL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_SX0_OVR_SEL));
-	dev_info(adev->dev, "  LCAC_SX0_OVR_VAL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_SX0_OVR_VAL));
-	dev_info(adev->dev, "  LCAC_MC0_OVR_SEL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC0_OVR_SEL));
-	dev_info(adev->dev, "  LCAC_MC0_OVR_VAL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC0_OVR_VAL));
-	dev_info(adev->dev, "  LCAC_MC1_OVR_SEL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC1_OVR_SEL));
-	dev_info(adev->dev, "  LCAC_MC1_OVR_VAL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC1_OVR_VAL));
-	dev_info(adev->dev, "  LCAC_MC2_OVR_SEL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC2_OVR_SEL));
-	dev_info(adev->dev, "  LCAC_MC2_OVR_VAL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC2_OVR_VAL));
-	dev_info(adev->dev, "  LCAC_MC3_OVR_SEL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC3_OVR_SEL));
-	dev_info(adev->dev, "  LCAC_MC3_OVR_VAL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_MC3_OVR_VAL));
-	dev_info(adev->dev, "  LCAC_CPL_OVR_SEL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_CPL_OVR_SEL));
-	dev_info(adev->dev, "  LCAC_CPL_OVR_VAL=0x%08X\n",
-		 RREG32_SMC(ixLCAC_CPL_OVR_VAL));
-	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_0=0x%08X\n",
-		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
-	dev_info(adev->dev, "  GENERAL_PWRMGT=0x%08X\n",
-		 RREG32_SMC(ixGENERAL_PWRMGT));
-	dev_info(adev->dev, "  SCLK_PWRMGT_CNTL=0x%08X\n",
-		 RREG32_SMC(ixSCLK_PWRMGT_CNTL));
-	dev_info(adev->dev, "  SMC_MESSAGE_0=0x%08X\n",
-		 RREG32(mmSMC_MESSAGE_0));
-	dev_info(adev->dev, "  SMC_RESP_0=0x%08X\n",
-		 RREG32(mmSMC_RESP_0));
-	dev_info(adev->dev, "  SMC_MSG_ARG_0=0x%08X\n",
-		 RREG32(mmSMC_MSG_ARG_0));
-	dev_info(adev->dev, "  SMC_IND_INDEX_0=0x%08X\n",
-		 RREG32(mmSMC_IND_INDEX_0));
-	dev_info(adev->dev, "  SMC_IND_DATA_0=0x%08X\n",
-		 RREG32(mmSMC_IND_DATA_0));
-	dev_info(adev->dev, "  SMC_IND_ACCESS_CNTL=0x%08X\n",
-		 RREG32(mmSMC_IND_ACCESS_CNTL));
-}
 
 static int kv_dpm_soft_reset(void *handle)
 {
@@ -3300,6 +3238,7 @@
 }
 
 const struct amd_ip_funcs kv_dpm_ip_funcs = {
+	.name = "kv_dpm",
 	.early_init = kv_dpm_early_init,
 	.late_init = kv_dpm_late_init,
 	.sw_init = kv_dpm_sw_init,
@@ -3311,7 +3250,6 @@
 	.is_idle = kv_dpm_is_idle,
 	.wait_for_idle = kv_dpm_wait_for_idle,
 	.soft_reset = kv_dpm_soft_reset,
-	.print_status = kv_dpm_print_status,
 	.set_clockgating_state = kv_dpm_set_clockgating_state,
 	.set_powergating_state = kv_dpm_set_powergating_state,
 };
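
Two of the kv_dpm changes above share the same shape: a clock table is meant to be walked from the top index downwards, and one loop incremented instead of decrementing while another divided by a power of two instead of shifting. The sketch below shows the intended descending search with the shift in place of the division; the table limits and clock values are made up for illustration and do not mirror the full kv_get_sleep_divider_id_from_clock() logic.

/*
 * Pick the largest divider (deepest sleep) that still keeps the engine
 * clock at or above a minimum, walking dividers from deepest to shallowest.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DEEPSLEEP_DIVIDER_ID 5
#define MINIMUM_ENGINE_CLOCK     800

static unsigned pick_sleep_divider(uint32_t sclk, uint32_t min_sclk)
{
	uint32_t min = min_sclk > MINIMUM_ENGINE_CLOCK ? min_sclk : MINIMUM_ENGINE_CLOCK;
	unsigned i;

	if (sclk < min)
		return 0;

	for (i = MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		if ((sclk >> i) >= min)	/* sclk / (1 << i), as in the patch */
			break;
	}
	return i;
}

int main(void)
{
	printf("divider for 26000: %u\n", pick_sleep_divider(26000, 1000));
	printf("divider for  1600: %u\n", pick_sleep_divider(1600, 1000));
	return 0;
}
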
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6e0a86a..b556bd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -105,6 +105,15 @@
 	}
 }
 
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+	int i;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		release_firmware(adev->sdma.instance[i].fw);
+		adev->sdma.instance[i].fw = NULL;
+	}
+}
+
 /**
  * sdma_v2_4_init_microcode - load ucode images from disk
  *
@@ -242,9 +251,10 @@
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
-				   struct amdgpu_ib *ib)
+				   struct amdgpu_ib *ib,
+				   unsigned vm_id, bool ctx_switch)
 {
-	u32 vmid = ib->vm_id & 0xf;
+	u32 vmid = vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
@@ -460,6 +470,8 @@
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
 		/* set the wb address whether it's enabled or not */
 		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -488,7 +500,11 @@
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
 		ring->ready = true;
+	}
 
+	sdma_v2_4_enable(adev, true);
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
 			ring->ready = false;
@@ -579,8 +595,8 @@
 			return -EINVAL;
 	}
 
-	/* unhalt the MEs */
-	sdma_v2_4_enable(adev, true);
+	/* halt the engine before programming */
+	sdma_v2_4_enable(adev, false);
 
 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v2_4_gfx_resume(adev);
@@ -701,7 +717,7 @@
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err1;
 
@@ -990,7 +1006,7 @@
 		ring->ring_obj = NULL;
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
-		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
@@ -1011,6 +1027,7 @@
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+	sdma_v2_4_free_microcode(adev);
 	return 0;
 }
 
@@ -1080,55 +1097,6 @@
 	return -ETIMEDOUT;
 }
 
-static void sdma_v2_4_print_status(void *handle)
-{
-	int i, j;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "VI SDMA registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
-			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
-			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
-		mutex_lock(&adev->srbm_mutex);
-		for (j = 0; j < 16; j++) {
-			vi_srbm_select(adev, 0, 0, 0, j);
-			dev_info(adev->dev, "  VM %d:\n", j);
-			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
-				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
-			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
-				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
-		}
-		vi_srbm_select(adev, 0, 0, 0, 0);
-		mutex_unlock(&adev->srbm_mutex);
-	}
-}
-
 static int sdma_v2_4_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
@@ -1151,8 +1119,6 @@
 	}
 
 	if (srbm_soft_reset) {
-		sdma_v2_4_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1167,8 +1133,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		sdma_v2_4_print_status((void *)adev);
 	}
 
 	return 0;
@@ -1283,6 +1247,7 @@
 }
 
 const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+	.name = "sdma_v2_4",
 	.early_init = sdma_v2_4_early_init,
 	.late_init = NULL,
 	.sw_init = sdma_v2_4_sw_init,
@@ -1294,7 +1259,6 @@
 	.is_idle = sdma_v2_4_is_idle,
 	.wait_for_idle = sdma_v2_4_wait_for_idle,
 	.soft_reset = sdma_v2_4_soft_reset,
-	.print_status = sdma_v2_4_print_status,
 	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
 	.set_powergating_state = sdma_v2_4_set_powergating_state,
 };
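
The sdma_v2_4 resume path above is reordered: the engine is halted before the ring registers are programmed, re-enabled only after every instance has been programmed, and the ring tests run last. A minimal sketch of that ordering is below; the instance count and helper names are illustrative only, not the SDMA register sequence.

/*
 * Resume ordering: halt, program all instances, un-halt, then test.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_INSTANCES 2

struct ring { bool ready; };

static struct ring rings[NUM_INSTANCES];

static void engine_enable(bool enable)
{
	printf("engine %s\n", enable ? "enabled" : "halted");
}

static void program_ring(struct ring *r)
{
	r->ready = true;	/* registers written while the engine is halted */
}

static int test_ring(struct ring *r)
{
	return r->ready ? 0 : -1;
}

int main(void)
{
	int i;

	engine_enable(false);			/* halt before programming */
	for (i = 0; i < NUM_INSTANCES; i++)
		program_ring(&rings[i]);

	engine_enable(true);			/* un-halt once everything is set up */
	for (i = 0; i < NUM_INSTANCES; i++) {
		if (test_ring(&rings[i])) {
			rings[i].ready = false;
			return -1;
		}
	}
	return 0;
}
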
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 8c8ca98..532ea88 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -56,6 +56,11 @@
 MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
 MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 {
@@ -101,6 +106,34 @@
 	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
 };
 
+static const u32 golden_settings_polaris11_a11[] =
+{
+	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 golden_settings_polaris10_a11[] =
+{
+	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
 static const u32 cz_golden_settings_a11[] =
 {
 	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -172,6 +205,16 @@
 						 golden_settings_tonga_a11,
 						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
 		break;
+	case CHIP_POLARIS11:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_polaris11_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+		break;
+	case CHIP_POLARIS10:
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_polaris10_a11,
+						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+		break;
 	case CHIP_CARRIZO:
 		amdgpu_program_register_sequence(adev,
 						 cz_mgcg_cgcg_init,
@@ -193,6 +236,15 @@
 	}
 }
 
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+	int i;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		release_firmware(adev->sdma.instance[i].fw);
+		adev->sdma.instance[i].fw = NULL;
+	}
+}
+
 /**
  * sdma_v3_0_init_microcode - load ucode images from disk
  *
@@ -220,6 +272,12 @@
 	case CHIP_FIJI:
 		chip_name = "fiji";
 		break;
+	case CHIP_POLARIS11:
+		chip_name = "polaris11";
+		break;
+	case CHIP_POLARIS10:
+		chip_name = "polaris10";
+		break;
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
@@ -353,9 +411,10 @@
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-				   struct amdgpu_ib *ib)
+				   struct amdgpu_ib *ib,
+				   unsigned vm_id, bool ctx_switch)
 {
-	u32 vmid = ib->vm_id & 0xf;
+	u32 vmid = vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
@@ -452,6 +511,31 @@
 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
+unsigned init_cond_exec(struct amdgpu_ring *ring)
+{
+	unsigned ret;
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
+	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
+	amdgpu_ring_write(ring, 1);
+	ret = ring->wptr; /* this is the offset we need to patch later */
+	amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy here and patch it later */
+	return ret;
+}
+
+void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
+{
+	unsigned cur;
+	BUG_ON(ring->ring[offset] != 0x55aa55aa);
+
+	cur = ring->wptr - 1;
+	if (likely(cur > offset))
+		ring->ring[offset] = cur - offset;
+	else
+		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
+}
+
+
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -597,6 +681,8 @@
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
 		/* set the wb address whether it's enabled or not */
 		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -636,7 +722,15 @@
 		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
 		ring->ready = true;
+	}
 
+	/* unhalt the MEs */
+	sdma_v3_0_enable(adev, true);
+	/* enable sdma ring preemption */
+	sdma_v3_0_ctx_switch_enable(adev, true);
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
 			ring->ready = false;
@@ -729,10 +823,9 @@
 		}
 	}
 
-	/* unhalt the MEs */
-	sdma_v3_0_enable(adev, true);
-	/* enable sdma ring preemption */
-	sdma_v3_0_ctx_switch_enable(adev, true);
+	/* disable the sdma engine before programming it */
+	sdma_v3_0_ctx_switch_enable(adev, false);
+	sdma_v3_0_enable(adev, false);
 
 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v3_0_gfx_resume(adev);
@@ -853,7 +946,7 @@
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err1;
 
@@ -1151,7 +1244,7 @@
 			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
 
 		sprintf(ring->name, "sdma%d", i);
-		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+		r = amdgpu_ring_init(adev, ring, 1024,
 				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
@@ -1172,6 +1265,7 @@
 	for (i = 0; i < adev->sdma.num_instances; i++)
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+	sdma_v3_0_free_microcode(adev);
 	return 0;
 }
 
@@ -1242,57 +1336,6 @@
 	return -ETIMEDOUT;
 }
 
-static void sdma_v3_0_print_status(void *handle)
-{
-	int i, j;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "VI SDMA registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
-			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
-			 i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
-		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
-			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
-		mutex_lock(&adev->srbm_mutex);
-		for (j = 0; j < 16; j++) {
-			vi_srbm_select(adev, 0, 0, 0, j);
-			dev_info(adev->dev, "  VM %d:\n", j);
-			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
-				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
-			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
-				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
-		}
-		vi_srbm_select(adev, 0, 0, 0, 0);
-		mutex_unlock(&adev->srbm_mutex);
-	}
-}
-
 static int sdma_v3_0_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
@@ -1315,8 +1358,6 @@
 	}
 
 	if (srbm_soft_reset) {
-		sdma_v3_0_print_status((void *)adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1331,8 +1372,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		sdma_v3_0_print_status((void *)adev);
 	}
 
 	return 0;
@@ -1433,40 +1472,31 @@
 	return 0;
 }
 
-static void fiji_update_sdma_medium_grain_clock_gating(
+static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
 		struct amdgpu_device *adev,
 		bool enable)
 {
 	uint32_t temp, data;
+	int i;
 
-	if (enable) {
-		temp = data = RREG32(mmSDMA0_CLK_CTRL);
-		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
-				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
-		if (data != temp)
-			WREG32(mmSDMA0_CLK_CTRL, data);
-
-		temp = data = RREG32(mmSDMA1_CLK_CTRL);
-		data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
-
-		if (data != temp)
-			WREG32(mmSDMA1_CLK_CTRL, data);
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
+			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+			if (data != temp)
+				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
+		}
 	} else {
-		temp = data = RREG32(mmSDMA0_CLK_CTRL);
-		data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
+			data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
 				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
 				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
 				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
@@ -1475,54 +1505,35 @@
 				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
 				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
 
-		if (data != temp)
-			WREG32(mmSDMA0_CLK_CTRL, data);
-
-		temp = data = RREG32(mmSDMA1_CLK_CTRL);
-		data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
-				SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;
-
-		if (data != temp)
-			WREG32(mmSDMA1_CLK_CTRL, data);
+			if (data != temp)
+				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
+		}
 	}
 }
 
-static void fiji_update_sdma_medium_grain_light_sleep(
+static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
 		struct amdgpu_device *adev,
 		bool enable)
 {
 	uint32_t temp, data;
+	int i;
 
-	if (enable) {
-		temp = data = RREG32(mmSDMA0_POWER_CNTL);
-		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
+			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
 
-		if (temp != data)
-			WREG32(mmSDMA0_POWER_CNTL, data);
-
-		temp = data = RREG32(mmSDMA1_POWER_CNTL);
-		data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
-
-		if (temp != data)
-			WREG32(mmSDMA1_POWER_CNTL, data);
+			if (temp != data)
+				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
+		}
 	} else {
-		temp = data = RREG32(mmSDMA0_POWER_CNTL);
-		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
+			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
 
-		if (temp != data)
-			WREG32(mmSDMA0_POWER_CNTL, data);
-
-		temp = data = RREG32(mmSDMA1_POWER_CNTL);
-		data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
-
-		if (temp != data)
-			WREG32(mmSDMA1_POWER_CNTL, data);
+			if (temp != data)
+				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
+		}
 	}
 }
 
@@ -1533,9 +1544,11 @@
 
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
-		fiji_update_sdma_medium_grain_clock_gating(adev,
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_sdma_medium_grain_light_sleep(adev,
+		sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
 	default:
@@ -1551,6 +1564,7 @@
 }
 
 const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+	.name = "sdma_v3_0",
 	.early_init = sdma_v3_0_early_init,
 	.late_init = NULL,
 	.sw_init = sdma_v3_0_sw_init,
@@ -1562,7 +1576,6 @@
 	.is_idle = sdma_v3_0_is_idle,
 	.wait_for_idle = sdma_v3_0_wait_for_idle,
 	.soft_reset = sdma_v3_0_soft_reset,
-	.print_status = sdma_v3_0_print_status,
 	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
 	.set_powergating_state = sdma_v3_0_set_powergating_state,
 };
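
The init_cond_exec()/patch_cond_exec() helpers added above emit a conditional-execute packet with a dummy dword count and fix the count up once the covered commands have been written, wrapping the distance calculation around the end of the ring. A minimal standalone sketch of that arithmetic follows; the toy ring size, packet values and helper names are illustrative, not the driver's API.

/*
 * A placeholder dword is written when the packet is emitted; once the
 * following commands are known, the placeholder is overwritten with the
 * dword distance to the current write pointer, wrapping if necessary.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 16u                    /* toy ring: 16 dwords */

static uint32_t ring[RING_DWORDS];
static unsigned wptr;

static void ring_write(uint32_t v)
{
	ring[wptr] = v;
	wptr = (wptr + 1) % RING_DWORDS;
}

static unsigned emit_cond_exec(void)
{
	unsigned patch_offset;

	ring_write(0x1);                   /* header: COND_EXE (illustrative) */
	ring_write(0x0);                   /* condition address, low */
	ring_write(0x0);                   /* condition address, high */
	ring_write(0x1);                   /* reference value */
	patch_offset = wptr;               /* slot that will hold the dummy */
	ring_write(0x55aa55aa);            /* dummy count, patched later */
	return patch_offset;
}

static void patch_cond_exec(unsigned offset)
{
	assert(ring[offset] == 0x55aa55aa);
	if (wptr > offset)
		ring[offset] = wptr - offset;               /* no wrap */
	else
		ring[offset] = RING_DWORDS - offset + wptr;  /* wrapped around */
}

int main(void)
{
	unsigned off = emit_cond_exec();

	ring_write(0xdead);                /* commands covered by the condition */
	ring_write(0xbeef);
	patch_cond_exec(off);
	printf("patched count: %u dwords\n", (unsigned)ring[off]);
	return 0;
}
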
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
index c24a81e..880152c 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
@@ -44,6 +44,7 @@
 #define UCODE_ID_IH_REG_RESTORE   11
 #define UCODE_ID_VBIOS            12
 #define UCODE_ID_MISC_METADATA    13
+#define UCODE_ID_SMU_SK		      14
 #define UCODE_ID_RLC_SCRATCH      32
 #define UCODE_ID_RLC_SRM_ARAM     33
 #define UCODE_ID_RLC_SRM_DRAM     34
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index 0497784..f06f6f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -71,6 +71,11 @@
 
 static int tonga_dpm_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->pm.fw);
+	adev->pm.fw = NULL;
+
 	return 0;
 }
 
@@ -143,6 +148,7 @@
 }
 
 const struct amd_ip_funcs tonga_dpm_ip_funcs = {
+	.name = "tonga_dpm",
 	.early_init = tonga_dpm_early_init,
 	.late_init = NULL,
 	.sw_init = tonga_dpm_sw_init,
@@ -154,7 +160,6 @@
 	.is_idle = NULL,
 	.wait_for_idle = NULL,
 	.soft_reset = NULL,
-	.print_status = NULL,
 	.set_clockgating_state = tonga_dpm_set_clockgating_state,
 	.set_powergating_state = tonga_dpm_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 0f14199..c920558 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,7 +99,6 @@
  */
 static int tonga_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
 	u64 wptr_off;
@@ -165,7 +164,7 @@
 	/* enable interrupts */
 	tonga_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -374,35 +373,6 @@
 	return -ETIMEDOUT;
 }
 
-static void tonga_ih_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "TONGA IH registers\n");
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-	dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL));
-	dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
-		 RREG32(mmINTERRUPT_CNTL2));
-	dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
-		 RREG32(mmIH_CNTL));
-	dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
-		 RREG32(mmIH_RB_CNTL));
-	dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
-		 RREG32(mmIH_RB_BASE));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_LO));
-	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR_ADDR_HI));
-	dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
-		 RREG32(mmIH_RB_RPTR));
-	dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
-		 RREG32(mmIH_RB_WPTR));
-}
-
 static int tonga_ih_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
@@ -414,8 +384,6 @@
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
-		tonga_ih_print_status(adev);
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -430,8 +398,6 @@
 
 		/* Wait a little for things to settle down */
 		udelay(50);
-
-		tonga_ih_print_status(adev);
 	}
 
 	return 0;
@@ -450,6 +416,7 @@
 }
 
 const struct amd_ip_funcs tonga_ih_ip_funcs = {
+	.name = "tonga_ih",
 	.early_init = tonga_ih_early_init,
 	.late_init = NULL,
 	.sw_init = tonga_ih_sw_init,
@@ -461,7 +428,6 @@
 	.is_idle = tonga_ih_is_idle,
 	.wait_for_idle = tonga_ih_wait_for_idle,
 	.soft_reset = tonga_ih_soft_reset,
-	.print_status = tonga_ih_print_status,
 	.set_clockgating_state = tonga_ih_set_clockgating_state,
 	.set_powergating_state = tonga_ih_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index cb46375..f075514 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -114,7 +114,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -489,7 +489,8 @@
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+				  struct amdgpu_ib *ib,
+				  unsigned vm_id, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
 	amdgpu_ring_write(ring, ib->gpu_addr);
@@ -559,12 +560,13 @@
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	addr += size;
-	size = AMDGPU_UVD_STACK_SIZE >> 3;
+	size = AMDGPU_UVD_HEAP_SIZE >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	addr += size;
-	size = AMDGPU_UVD_HEAP_SIZE >> 3;
+	size = (AMDGPU_UVD_STACK_SIZE +
+	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
@@ -679,117 +681,6 @@
 	return uvd_v4_2_start(adev);
 }
 
-static void uvd_v4_2_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	dev_info(adev->dev, "UVD 4.2 registers\n");
-	dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
-		 RREG32(mmUVD_SEMA_ADDR_LOW));
-	dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
-		 RREG32(mmUVD_SEMA_ADDR_HIGH));
-	dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
-		 RREG32(mmUVD_SEMA_CMD));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_CMD));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_DATA0));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_DATA1));
-	dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
-		 RREG32(mmUVD_ENGINE_CNTL));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_CNTL));
-	dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
-		 RREG32(mmUVD_LMI_EXT40_ADDR));
-	dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
-		 RREG32(mmUVD_CTX_INDEX));
-	dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
-		 RREG32(mmUVD_CTX_DATA));
-	dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
-		 RREG32(mmUVD_CGC_GATE));
-	dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
-		 RREG32(mmUVD_CGC_CTRL));
-	dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
-		 RREG32(mmUVD_LMI_CTRL2));
-	dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
-		 RREG32(mmUVD_MASTINT_EN));
-	dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
-		 RREG32(mmUVD_LMI_ADDR_EXT));
-	dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
-		 RREG32(mmUVD_LMI_CTRL));
-	dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmUVD_LMI_SWAP_CNTL));
-	dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmUVD_MP_SWAP_CNTL));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXA0));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXA1));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXB0));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXB1));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUX));
-	dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_ALU));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE0));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE1));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE2));
-	dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CNTL));
-	dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
-		 RREG32(mmUVD_SOFT_RESET));
-	dev_info(adev->dev, "  UVD_RBC_IB_BASE=0x%08X\n",
-		 RREG32(mmUVD_RBC_IB_BASE));
-	dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
-		 RREG32(mmUVD_RBC_IB_SIZE));
-	dev_info(adev->dev, "  UVD_RBC_RB_BASE=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_BASE));
-	dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_RPTR));
-	dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_WPTR));
-	dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
-	dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_CNTL));
-	dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
-		 RREG32(mmUVD_STATUS));
-	dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
-		 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
-	dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
-		 RREG32(mmUVD_CONTEXT_ID));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-
-}
-
 static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -849,6 +740,7 @@
 }
 
 const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+	.name = "uvd_v4_2",
 	.early_init = uvd_v4_2_early_init,
 	.late_init = NULL,
 	.sw_init = uvd_v4_2_sw_init,
@@ -860,7 +752,6 @@
 	.is_idle = uvd_v4_2_is_idle,
 	.wait_for_idle = uvd_v4_2_wait_for_idle,
 	.soft_reset = uvd_v4_2_soft_reset,
-	.print_status = uvd_v4_2_print_status,
 	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
 	.set_powergating_state = uvd_v4_2_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 16476d8..e0a76a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -31,6 +31,7 @@
 #include "uvd/uvd_5_0_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
+#include "vi.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -110,7 +111,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -271,12 +272,13 @@
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	offset += size;
-	size = AMDGPU_UVD_STACK_SIZE;
+	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	offset += size;
-	size = AMDGPU_UVD_HEAP_SIZE;
+	size = AMDGPU_UVD_STACK_SIZE +
+	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
@@ -537,7 +539,8 @@
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+				  struct amdgpu_ib *ib,
+				  unsigned vm_id, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -622,120 +625,6 @@
 	return uvd_v5_0_start(adev);
 }
 
-static void uvd_v5_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	dev_info(adev->dev, "UVD 5.0 registers\n");
-	dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
-		 RREG32(mmUVD_SEMA_ADDR_LOW));
-	dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
-		 RREG32(mmUVD_SEMA_ADDR_HIGH));
-	dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
-		 RREG32(mmUVD_SEMA_CMD));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_CMD));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_DATA0));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_DATA1));
-	dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
-		 RREG32(mmUVD_ENGINE_CNTL));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_CNTL));
-	dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
-		 RREG32(mmUVD_LMI_EXT40_ADDR));
-	dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
-		 RREG32(mmUVD_CTX_INDEX));
-	dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
-		 RREG32(mmUVD_CTX_DATA));
-	dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
-		 RREG32(mmUVD_CGC_GATE));
-	dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
-		 RREG32(mmUVD_CGC_CTRL));
-	dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
-		 RREG32(mmUVD_LMI_CTRL2));
-	dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
-		 RREG32(mmUVD_MASTINT_EN));
-	dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
-		 RREG32(mmUVD_LMI_ADDR_EXT));
-	dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
-		 RREG32(mmUVD_LMI_CTRL));
-	dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmUVD_LMI_SWAP_CNTL));
-	dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmUVD_MP_SWAP_CNTL));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXA0));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXA1));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXB0));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXB1));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUX));
-	dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_ALU));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE0));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE1));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE2));
-	dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CNTL));
-	dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
-		 RREG32(mmUVD_SOFT_RESET));
-	dev_info(adev->dev, "  UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n",
-		 RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW));
-	dev_info(adev->dev, "  UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n",
-		 RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH));
-	dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
-		 RREG32(mmUVD_RBC_IB_SIZE));
-	dev_info(adev->dev, "  UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n",
-		 RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW));
-	dev_info(adev->dev, "  UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n",
-		 RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH));
-	dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_RPTR));
-	dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_WPTR));
-	dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
-	dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_CNTL));
-	dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
-		 RREG32(mmUVD_STATUS));
-	dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
-		 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
-	dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
-		 RREG32(mmUVD_CONTEXT_ID));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-}
-
 static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -754,14 +643,128 @@
 	return 0;
 }
 
+static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data, data1, data2, suvd_flags;
+
+	data = RREG32(mmUVD_CGC_CTRL);
+	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
+	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+
+	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+		     UVD_SUVD_CGC_GATE__SIT_MASK |
+		     UVD_SUVD_CGC_GATE__SMP_MASK |
+		     UVD_SUVD_CGC_GATE__SCM_MASK |
+		     UVD_SUVD_CGC_GATE__SDB_MASK;
+
+	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
+		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
+		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
+
+	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+			UVD_CGC_CTRL__SYS_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_MODE_MASK |
+			UVD_CGC_CTRL__MPEG2_MODE_MASK |
+			UVD_CGC_CTRL__REGS_MODE_MASK |
+			UVD_CGC_CTRL__RBC_MODE_MASK |
+			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+			UVD_CGC_CTRL__IDCT_MODE_MASK |
+			UVD_CGC_CTRL__MPRD_MODE_MASK |
+			UVD_CGC_CTRL__MPC_MODE_MASK |
+			UVD_CGC_CTRL__LBSI_MODE_MASK |
+			UVD_CGC_CTRL__LRBBM_MODE_MASK |
+			UVD_CGC_CTRL__WCB_MODE_MASK |
+			UVD_CGC_CTRL__VCPU_MODE_MASK |
+			UVD_CGC_CTRL__JPEG_MODE_MASK |
+			UVD_CGC_CTRL__SCPU_MODE_MASK);
+	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
+	data1 |= suvd_flags;
+
+	WREG32(mmUVD_CGC_CTRL, data);
+	WREG32(mmUVD_CGC_GATE, 0);
+	WREG32(mmUVD_SUVD_CGC_GATE, data1);
+	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
+}
+
+#if 0
+static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data, data1, cgc_flags, suvd_flags;
+
+	data = RREG32(mmUVD_CGC_GATE);
+	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+
+	cgc_flags = UVD_CGC_GATE__SYS_MASK |
+				UVD_CGC_GATE__UDEC_MASK |
+				UVD_CGC_GATE__MPEG2_MASK |
+				UVD_CGC_GATE__RBC_MASK |
+				UVD_CGC_GATE__LMI_MC_MASK |
+				UVD_CGC_GATE__IDCT_MASK |
+				UVD_CGC_GATE__MPRD_MASK |
+				UVD_CGC_GATE__MPC_MASK |
+				UVD_CGC_GATE__LBSI_MASK |
+				UVD_CGC_GATE__LRBBM_MASK |
+				UVD_CGC_GATE__UDEC_RE_MASK |
+				UVD_CGC_GATE__UDEC_CM_MASK |
+				UVD_CGC_GATE__UDEC_IT_MASK |
+				UVD_CGC_GATE__UDEC_DB_MASK |
+				UVD_CGC_GATE__UDEC_MP_MASK |
+				UVD_CGC_GATE__WCB_MASK |
+				UVD_CGC_GATE__VCPU_MASK |
+				UVD_CGC_GATE__SCPU_MASK;
+
+	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+				UVD_SUVD_CGC_GATE__SIT_MASK |
+				UVD_SUVD_CGC_GATE__SMP_MASK |
+				UVD_SUVD_CGC_GATE__SCM_MASK |
+				UVD_SUVD_CGC_GATE__SDB_MASK;
+
+	data |= cgc_flags;
+	data1 |= suvd_flags;
+
+	WREG32(mmUVD_CGC_GATE, data);
+	WREG32(mmUVD_SUVD_CGC_GATE, data1);
+}
+#endif
+
 static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	static int curstate = -1;
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
+	if (curstate == state)
+		return 0;
+
+	curstate = state;
+	if (enable) {
+		/* disable HW gating and enable Sw gating */
+		uvd_v5_0_set_sw_clock_gating(adev);
+	} else {
+		/* wait for STATUS to clear */
+		if (uvd_v5_0_wait_for_idle(handle))
+			return -EBUSY;
+
+		/* enable HW gates because UVD is idle */
+/*		uvd_v5_0_set_hw_clock_gating(adev); */
+	}
+
 	return 0;
 }
 
@@ -789,6 +792,7 @@
 }
 
 const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+	.name = "uvd_v5_0",
 	.early_init = uvd_v5_0_early_init,
 	.late_init = NULL,
 	.sw_init = uvd_v5_0_sw_init,
@@ -800,7 +804,6 @@
 	.is_idle = uvd_v5_0_is_idle,
 	.wait_for_idle = uvd_v5_0_wait_for_idle,
 	.soft_reset = uvd_v5_0_soft_reset,
-	.print_status = uvd_v5_0_print_status,
 	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
 	.set_powergating_state = uvd_v5_0_set_powergating_state,
 };
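
The reworked uvd_v5_0_set_clockgating_state() above caches the last requested state in a static variable and returns early when the state has not changed, so the register reprogramming only runs on real transitions. A minimal standalone sketch of that guard pattern follows, with illustrative names and types rather than the driver's.

#include <stdio.h>

enum cg_state { CG_STATE_UNGATE = 0, CG_STATE_GATE = 1 };

static void program_sw_clock_gating(void)
{
	puts("reprogramming clock-gating registers");
}

static int set_clockgating_state(enum cg_state state)
{
	static int curstate = -1;          /* -1: nothing programmed yet */

	if (curstate == (int)state)
		return 0;                  /* state unchanged, nothing to do */

	curstate = (int)state;
	if (state == CG_STATE_GATE)
		program_sw_clock_gating();
	else
		puts("leaving gating off until the block is idle");
	return 0;
}

int main(void)
{
	set_clockgating_state(CG_STATE_GATE);   /* programs registers */
	set_clockgating_state(CG_STATE_GATE);   /* skipped, unchanged */
	set_clockgating_state(CG_STATE_UNGATE); /* handled again */
	return 0;
}
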
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d493791..c9929d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -31,11 +31,15 @@
 #include "uvd/uvd_6_0_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v6_0_start(struct amdgpu_device *adev);
 static void uvd_v6_0_stop(struct amdgpu_device *adev);
+static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
 
 /**
  * uvd_v6_0_ring_get_rptr - get read pointer
@@ -110,7 +114,7 @@
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
 			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
 
 	return r;
@@ -270,20 +274,24 @@
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	offset += size;
-	size = AMDGPU_UVD_STACK_SIZE;
+	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	offset += size;
-	size = AMDGPU_UVD_HEAP_SIZE;
+	size = AMDGPU_UVD_STACK_SIZE +
+	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
 	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
+#if 0
 static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
 		bool enable)
 {
@@ -360,157 +368,7 @@
 	WREG32(mmUVD_CGC_GATE, data);
 	WREG32(mmUVD_SUVD_CGC_GATE, data1);
 }
-
-static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
-		bool enable)
-{
-	u32 data, data1;
-
-	data = RREG32(mmUVD_CGC_GATE);
-	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
-	if (enable) {
-		data |= UVD_CGC_GATE__SYS_MASK |
-				UVD_CGC_GATE__UDEC_MASK |
-				UVD_CGC_GATE__MPEG2_MASK |
-				UVD_CGC_GATE__RBC_MASK |
-				UVD_CGC_GATE__LMI_MC_MASK |
-				UVD_CGC_GATE__IDCT_MASK |
-				UVD_CGC_GATE__MPRD_MASK |
-				UVD_CGC_GATE__MPC_MASK |
-				UVD_CGC_GATE__LBSI_MASK |
-				UVD_CGC_GATE__LRBBM_MASK |
-				UVD_CGC_GATE__UDEC_RE_MASK |
-				UVD_CGC_GATE__UDEC_CM_MASK |
-				UVD_CGC_GATE__UDEC_IT_MASK |
-				UVD_CGC_GATE__UDEC_DB_MASK |
-				UVD_CGC_GATE__UDEC_MP_MASK |
-				UVD_CGC_GATE__WCB_MASK |
-				UVD_CGC_GATE__VCPU_MASK |
-				UVD_CGC_GATE__SCPU_MASK;
-		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
-				UVD_SUVD_CGC_GATE__SIT_MASK |
-				UVD_SUVD_CGC_GATE__SMP_MASK |
-				UVD_SUVD_CGC_GATE__SCM_MASK |
-				UVD_SUVD_CGC_GATE__SDB_MASK;
-	} else {
-		data &= ~(UVD_CGC_GATE__SYS_MASK |
-				UVD_CGC_GATE__UDEC_MASK |
-				UVD_CGC_GATE__MPEG2_MASK |
-				UVD_CGC_GATE__RBC_MASK |
-				UVD_CGC_GATE__LMI_MC_MASK |
-				UVD_CGC_GATE__LMI_UMC_MASK |
-				UVD_CGC_GATE__IDCT_MASK |
-				UVD_CGC_GATE__MPRD_MASK |
-				UVD_CGC_GATE__MPC_MASK |
-				UVD_CGC_GATE__LBSI_MASK |
-				UVD_CGC_GATE__LRBBM_MASK |
-				UVD_CGC_GATE__UDEC_RE_MASK |
-				UVD_CGC_GATE__UDEC_CM_MASK |
-				UVD_CGC_GATE__UDEC_IT_MASK |
-				UVD_CGC_GATE__UDEC_DB_MASK |
-				UVD_CGC_GATE__UDEC_MP_MASK |
-				UVD_CGC_GATE__WCB_MASK |
-				UVD_CGC_GATE__VCPU_MASK |
-				UVD_CGC_GATE__SCPU_MASK);
-		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
-				UVD_SUVD_CGC_GATE__SIT_MASK |
-				UVD_SUVD_CGC_GATE__SMP_MASK |
-				UVD_SUVD_CGC_GATE__SCM_MASK |
-				UVD_SUVD_CGC_GATE__SDB_MASK);
-	}
-	WREG32(mmUVD_CGC_GATE, data);
-	WREG32(mmUVD_SUVD_CGC_GATE, data1);
-}
-
-static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
-		bool swmode)
-{
-	u32 data, data1 = 0, data2;
-
-	/* Always un-gate UVD REGS bit */
-	data = RREG32(mmUVD_CGC_GATE);
-	data &= ~(UVD_CGC_GATE__REGS_MASK);
-	WREG32(mmUVD_CGC_GATE, data);
-
-	data = RREG32(mmUVD_CGC_CTRL);
-	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
-			UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
-	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
-			1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
-			4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);
-
-	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
-	if (swmode) {
-		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
-				UVD_CGC_CTRL__SYS_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_MODE_MASK |
-				UVD_CGC_CTRL__MPEG2_MODE_MASK |
-				UVD_CGC_CTRL__REGS_MODE_MASK |
-				UVD_CGC_CTRL__RBC_MODE_MASK |
-				UVD_CGC_CTRL__LMI_MC_MODE_MASK |
-				UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
-				UVD_CGC_CTRL__IDCT_MODE_MASK |
-				UVD_CGC_CTRL__MPRD_MODE_MASK |
-				UVD_CGC_CTRL__MPC_MODE_MASK |
-				UVD_CGC_CTRL__LBSI_MODE_MASK |
-				UVD_CGC_CTRL__LRBBM_MODE_MASK |
-				UVD_CGC_CTRL__WCB_MODE_MASK |
-				UVD_CGC_CTRL__VCPU_MODE_MASK |
-				UVD_CGC_CTRL__JPEG_MODE_MASK |
-				UVD_CGC_CTRL__SCPU_MODE_MASK);
-		data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
-				UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
-		data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
-		data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
-		data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
-	} else {
-		data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
-				UVD_CGC_CTRL__SYS_MODE_MASK |
-				UVD_CGC_CTRL__UDEC_MODE_MASK |
-				UVD_CGC_CTRL__MPEG2_MODE_MASK |
-				UVD_CGC_CTRL__REGS_MODE_MASK |
-				UVD_CGC_CTRL__RBC_MODE_MASK |
-				UVD_CGC_CTRL__LMI_MC_MODE_MASK |
-				UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
-				UVD_CGC_CTRL__IDCT_MODE_MASK |
-				UVD_CGC_CTRL__MPRD_MODE_MASK |
-				UVD_CGC_CTRL__MPC_MODE_MASK |
-				UVD_CGC_CTRL__LBSI_MODE_MASK |
-				UVD_CGC_CTRL__LRBBM_MODE_MASK |
-				UVD_CGC_CTRL__WCB_MODE_MASK |
-				UVD_CGC_CTRL__VCPU_MODE_MASK |
-				UVD_CGC_CTRL__SCPU_MODE_MASK;
-		data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
-				UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
-	}
-	WREG32(mmUVD_CGC_CTRL, data);
-	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
-
-	data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
-	data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
-			REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
-			REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
-	data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
-			REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
-			REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
-	data |= data1;
-	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
-}
+#endif
 
 /**
  * uvd_v6_0_start - start UVD block
@@ -538,11 +396,7 @@
 
 	/* Set dynamic clock gating in S/W control mode */
 	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
-		if (adev->flags & AMD_IS_APU)
-			cz_set_uvd_clock_gating_branches(adev, false);
-		else
-			tonga_set_uvd_clock_gating_branches(adev, false);
-		uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
+		uvd_v6_0_set_sw_clock_gating(adev);
 	} else {
 		/* disable clock gating */
 		uint32_t data = RREG32(mmUVD_CGC_CTRL);
@@ -777,7 +631,8 @@
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
-				  struct amdgpu_ib *ib)
+				  struct amdgpu_ib *ib,
+				  unsigned vm_id, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -854,112 +709,6 @@
 	return uvd_v6_0_start(adev);
 }
 
-static void uvd_v6_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	dev_info(adev->dev, "UVD 6.0 registers\n");
-	dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
-		 RREG32(mmUVD_SEMA_ADDR_LOW));
-	dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
-		 RREG32(mmUVD_SEMA_ADDR_HIGH));
-	dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
-		 RREG32(mmUVD_SEMA_CMD));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_CMD));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_DATA0));
-	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
-		 RREG32(mmUVD_GPCOM_VCPU_DATA1));
-	dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
-		 RREG32(mmUVD_ENGINE_CNTL));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_CNTL));
-	dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
-		 RREG32(mmUVD_LMI_EXT40_ADDR));
-	dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
-		 RREG32(mmUVD_CTX_INDEX));
-	dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
-		 RREG32(mmUVD_CTX_DATA));
-	dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
-		 RREG32(mmUVD_CGC_GATE));
-	dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
-		 RREG32(mmUVD_CGC_CTRL));
-	dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
-		 RREG32(mmUVD_LMI_CTRL2));
-	dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
-		 RREG32(mmUVD_MASTINT_EN));
-	dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
-		 RREG32(mmUVD_LMI_ADDR_EXT));
-	dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
-		 RREG32(mmUVD_LMI_CTRL));
-	dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmUVD_LMI_SWAP_CNTL));
-	dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmUVD_MP_SWAP_CNTL));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXA0));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXA1));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXB0));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUXB1));
-	dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_MUX));
-	dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
-		 RREG32(mmUVD_MPC_SET_ALU));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE0));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE1));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
-	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CACHE_SIZE2));
-	dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
-		 RREG32(mmUVD_VCPU_CNTL));
-	dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
-		 RREG32(mmUVD_SOFT_RESET));
-	dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
-		 RREG32(mmUVD_RBC_IB_SIZE));
-	dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_RPTR));
-	dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_WPTR));
-	dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
-	dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
-		 RREG32(mmUVD_RBC_RB_CNTL));
-	dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
-		 RREG32(mmUVD_STATUS));
-	dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
-		 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
-	dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
-		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
-	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
-		 RREG32(mmUVD_CONTEXT_ID));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-}
-
 static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -978,25 +727,146 @@
 	return 0;
 }
 
+static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data, data1, data2, suvd_flags;
+
+	data = RREG32(mmUVD_CGC_CTRL);
+	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
+	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+
+	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+		     UVD_SUVD_CGC_GATE__SIT_MASK |
+		     UVD_SUVD_CGC_GATE__SMP_MASK |
+		     UVD_SUVD_CGC_GATE__SCM_MASK |
+		     UVD_SUVD_CGC_GATE__SDB_MASK;
+
+	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
+		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
+		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
+
+	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+			UVD_CGC_CTRL__SYS_MODE_MASK |
+			UVD_CGC_CTRL__UDEC_MODE_MASK |
+			UVD_CGC_CTRL__MPEG2_MODE_MASK |
+			UVD_CGC_CTRL__REGS_MODE_MASK |
+			UVD_CGC_CTRL__RBC_MODE_MASK |
+			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+			UVD_CGC_CTRL__IDCT_MODE_MASK |
+			UVD_CGC_CTRL__MPRD_MODE_MASK |
+			UVD_CGC_CTRL__MPC_MODE_MASK |
+			UVD_CGC_CTRL__LBSI_MODE_MASK |
+			UVD_CGC_CTRL__LRBBM_MODE_MASK |
+			UVD_CGC_CTRL__WCB_MODE_MASK |
+			UVD_CGC_CTRL__VCPU_MODE_MASK |
+			UVD_CGC_CTRL__JPEG_MODE_MASK |
+			UVD_CGC_CTRL__SCPU_MODE_MASK |
+			UVD_CGC_CTRL__JPEG2_MODE_MASK);
+	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
+			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
+	data1 |= suvd_flags;
+
+	WREG32(mmUVD_CGC_CTRL, data);
+	WREG32(mmUVD_CGC_GATE, 0);
+	WREG32(mmUVD_SUVD_CGC_GATE, data1);
+	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
+}
+
+#if 0
+static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
+{
+	uint32_t data, data1, cgc_flags, suvd_flags;
+
+	data = RREG32(mmUVD_CGC_GATE);
+	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+
+	cgc_flags = UVD_CGC_GATE__SYS_MASK |
+		UVD_CGC_GATE__UDEC_MASK |
+		UVD_CGC_GATE__MPEG2_MASK |
+		UVD_CGC_GATE__RBC_MASK |
+		UVD_CGC_GATE__LMI_MC_MASK |
+		UVD_CGC_GATE__IDCT_MASK |
+		UVD_CGC_GATE__MPRD_MASK |
+		UVD_CGC_GATE__MPC_MASK |
+		UVD_CGC_GATE__LBSI_MASK |
+		UVD_CGC_GATE__LRBBM_MASK |
+		UVD_CGC_GATE__UDEC_RE_MASK |
+		UVD_CGC_GATE__UDEC_CM_MASK |
+		UVD_CGC_GATE__UDEC_IT_MASK |
+		UVD_CGC_GATE__UDEC_DB_MASK |
+		UVD_CGC_GATE__UDEC_MP_MASK |
+		UVD_CGC_GATE__WCB_MASK |
+		UVD_CGC_GATE__VCPU_MASK |
+		UVD_CGC_GATE__SCPU_MASK |
+		UVD_CGC_GATE__JPEG_MASK |
+		UVD_CGC_GATE__JPEG2_MASK;
+
+	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+				UVD_SUVD_CGC_GATE__SIT_MASK |
+				UVD_SUVD_CGC_GATE__SMP_MASK |
+				UVD_SUVD_CGC_GATE__SCM_MASK |
+				UVD_SUVD_CGC_GATE__SDB_MASK;
+
+	data |= cgc_flags;
+	data1 |= suvd_flags;
+
+	WREG32(mmUVD_CGC_GATE, data);
+	WREG32(mmUVD_SUVD_CGC_GATE, data1);
+}
+#endif
+
+static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+	else
+		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int uvd_v6_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	static int curstate = -1;
+
+	if (adev->asic_type == CHIP_FIJI)
+		uvd_v6_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
+	if (curstate == state)
+		return 0;
+
+	curstate = state;
 	if (enable) {
-		if (adev->flags & AMD_IS_APU)
-			cz_set_uvd_clock_gating_branches(adev, enable);
-		else
-			tonga_set_uvd_clock_gating_branches(adev, enable);
-		uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
+		/* disable HW gating and enable Sw gating */
+		uvd_v6_0_set_sw_clock_gating(adev);
 	} else {
-		uint32_t data = RREG32(mmUVD_CGC_CTRL);
-		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-		WREG32(mmUVD_CGC_CTRL, data);
+		/* wait for STATUS to clear */
+		if (uvd_v6_0_wait_for_idle(handle))
+			return -EBUSY;
+
+		/* enable HW gates because UVD is idle */
+/*		uvd_v6_0_set_hw_clock_gating(adev); */
 	}
 
 	return 0;
@@ -1026,6 +896,7 @@
 }
 
 const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+	.name = "uvd_v6_0",
 	.early_init = uvd_v6_0_early_init,
 	.late_init = NULL,
 	.sw_init = uvd_v6_0_sw_init,
@@ -1037,7 +908,6 @@
 	.is_idle = uvd_v6_0_is_idle,
 	.wait_for_idle = uvd_v6_0_wait_for_idle,
 	.soft_reset = uvd_v6_0_soft_reset,
-	.print_status = uvd_v6_0_print_status,
 	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
 	.set_powergating_state = uvd_v6_0_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c7e885b..45d92ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -44,7 +44,7 @@
 static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-
+static int vce_v2_0_wait_for_idle(void *handle);
 /**
  * vce_v2_0_ring_get_rptr - get read pointer
  *
@@ -201,14 +201,14 @@
 
 	ring = &adev->vce.ring[0];
 	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
 	if (r)
 		return r;
 
 	ring = &adev->vce.ring[1];
 	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
 	if (r)
 		return r;
@@ -240,7 +240,8 @@
 
 	r = vce_v2_0_start(adev);
 	if (r)
-		return r;
+/* this error means the vcpu is not running, so just skip the ring test rather than stopping driver init */
+		return 0;
 
 	ring = &adev->vce.ring[0];
 	ring->ready = true;
@@ -318,7 +319,7 @@
 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
 
 		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
-    } else {
+	} else {
 		tmp = RREG32(mmVCE_CLOCK_GATING_B);
 		tmp |= 0xe7;
 		tmp &= ~0xe70000;
@@ -339,6 +340,21 @@
 {
 	u32 orig, tmp;
 
+	if (gated) {
+		if (vce_v2_0_wait_for_idle(adev)) {
+			DRM_INFO("VCE is busy, can't set clock gating");
+			return;
+		}
+		WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(100);
+		WREG32(mmVCE_STATUS, 0);
+	} else {
+		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+		mdelay(100);
+	}
+
 	tmp = RREG32(mmVCE_CLOCK_GATING_B);
 	tmp &= ~0x00060006;
 	if (gated) {
@@ -362,6 +378,7 @@
 
 	if (gated)
 		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
 }
 
 static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -478,75 +495,6 @@
 	return vce_v2_0_start(adev);
 }
 
-static void vce_v2_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "VCE 2.0 registers\n");
-	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
-		 RREG32(mmVCE_STATUS));
-	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CNTL));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
-	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
-		 RREG32(mmVCE_SOFT_RESET));
-	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_LO2));
-	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_HI2));
-	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
-		 RREG32(mmVCE_RB_SIZE2));
-	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
-		 RREG32(mmVCE_RB_RPTR2));
-	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
-		 RREG32(mmVCE_RB_WPTR2));
-	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_LO));
-	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_HI));
-	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
-		 RREG32(mmVCE_RB_SIZE));
-	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
-		 RREG32(mmVCE_RB_RPTR));
-	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
-		 RREG32(mmVCE_RB_WPTR));
-	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
-		 RREG32(mmVCE_CLOCK_GATING_A));
-	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
-		 RREG32(mmVCE_CLOCK_GATING_B));
-	dev_info(adev->dev, "  VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
-		 RREG32(mmVCE_CGTT_CLK_OVERRIDE));
-	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
-		 RREG32(mmVCE_UENC_CLOCK_GATING));
-	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
-		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
-	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
-		 RREG32(mmVCE_SYS_INT_EN));
-	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
-		 RREG32(mmVCE_LMI_CTRL2));
-	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_CTRL));
-	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_VM_CTRL));
-	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmVCE_LMI_SWAP_CNTL));
-	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
-		 RREG32(mmVCE_LMI_SWAP_CNTL1));
-	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_CACHE_CTRL));
-}
-
 static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -619,6 +567,7 @@
 }
 
 const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+	.name = "vce_v2_0",
 	.early_init = vce_v2_0_early_init,
 	.late_init = NULL,
 	.sw_init = vce_v2_0_sw_init,
@@ -630,7 +579,6 @@
 	.is_idle = vce_v2_0_is_idle,
 	.wait_for_idle = vce_v2_0_wait_for_idle,
 	.soft_reset = vce_v2_0_soft_reset,
-	.print_status = vce_v2_0_print_status,
 	.set_clockgating_state = vce_v2_0_set_clockgating_state,
 	.set_powergating_state = vce_v2_0_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index ce468ee..30e8099 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -40,9 +40,9 @@
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 	0x8616
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 	0x8617
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 	0x8618
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
@@ -315,9 +315,11 @@
 {
 	u32 tmp;
 
-	/* Fiji, Stoney are single pipe */
+	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
 	if ((adev->asic_type == CHIP_FIJI) ||
-	    (adev->asic_type == CHIP_STONEY))
+	    (adev->asic_type == CHIP_STONEY) ||
+	    (adev->asic_type == CHIP_POLARIS10) ||
+	    (adev->asic_type == CHIP_POLARIS11))
 		return AMDGPU_VCE_HARVEST_VCE1;
 
 	/* Tonga and CZ are dual or single pipe */
@@ -381,14 +383,14 @@
 
 	ring = &adev->vce.ring[0];
 	sprintf(ring->name, "vce0");
-	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
 	if (r)
 		return r;
 
 	ring = &adev->vce.ring[1];
 	sprintf(ring->name, "vce1");
-	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
 			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
 	if (r)
 		return r;
@@ -564,73 +566,6 @@
 	return vce_v3_0_start(adev);
 }
 
-static void vce_v3_0_print_status(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	dev_info(adev->dev, "VCE 3.0 registers\n");
-	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
-		 RREG32(mmVCE_STATUS));
-	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CNTL));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
-	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
-		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
-	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
-		 RREG32(mmVCE_SOFT_RESET));
-	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_LO2));
-	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_HI2));
-	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
-		 RREG32(mmVCE_RB_SIZE2));
-	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
-		 RREG32(mmVCE_RB_RPTR2));
-	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
-		 RREG32(mmVCE_RB_WPTR2));
-	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_LO));
-	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
-		 RREG32(mmVCE_RB_BASE_HI));
-	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
-		 RREG32(mmVCE_RB_SIZE));
-	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
-		 RREG32(mmVCE_RB_RPTR));
-	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
-		 RREG32(mmVCE_RB_WPTR));
-	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
-		 RREG32(mmVCE_CLOCK_GATING_A));
-	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
-		 RREG32(mmVCE_CLOCK_GATING_B));
-	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
-		 RREG32(mmVCE_UENC_CLOCK_GATING));
-	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
-		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
-	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
-		 RREG32(mmVCE_SYS_INT_EN));
-	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
-		 RREG32(mmVCE_LMI_CTRL2));
-	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_CTRL));
-	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_VM_CTRL));
-	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
-		 RREG32(mmVCE_LMI_SWAP_CNTL));
-	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
-		 RREG32(mmVCE_LMI_SWAP_CNTL1));
-	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
-		 RREG32(mmVCE_LMI_CACHE_CTRL));
-}
-
 static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *source,
 					unsigned type,
@@ -739,6 +674,7 @@
 }
 
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
 	.late_init = NULL,
 	.sw_init = vce_v3_0_sw_init,
@@ -750,7 +686,6 @@
 	.is_idle = vce_v3_0_is_idle,
 	.wait_for_idle = vce_v3_0_wait_for_idle,
 	.soft_reset = vce_v3_0_soft_reset,
-	.print_status = vce_v3_0_print_status,
 	.set_clockgating_state = vce_v3_0_set_clockgating_state,
 	.set_powergating_state = vce_v3_0_set_powergating_state,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1c120ef..2c88d0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -78,6 +78,11 @@
 #include "amdgpu_acp.h"
 #endif
 
+MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+
 /*
  * Indirect registers accessor
  */
@@ -276,6 +281,8 @@
 						 stoney_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
 		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
 	default:
 		break;
 	}
@@ -414,11 +421,11 @@
 	return true;
 }
 
-static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
 	{mmGB_MACROTILE_MODE7, true},
 };
 
-static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
 	{mmGB_TILE_MODE7, true},
 	{mmGB_TILE_MODE12, true},
 	{mmGB_TILE_MODE17, true},
@@ -426,7 +433,7 @@
 	{mmGB_MACROTILE_MODE7, true},
 };
 
-static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
 	{mmGRBM_STATUS2, false},
 	{mmGRBM_STATUS_SE0, false},
@@ -525,8 +532,8 @@
 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 			    u32 sh_num, u32 reg_offset, u32 *value)
 {
-	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
-	struct amdgpu_allowed_register_entry *asic_register_entry;
+	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
+	const struct amdgpu_allowed_register_entry *asic_register_entry;
 	uint32_t size, i;
 
 	*value = 0;
@@ -537,6 +544,8 @@
 		break;
 	case CHIP_FIJI:
 	case CHIP_TONGA:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		asic_register_table = cz_allowed_read_registers;
@@ -907,6 +916,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &amdgpu_pp_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 11,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_v11_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -999,6 +1076,11 @@
 		adev->ip_blocks = tonga_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
 		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		adev->ip_blocks = polaris11_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		adev->ip_blocks = cz_ip_blocks;
@@ -1036,7 +1118,6 @@
 	.get_xclk = &vi_get_xclk,
 	.set_uvd_clocks = &vi_set_uvd_clocks,
 	.set_vce_clocks = &vi_set_vce_clocks,
-	.get_cu_info = &gfx_v8_0_get_cu_info,
 	/* these should be moved to their own ip modules */
 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
 	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
@@ -1076,19 +1157,69 @@
 		adev->external_rev_id = 0x1;
 		break;
 	case CHIP_FIJI:
-		adev->cg_flags = 0;
+		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_ROM_MGCG |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_MC_LS;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x3c;
 		break;
 	case CHIP_TONGA:
-		adev->cg_flags = 0;
+		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
+	case CHIP_POLARIS11:
 		adev->cg_flags = 0;
 		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x5A;
+		break;
+	case CHIP_POLARIS10:
+		adev->cg_flags = 0;
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x50;
+		break;
+	case CHIP_CARRIZO:
+		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS;
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x1;
+		break;
+	case CHIP_STONEY:
+		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS;
+		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 	default:
@@ -1164,24 +1295,19 @@
 	return 0;
 }
 
-static void vi_common_print_status(void *handle)
-{
-	return;
-}
-
 static int vi_common_soft_reset(void *handle)
 {
 	return 0;
 }
 
-static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
-		bool enable)
+static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
+						   bool enable)
 {
 	uint32_t temp, data;
 
 	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
 
-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
 		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
@@ -1194,14 +1320,14 @@
 		WREG32_PCIE(ixPCIE_CNTL2, data);
 }
 
-static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
-		bool enable)
+static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
+						    bool enable)
 {
 	uint32_t temp, data;
 
 	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
 
-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
 		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
 	else
 		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
@@ -1210,14 +1336,14 @@
 		WREG32(mmHDP_HOST_PATH_CNTL, data);
 }
 
-static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
-		bool enable)
+static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
+				      bool enable)
 {
 	uint32_t temp, data;
 
 	temp = data = RREG32(mmHDP_MEM_POWER_LS);
 
-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
 	else
 		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
@@ -1226,14 +1352,14 @@
 		WREG32(mmHDP_MEM_POWER_LS, data);
 }
 
-static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
-		bool enable)
+static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
+						    bool enable)
 {
 	uint32_t temp, data;
 
 	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
 
-	if (enable)
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
 	else
@@ -1245,19 +1371,28 @@
 }
 
 static int vi_common_set_clockgating_state(void *handle,
-					    enum amd_clockgating_state state)
+					   enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
-		fiji_update_bif_medium_grain_light_sleep(adev,
+		vi_update_bif_medium_grain_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_hdp_medium_grain_clock_gating(adev,
+		vi_update_hdp_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_hdp_light_sleep(adev,
+		vi_update_hdp_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		fiji_update_rom_medium_grain_clock_gating(adev,
+		vi_update_rom_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		break;
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		vi_update_bif_medium_grain_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		vi_update_hdp_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		vi_update_hdp_light_sleep(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
 	default:
@@ -1273,6 +1408,7 @@
 }
 
 const struct amd_ip_funcs vi_common_ip_funcs = {
+	.name = "vi_common",
 	.early_init = vi_common_early_init,
 	.late_init = NULL,
 	.sw_init = vi_common_sw_init,
@@ -1284,7 +1420,6 @@
 	.is_idle = vi_common_is_idle,
 	.wait_for_idle = vi_common_wait_for_idle,
 	.soft_reset = vi_common_soft_reset,
-	.print_status = vi_common_print_status,
 	.set_clockgating_state = vi_common_set_clockgating_state,
 	.set_powergating_state = vi_common_set_powergating_state,
 };
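
The BIF/HDP/ROM helpers above are renamed from fiji_* to vi_* and now key off adev->cg_flags, so one implementation serves Fiji, Carrizo and Stoney, with the supported features decided by the per-ASIC cg_flags assignments earlier in this file. A minimal sketch of the shared pattern (illustrative only, not part of this patch):

	/* Illustrative helper: toggle a clock-gating feature only when the
	 * caller requests it and the ASIC advertises support for it. */
	static void vi_update_example_cg(struct amdgpu_device *adev, bool enable,
					 u32 support_flag, u32 reg, u32 enable_mask)
	{
		u32 orig, data;

		orig = data = RREG32(reg);

		if (enable && (adev->cg_flags & support_flag))
			data |= enable_mask;
		else
			data &= ~enable_mask;

		if (orig != data)
			WREG32(reg, data);
	}
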
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index ace4997..062ee16 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -54,7 +54,8 @@
 #define AUD3_REGISTER_OFFSET                 (0x17b4 - 0x17a8)
 #define AUD4_REGISTER_OFFSET                 (0x17b8 - 0x17a8)
 #define AUD5_REGISTER_OFFSET                 (0x17bc - 0x17a8)
-#define AUD6_REGISTER_OFFSET                 (0x17c4 - 0x17a8)
+#define AUD6_REGISTER_OFFSET                 (0x17c0 - 0x17a8)
+#define AUD7_REGISTER_OFFSET                 (0x17c4 - 0x17a8)
 
 /* hpd instance offsets */
 #define HPD0_REGISTER_OFFSET                 (0x1898 - 0x1898)
@@ -365,7 +366,7 @@
 #define VCE_CMD_IB		0x00000002
 #define VCE_CMD_FENCE		0x00000003
 #define VCE_CMD_TRAP		0x00000004
-#define VCE_CMD_IB_AUTO 	0x00000005
+#define VCE_CMD_IB_AUTO	0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 07ac724..ee3e04e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -109,7 +109,7 @@
 
 	is_32bit_user_mode = in_compat_syscall();
 
-	if (is_32bit_user_mode == true) {
+	if (is_32bit_user_mode) {
 		dev_warn(kfd_device,
 			"Process %d (32-bit) failed to open /dev/kfd\n"
 			"32-bit processes are not supported by amdkfd\n",
@@ -131,12 +131,11 @@
 					void *data)
 {
 	struct kfd_ioctl_get_version_args *args = data;
-	int err = 0;
 
 	args->major_version = KFD_IOCTL_MAJOR_VERSION;
 	args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
-	return err;
+	return 0;
 }
 
 static int set_queue_properties_from_user(struct queue_properties *q_properties,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4bb7f42..f49c551 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -216,7 +216,7 @@
 		}
 	}
 
-	if (set == false)
+	if (!set)
 		return -EBUSY;
 
 	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
@@ -354,7 +354,7 @@
 		return -ENOMEM;
 	}
 
-	if (q->properties.is_active == true)
+	if (q->properties.is_active)
 		prev_active = true;
 
 	/*
@@ -363,9 +363,9 @@
 	 * and modify counter accordingly
 	 */
 	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
-	if ((q->properties.is_active == true) && (prev_active == false))
+	if ((q->properties.is_active) && (!prev_active))
 		dqm->queue_count++;
-	else if ((q->properties.is_active == false) && (prev_active == true))
+	else if ((!q->properties.is_active) && (prev_active))
 		dqm->queue_count--;
 
 	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -954,7 +954,7 @@
 
 	if (lock)
 		mutex_lock(&dqm->lock);
-	if (dqm->active_runlist == false)
+	if (!dqm->active_runlist)
 		goto out;
 
 	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index b6e28dc..a6a4b2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -177,9 +177,9 @@
 	bool ret;
 
 	ret = allocate_free_slot(p, page, signal_slot_index);
-	if (ret == false) {
+	if (!ret) {
 		ret = allocate_signal_page(devkfd, p);
-		if (ret == true)
+		if (ret)
 			ret = allocate_free_slot(p, page, signal_slot_index);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8fa8941..9beae87 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -300,7 +300,7 @@
 		break;
 	}
 
-	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+	if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
 		pr_err("amdkfd: failed to init kernel queue\n");
 		kfree(kq);
 		return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 90f3914..ca8c093 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -98,7 +98,7 @@
 	int retval;
 
 	BUG_ON(!pm);
-	BUG_ON(pm->allocated == true);
+	BUG_ON(pm->allocated);
 	BUG_ON(is_over_subscription == NULL);
 
 	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
@@ -292,7 +292,7 @@
 			q->properties.doorbell_off;
 
 	packet->mes_map_queues_ordinals[0].bitfields3.is_static =
-			(use_static == true) ? 1 : 0;
+			(use_static) ? 1 : 0;
 
 	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
 			lower_32_bits(q->gart_mqd_addr);
@@ -357,7 +357,7 @@
 				alloc_size_bytes);
 
 		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
-			if (kq->queue->properties.is_active != true)
+			if (!kq->queue->properties.is_active)
 				continue;
 
 			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
@@ -383,7 +383,7 @@
 		}
 
 		list_for_each_entry(q, &qpd->queues_list, list) {
-			if (q->properties.is_active != true)
+			if (!q->properties.is_active)
 				continue;
 
 			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
@@ -531,7 +531,7 @@
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
-	if (pm->allocated == true)
+	if (pm->allocated)
 		pm_release_ib(pm);
 	return retval;
 }
@@ -647,7 +647,7 @@
 	default:
 		BUG();
 		break;
-	};
+	}
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
 
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 04e40906..afce1ed 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -48,6 +48,8 @@
 	CHIP_FIJI,
 	CHIP_CARRIZO,
 	CHIP_STONEY,
+	CHIP_POLARIS10,
+	CHIP_POLARIS11,
 	CHIP_LAST,
 };
 
@@ -104,6 +106,7 @@
 #define AMD_CG_SUPPORT_VCE_MGCG			(1 << 14)
 #define AMD_CG_SUPPORT_HDP_LS			(1 << 15)
 #define AMD_CG_SUPPORT_HDP_MGCG			(1 << 16)
+#define AMD_CG_SUPPORT_ROM_MGCG			(1 << 17)
 
 /* PG flags */
 #define AMD_PG_SUPPORT_GFX_PG			(1 << 0)
@@ -140,6 +143,8 @@
 };
 
 struct amd_ip_funcs {
+	/* Name of IP block */
+	char *name;
 	/* sets up early driver state (pre sw_init), does not configure hw - Optional */
 	int (*early_init)(void *handle);
 	/* sets up late driver/hw state (post hw_init) - Optional */
@@ -152,6 +157,7 @@
 	int (*hw_init)(void *handle);
 	/* tears down the hw state */
 	int (*hw_fini)(void *handle);
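+	/* tears down late driver state - Optional */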
+	void (*late_fini)(void *handle);
 	/* handles IP specific hw/sw changes for suspend */
 	int (*suspend)(void *handle);
 	/* handles IP specific hw/sw changes for resume */
@@ -162,8 +168,6 @@
 	int (*wait_for_idle)(void *handle);
 	/* soft reset the IP block */
 	int (*soft_reset)(void *handle);
-	/* dump the IP block status registers */
-	void (*print_status)(void *handle);
 	/* enable/disable cg for the IP block */
 	int (*set_clockgating_state)(void *handle,
 				     enum amd_clockgating_state state);
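
With the per-IP print_status callback removed, common code can identify a failing block through the new name field instead of dumping its registers. A minimal sketch of that usage, assuming a hypothetical helper that is not part of this patch:

	/* Hypothetical illustration only: report failures by IP block name
	 * rather than via the removed print_status hook. */
	static int example_hw_init_all(struct amdgpu_device *adev)
	{
		int i, r;

		for (i = 0; i < adev->num_ip_blocks; i++) {
			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].funcs->name, r);
				return r;
			}
		}
		return 0;
	}
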
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
new file mode 100755
index 0000000..09a7df1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
@@ -0,0 +1,10075 @@
+/*
+ * DCE_11_2 Register documentation
+ *
+ * Copyright (C) 2016  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_11_2_D_H
+#define DCE_11_2_D_H
+
+#define mmPIPE0_PG_CONFIG                                                       0x2c0
+#define mmPIPE0_PG_ENABLE                                                       0x2c1
+#define mmPIPE0_PG_STATUS                                                       0x2c2
+#define mmPIPE1_PG_CONFIG                                                       0x2c3
+#define mmPIPE1_PG_ENABLE                                                       0x2c4
+#define mmPIPE1_PG_STATUS                                                       0x2c5
+#define mmPIPE2_PG_CONFIG                                                       0x2c6
+#define mmPIPE2_PG_ENABLE                                                       0x2c7
+#define mmPIPE2_PG_STATUS                                                       0x2c8
+#define mmPIPE3_PG_CONFIG                                                       0x2c9
+#define mmPIPE3_PG_ENABLE                                                       0x2ca
+#define mmPIPE3_PG_STATUS                                                       0x2cb
+#define mmPIPE4_PG_CONFIG                                                       0x2cc
+#define mmPIPE4_PG_ENABLE                                                       0x2cd
+#define mmPIPE4_PG_STATUS                                                       0x2ce
+#define mmPIPE5_PG_CONFIG                                                       0x2cf
+#define mmPIPE5_PG_ENABLE                                                       0x2d0
+#define mmPIPE5_PG_STATUS                                                       0x2d1
+#define mmDCPG_INTERRUPT_STATUS                                                 0x2de
+#define mmDCPG_INTERRUPT_CONTROL                                                0x2df
+#define mmDCPG_INTERRUPT_CONTROL2                                               0x2e0
+#define mmDC_IP_REQUEST_CNTL                                                    0x2d2
+#define mmDC_PGFSM_CONFIG_REG                                                   0x2d3
+#define mmDC_PGFSM_WRITE_REG                                                    0x2d4
+#define mmDC_PGCNTL_STATUS_REG                                                  0x2d5
+#define mmDCPG_TEST_DEBUG_INDEX                                                 0x2d6
+#define mmDCPG_TEST_DEBUG_DATA                                                  0x2d7
+#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL                                           0x1628
+#define mmBL1_PWM_USER_LEVEL                                                    0x1629
+#define mmBL1_PWM_TARGET_ABM_LEVEL                                              0x162a
+#define mmBL1_PWM_CURRENT_ABM_LEVEL                                             0x162b
+#define mmBL1_PWM_FINAL_DUTY_CYCLE                                              0x162c
+#define mmBL1_PWM_MINIMUM_DUTY_CYCLE                                            0x162d
+#define mmBL1_PWM_ABM_CNTL                                                      0x162e
+#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE                                         0x162f
+#define mmBL1_PWM_GRP2_REG_LOCK                                                 0x1630
+#define mmDC_ABM1_CNTL                                                          0x1638
+#define mmDC_ABM1_IPCSC_COEFF_SEL                                               0x1639
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_0                                            0x163a
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_1                                            0x163b
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_2                                            0x163c
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_3                                            0x163d
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_4                                            0x163e
+#define mmDC_ABM1_ACE_THRES_12                                                  0x163f
+#define mmDC_ABM1_ACE_THRES_34                                                  0x1640
+#define mmDC_ABM1_ACE_CNTL_MISC                                                 0x1641
+#define mmDC_ABM1_DEBUG_MISC                                                    0x1649
+#define mmDC_ABM1_HGLS_REG_READ_PROGRESS                                        0x164a
+#define mmDC_ABM1_HG_MISC_CTRL                                                  0x164b
+#define mmDC_ABM1_LS_SUM_OF_LUMA                                                0x164c
+#define mmDC_ABM1_LS_MIN_MAX_LUMA                                               0x164d
+#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA                                      0x164e
+#define mmDC_ABM1_LS_PIXEL_COUNT                                                0x164f
+#define mmDC_ABM1_LS_OVR_SCAN_BIN                                               0x1650
+#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES                                  0x1651
+#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT                                      0x1652
+#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT                                      0x1653
+#define mmDC_ABM1_HG_SAMPLE_RATE                                                0x1654
+#define mmDC_ABM1_LS_SAMPLE_RATE                                                0x1655
+#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG                                        0x1656
+#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX                                        0x1657
+#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX                                       0x1658
+#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX                                      0x1659
+#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX                                      0x165a
+#define mmDC_ABM1_HG_RESULT_1                                                   0x165b
+#define mmDC_ABM1_HG_RESULT_2                                                   0x165c
+#define mmDC_ABM1_HG_RESULT_3                                                   0x165d
+#define mmDC_ABM1_HG_RESULT_4                                                   0x165e
+#define mmDC_ABM1_HG_RESULT_5                                                   0x165f
+#define mmDC_ABM1_HG_RESULT_6                                                   0x1660
+#define mmDC_ABM1_HG_RESULT_7                                                   0x1661
+#define mmDC_ABM1_HG_RESULT_8                                                   0x1662
+#define mmDC_ABM1_HG_RESULT_9                                                   0x1663
+#define mmDC_ABM1_HG_RESULT_10                                                  0x1664
+#define mmDC_ABM1_HG_RESULT_11                                                  0x1665
+#define mmDC_ABM1_HG_RESULT_12                                                  0x1666
+#define mmDC_ABM1_HG_RESULT_13                                                  0x1667
+#define mmDC_ABM1_HG_RESULT_14                                                  0x1668
+#define mmDC_ABM1_HG_RESULT_15                                                  0x1669
+#define mmDC_ABM1_HG_RESULT_16                                                  0x166a
+#define mmDC_ABM1_HG_RESULT_17                                                  0x166b
+#define mmDC_ABM1_HG_RESULT_18                                                  0x166c
+#define mmDC_ABM1_HG_RESULT_19                                                  0x166d
+#define mmDC_ABM1_HG_RESULT_20                                                  0x166e
+#define mmDC_ABM1_HG_RESULT_21                                                  0x166f
+#define mmDC_ABM1_HG_RESULT_22                                                  0x1670
+#define mmDC_ABM1_HG_RESULT_23                                                  0x1671
+#define mmDC_ABM1_HG_RESULT_24                                                  0x1672
+#define mmDC_ABM1_OVERSCAN_PIXEL_VALUE                                          0x169b
+#define mmDC_ABM1_BL_MASTER_LOCK                                                0x169c
+#define mmABM_TEST_DEBUG_INDEX                                                  0x169e
+#define mmABM_TEST_DEBUG_DATA                                                   0x169f
+#define mmCRTC_H_BLANK_EARLY_NUM                                                0x1b7d
+#define mmCRTC0_CRTC_H_BLANK_EARLY_NUM                                          0x1b7d
+#define mmCRTC1_CRTC_H_BLANK_EARLY_NUM                                          0x1d7d
+#define mmCRTC2_CRTC_H_BLANK_EARLY_NUM                                          0x1f7d
+#define mmCRTC3_CRTC_H_BLANK_EARLY_NUM                                          0x417d
+#define mmCRTC4_CRTC_H_BLANK_EARLY_NUM                                          0x437d
+#define mmCRTC5_CRTC_H_BLANK_EARLY_NUM                                          0x457d
+#define mmCRTC_H_TOTAL                                                          0x1b80
+#define mmCRTC0_CRTC_H_TOTAL                                                    0x1b80
+#define mmCRTC1_CRTC_H_TOTAL                                                    0x1d80
+#define mmCRTC2_CRTC_H_TOTAL                                                    0x1f80
+#define mmCRTC3_CRTC_H_TOTAL                                                    0x4180
+#define mmCRTC4_CRTC_H_TOTAL                                                    0x4380
+#define mmCRTC5_CRTC_H_TOTAL                                                    0x4580
+#define mmCRTC_H_BLANK_START_END                                                0x1b81
+#define mmCRTC0_CRTC_H_BLANK_START_END                                          0x1b81
+#define mmCRTC1_CRTC_H_BLANK_START_END                                          0x1d81
+#define mmCRTC2_CRTC_H_BLANK_START_END                                          0x1f81
+#define mmCRTC3_CRTC_H_BLANK_START_END                                          0x4181
+#define mmCRTC4_CRTC_H_BLANK_START_END                                          0x4381
+#define mmCRTC5_CRTC_H_BLANK_START_END                                          0x4581
+#define mmCRTC_H_SYNC_A                                                         0x1b82
+#define mmCRTC0_CRTC_H_SYNC_A                                                   0x1b82
+#define mmCRTC1_CRTC_H_SYNC_A                                                   0x1d82
+#define mmCRTC2_CRTC_H_SYNC_A                                                   0x1f82
+#define mmCRTC3_CRTC_H_SYNC_A                                                   0x4182
+#define mmCRTC4_CRTC_H_SYNC_A                                                   0x4382
+#define mmCRTC5_CRTC_H_SYNC_A                                                   0x4582
+#define mmCRTC_H_SYNC_A_CNTL                                                    0x1b83
+#define mmCRTC0_CRTC_H_SYNC_A_CNTL                                              0x1b83
+#define mmCRTC1_CRTC_H_SYNC_A_CNTL                                              0x1d83
+#define mmCRTC2_CRTC_H_SYNC_A_CNTL                                              0x1f83
+#define mmCRTC3_CRTC_H_SYNC_A_CNTL                                              0x4183
+#define mmCRTC4_CRTC_H_SYNC_A_CNTL                                              0x4383
+#define mmCRTC5_CRTC_H_SYNC_A_CNTL                                              0x4583
+#define mmCRTC_H_SYNC_B                                                         0x1b84
+#define mmCRTC0_CRTC_H_SYNC_B                                                   0x1b84
+#define mmCRTC1_CRTC_H_SYNC_B                                                   0x1d84
+#define mmCRTC2_CRTC_H_SYNC_B                                                   0x1f84
+#define mmCRTC3_CRTC_H_SYNC_B                                                   0x4184
+#define mmCRTC4_CRTC_H_SYNC_B                                                   0x4384
+#define mmCRTC5_CRTC_H_SYNC_B                                                   0x4584
+#define mmCRTC_H_SYNC_B_CNTL                                                    0x1b85
+#define mmCRTC0_CRTC_H_SYNC_B_CNTL                                              0x1b85
+#define mmCRTC1_CRTC_H_SYNC_B_CNTL                                              0x1d85
+#define mmCRTC2_CRTC_H_SYNC_B_CNTL                                              0x1f85
+#define mmCRTC3_CRTC_H_SYNC_B_CNTL                                              0x4185
+#define mmCRTC4_CRTC_H_SYNC_B_CNTL                                              0x4385
+#define mmCRTC5_CRTC_H_SYNC_B_CNTL                                              0x4585
+#define mmCRTC_VBI_END                                                          0x1b86
+#define mmCRTC0_CRTC_VBI_END                                                    0x1b86
+#define mmCRTC1_CRTC_VBI_END                                                    0x1d86
+#define mmCRTC2_CRTC_VBI_END                                                    0x1f86
+#define mmCRTC3_CRTC_VBI_END                                                    0x4186
+#define mmCRTC4_CRTC_VBI_END                                                    0x4386
+#define mmCRTC5_CRTC_VBI_END                                                    0x4586
+#define mmCRTC_V_TOTAL                                                          0x1b87
+#define mmCRTC0_CRTC_V_TOTAL                                                    0x1b87
+#define mmCRTC1_CRTC_V_TOTAL                                                    0x1d87
+#define mmCRTC2_CRTC_V_TOTAL                                                    0x1f87
+#define mmCRTC3_CRTC_V_TOTAL                                                    0x4187
+#define mmCRTC4_CRTC_V_TOTAL                                                    0x4387
+#define mmCRTC5_CRTC_V_TOTAL                                                    0x4587
+#define mmCRTC_V_TOTAL_MIN                                                      0x1b88
+#define mmCRTC0_CRTC_V_TOTAL_MIN                                                0x1b88
+#define mmCRTC1_CRTC_V_TOTAL_MIN                                                0x1d88
+#define mmCRTC2_CRTC_V_TOTAL_MIN                                                0x1f88
+#define mmCRTC3_CRTC_V_TOTAL_MIN                                                0x4188
+#define mmCRTC4_CRTC_V_TOTAL_MIN                                                0x4388
+#define mmCRTC5_CRTC_V_TOTAL_MIN                                                0x4588
+#define mmCRTC_V_TOTAL_MAX                                                      0x1b89
+#define mmCRTC0_CRTC_V_TOTAL_MAX                                                0x1b89
+#define mmCRTC1_CRTC_V_TOTAL_MAX                                                0x1d89
+#define mmCRTC2_CRTC_V_TOTAL_MAX                                                0x1f89
+#define mmCRTC3_CRTC_V_TOTAL_MAX                                                0x4189
+#define mmCRTC4_CRTC_V_TOTAL_MAX                                                0x4389
+#define mmCRTC5_CRTC_V_TOTAL_MAX                                                0x4589
+#define mmCRTC_V_TOTAL_CONTROL                                                  0x1b8a
+#define mmCRTC0_CRTC_V_TOTAL_CONTROL                                            0x1b8a
+#define mmCRTC1_CRTC_V_TOTAL_CONTROL                                            0x1d8a
+#define mmCRTC2_CRTC_V_TOTAL_CONTROL                                            0x1f8a
+#define mmCRTC3_CRTC_V_TOTAL_CONTROL                                            0x418a
+#define mmCRTC4_CRTC_V_TOTAL_CONTROL                                            0x438a
+#define mmCRTC5_CRTC_V_TOTAL_CONTROL                                            0x458a
+#define mmCRTC_V_TOTAL_INT_STATUS                                               0x1b8b
+#define mmCRTC0_CRTC_V_TOTAL_INT_STATUS                                         0x1b8b
+#define mmCRTC1_CRTC_V_TOTAL_INT_STATUS                                         0x1d8b
+#define mmCRTC2_CRTC_V_TOTAL_INT_STATUS                                         0x1f8b
+#define mmCRTC3_CRTC_V_TOTAL_INT_STATUS                                         0x418b
+#define mmCRTC4_CRTC_V_TOTAL_INT_STATUS                                         0x438b
+#define mmCRTC5_CRTC_V_TOTAL_INT_STATUS                                         0x458b
+#define mmCRTC_VSYNC_NOM_INT_STATUS                                             0x1b8c
+#define mmCRTC0_CRTC_VSYNC_NOM_INT_STATUS                                       0x1b8c
+#define mmCRTC1_CRTC_VSYNC_NOM_INT_STATUS                                       0x1d8c
+#define mmCRTC2_CRTC_VSYNC_NOM_INT_STATUS                                       0x1f8c
+#define mmCRTC3_CRTC_VSYNC_NOM_INT_STATUS                                       0x418c
+#define mmCRTC4_CRTC_VSYNC_NOM_INT_STATUS                                       0x438c
+#define mmCRTC5_CRTC_VSYNC_NOM_INT_STATUS                                       0x458c
+#define mmCRTC_V_BLANK_START_END                                                0x1b8d
+#define mmCRTC0_CRTC_V_BLANK_START_END                                          0x1b8d
+#define mmCRTC1_CRTC_V_BLANK_START_END                                          0x1d8d
+#define mmCRTC2_CRTC_V_BLANK_START_END                                          0x1f8d
+#define mmCRTC3_CRTC_V_BLANK_START_END                                          0x418d
+#define mmCRTC4_CRTC_V_BLANK_START_END                                          0x438d
+#define mmCRTC5_CRTC_V_BLANK_START_END                                          0x458d
+#define mmCRTC_V_SYNC_A                                                         0x1b8e
+#define mmCRTC0_CRTC_V_SYNC_A                                                   0x1b8e
+#define mmCRTC1_CRTC_V_SYNC_A                                                   0x1d8e
+#define mmCRTC2_CRTC_V_SYNC_A                                                   0x1f8e
+#define mmCRTC3_CRTC_V_SYNC_A                                                   0x418e
+#define mmCRTC4_CRTC_V_SYNC_A                                                   0x438e
+#define mmCRTC5_CRTC_V_SYNC_A                                                   0x458e
+#define mmCRTC_V_SYNC_A_CNTL                                                    0x1b8f
+#define mmCRTC0_CRTC_V_SYNC_A_CNTL                                              0x1b8f
+#define mmCRTC1_CRTC_V_SYNC_A_CNTL                                              0x1d8f
+#define mmCRTC2_CRTC_V_SYNC_A_CNTL                                              0x1f8f
+#define mmCRTC3_CRTC_V_SYNC_A_CNTL                                              0x418f
+#define mmCRTC4_CRTC_V_SYNC_A_CNTL                                              0x438f
+#define mmCRTC5_CRTC_V_SYNC_A_CNTL                                              0x458f
+#define mmCRTC_V_SYNC_B                                                         0x1b90
+#define mmCRTC0_CRTC_V_SYNC_B                                                   0x1b90
+#define mmCRTC1_CRTC_V_SYNC_B                                                   0x1d90
+#define mmCRTC2_CRTC_V_SYNC_B                                                   0x1f90
+#define mmCRTC3_CRTC_V_SYNC_B                                                   0x4190
+#define mmCRTC4_CRTC_V_SYNC_B                                                   0x4390
+#define mmCRTC5_CRTC_V_SYNC_B                                                   0x4590
+#define mmCRTC_V_SYNC_B_CNTL                                                    0x1b91
+#define mmCRTC0_CRTC_V_SYNC_B_CNTL                                              0x1b91
+#define mmCRTC1_CRTC_V_SYNC_B_CNTL                                              0x1d91
+#define mmCRTC2_CRTC_V_SYNC_B_CNTL                                              0x1f91
+#define mmCRTC3_CRTC_V_SYNC_B_CNTL                                              0x4191
+#define mmCRTC4_CRTC_V_SYNC_B_CNTL                                              0x4391
+#define mmCRTC5_CRTC_V_SYNC_B_CNTL                                              0x4591
+#define mmCRTC_DTMTEST_CNTL                                                     0x1b92
+#define mmCRTC0_CRTC_DTMTEST_CNTL                                               0x1b92
+#define mmCRTC1_CRTC_DTMTEST_CNTL                                               0x1d92
+#define mmCRTC2_CRTC_DTMTEST_CNTL                                               0x1f92
+#define mmCRTC3_CRTC_DTMTEST_CNTL                                               0x4192
+#define mmCRTC4_CRTC_DTMTEST_CNTL                                               0x4392
+#define mmCRTC5_CRTC_DTMTEST_CNTL                                               0x4592
+#define mmCRTC_DTMTEST_STATUS_POSITION                                          0x1b93
+#define mmCRTC0_CRTC_DTMTEST_STATUS_POSITION                                    0x1b93
+#define mmCRTC1_CRTC_DTMTEST_STATUS_POSITION                                    0x1d93
+#define mmCRTC2_CRTC_DTMTEST_STATUS_POSITION                                    0x1f93
+#define mmCRTC3_CRTC_DTMTEST_STATUS_POSITION                                    0x4193
+#define mmCRTC4_CRTC_DTMTEST_STATUS_POSITION                                    0x4393
+#define mmCRTC5_CRTC_DTMTEST_STATUS_POSITION                                    0x4593
+#define mmCRTC_TRIGA_CNTL                                                       0x1b94
+#define mmCRTC0_CRTC_TRIGA_CNTL                                                 0x1b94
+#define mmCRTC1_CRTC_TRIGA_CNTL                                                 0x1d94
+#define mmCRTC2_CRTC_TRIGA_CNTL                                                 0x1f94
+#define mmCRTC3_CRTC_TRIGA_CNTL                                                 0x4194
+#define mmCRTC4_CRTC_TRIGA_CNTL                                                 0x4394
+#define mmCRTC5_CRTC_TRIGA_CNTL                                                 0x4594
+#define mmCRTC_TRIGA_MANUAL_TRIG                                                0x1b95
+#define mmCRTC0_CRTC_TRIGA_MANUAL_TRIG                                          0x1b95
+#define mmCRTC1_CRTC_TRIGA_MANUAL_TRIG                                          0x1d95
+#define mmCRTC2_CRTC_TRIGA_MANUAL_TRIG                                          0x1f95
+#define mmCRTC3_CRTC_TRIGA_MANUAL_TRIG                                          0x4195
+#define mmCRTC4_CRTC_TRIGA_MANUAL_TRIG                                          0x4395
+#define mmCRTC5_CRTC_TRIGA_MANUAL_TRIG                                          0x4595
+#define mmCRTC_TRIGB_CNTL                                                       0x1b96
+#define mmCRTC0_CRTC_TRIGB_CNTL                                                 0x1b96
+#define mmCRTC1_CRTC_TRIGB_CNTL                                                 0x1d96
+#define mmCRTC2_CRTC_TRIGB_CNTL                                                 0x1f96
+#define mmCRTC3_CRTC_TRIGB_CNTL                                                 0x4196
+#define mmCRTC4_CRTC_TRIGB_CNTL                                                 0x4396
+#define mmCRTC5_CRTC_TRIGB_CNTL                                                 0x4596
+#define mmCRTC_TRIGB_MANUAL_TRIG                                                0x1b97
+#define mmCRTC0_CRTC_TRIGB_MANUAL_TRIG                                          0x1b97
+#define mmCRTC1_CRTC_TRIGB_MANUAL_TRIG                                          0x1d97
+#define mmCRTC2_CRTC_TRIGB_MANUAL_TRIG                                          0x1f97
+#define mmCRTC3_CRTC_TRIGB_MANUAL_TRIG                                          0x4197
+#define mmCRTC4_CRTC_TRIGB_MANUAL_TRIG                                          0x4397
+#define mmCRTC5_CRTC_TRIGB_MANUAL_TRIG                                          0x4597
+#define mmCRTC_FORCE_COUNT_NOW_CNTL                                             0x1b98
+#define mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL                                       0x1b98
+#define mmCRTC1_CRTC_FORCE_COUNT_NOW_CNTL                                       0x1d98
+#define mmCRTC2_CRTC_FORCE_COUNT_NOW_CNTL                                       0x1f98
+#define mmCRTC3_CRTC_FORCE_COUNT_NOW_CNTL                                       0x4198
+#define mmCRTC4_CRTC_FORCE_COUNT_NOW_CNTL                                       0x4398
+#define mmCRTC5_CRTC_FORCE_COUNT_NOW_CNTL                                       0x4598
+#define mmCRTC_FLOW_CONTROL                                                     0x1b99
+#define mmCRTC0_CRTC_FLOW_CONTROL                                               0x1b99
+#define mmCRTC1_CRTC_FLOW_CONTROL                                               0x1d99
+#define mmCRTC2_CRTC_FLOW_CONTROL                                               0x1f99
+#define mmCRTC3_CRTC_FLOW_CONTROL                                               0x4199
+#define mmCRTC4_CRTC_FLOW_CONTROL                                               0x4399
+#define mmCRTC5_CRTC_FLOW_CONTROL                                               0x4599
+#define mmCRTC_STEREO_FORCE_NEXT_EYE                                            0x1b9a
+#define mmCRTC0_CRTC_STEREO_FORCE_NEXT_EYE                                      0x1b9a
+#define mmCRTC1_CRTC_STEREO_FORCE_NEXT_EYE                                      0x1d9a
+#define mmCRTC2_CRTC_STEREO_FORCE_NEXT_EYE                                      0x1f9a
+#define mmCRTC3_CRTC_STEREO_FORCE_NEXT_EYE                                      0x419a
+#define mmCRTC4_CRTC_STEREO_FORCE_NEXT_EYE                                      0x439a
+#define mmCRTC5_CRTC_STEREO_FORCE_NEXT_EYE                                      0x459a
+#define mmCRTC_AVSYNC_COUNTER                                                   0x1b9b
+#define mmCRTC0_CRTC_AVSYNC_COUNTER                                             0x1b9b
+#define mmCRTC1_CRTC_AVSYNC_COUNTER                                             0x1d9b
+#define mmCRTC2_CRTC_AVSYNC_COUNTER                                             0x1f9b
+#define mmCRTC3_CRTC_AVSYNC_COUNTER                                             0x419b
+#define mmCRTC4_CRTC_AVSYNC_COUNTER                                             0x439b
+#define mmCRTC5_CRTC_AVSYNC_COUNTER                                             0x459b
+#define mmCRTC_CONTROL                                                          0x1b9c
+#define mmCRTC0_CRTC_CONTROL                                                    0x1b9c
+#define mmCRTC1_CRTC_CONTROL                                                    0x1d9c
+#define mmCRTC2_CRTC_CONTROL                                                    0x1f9c
+#define mmCRTC3_CRTC_CONTROL                                                    0x419c
+#define mmCRTC4_CRTC_CONTROL                                                    0x439c
+#define mmCRTC5_CRTC_CONTROL                                                    0x459c
+#define mmCRTC_BLANK_CONTROL                                                    0x1b9d
+#define mmCRTC0_CRTC_BLANK_CONTROL                                              0x1b9d
+#define mmCRTC1_CRTC_BLANK_CONTROL                                              0x1d9d
+#define mmCRTC2_CRTC_BLANK_CONTROL                                              0x1f9d
+#define mmCRTC3_CRTC_BLANK_CONTROL                                              0x419d
+#define mmCRTC4_CRTC_BLANK_CONTROL                                              0x439d
+#define mmCRTC5_CRTC_BLANK_CONTROL                                              0x459d
+#define mmCRTC_INTERLACE_CONTROL                                                0x1b9e
+#define mmCRTC0_CRTC_INTERLACE_CONTROL                                          0x1b9e
+#define mmCRTC1_CRTC_INTERLACE_CONTROL                                          0x1d9e
+#define mmCRTC2_CRTC_INTERLACE_CONTROL                                          0x1f9e
+#define mmCRTC3_CRTC_INTERLACE_CONTROL                                          0x419e
+#define mmCRTC4_CRTC_INTERLACE_CONTROL                                          0x439e
+#define mmCRTC5_CRTC_INTERLACE_CONTROL                                          0x459e
+#define mmCRTC_INTERLACE_STATUS                                                 0x1b9f
+#define mmCRTC0_CRTC_INTERLACE_STATUS                                           0x1b9f
+#define mmCRTC1_CRTC_INTERLACE_STATUS                                           0x1d9f
+#define mmCRTC2_CRTC_INTERLACE_STATUS                                           0x1f9f
+#define mmCRTC3_CRTC_INTERLACE_STATUS                                           0x419f
+#define mmCRTC4_CRTC_INTERLACE_STATUS                                           0x439f
+#define mmCRTC5_CRTC_INTERLACE_STATUS                                           0x459f
+#define mmCRTC_FIELD_INDICATION_CONTROL                                         0x1ba0
+#define mmCRTC0_CRTC_FIELD_INDICATION_CONTROL                                   0x1ba0
+#define mmCRTC1_CRTC_FIELD_INDICATION_CONTROL                                   0x1da0
+#define mmCRTC2_CRTC_FIELD_INDICATION_CONTROL                                   0x1fa0
+#define mmCRTC3_CRTC_FIELD_INDICATION_CONTROL                                   0x41a0
+#define mmCRTC4_CRTC_FIELD_INDICATION_CONTROL                                   0x43a0
+#define mmCRTC5_CRTC_FIELD_INDICATION_CONTROL                                   0x45a0
+#define mmCRTC_PIXEL_DATA_READBACK0                                             0x1ba1
+#define mmCRTC0_CRTC_PIXEL_DATA_READBACK0                                       0x1ba1
+#define mmCRTC1_CRTC_PIXEL_DATA_READBACK0                                       0x1da1
+#define mmCRTC2_CRTC_PIXEL_DATA_READBACK0                                       0x1fa1
+#define mmCRTC3_CRTC_PIXEL_DATA_READBACK0                                       0x41a1
+#define mmCRTC4_CRTC_PIXEL_DATA_READBACK0                                       0x43a1
+#define mmCRTC5_CRTC_PIXEL_DATA_READBACK0                                       0x45a1
+#define mmCRTC_PIXEL_DATA_READBACK1                                             0x1ba2
+#define mmCRTC0_CRTC_PIXEL_DATA_READBACK1                                       0x1ba2
+#define mmCRTC1_CRTC_PIXEL_DATA_READBACK1                                       0x1da2
+#define mmCRTC2_CRTC_PIXEL_DATA_READBACK1                                       0x1fa2
+#define mmCRTC3_CRTC_PIXEL_DATA_READBACK1                                       0x41a2
+#define mmCRTC4_CRTC_PIXEL_DATA_READBACK1                                       0x43a2
+#define mmCRTC5_CRTC_PIXEL_DATA_READBACK1                                       0x45a2
+#define mmCRTC_STATUS                                                           0x1ba3
+#define mmCRTC0_CRTC_STATUS                                                     0x1ba3
+#define mmCRTC1_CRTC_STATUS                                                     0x1da3
+#define mmCRTC2_CRTC_STATUS                                                     0x1fa3
+#define mmCRTC3_CRTC_STATUS                                                     0x41a3
+#define mmCRTC4_CRTC_STATUS                                                     0x43a3
+#define mmCRTC5_CRTC_STATUS                                                     0x45a3
+#define mmCRTC_STATUS_POSITION                                                  0x1ba4
+#define mmCRTC0_CRTC_STATUS_POSITION                                            0x1ba4
+#define mmCRTC1_CRTC_STATUS_POSITION                                            0x1da4
+#define mmCRTC2_CRTC_STATUS_POSITION                                            0x1fa4
+#define mmCRTC3_CRTC_STATUS_POSITION                                            0x41a4
+#define mmCRTC4_CRTC_STATUS_POSITION                                            0x43a4
+#define mmCRTC5_CRTC_STATUS_POSITION                                            0x45a4
+#define mmCRTC_NOM_VERT_POSITION                                                0x1ba5
+#define mmCRTC0_CRTC_NOM_VERT_POSITION                                          0x1ba5
+#define mmCRTC1_CRTC_NOM_VERT_POSITION                                          0x1da5
+#define mmCRTC2_CRTC_NOM_VERT_POSITION                                          0x1fa5
+#define mmCRTC3_CRTC_NOM_VERT_POSITION                                          0x41a5
+#define mmCRTC4_CRTC_NOM_VERT_POSITION                                          0x43a5
+#define mmCRTC5_CRTC_NOM_VERT_POSITION                                          0x45a5
+#define mmCRTC_STATUS_FRAME_COUNT                                               0x1ba6
+#define mmCRTC0_CRTC_STATUS_FRAME_COUNT                                         0x1ba6
+#define mmCRTC1_CRTC_STATUS_FRAME_COUNT                                         0x1da6
+#define mmCRTC2_CRTC_STATUS_FRAME_COUNT                                         0x1fa6
+#define mmCRTC3_CRTC_STATUS_FRAME_COUNT                                         0x41a6
+#define mmCRTC4_CRTC_STATUS_FRAME_COUNT                                         0x43a6
+#define mmCRTC5_CRTC_STATUS_FRAME_COUNT                                         0x45a6
+#define mmCRTC_STATUS_VF_COUNT                                                  0x1ba7
+#define mmCRTC0_CRTC_STATUS_VF_COUNT                                            0x1ba7
+#define mmCRTC1_CRTC_STATUS_VF_COUNT                                            0x1da7
+#define mmCRTC2_CRTC_STATUS_VF_COUNT                                            0x1fa7
+#define mmCRTC3_CRTC_STATUS_VF_COUNT                                            0x41a7
+#define mmCRTC4_CRTC_STATUS_VF_COUNT                                            0x43a7
+#define mmCRTC5_CRTC_STATUS_VF_COUNT                                            0x45a7
+#define mmCRTC_STATUS_HV_COUNT                                                  0x1ba8
+#define mmCRTC0_CRTC_STATUS_HV_COUNT                                            0x1ba8
+#define mmCRTC1_CRTC_STATUS_HV_COUNT                                            0x1da8
+#define mmCRTC2_CRTC_STATUS_HV_COUNT                                            0x1fa8
+#define mmCRTC3_CRTC_STATUS_HV_COUNT                                            0x41a8
+#define mmCRTC4_CRTC_STATUS_HV_COUNT                                            0x43a8
+#define mmCRTC5_CRTC_STATUS_HV_COUNT                                            0x45a8
+#define mmCRTC_COUNT_CONTROL                                                    0x1ba9
+#define mmCRTC0_CRTC_COUNT_CONTROL                                              0x1ba9
+#define mmCRTC1_CRTC_COUNT_CONTROL                                              0x1da9
+#define mmCRTC2_CRTC_COUNT_CONTROL                                              0x1fa9
+#define mmCRTC3_CRTC_COUNT_CONTROL                                              0x41a9
+#define mmCRTC4_CRTC_COUNT_CONTROL                                              0x43a9
+#define mmCRTC5_CRTC_COUNT_CONTROL                                              0x45a9
+#define mmCRTC_COUNT_RESET                                                      0x1baa
+#define mmCRTC0_CRTC_COUNT_RESET                                                0x1baa
+#define mmCRTC1_CRTC_COUNT_RESET                                                0x1daa
+#define mmCRTC2_CRTC_COUNT_RESET                                                0x1faa
+#define mmCRTC3_CRTC_COUNT_RESET                                                0x41aa
+#define mmCRTC4_CRTC_COUNT_RESET                                                0x43aa
+#define mmCRTC5_CRTC_COUNT_RESET                                                0x45aa
+#define mmCRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                                     0x1bab
+#define mmCRTC0_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                               0x1bab
+#define mmCRTC1_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                               0x1dab
+#define mmCRTC2_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                               0x1fab
+#define mmCRTC3_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                               0x41ab
+#define mmCRTC4_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                               0x43ab
+#define mmCRTC5_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE                               0x45ab
+#define mmCRTC_VERT_SYNC_CONTROL                                                0x1bac
+#define mmCRTC0_CRTC_VERT_SYNC_CONTROL                                          0x1bac
+#define mmCRTC1_CRTC_VERT_SYNC_CONTROL                                          0x1dac
+#define mmCRTC2_CRTC_VERT_SYNC_CONTROL                                          0x1fac
+#define mmCRTC3_CRTC_VERT_SYNC_CONTROL                                          0x41ac
+#define mmCRTC4_CRTC_VERT_SYNC_CONTROL                                          0x43ac
+#define mmCRTC5_CRTC_VERT_SYNC_CONTROL                                          0x45ac
+#define mmCRTC_STEREO_STATUS                                                    0x1bad
+#define mmCRTC0_CRTC_STEREO_STATUS                                              0x1bad
+#define mmCRTC1_CRTC_STEREO_STATUS                                              0x1dad
+#define mmCRTC2_CRTC_STEREO_STATUS                                              0x1fad
+#define mmCRTC3_CRTC_STEREO_STATUS                                              0x41ad
+#define mmCRTC4_CRTC_STEREO_STATUS                                              0x43ad
+#define mmCRTC5_CRTC_STEREO_STATUS                                              0x45ad
+#define mmCRTC_STEREO_CONTROL                                                   0x1bae
+#define mmCRTC0_CRTC_STEREO_CONTROL                                             0x1bae
+#define mmCRTC1_CRTC_STEREO_CONTROL                                             0x1dae
+#define mmCRTC2_CRTC_STEREO_CONTROL                                             0x1fae
+#define mmCRTC3_CRTC_STEREO_CONTROL                                             0x41ae
+#define mmCRTC4_CRTC_STEREO_CONTROL                                             0x43ae
+#define mmCRTC5_CRTC_STEREO_CONTROL                                             0x45ae
+#define mmCRTC_SNAPSHOT_STATUS                                                  0x1baf
+#define mmCRTC0_CRTC_SNAPSHOT_STATUS                                            0x1baf
+#define mmCRTC1_CRTC_SNAPSHOT_STATUS                                            0x1daf
+#define mmCRTC2_CRTC_SNAPSHOT_STATUS                                            0x1faf
+#define mmCRTC3_CRTC_SNAPSHOT_STATUS                                            0x41af
+#define mmCRTC4_CRTC_SNAPSHOT_STATUS                                            0x43af
+#define mmCRTC5_CRTC_SNAPSHOT_STATUS                                            0x45af
+#define mmCRTC_SNAPSHOT_CONTROL                                                 0x1bb0
+#define mmCRTC0_CRTC_SNAPSHOT_CONTROL                                           0x1bb0
+#define mmCRTC1_CRTC_SNAPSHOT_CONTROL                                           0x1db0
+#define mmCRTC2_CRTC_SNAPSHOT_CONTROL                                           0x1fb0
+#define mmCRTC3_CRTC_SNAPSHOT_CONTROL                                           0x41b0
+#define mmCRTC4_CRTC_SNAPSHOT_CONTROL                                           0x43b0
+#define mmCRTC5_CRTC_SNAPSHOT_CONTROL                                           0x45b0
+#define mmCRTC_SNAPSHOT_POSITION                                                0x1bb1
+#define mmCRTC0_CRTC_SNAPSHOT_POSITION                                          0x1bb1
+#define mmCRTC1_CRTC_SNAPSHOT_POSITION                                          0x1db1
+#define mmCRTC2_CRTC_SNAPSHOT_POSITION                                          0x1fb1
+#define mmCRTC3_CRTC_SNAPSHOT_POSITION                                          0x41b1
+#define mmCRTC4_CRTC_SNAPSHOT_POSITION                                          0x43b1
+#define mmCRTC5_CRTC_SNAPSHOT_POSITION                                          0x45b1
+#define mmCRTC_SNAPSHOT_FRAME                                                   0x1bb2
+#define mmCRTC0_CRTC_SNAPSHOT_FRAME                                             0x1bb2
+#define mmCRTC1_CRTC_SNAPSHOT_FRAME                                             0x1db2
+#define mmCRTC2_CRTC_SNAPSHOT_FRAME                                             0x1fb2
+#define mmCRTC3_CRTC_SNAPSHOT_FRAME                                             0x41b2
+#define mmCRTC4_CRTC_SNAPSHOT_FRAME                                             0x43b2
+#define mmCRTC5_CRTC_SNAPSHOT_FRAME                                             0x45b2
+#define mmCRTC_START_LINE_CONTROL                                               0x1bb3
+#define mmCRTC0_CRTC_START_LINE_CONTROL                                         0x1bb3
+#define mmCRTC1_CRTC_START_LINE_CONTROL                                         0x1db3
+#define mmCRTC2_CRTC_START_LINE_CONTROL                                         0x1fb3
+#define mmCRTC3_CRTC_START_LINE_CONTROL                                         0x41b3
+#define mmCRTC4_CRTC_START_LINE_CONTROL                                         0x43b3
+#define mmCRTC5_CRTC_START_LINE_CONTROL                                         0x45b3
+#define mmCRTC_INTERRUPT_CONTROL                                                0x1bb4
+#define mmCRTC0_CRTC_INTERRUPT_CONTROL                                          0x1bb4
+#define mmCRTC1_CRTC_INTERRUPT_CONTROL                                          0x1db4
+#define mmCRTC2_CRTC_INTERRUPT_CONTROL                                          0x1fb4
+#define mmCRTC3_CRTC_INTERRUPT_CONTROL                                          0x41b4
+#define mmCRTC4_CRTC_INTERRUPT_CONTROL                                          0x43b4
+#define mmCRTC5_CRTC_INTERRUPT_CONTROL                                          0x45b4
+#define mmCRTC_UPDATE_LOCK                                                      0x1bb5
+#define mmCRTC0_CRTC_UPDATE_LOCK                                                0x1bb5
+#define mmCRTC1_CRTC_UPDATE_LOCK                                                0x1db5
+#define mmCRTC2_CRTC_UPDATE_LOCK                                                0x1fb5
+#define mmCRTC3_CRTC_UPDATE_LOCK                                                0x41b5
+#define mmCRTC4_CRTC_UPDATE_LOCK                                                0x43b5
+#define mmCRTC5_CRTC_UPDATE_LOCK                                                0x45b5
+#define mmCRTC_DOUBLE_BUFFER_CONTROL                                            0x1bb6
+#define mmCRTC0_CRTC_DOUBLE_BUFFER_CONTROL                                      0x1bb6
+#define mmCRTC1_CRTC_DOUBLE_BUFFER_CONTROL                                      0x1db6
+#define mmCRTC2_CRTC_DOUBLE_BUFFER_CONTROL                                      0x1fb6
+#define mmCRTC3_CRTC_DOUBLE_BUFFER_CONTROL                                      0x41b6
+#define mmCRTC4_CRTC_DOUBLE_BUFFER_CONTROL                                      0x43b6
+#define mmCRTC5_CRTC_DOUBLE_BUFFER_CONTROL                                      0x45b6
+#define mmCRTC_VGA_PARAMETER_CAPTURE_MODE                                       0x1bb7
+#define mmCRTC0_CRTC_VGA_PARAMETER_CAPTURE_MODE                                 0x1bb7
+#define mmCRTC1_CRTC_VGA_PARAMETER_CAPTURE_MODE                                 0x1db7
+#define mmCRTC2_CRTC_VGA_PARAMETER_CAPTURE_MODE                                 0x1fb7
+#define mmCRTC3_CRTC_VGA_PARAMETER_CAPTURE_MODE                                 0x41b7
+#define mmCRTC4_CRTC_VGA_PARAMETER_CAPTURE_MODE                                 0x43b7
+#define mmCRTC5_CRTC_VGA_PARAMETER_CAPTURE_MODE                                 0x45b7
+#define mmCRTC_TEST_PATTERN_CONTROL                                             0x1bba
+#define mmCRTC0_CRTC_TEST_PATTERN_CONTROL                                       0x1bba
+#define mmCRTC1_CRTC_TEST_PATTERN_CONTROL                                       0x1dba
+#define mmCRTC2_CRTC_TEST_PATTERN_CONTROL                                       0x1fba
+#define mmCRTC3_CRTC_TEST_PATTERN_CONTROL                                       0x41ba
+#define mmCRTC4_CRTC_TEST_PATTERN_CONTROL                                       0x43ba
+#define mmCRTC5_CRTC_TEST_PATTERN_CONTROL                                       0x45ba
+#define mmCRTC_TEST_PATTERN_PARAMETERS                                          0x1bbb
+#define mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS                                    0x1bbb
+#define mmCRTC1_CRTC_TEST_PATTERN_PARAMETERS                                    0x1dbb
+#define mmCRTC2_CRTC_TEST_PATTERN_PARAMETERS                                    0x1fbb
+#define mmCRTC3_CRTC_TEST_PATTERN_PARAMETERS                                    0x41bb
+#define mmCRTC4_CRTC_TEST_PATTERN_PARAMETERS                                    0x43bb
+#define mmCRTC5_CRTC_TEST_PATTERN_PARAMETERS                                    0x45bb
+#define mmCRTC_TEST_PATTERN_COLOR                                               0x1bbc
+#define mmCRTC0_CRTC_TEST_PATTERN_COLOR                                         0x1bbc
+#define mmCRTC1_CRTC_TEST_PATTERN_COLOR                                         0x1dbc
+#define mmCRTC2_CRTC_TEST_PATTERN_COLOR                                         0x1fbc
+#define mmCRTC3_CRTC_TEST_PATTERN_COLOR                                         0x41bc
+#define mmCRTC4_CRTC_TEST_PATTERN_COLOR                                         0x43bc
+#define mmCRTC5_CRTC_TEST_PATTERN_COLOR                                         0x45bc
+#define mmCRTC_MASTER_UPDATE_LOCK                                               0x1bbd
+#define mmCRTC0_CRTC_MASTER_UPDATE_LOCK                                         0x1bbd
+#define mmCRTC1_CRTC_MASTER_UPDATE_LOCK                                         0x1dbd
+#define mmCRTC2_CRTC_MASTER_UPDATE_LOCK                                         0x1fbd
+#define mmCRTC3_CRTC_MASTER_UPDATE_LOCK                                         0x41bd
+#define mmCRTC4_CRTC_MASTER_UPDATE_LOCK                                         0x43bd
+#define mmCRTC5_CRTC_MASTER_UPDATE_LOCK                                         0x45bd
+#define mmCRTC_MASTER_UPDATE_MODE                                               0x1bbe
+#define mmCRTC0_CRTC_MASTER_UPDATE_MODE                                         0x1bbe
+#define mmCRTC1_CRTC_MASTER_UPDATE_MODE                                         0x1dbe
+#define mmCRTC2_CRTC_MASTER_UPDATE_MODE                                         0x1fbe
+#define mmCRTC3_CRTC_MASTER_UPDATE_MODE                                         0x41be
+#define mmCRTC4_CRTC_MASTER_UPDATE_MODE                                         0x43be
+#define mmCRTC5_CRTC_MASTER_UPDATE_MODE                                         0x45be
+#define mmCRTC_MVP_INBAND_CNTL_INSERT                                           0x1bbf
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT                                     0x1bbf
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT                                     0x1dbf
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT                                     0x1fbf
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT                                     0x41bf
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT                                     0x43bf
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT                                     0x45bf
+#define mmCRTC_MVP_INBAND_CNTL_INSERT_TIMER                                     0x1bc0
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT_TIMER                               0x1bc0
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT_TIMER                               0x1dc0
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT_TIMER                               0x1fc0
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT_TIMER                               0x41c0
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT_TIMER                               0x43c0
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT_TIMER                               0x45c0
+#define mmCRTC_MVP_STATUS                                                       0x1bc1
+#define mmCRTC0_CRTC_MVP_STATUS                                                 0x1bc1
+#define mmCRTC1_CRTC_MVP_STATUS                                                 0x1dc1
+#define mmCRTC2_CRTC_MVP_STATUS                                                 0x1fc1
+#define mmCRTC3_CRTC_MVP_STATUS                                                 0x41c1
+#define mmCRTC4_CRTC_MVP_STATUS                                                 0x43c1
+#define mmCRTC5_CRTC_MVP_STATUS                                                 0x45c1
+#define mmCRTC_MASTER_EN                                                        0x1bc2
+#define mmCRTC0_CRTC_MASTER_EN                                                  0x1bc2
+#define mmCRTC1_CRTC_MASTER_EN                                                  0x1dc2
+#define mmCRTC2_CRTC_MASTER_EN                                                  0x1fc2
+#define mmCRTC3_CRTC_MASTER_EN                                                  0x41c2
+#define mmCRTC4_CRTC_MASTER_EN                                                  0x43c2
+#define mmCRTC5_CRTC_MASTER_EN                                                  0x45c2
+#define mmCRTC_ALLOW_STOP_OFF_V_CNT                                             0x1bc3
+#define mmCRTC0_CRTC_ALLOW_STOP_OFF_V_CNT                                       0x1bc3
+#define mmCRTC1_CRTC_ALLOW_STOP_OFF_V_CNT                                       0x1dc3
+#define mmCRTC2_CRTC_ALLOW_STOP_OFF_V_CNT                                       0x1fc3
+#define mmCRTC3_CRTC_ALLOW_STOP_OFF_V_CNT                                       0x41c3
+#define mmCRTC4_CRTC_ALLOW_STOP_OFF_V_CNT                                       0x43c3
+#define mmCRTC5_CRTC_ALLOW_STOP_OFF_V_CNT                                       0x45c3
+#define mmCRTC_V_UPDATE_INT_STATUS                                              0x1bc4
+#define mmCRTC0_CRTC_V_UPDATE_INT_STATUS                                        0x1bc4
+#define mmCRTC1_CRTC_V_UPDATE_INT_STATUS                                        0x1dc4
+#define mmCRTC2_CRTC_V_UPDATE_INT_STATUS                                        0x1fc4
+#define mmCRTC3_CRTC_V_UPDATE_INT_STATUS                                        0x41c4
+#define mmCRTC4_CRTC_V_UPDATE_INT_STATUS                                        0x43c4
+#define mmCRTC5_CRTC_V_UPDATE_INT_STATUS                                        0x45c4
+#define mmCRTC_OVERSCAN_COLOR                                                   0x1bc8
+#define mmCRTC0_CRTC_OVERSCAN_COLOR                                             0x1bc8
+#define mmCRTC1_CRTC_OVERSCAN_COLOR                                             0x1dc8
+#define mmCRTC2_CRTC_OVERSCAN_COLOR                                             0x1fc8
+#define mmCRTC3_CRTC_OVERSCAN_COLOR                                             0x41c8
+#define mmCRTC4_CRTC_OVERSCAN_COLOR                                             0x43c8
+#define mmCRTC5_CRTC_OVERSCAN_COLOR                                             0x45c8
+#define mmCRTC_OVERSCAN_COLOR_EXT                                               0x1bc9
+#define mmCRTC0_CRTC_OVERSCAN_COLOR_EXT                                         0x1bc9
+#define mmCRTC1_CRTC_OVERSCAN_COLOR_EXT                                         0x1dc9
+#define mmCRTC2_CRTC_OVERSCAN_COLOR_EXT                                         0x1fc9
+#define mmCRTC3_CRTC_OVERSCAN_COLOR_EXT                                         0x41c9
+#define mmCRTC4_CRTC_OVERSCAN_COLOR_EXT                                         0x43c9
+#define mmCRTC5_CRTC_OVERSCAN_COLOR_EXT                                         0x45c9
+#define mmCRTC_BLANK_DATA_COLOR                                                 0x1bca
+#define mmCRTC0_CRTC_BLANK_DATA_COLOR                                           0x1bca
+#define mmCRTC1_CRTC_BLANK_DATA_COLOR                                           0x1dca
+#define mmCRTC2_CRTC_BLANK_DATA_COLOR                                           0x1fca
+#define mmCRTC3_CRTC_BLANK_DATA_COLOR                                           0x41ca
+#define mmCRTC4_CRTC_BLANK_DATA_COLOR                                           0x43ca
+#define mmCRTC5_CRTC_BLANK_DATA_COLOR                                           0x45ca
+#define mmCRTC_BLANK_DATA_COLOR_EXT                                             0x1bcb
+#define mmCRTC0_CRTC_BLANK_DATA_COLOR_EXT                                       0x1bcb
+#define mmCRTC1_CRTC_BLANK_DATA_COLOR_EXT                                       0x1dcb
+#define mmCRTC2_CRTC_BLANK_DATA_COLOR_EXT                                       0x1fcb
+#define mmCRTC3_CRTC_BLANK_DATA_COLOR_EXT                                       0x41cb
+#define mmCRTC4_CRTC_BLANK_DATA_COLOR_EXT                                       0x43cb
+#define mmCRTC5_CRTC_BLANK_DATA_COLOR_EXT                                       0x45cb
+#define mmCRTC_BLACK_COLOR                                                      0x1bcc
+#define mmCRTC0_CRTC_BLACK_COLOR                                                0x1bcc
+#define mmCRTC1_CRTC_BLACK_COLOR                                                0x1dcc
+#define mmCRTC2_CRTC_BLACK_COLOR                                                0x1fcc
+#define mmCRTC3_CRTC_BLACK_COLOR                                                0x41cc
+#define mmCRTC4_CRTC_BLACK_COLOR                                                0x43cc
+#define mmCRTC5_CRTC_BLACK_COLOR                                                0x45cc
+#define mmCRTC_BLACK_COLOR_EXT                                                  0x1bcd
+#define mmCRTC0_CRTC_BLACK_COLOR_EXT                                            0x1bcd
+#define mmCRTC1_CRTC_BLACK_COLOR_EXT                                            0x1dcd
+#define mmCRTC2_CRTC_BLACK_COLOR_EXT                                            0x1fcd
+#define mmCRTC3_CRTC_BLACK_COLOR_EXT                                            0x41cd
+#define mmCRTC4_CRTC_BLACK_COLOR_EXT                                            0x43cd
+#define mmCRTC5_CRTC_BLACK_COLOR_EXT                                            0x45cd
+#define mmCRTC_VERTICAL_INTERRUPT0_POSITION                                     0x1bce
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION                               0x1bce
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT0_POSITION                               0x1dce
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT0_POSITION                               0x1fce
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT0_POSITION                               0x41ce
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT0_POSITION                               0x43ce
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT0_POSITION                               0x45ce
+#define mmCRTC_VERTICAL_INTERRUPT0_CONTROL                                      0x1bcf
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT0_CONTROL                                0x1bcf
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT0_CONTROL                                0x1dcf
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT0_CONTROL                                0x1fcf
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT0_CONTROL                                0x41cf
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT0_CONTROL                                0x43cf
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT0_CONTROL                                0x45cf
+#define mmCRTC_VERTICAL_INTERRUPT1_POSITION                                     0x1bd0
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT1_POSITION                               0x1bd0
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT1_POSITION                               0x1dd0
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT1_POSITION                               0x1fd0
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT1_POSITION                               0x41d0
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT1_POSITION                               0x43d0
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT1_POSITION                               0x45d0
+#define mmCRTC_VERTICAL_INTERRUPT1_CONTROL                                      0x1bd1
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT1_CONTROL                                0x1bd1
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT1_CONTROL                                0x1dd1
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT1_CONTROL                                0x1fd1
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT1_CONTROL                                0x41d1
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT1_CONTROL                                0x43d1
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT1_CONTROL                                0x45d1
+#define mmCRTC_VERTICAL_INTERRUPT2_POSITION                                     0x1bd2
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT2_POSITION                               0x1bd2
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT2_POSITION                               0x1dd2
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT2_POSITION                               0x1fd2
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT2_POSITION                               0x41d2
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT2_POSITION                               0x43d2
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT2_POSITION                               0x45d2
+#define mmCRTC_VERTICAL_INTERRUPT2_CONTROL                                      0x1bd3
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT2_CONTROL                                0x1bd3
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT2_CONTROL                                0x1dd3
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT2_CONTROL                                0x1fd3
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT2_CONTROL                                0x41d3
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT2_CONTROL                                0x43d3
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT2_CONTROL                                0x45d3
+#define mmCRTC_CRC_CNTL                                                         0x1bd4
+#define mmCRTC0_CRTC_CRC_CNTL                                                   0x1bd4
+#define mmCRTC1_CRTC_CRC_CNTL                                                   0x1dd4
+#define mmCRTC2_CRTC_CRC_CNTL                                                   0x1fd4
+#define mmCRTC3_CRTC_CRC_CNTL                                                   0x41d4
+#define mmCRTC4_CRTC_CRC_CNTL                                                   0x43d4
+#define mmCRTC5_CRTC_CRC_CNTL                                                   0x45d4
+#define mmCRTC_CRC0_WINDOWA_X_CONTROL                                           0x1bd5
+#define mmCRTC0_CRTC_CRC0_WINDOWA_X_CONTROL                                     0x1bd5
+#define mmCRTC1_CRTC_CRC0_WINDOWA_X_CONTROL                                     0x1dd5
+#define mmCRTC2_CRTC_CRC0_WINDOWA_X_CONTROL                                     0x1fd5
+#define mmCRTC3_CRTC_CRC0_WINDOWA_X_CONTROL                                     0x41d5
+#define mmCRTC4_CRTC_CRC0_WINDOWA_X_CONTROL                                     0x43d5
+#define mmCRTC5_CRTC_CRC0_WINDOWA_X_CONTROL                                     0x45d5
+#define mmCRTC_CRC0_WINDOWA_Y_CONTROL                                           0x1bd6
+#define mmCRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL                                     0x1bd6
+#define mmCRTC1_CRTC_CRC0_WINDOWA_Y_CONTROL                                     0x1dd6
+#define mmCRTC2_CRTC_CRC0_WINDOWA_Y_CONTROL                                     0x1fd6
+#define mmCRTC3_CRTC_CRC0_WINDOWA_Y_CONTROL                                     0x41d6
+#define mmCRTC4_CRTC_CRC0_WINDOWA_Y_CONTROL                                     0x43d6
+#define mmCRTC5_CRTC_CRC0_WINDOWA_Y_CONTROL                                     0x45d6
+#define mmCRTC_CRC0_WINDOWB_X_CONTROL                                           0x1bd7
+#define mmCRTC0_CRTC_CRC0_WINDOWB_X_CONTROL                                     0x1bd7
+#define mmCRTC1_CRTC_CRC0_WINDOWB_X_CONTROL                                     0x1dd7
+#define mmCRTC2_CRTC_CRC0_WINDOWB_X_CONTROL                                     0x1fd7
+#define mmCRTC3_CRTC_CRC0_WINDOWB_X_CONTROL                                     0x41d7
+#define mmCRTC4_CRTC_CRC0_WINDOWB_X_CONTROL                                     0x43d7
+#define mmCRTC5_CRTC_CRC0_WINDOWB_X_CONTROL                                     0x45d7
+#define mmCRTC_CRC0_WINDOWB_Y_CONTROL                                           0x1bd8
+#define mmCRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL                                     0x1bd8
+#define mmCRTC1_CRTC_CRC0_WINDOWB_Y_CONTROL                                     0x1dd8
+#define mmCRTC2_CRTC_CRC0_WINDOWB_Y_CONTROL                                     0x1fd8
+#define mmCRTC3_CRTC_CRC0_WINDOWB_Y_CONTROL                                     0x41d8
+#define mmCRTC4_CRTC_CRC0_WINDOWB_Y_CONTROL                                     0x43d8
+#define mmCRTC5_CRTC_CRC0_WINDOWB_Y_CONTROL                                     0x45d8
+#define mmCRTC_CRC0_DATA_RG                                                     0x1bd9
+#define mmCRTC0_CRTC_CRC0_DATA_RG                                               0x1bd9
+#define mmCRTC1_CRTC_CRC0_DATA_RG                                               0x1dd9
+#define mmCRTC2_CRTC_CRC0_DATA_RG                                               0x1fd9
+#define mmCRTC3_CRTC_CRC0_DATA_RG                                               0x41d9
+#define mmCRTC4_CRTC_CRC0_DATA_RG                                               0x43d9
+#define mmCRTC5_CRTC_CRC0_DATA_RG                                               0x45d9
+#define mmCRTC_CRC0_DATA_B                                                      0x1bda
+#define mmCRTC0_CRTC_CRC0_DATA_B                                                0x1bda
+#define mmCRTC1_CRTC_CRC0_DATA_B                                                0x1dda
+#define mmCRTC2_CRTC_CRC0_DATA_B                                                0x1fda
+#define mmCRTC3_CRTC_CRC0_DATA_B                                                0x41da
+#define mmCRTC4_CRTC_CRC0_DATA_B                                                0x43da
+#define mmCRTC5_CRTC_CRC0_DATA_B                                                0x45da
+#define mmCRTC_CRC1_WINDOWA_X_CONTROL                                           0x1bdb
+#define mmCRTC0_CRTC_CRC1_WINDOWA_X_CONTROL                                     0x1bdb
+#define mmCRTC1_CRTC_CRC1_WINDOWA_X_CONTROL                                     0x1ddb
+#define mmCRTC2_CRTC_CRC1_WINDOWA_X_CONTROL                                     0x1fdb
+#define mmCRTC3_CRTC_CRC1_WINDOWA_X_CONTROL                                     0x41db
+#define mmCRTC4_CRTC_CRC1_WINDOWA_X_CONTROL                                     0x43db
+#define mmCRTC5_CRTC_CRC1_WINDOWA_X_CONTROL                                     0x45db
+#define mmCRTC_CRC1_WINDOWA_Y_CONTROL                                           0x1bdc
+#define mmCRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL                                     0x1bdc
+#define mmCRTC1_CRTC_CRC1_WINDOWA_Y_CONTROL                                     0x1ddc
+#define mmCRTC2_CRTC_CRC1_WINDOWA_Y_CONTROL                                     0x1fdc
+#define mmCRTC3_CRTC_CRC1_WINDOWA_Y_CONTROL                                     0x41dc
+#define mmCRTC4_CRTC_CRC1_WINDOWA_Y_CONTROL                                     0x43dc
+#define mmCRTC5_CRTC_CRC1_WINDOWA_Y_CONTROL                                     0x45dc
+#define mmCRTC_CRC1_WINDOWB_X_CONTROL                                           0x1bdd
+#define mmCRTC0_CRTC_CRC1_WINDOWB_X_CONTROL                                     0x1bdd
+#define mmCRTC1_CRTC_CRC1_WINDOWB_X_CONTROL                                     0x1ddd
+#define mmCRTC2_CRTC_CRC1_WINDOWB_X_CONTROL                                     0x1fdd
+#define mmCRTC3_CRTC_CRC1_WINDOWB_X_CONTROL                                     0x41dd
+#define mmCRTC4_CRTC_CRC1_WINDOWB_X_CONTROL                                     0x43dd
+#define mmCRTC5_CRTC_CRC1_WINDOWB_X_CONTROL                                     0x45dd
+#define mmCRTC_CRC1_WINDOWB_Y_CONTROL                                           0x1bde
+#define mmCRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL                                     0x1bde
+#define mmCRTC1_CRTC_CRC1_WINDOWB_Y_CONTROL                                     0x1dde
+#define mmCRTC2_CRTC_CRC1_WINDOWB_Y_CONTROL                                     0x1fde
+#define mmCRTC3_CRTC_CRC1_WINDOWB_Y_CONTROL                                     0x41de
+#define mmCRTC4_CRTC_CRC1_WINDOWB_Y_CONTROL                                     0x43de
+#define mmCRTC5_CRTC_CRC1_WINDOWB_Y_CONTROL                                     0x45de
+#define mmCRTC_CRC1_DATA_RG                                                     0x1bdf
+#define mmCRTC0_CRTC_CRC1_DATA_RG                                               0x1bdf
+#define mmCRTC1_CRTC_CRC1_DATA_RG                                               0x1ddf
+#define mmCRTC2_CRTC_CRC1_DATA_RG                                               0x1fdf
+#define mmCRTC3_CRTC_CRC1_DATA_RG                                               0x41df
+#define mmCRTC4_CRTC_CRC1_DATA_RG                                               0x43df
+#define mmCRTC5_CRTC_CRC1_DATA_RG                                               0x45df
+#define mmCRTC_CRC1_DATA_B                                                      0x1be0
+#define mmCRTC0_CRTC_CRC1_DATA_B                                                0x1be0
+#define mmCRTC1_CRTC_CRC1_DATA_B                                                0x1de0
+#define mmCRTC2_CRTC_CRC1_DATA_B                                                0x1fe0
+#define mmCRTC3_CRTC_CRC1_DATA_B                                                0x41e0
+#define mmCRTC4_CRTC_CRC1_DATA_B                                                0x43e0
+#define mmCRTC5_CRTC_CRC1_DATA_B                                                0x45e0
+#define mmCRTC_EXT_TIMING_SYNC_CONTROL                                          0x1be1
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_CONTROL                                    0x1be1
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_CONTROL                                    0x1de1
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_CONTROL                                    0x1fe1
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_CONTROL                                    0x41e1
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_CONTROL                                    0x43e1
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_CONTROL                                    0x45e1
+#define mmCRTC_EXT_TIMING_SYNC_WINDOW_START                                     0x1be2
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_WINDOW_START                               0x1be2
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_WINDOW_START                               0x1de2
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_WINDOW_START                               0x1fe2
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_WINDOW_START                               0x41e2
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_WINDOW_START                               0x43e2
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_WINDOW_START                               0x45e2
+#define mmCRTC_EXT_TIMING_SYNC_WINDOW_END                                       0x1be3
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_WINDOW_END                                 0x1be3
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_WINDOW_END                                 0x1de3
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_WINDOW_END                                 0x1fe3
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_WINDOW_END                                 0x41e3
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_WINDOW_END                                 0x43e3
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_WINDOW_END                                 0x45e3
+#define mmCRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                           0x1be4
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                     0x1be4
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                     0x1de4
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                     0x1fe4
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                     0x41e4
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                     0x43e4
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL                     0x45e4
+#define mmCRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                                0x1be5
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                          0x1be5
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                          0x1de5
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                          0x1fe5
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                          0x41e5
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                          0x43e5
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL                          0x45e5
+#define mmCRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                         0x1be6
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                   0x1be6
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                   0x1de6
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                   0x1fe6
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                   0x41e6
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                   0x43e6
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL                   0x45e6
+#define mmCRTC_STATIC_SCREEN_CONTROL                                            0x1be7
+#define mmCRTC0_CRTC_STATIC_SCREEN_CONTROL                                      0x1be7
+#define mmCRTC1_CRTC_STATIC_SCREEN_CONTROL                                      0x1de7
+#define mmCRTC2_CRTC_STATIC_SCREEN_CONTROL                                      0x1fe7
+#define mmCRTC3_CRTC_STATIC_SCREEN_CONTROL                                      0x41e7
+#define mmCRTC4_CRTC_STATIC_SCREEN_CONTROL                                      0x43e7
+#define mmCRTC5_CRTC_STATIC_SCREEN_CONTROL                                      0x45e7
+#define mmCRTC_3D_STRUCTURE_CONTROL                                             0x1b78
+#define mmCRTC0_CRTC_3D_STRUCTURE_CONTROL                                       0x1b78
+#define mmCRTC1_CRTC_3D_STRUCTURE_CONTROL                                       0x1d78
+#define mmCRTC2_CRTC_3D_STRUCTURE_CONTROL                                       0x1f78
+#define mmCRTC3_CRTC_3D_STRUCTURE_CONTROL                                       0x4178
+#define mmCRTC4_CRTC_3D_STRUCTURE_CONTROL                                       0x4378
+#define mmCRTC5_CRTC_3D_STRUCTURE_CONTROL                                       0x4578
+#define mmCRTC_GSL_VSYNC_GAP                                                    0x1b79
+#define mmCRTC0_CRTC_GSL_VSYNC_GAP                                              0x1b79
+#define mmCRTC1_CRTC_GSL_VSYNC_GAP                                              0x1d79
+#define mmCRTC2_CRTC_GSL_VSYNC_GAP                                              0x1f79
+#define mmCRTC3_CRTC_GSL_VSYNC_GAP                                              0x4179
+#define mmCRTC4_CRTC_GSL_VSYNC_GAP                                              0x4379
+#define mmCRTC5_CRTC_GSL_VSYNC_GAP                                              0x4579
+#define mmCRTC_GSL_WINDOW                                                       0x1b7a
+#define mmCRTC0_CRTC_GSL_WINDOW                                                 0x1b7a
+#define mmCRTC1_CRTC_GSL_WINDOW                                                 0x1d7a
+#define mmCRTC2_CRTC_GSL_WINDOW                                                 0x1f7a
+#define mmCRTC3_CRTC_GSL_WINDOW                                                 0x417a
+#define mmCRTC4_CRTC_GSL_WINDOW                                                 0x437a
+#define mmCRTC5_CRTC_GSL_WINDOW                                                 0x457a
+#define mmCRTC_GSL_CONTROL                                                      0x1b7b
+#define mmCRTC0_CRTC_GSL_CONTROL                                                0x1b7b
+#define mmCRTC1_CRTC_GSL_CONTROL                                                0x1d7b
+#define mmCRTC2_CRTC_GSL_CONTROL                                                0x1f7b
+#define mmCRTC3_CRTC_GSL_CONTROL                                                0x417b
+#define mmCRTC4_CRTC_GSL_CONTROL                                                0x437b
+#define mmCRTC5_CRTC_GSL_CONTROL                                                0x457b
+#define mmCRTC_TEST_DEBUG_INDEX                                                 0x1bc6
+#define mmCRTC0_CRTC_TEST_DEBUG_INDEX                                           0x1bc6
+#define mmCRTC1_CRTC_TEST_DEBUG_INDEX                                           0x1dc6
+#define mmCRTC2_CRTC_TEST_DEBUG_INDEX                                           0x1fc6
+#define mmCRTC3_CRTC_TEST_DEBUG_INDEX                                           0x41c6
+#define mmCRTC4_CRTC_TEST_DEBUG_INDEX                                           0x43c6
+#define mmCRTC5_CRTC_TEST_DEBUG_INDEX                                           0x45c6
+#define mmCRTC_TEST_DEBUG_DATA                                                  0x1bc7
+#define mmCRTC0_CRTC_TEST_DEBUG_DATA                                            0x1bc7
+#define mmCRTC1_CRTC_TEST_DEBUG_DATA                                            0x1dc7
+#define mmCRTC2_CRTC_TEST_DEBUG_DATA                                            0x1fc7
+#define mmCRTC3_CRTC_TEST_DEBUG_DATA                                            0x41c7
+#define mmCRTC4_CRTC_TEST_DEBUG_DATA                                            0x43c7
+#define mmCRTC5_CRTC_TEST_DEBUG_DATA                                            0x45c7
+#define mmDAC_ENABLE                                                            0x16aa
+#define mmDAC_SOURCE_SELECT                                                     0x16ab
+#define mmDAC_CRC_EN                                                            0x16ac
+#define mmDAC_CRC_CONTROL                                                       0x16ad
+#define mmDAC_CRC_SIG_RGB_MASK                                                  0x16ae
+#define mmDAC_CRC_SIG_CONTROL_MASK                                              0x16af
+#define mmDAC_CRC_SIG_RGB                                                       0x16b0
+#define mmDAC_CRC_SIG_CONTROL                                                   0x16b1
+#define mmDAC_SYNC_TRISTATE_CONTROL                                             0x16b2
+#define mmDAC_STEREOSYNC_SELECT                                                 0x16b3
+#define mmDAC_AUTODETECT_CONTROL                                                0x16b4
+#define mmDAC_AUTODETECT_CONTROL2                                               0x16b5
+#define mmDAC_AUTODETECT_CONTROL3                                               0x16b6
+#define mmDAC_AUTODETECT_STATUS                                                 0x16b7
+#define mmDAC_AUTODETECT_INT_CONTROL                                            0x16b8
+#define mmDAC_FORCE_OUTPUT_CNTL                                                 0x16b9
+#define mmDAC_FORCE_DATA                                                        0x16ba
+#define mmDAC_POWERDOWN                                                         0x16bb
+#define mmDAC_CONTROL                                                           0x16bc
+#define mmDAC_COMPARATOR_ENABLE                                                 0x16bd
+#define mmDAC_COMPARATOR_OUTPUT                                                 0x16be
+#define mmDAC_PWR_CNTL                                                          0x16bf
+#define mmDAC_DFT_CONFIG                                                        0x16c0
+#define mmDAC_FIFO_STATUS                                                       0x16c1
+#define mmDAC_TEST_DEBUG_INDEX                                                  0x16c2
+#define mmDAC_TEST_DEBUG_DATA                                                   0x16c3
+#define mmPERFCOUNTER_CNTL                                                      0x170
+#define mmDC_PERFMON0_PERFCOUNTER_CNTL                                          0x170
+#define mmDC_PERFMON1_PERFCOUNTER_CNTL                                          0x358
+#define mmDC_PERFMON2_PERFCOUNTER_CNTL                                          0x364
+#define mmDC_PERFMON3_PERFCOUNTER_CNTL                                          0x18c8
+#define mmDC_PERFMON4_PERFCOUNTER_CNTL                                          0x1b24
+#define mmDC_PERFMON5_PERFCOUNTER_CNTL                                          0x1d24
+#define mmDC_PERFMON6_PERFCOUNTER_CNTL                                          0x1f24
+#define mmDC_PERFMON7_PERFCOUNTER_CNTL                                          0x4124
+#define mmDC_PERFMON8_PERFCOUNTER_CNTL                                          0x4324
+#define mmDC_PERFMON9_PERFCOUNTER_CNTL                                          0x4524
+#define mmDC_PERFMON10_PERFCOUNTER_CNTL                                         0x4724
+#define mmDC_PERFMON11_PERFCOUNTER_CNTL                                         0x59a0
+#define mmDC_PERFMON12_PERFCOUNTER_CNTL                                         0x5f68
+#define mmDC_PERFMON13_PERFCOUNTER_CNTL                                         0x9924
+#define mmPERFCOUNTER_STATE                                                     0x171
+#define mmDC_PERFMON0_PERFCOUNTER_STATE                                         0x171
+#define mmDC_PERFMON1_PERFCOUNTER_STATE                                         0x359
+#define mmDC_PERFMON2_PERFCOUNTER_STATE                                         0x365
+#define mmDC_PERFMON3_PERFCOUNTER_STATE                                         0x18c9
+#define mmDC_PERFMON4_PERFCOUNTER_STATE                                         0x1b25
+#define mmDC_PERFMON5_PERFCOUNTER_STATE                                         0x1d25
+#define mmDC_PERFMON6_PERFCOUNTER_STATE                                         0x1f25
+#define mmDC_PERFMON7_PERFCOUNTER_STATE                                         0x4125
+#define mmDC_PERFMON8_PERFCOUNTER_STATE                                         0x4325
+#define mmDC_PERFMON9_PERFCOUNTER_STATE                                         0x4525
+#define mmDC_PERFMON10_PERFCOUNTER_STATE                                        0x4725
+#define mmDC_PERFMON11_PERFCOUNTER_STATE                                        0x59a1
+#define mmDC_PERFMON12_PERFCOUNTER_STATE                                        0x5f69
+#define mmDC_PERFMON13_PERFCOUNTER_STATE                                        0x9925
+#define mmPERFMON_CNTL                                                          0x173
+#define mmDC_PERFMON0_PERFMON_CNTL                                              0x173
+#define mmDC_PERFMON1_PERFMON_CNTL                                              0x35b
+#define mmDC_PERFMON2_PERFMON_CNTL                                              0x367
+#define mmDC_PERFMON3_PERFMON_CNTL                                              0x18cb
+#define mmDC_PERFMON4_PERFMON_CNTL                                              0x1b27
+#define mmDC_PERFMON5_PERFMON_CNTL                                              0x1d27
+#define mmDC_PERFMON6_PERFMON_CNTL                                              0x1f27
+#define mmDC_PERFMON7_PERFMON_CNTL                                              0x4127
+#define mmDC_PERFMON8_PERFMON_CNTL                                              0x4327
+#define mmDC_PERFMON9_PERFMON_CNTL                                              0x4527
+#define mmDC_PERFMON10_PERFMON_CNTL                                             0x4727
+#define mmDC_PERFMON11_PERFMON_CNTL                                             0x59a3
+#define mmDC_PERFMON12_PERFMON_CNTL                                             0x5f6b
+#define mmDC_PERFMON13_PERFMON_CNTL                                             0x9927
+#define mmPERFMON_CNTL2                                                         0x17a
+#define mmDC_PERFMON0_PERFMON_CNTL2                                             0x17a
+#define mmDC_PERFMON1_PERFMON_CNTL2                                             0x362
+#define mmDC_PERFMON2_PERFMON_CNTL2                                             0x36e
+#define mmDC_PERFMON3_PERFMON_CNTL2                                             0x18d2
+#define mmDC_PERFMON4_PERFMON_CNTL2                                             0x1b2e
+#define mmDC_PERFMON5_PERFMON_CNTL2                                             0x1d2e
+#define mmDC_PERFMON6_PERFMON_CNTL2                                             0x1f2e
+#define mmDC_PERFMON7_PERFMON_CNTL2                                             0x412e
+#define mmDC_PERFMON8_PERFMON_CNTL2                                             0x432e
+#define mmDC_PERFMON9_PERFMON_CNTL2                                             0x452e
+#define mmDC_PERFMON10_PERFMON_CNTL2                                            0x472e
+#define mmDC_PERFMON11_PERFMON_CNTL2                                            0x59aa
+#define mmDC_PERFMON12_PERFMON_CNTL2                                            0x5f72
+#define mmDC_PERFMON13_PERFMON_CNTL2                                            0x992e
+#define mmPERFMON_CVALUE_INT_MISC                                               0x172
+#define mmDC_PERFMON0_PERFMON_CVALUE_INT_MISC                                   0x172
+#define mmDC_PERFMON1_PERFMON_CVALUE_INT_MISC                                   0x35a
+#define mmDC_PERFMON2_PERFMON_CVALUE_INT_MISC                                   0x366
+#define mmDC_PERFMON3_PERFMON_CVALUE_INT_MISC                                   0x18ca
+#define mmDC_PERFMON4_PERFMON_CVALUE_INT_MISC                                   0x1b26
+#define mmDC_PERFMON5_PERFMON_CVALUE_INT_MISC                                   0x1d26
+#define mmDC_PERFMON6_PERFMON_CVALUE_INT_MISC                                   0x1f26
+#define mmDC_PERFMON7_PERFMON_CVALUE_INT_MISC                                   0x4126
+#define mmDC_PERFMON8_PERFMON_CVALUE_INT_MISC                                   0x4326
+#define mmDC_PERFMON9_PERFMON_CVALUE_INT_MISC                                   0x4526
+#define mmDC_PERFMON10_PERFMON_CVALUE_INT_MISC                                  0x4726
+#define mmDC_PERFMON11_PERFMON_CVALUE_INT_MISC                                  0x59a2
+#define mmDC_PERFMON12_PERFMON_CVALUE_INT_MISC                                  0x5f6a
+#define mmDC_PERFMON13_PERFMON_CVALUE_INT_MISC                                  0x9926
+#define mmPERFMON_CVALUE_LOW                                                    0x174
+#define mmDC_PERFMON0_PERFMON_CVALUE_LOW                                        0x174
+#define mmDC_PERFMON1_PERFMON_CVALUE_LOW                                        0x35c
+#define mmDC_PERFMON2_PERFMON_CVALUE_LOW                                        0x368
+#define mmDC_PERFMON3_PERFMON_CVALUE_LOW                                        0x18cc
+#define mmDC_PERFMON4_PERFMON_CVALUE_LOW                                        0x1b28
+#define mmDC_PERFMON5_PERFMON_CVALUE_LOW                                        0x1d28
+#define mmDC_PERFMON6_PERFMON_CVALUE_LOW                                        0x1f28
+#define mmDC_PERFMON7_PERFMON_CVALUE_LOW                                        0x4128
+#define mmDC_PERFMON8_PERFMON_CVALUE_LOW                                        0x4328
+#define mmDC_PERFMON9_PERFMON_CVALUE_LOW                                        0x4528
+#define mmDC_PERFMON10_PERFMON_CVALUE_LOW                                       0x4728
+#define mmDC_PERFMON11_PERFMON_CVALUE_LOW                                       0x59a4
+#define mmDC_PERFMON12_PERFMON_CVALUE_LOW                                       0x5f6c
+#define mmDC_PERFMON13_PERFMON_CVALUE_LOW                                       0x9928
+#define mmPERFMON_HI                                                            0x175
+#define mmDC_PERFMON0_PERFMON_HI                                                0x175
+#define mmDC_PERFMON1_PERFMON_HI                                                0x35d
+#define mmDC_PERFMON2_PERFMON_HI                                                0x369
+#define mmDC_PERFMON3_PERFMON_HI                                                0x18cd
+#define mmDC_PERFMON4_PERFMON_HI                                                0x1b29
+#define mmDC_PERFMON5_PERFMON_HI                                                0x1d29
+#define mmDC_PERFMON6_PERFMON_HI                                                0x1f29
+#define mmDC_PERFMON7_PERFMON_HI                                                0x4129
+#define mmDC_PERFMON8_PERFMON_HI                                                0x4329
+#define mmDC_PERFMON9_PERFMON_HI                                                0x4529
+#define mmDC_PERFMON10_PERFMON_HI                                               0x4729
+#define mmDC_PERFMON11_PERFMON_HI                                               0x59a5
+#define mmDC_PERFMON12_PERFMON_HI                                               0x5f6d
+#define mmDC_PERFMON13_PERFMON_HI                                               0x9929
+#define mmPERFMON_LOW                                                           0x176
+#define mmDC_PERFMON0_PERFMON_LOW                                               0x176
+#define mmDC_PERFMON1_PERFMON_LOW                                               0x35e
+#define mmDC_PERFMON2_PERFMON_LOW                                               0x36a
+#define mmDC_PERFMON3_PERFMON_LOW                                               0x18ce
+#define mmDC_PERFMON4_PERFMON_LOW                                               0x1b2a
+#define mmDC_PERFMON5_PERFMON_LOW                                               0x1d2a
+#define mmDC_PERFMON6_PERFMON_LOW                                               0x1f2a
+#define mmDC_PERFMON7_PERFMON_LOW                                               0x412a
+#define mmDC_PERFMON8_PERFMON_LOW                                               0x432a
+#define mmDC_PERFMON9_PERFMON_LOW                                               0x452a
+#define mmDC_PERFMON10_PERFMON_LOW                                              0x472a
+#define mmDC_PERFMON11_PERFMON_LOW                                              0x59a6
+#define mmDC_PERFMON12_PERFMON_LOW                                              0x5f6e
+#define mmDC_PERFMON13_PERFMON_LOW                                              0x992a
+#define mmPERFMON_TEST_DEBUG_INDEX                                              0x177
+#define mmDC_PERFMON0_PERFMON_TEST_DEBUG_INDEX                                  0x177
+#define mmDC_PERFMON1_PERFMON_TEST_DEBUG_INDEX                                  0x35f
+#define mmDC_PERFMON2_PERFMON_TEST_DEBUG_INDEX                                  0x36b
+#define mmDC_PERFMON3_PERFMON_TEST_DEBUG_INDEX                                  0x18cf
+#define mmDC_PERFMON4_PERFMON_TEST_DEBUG_INDEX                                  0x1b2b
+#define mmDC_PERFMON5_PERFMON_TEST_DEBUG_INDEX                                  0x1d2b
+#define mmDC_PERFMON6_PERFMON_TEST_DEBUG_INDEX                                  0x1f2b
+#define mmDC_PERFMON7_PERFMON_TEST_DEBUG_INDEX                                  0x412b
+#define mmDC_PERFMON8_PERFMON_TEST_DEBUG_INDEX                                  0x432b
+#define mmDC_PERFMON9_PERFMON_TEST_DEBUG_INDEX                                  0x452b
+#define mmDC_PERFMON10_PERFMON_TEST_DEBUG_INDEX                                 0x472b
+#define mmDC_PERFMON11_PERFMON_TEST_DEBUG_INDEX                                 0x59a7
+#define mmDC_PERFMON12_PERFMON_TEST_DEBUG_INDEX                                 0x5f6f
+#define mmDC_PERFMON13_PERFMON_TEST_DEBUG_INDEX                                 0x992b
+#define mmPERFMON_TEST_DEBUG_DATA                                               0x178
+#define mmDC_PERFMON0_PERFMON_TEST_DEBUG_DATA                                   0x178
+#define mmDC_PERFMON1_PERFMON_TEST_DEBUG_DATA                                   0x360
+#define mmDC_PERFMON2_PERFMON_TEST_DEBUG_DATA                                   0x36c
+#define mmDC_PERFMON3_PERFMON_TEST_DEBUG_DATA                                   0x18d0
+#define mmDC_PERFMON4_PERFMON_TEST_DEBUG_DATA                                   0x1b2c
+#define mmDC_PERFMON5_PERFMON_TEST_DEBUG_DATA                                   0x1d2c
+#define mmDC_PERFMON6_PERFMON_TEST_DEBUG_DATA                                   0x1f2c
+#define mmDC_PERFMON7_PERFMON_TEST_DEBUG_DATA                                   0x412c
+#define mmDC_PERFMON8_PERFMON_TEST_DEBUG_DATA                                   0x432c
+#define mmDC_PERFMON9_PERFMON_TEST_DEBUG_DATA                                   0x452c
+#define mmDC_PERFMON10_PERFMON_TEST_DEBUG_DATA                                  0x472c
+#define mmDC_PERFMON11_PERFMON_TEST_DEBUG_DATA                                  0x59a8
+#define mmDC_PERFMON12_PERFMON_TEST_DEBUG_DATA                                  0x5f70
+#define mmDC_PERFMON13_PERFMON_TEST_DEBUG_DATA                                  0x992c
+#define mmREFCLK_CNTL                                                           0x109
+#define mmDCCG_CBUS_ANTIGLITCH_RESETB                                           0x15c
+#define mmDCCG_CBUS_SPARE                                                       0x15d
+#define mmDCCG_CBUS_WRCMD_DELAY                                                 0x110
+#define mmDPREFCLK_CNTL                                                         0x118
+#define mmDCE_VERSION                                                           0x11e
+#define mmAVSYNC_COUNTER_WRITE                                                  0x12a
+#define mmAVSYNC_COUNTER_CONTROL                                                0x12b
+#define mmAVSYNC_COUNTER_READ                                                   0x12f
+#define mmDCCG_GTC_CNTL                                                         0x120
+#define mmDCCG_GTC_DTO_INCR                                                     0x121
+#define mmDCCG_GTC_DTO_MODULO                                                   0x122
+#define mmDCCG_GTC_CURRENT                                                      0x123
+#define mmDCCG_DS_DTO_INCR                                                      0x113
+#define mmDCCG_DS_DTO_MODULO                                                    0x114
+#define mmDCCG_DS_CNTL                                                          0x115
+#define mmDCCG_DS_HW_CAL_INTERVAL                                               0x116
+#define mmDCCG_DS_DEBUG_CNTL                                                    0x112
+#define mmDMCU_SMU_INTERRUPT_CNTL                                               0x12c
+#define mmSMU_CONTROL                                                           0x12d
+#define mmSMU_INTERRUPT_CONTROL                                                 0x12e
+#define mmDAC_CLK_ENABLE                                                        0x128
+#define mmDVO_CLK_ENABLE                                                        0x129
+#define mmDCCG_GATE_DISABLE_CNTL                                                0x134
+#define mmDCCG_GATE_DISABLE_CNTL2                                               0x13c
+#define mmDISPCLK_CGTT_BLK_CTRL_REG                                             0x135
+#define mmSCLK_CGTT_BLK_CTRL_REG                                                0x136
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG                                            0x108
+#define mmREFCLK_CGTT_BLK_CTRL_REG                                              0x10b
+#define mmSYMCLK_CGTT_BLK_CTRL_REG                                              0x13d
+#define mmDCCG_CAC_STATUS                                                       0x137
+#define mmPIXCLK0_RESYNC_CNTL                                                   0x13a
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL                                            0x100
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL                                            0x101
+#define mmPHYPLLC_PIXCLK_RESYNC_CNTL                                            0x102
+#define mmPHYPLLD_PIXCLK_RESYNC_CNTL                                            0x103
+#define mmPHYPLLE_PIXCLK_RESYNC_CNTL                                            0x10c
+#define mmPHYPLLF_PIXCLK_RESYNC_CNTL                                            0x13e
+#define mmMICROSECOND_TIME_BASE_DIV                                             0x13b
+#define mmDCCG_DISP_CNTL_REG                                                    0x13f
+#define mmMILLISECOND_TIME_BASE_DIV                                             0x130
+#define mmDISPCLK_FREQ_CHANGE_CNTL                                              0x131
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL                                            0x132
+#define mmDCCG_PERFMON_CNTL                                                     0x133
+#define mmDCCG_PERFMON_CNTL2                                                    0x10e
+#define mmCRTC0_PIXEL_RATE_CNTL                                                 0x140
+#define mmDP_DTO0_PHASE                                                         0x141
+#define mmDP_DTO0_MODULO                                                        0x142
+#define mmCRTC0_PHYPLL_PIXEL_RATE_CNTL                                          0x143
+#define mmCRTC1_PIXEL_RATE_CNTL                                                 0x144
+#define mmDP_DTO1_PHASE                                                         0x145
+#define mmDP_DTO1_MODULO                                                        0x146
+#define mmCRTC1_PHYPLL_PIXEL_RATE_CNTL                                          0x147
+#define mmCRTC2_PIXEL_RATE_CNTL                                                 0x148
+#define mmDP_DTO2_PHASE                                                         0x149
+#define mmDP_DTO2_MODULO                                                        0x14a
+#define mmCRTC2_PHYPLL_PIXEL_RATE_CNTL                                          0x14b
+#define mmCRTC3_PIXEL_RATE_CNTL                                                 0x14c
+#define mmDP_DTO3_PHASE                                                         0x14d
+#define mmDP_DTO3_MODULO                                                        0x14e
+#define mmCRTC3_PHYPLL_PIXEL_RATE_CNTL                                          0x14f
+#define mmCRTC4_PIXEL_RATE_CNTL                                                 0x150
+#define mmDP_DTO4_PHASE                                                         0x151
+#define mmDP_DTO4_MODULO                                                        0x152
+#define mmCRTC4_PHYPLL_PIXEL_RATE_CNTL                                          0x153
+#define mmCRTC5_PIXEL_RATE_CNTL                                                 0x154
+#define mmDP_DTO5_PHASE                                                         0x155
+#define mmDP_DTO5_MODULO                                                        0x156
+#define mmCRTC5_PHYPLL_PIXEL_RATE_CNTL                                          0x157
+#define mmDCCG_SOFT_RESET                                                       0x15f
+#define mmSYMCLKA_CLOCK_ENABLE                                                  0x160
+#define mmSYMCLKB_CLOCK_ENABLE                                                  0x161
+#define mmSYMCLKC_CLOCK_ENABLE                                                  0x162
+#define mmSYMCLKD_CLOCK_ENABLE                                                  0x163
+#define mmSYMCLKE_CLOCK_ENABLE                                                  0x164
+#define mmSYMCLKF_CLOCK_ENABLE                                                  0x165
+#define mmDPDBG_CLK_FORCE_CONTROL                                               0x10d
+#define mmDCCG_AUDIO_DTO_SOURCE                                                 0x16b
+#define mmDCCG_AUDIO_DTO0_PHASE                                                 0x16c
+#define mmDCCG_AUDIO_DTO0_MODULE                                                0x16d
+#define mmDCCG_AUDIO_DTO1_PHASE                                                 0x16e
+#define mmDCCG_AUDIO_DTO1_MODULE                                                0x16f
+#define mmDCCG_TEST_DEBUG_INDEX                                                 0x17c
+#define mmDCCG_TEST_DEBUG_DATA                                                  0x17d
+#define mmDCCG_TEST_CLK_SEL                                                     0x17e
+#define mmCPLL_MACRO_CNTL_RESERVED0                                             0x5fd0
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED0                                  0x5fd0
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED0                                  0x5fdc
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED0                                  0x5fe8
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED0                                  0x5ff4
+#define mmCPLL_MACRO_CNTL_RESERVED1                                             0x5fd1
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED1                                  0x5fd1
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED1                                  0x5fdd
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED1                                  0x5fe9
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED1                                  0x5ff5
+#define mmCPLL_MACRO_CNTL_RESERVED2                                             0x5fd2
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED2                                  0x5fd2
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED2                                  0x5fde
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED2                                  0x5fea
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED2                                  0x5ff6
+#define mmCPLL_MACRO_CNTL_RESERVED3                                             0x5fd3
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED3                                  0x5fd3
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED3                                  0x5fdf
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED3                                  0x5feb
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED3                                  0x5ff7
+#define mmCPLL_MACRO_CNTL_RESERVED4                                             0x5fd4
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED4                                  0x5fd4
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED4                                  0x5fe0
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED4                                  0x5fec
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED4                                  0x5ff8
+#define mmCPLL_MACRO_CNTL_RESERVED5                                             0x5fd5
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED5                                  0x5fd5
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED5                                  0x5fe1
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED5                                  0x5fed
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED5                                  0x5ff9
+#define mmCPLL_MACRO_CNTL_RESERVED6                                             0x5fd6
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED6                                  0x5fd6
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED6                                  0x5fe2
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED6                                  0x5fee
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED6                                  0x5ffa
+#define mmCPLL_MACRO_CNTL_RESERVED7                                             0x5fd7
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED7                                  0x5fd7
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED7                                  0x5fe3
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED7                                  0x5fef
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED7                                  0x5ffb
+#define mmCPLL_MACRO_CNTL_RESERVED8                                             0x5fd8
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED8                                  0x5fd8
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED8                                  0x5fe4
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED8                                  0x5ff0
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED8                                  0x5ffc
+#define mmCPLL_MACRO_CNTL_RESERVED9                                             0x5fd9
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED9                                  0x5fd9
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED9                                  0x5fe5
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED9                                  0x5ff1
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED9                                  0x5ffd
+#define mmCPLL_MACRO_CNTL_RESERVED10                                            0x5fda
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED10                                 0x5fda
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED10                                 0x5fe6
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED10                                 0x5ff2
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED10                                 0x5ffe
+#define mmCPLL_MACRO_CNTL_RESERVED11                                            0x5fdb
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED11                                 0x5fdb
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED11                                 0x5fe7
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED11                                 0x5ff3
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED11                                 0x5fff
+#define mmPLL_MACRO_CNTL_RESERVED0                                              0x1700
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED0                                    0x1700
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED0                                    0x172a
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED0                                    0x1754
+#define mmPLL_MACRO_CNTL_RESERVED1                                              0x1701
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED1                                    0x1701
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED1                                    0x172b
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED1                                    0x1755
+#define mmPLL_MACRO_CNTL_RESERVED2                                              0x1702
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED2                                    0x1702
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED2                                    0x172c
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED2                                    0x1756
+#define mmPLL_MACRO_CNTL_RESERVED3                                              0x1703
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED3                                    0x1703
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED3                                    0x172d
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED3                                    0x1757
+#define mmPLL_MACRO_CNTL_RESERVED4                                              0x1704
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED4                                    0x1704
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED4                                    0x172e
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED4                                    0x1758
+#define mmPLL_MACRO_CNTL_RESERVED5                                              0x1705
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED5                                    0x1705
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED5                                    0x172f
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED5                                    0x1759
+#define mmPLL_MACRO_CNTL_RESERVED6                                              0x1706
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED6                                    0x1706
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED6                                    0x1730
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED6                                    0x175a
+#define mmPLL_MACRO_CNTL_RESERVED7                                              0x1707
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED7                                    0x1707
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED7                                    0x1731
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED7                                    0x175b
+#define mmPLL_MACRO_CNTL_RESERVED8                                              0x1708
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED8                                    0x1708
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED8                                    0x1732
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED8                                    0x175c
+#define mmPLL_MACRO_CNTL_RESERVED9                                              0x1709
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED9                                    0x1709
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED9                                    0x1733
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED9                                    0x175d
+#define mmPLL_MACRO_CNTL_RESERVED10                                             0x170a
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED10                                   0x170a
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED10                                   0x1734
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED10                                   0x175e
+#define mmPLL_MACRO_CNTL_RESERVED11                                             0x170b
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED11                                   0x170b
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED11                                   0x1735
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED11                                   0x175f
+#define mmPLL_MACRO_CNTL_RESERVED12                                             0x170c
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED12                                   0x170c
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED12                                   0x1736
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED12                                   0x1760
+#define mmPLL_MACRO_CNTL_RESERVED13                                             0x170d
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED13                                   0x170d
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED13                                   0x1737
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED13                                   0x1761
+#define mmPLL_MACRO_CNTL_RESERVED14                                             0x170e
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED14                                   0x170e
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED14                                   0x1738
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED14                                   0x1762
+#define mmPLL_MACRO_CNTL_RESERVED15                                             0x170f
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED15                                   0x170f
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED15                                   0x1739
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED15                                   0x1763
+#define mmPLL_MACRO_CNTL_RESERVED16                                             0x1710
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED16                                   0x1710
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED16                                   0x173a
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED16                                   0x1764
+#define mmPLL_MACRO_CNTL_RESERVED17                                             0x1711
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED17                                   0x1711
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED17                                   0x173b
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED17                                   0x1765
+#define mmPLL_MACRO_CNTL_RESERVED18                                             0x1712
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED18                                   0x1712
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED18                                   0x173c
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED18                                   0x1766
+#define mmPLL_MACRO_CNTL_RESERVED19                                             0x1713
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED19                                   0x1713
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED19                                   0x173d
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED19                                   0x1767
+#define mmPLL_MACRO_CNTL_RESERVED20                                             0x1714
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED20                                   0x1714
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED20                                   0x173e
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED20                                   0x1768
+#define mmPLL_MACRO_CNTL_RESERVED21                                             0x1715
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED21                                   0x1715
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED21                                   0x173f
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED21                                   0x1769
+#define mmPLL_MACRO_CNTL_RESERVED22                                             0x1716
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED22                                   0x1716
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED22                                   0x1740
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED22                                   0x176a
+#define mmPLL_MACRO_CNTL_RESERVED23                                             0x1717
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED23                                   0x1717
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED23                                   0x1741
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED23                                   0x176b
+#define mmPLL_MACRO_CNTL_RESERVED24                                             0x1718
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED24                                   0x1718
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED24                                   0x1742
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED24                                   0x176c
+#define mmPLL_MACRO_CNTL_RESERVED25                                             0x1719
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED25                                   0x1719
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED25                                   0x1743
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED25                                   0x176d
+#define mmPLL_MACRO_CNTL_RESERVED26                                             0x171a
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED26                                   0x171a
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED26                                   0x1744
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED26                                   0x176e
+#define mmPLL_MACRO_CNTL_RESERVED27                                             0x171b
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED27                                   0x171b
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED27                                   0x1745
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED27                                   0x176f
+#define mmPLL_MACRO_CNTL_RESERVED28                                             0x171c
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED28                                   0x171c
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED28                                   0x1746
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED28                                   0x1770
+#define mmPLL_MACRO_CNTL_RESERVED29                                             0x171d
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED29                                   0x171d
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED29                                   0x1747
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED29                                   0x1771
+#define mmPLL_MACRO_CNTL_RESERVED30                                             0x171e
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED30                                   0x171e
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED30                                   0x1748
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED30                                   0x1772
+#define mmPLL_MACRO_CNTL_RESERVED31                                             0x171f
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED31                                   0x171f
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED31                                   0x1749
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED31                                   0x1773
+#define mmPLL_MACRO_CNTL_RESERVED32                                             0x1720
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED32                                   0x1720
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED32                                   0x174a
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED32                                   0x1774
+#define mmPLL_MACRO_CNTL_RESERVED33                                             0x1721
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED33                                   0x1721
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED33                                   0x174b
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED33                                   0x1775
+#define mmPLL_MACRO_CNTL_RESERVED34                                             0x1722
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED34                                   0x1722
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED34                                   0x174c
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED34                                   0x1776
+#define mmPLL_MACRO_CNTL_RESERVED35                                             0x1723
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED35                                   0x1723
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED35                                   0x174d
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED35                                   0x1777
+#define mmPLL_MACRO_CNTL_RESERVED36                                             0x1724
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED36                                   0x1724
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED36                                   0x174e
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED36                                   0x1778
+#define mmPLL_MACRO_CNTL_RESERVED37                                             0x1725
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED37                                   0x1725
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED37                                   0x174f
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED37                                   0x1779
+#define mmPLL_MACRO_CNTL_RESERVED38                                             0x1726
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED38                                   0x1726
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED38                                   0x1750
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED38                                   0x177a
+#define mmPLL_MACRO_CNTL_RESERVED39                                             0x1727
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED39                                   0x1727
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED39                                   0x1751
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED39                                   0x177b
+#define mmPLL_MACRO_CNTL_RESERVED40                                             0x1728
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED40                                   0x1728
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED40                                   0x1752
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED40                                   0x177c
+#define mmPLL_MACRO_CNTL_RESERVED41                                             0x1729
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED41                                   0x1729
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED41                                   0x1753
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED41                                   0x177d
+#define mmDENTIST_DISPCLK_CNTL                                                  0x124
+#define mmDCDEBUG_BUS_CLK1_SEL                                                  0x16c4
+#define mmDCDEBUG_BUS_CLK2_SEL                                                  0x16c5
+#define mmDCDEBUG_BUS_CLK3_SEL                                                  0x16c6
+#define mmDCDEBUG_BUS_CLK4_SEL                                                  0x16c7
+#define mmDCDEBUG_BUS_CLK5_SEL                                                  0x16c8
+#define mmDCDEBUG_OUT_PIN_OVERRIDE                                              0x16c9
+#define mmDCDEBUG_OUT_CNTL                                                      0x16ca
+#define mmDCDEBUG_OUT_DATA                                                      0x16cb
+#define mmDMIF_CONTROL                                                          0x2f6
+#define mmDMIF_STATUS                                                           0x2f7
+#define mmDMIFV_STATUS                                                          0x2f5
+#define mmDMIF_HW_DEBUG                                                         0x2f8
+#define mmDMIF_ARBITRATION_CONTROL                                              0x2f9
+#define mmPIPE0_ARBITRATION_CONTROL3                                            0x2fa
+#define mmPIPE1_ARBITRATION_CONTROL3                                            0x2fb
+#define mmPIPE2_ARBITRATION_CONTROL3                                            0x2fc
+#define mmPIPE3_ARBITRATION_CONTROL3                                            0x2fd
+#define mmPIPE4_ARBITRATION_CONTROL3                                            0x2fe
+#define mmPIPE5_ARBITRATION_CONTROL3                                            0x2ff
+#define mmPIPE6_ARBITRATION_CONTROL3                                            0x32a
+#define mmPIPE7_ARBITRATION_CONTROL3                                            0x32b
+#define mmDMIF_P_VMID                                                           0x300
+#define mmDMIF_URG_OVERRIDE                                                     0x329
+#define mmDMIF_TEST_DEBUG_INDEX                                                 0x301
+#define mmDMIF_TEST_DEBUG_DATA                                                  0x302
+#define ixDMIF_DEBUG02_CORE0                                                    0x2
+#define ixDMIF_DEBUG02_CORE1                                                    0xa
+#define mmDMIF_ADDR_CALC                                                        0x303
+#define mmDMIF_STATUS2                                                          0x304
+#define mmPIPE0_MAX_REQUESTS                                                    0x305
+#define mmPIPE1_MAX_REQUESTS                                                    0x306
+#define mmPIPE2_MAX_REQUESTS                                                    0x307
+#define mmPIPE3_MAX_REQUESTS                                                    0x308
+#define mmPIPE4_MAX_REQUESTS                                                    0x309
+#define mmPIPE5_MAX_REQUESTS                                                    0x30a
+#define mmPIPE6_MAX_REQUESTS                                                    0x32c
+#define mmPIPE7_MAX_REQUESTS                                                    0x32d
+#define mmDVMM_REG_RD_STATUS                                                    0x32e
+#define mmDVMM_REG_RD_DATA                                                      0x32f
+#define mmDVMM_PTE_REQ                                                          0x330
+#define mmDVMM_CNTL                                                             0x331
+#define mmDVMM_FAULT_STATUS                                                     0x332
+#define mmDVMM_FAULT_ADDR                                                       0x333
+#define mmLOW_POWER_TILING_CONTROL                                              0x30b
+#define mmMCIF_CONTROL                                                          0x30c
+#define mmMCIF_WRITE_COMBINE_CONTROL                                            0x30d
+#define mmMCIF_TEST_DEBUG_INDEX                                                 0x30e
+#define mmMCIF_TEST_DEBUG_DATA                                                  0x30f
+#define ixIDDCCIF02_DBG_DCCIF_C                                                 0x9
+#define ixIDDCCIF04_DBG_DCCIF_E                                                 0xb
+#define ixIDDCCIF05_DBG_DCCIF_F                                                 0xc
+#define mmMCIF_VMID                                                             0x310
+#define mmMCIF_MEM_CONTROL                                                      0x311
+#define mmCC_DC_PIPE_DIS                                                        0x312
+#define mmMC_DC_INTERFACE_NACK_STATUS                                           0x313
+#define mmRBBMIF_TIMEOUT                                                        0x314
+#define mmRBBMIF_STATUS                                                         0x315
+#define mmRBBMIF_TIMEOUT_DIS                                                    0x316
+#define mmRBBMIF_STATUS_FLAG                                                    0x327
+#define mmDCI_MEM_PWR_STATUS                                                    0x317
+#define mmDCI_MEM_PWR_STATUS2                                                   0x318
+#define mmDCI_MEM_PWR_STATUS3                                                   0x33d
+#define mmDCI_CLK_CNTL                                                          0x319
+#define mmDCI_CLK_RAMP_CNTL                                                     0x31a
+#define mmDCI_MEM_PWR_CNTL                                                      0x31b
+#define mmDCI_MEM_PWR_CNTL2                                                     0x31c
+#define mmDCI_MEM_PWR_CNTL3                                                     0x31d
+#define mmDCI_MEM_PWR_CNTL4                                                     0x33b
+#define mmDVMM_PTE_PGMEM_CONTROL                                                0x335
+#define mmDVMM_PTE_PGMEM_STATE                                                  0x336
+#define mmDCI_SOFT_RESET                                                        0x328
+#define mmDCI_MISC                                                              0x33c
+#define mmDCI_TEST_DEBUG_INDEX                                                  0x31e
+#define mmDCI_TEST_DEBUG_DATA                                                   0x31f
+#define mmDCI_DEBUG_CONFIG                                                      0x320
+#define mmPIPE0_DMIF_BUFFER_CONTROL                                             0x321
+#define mmPIPE1_DMIF_BUFFER_CONTROL                                             0x322
+#define mmPIPE2_DMIF_BUFFER_CONTROL                                             0x323
+#define mmPIPE3_DMIF_BUFFER_CONTROL                                             0x324
+#define mmPIPE4_DMIF_BUFFER_CONTROL                                             0x325
+#define mmPIPE5_DMIF_BUFFER_CONTROL                                             0x326
+#define mmDC_GENERICA                                                           0x4800
+#define mmDC_GENERICB                                                           0x4801
+#define mmDC_PAD_EXTERN_SIG                                                     0x4802
+#define mmDC_REF_CLK_CNTL                                                       0x4803
+#define mmDC_GPIO_DEBUG                                                         0x4804
+#define mmUNIPHYA_LINK_CNTL                                                     0x4805
+#define mmUNIPHYB_LINK_CNTL                                                     0x4807
+#define mmUNIPHYC_LINK_CNTL                                                     0x4809
+#define mmUNIPHYD_LINK_CNTL                                                     0x480b
+#define mmUNIPHYE_LINK_CNTL                                                     0x480d
+#define mmUNIPHYF_LINK_CNTL                                                     0x480f
+#define mmUNIPHYG_LINK_CNTL                                                     0x4811
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL                                             0x4806
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL                                             0x4808
+#define mmUNIPHYC_CHANNEL_XBAR_CNTL                                             0x480a
+#define mmUNIPHYD_CHANNEL_XBAR_CNTL                                             0x480c
+#define mmUNIPHYE_CHANNEL_XBAR_CNTL                                             0x480e
+#define mmUNIPHYF_CHANNEL_XBAR_CNTL                                             0x4810
+#define mmUNIPHYG_CHANNEL_XBAR_CNTL                                             0x4812
+#define mmUNIPHYLPA_LINK_CNTL                                                   0x4847
+#define mmUNIPHYLPB_LINK_CNTL                                                   0x4848
+#define mmUNIPHYLPA_CHANNEL_XBAR_CNTL                                           0x4849
+#define mmUNIPHYLPB_CHANNEL_XBAR_CNTL                                           0x484a
+#define mmUNIPHY_IMPCAL_LINKA                                                   0x4838
+#define mmUNIPHY_IMPCAL_LINKB                                                   0x4839
+#define mmUNIPHY_IMPCAL_LINKC                                                   0x483f
+#define mmUNIPHY_IMPCAL_LINKD                                                   0x4840
+#define mmUNIPHY_IMPCAL_LINKE                                                   0x4843
+#define mmUNIPHY_IMPCAL_LINKF                                                   0x4844
+#define mmUNIPHY_IMPCAL_PERIOD                                                  0x483a
+#define mmAUXP_IMPCAL                                                           0x483b
+#define mmAUXN_IMPCAL                                                           0x483c
+#define mmDCIO_IMPCAL_CNTL                                                      0x483d
+#define mmUNIPHY_IMPCAL_PSW_AB                                                  0x483e
+#define mmDCIO_IMPCAL_CNTL_CD                                                   0x4841
+#define mmUNIPHY_IMPCAL_PSW_CD                                                  0x4842
+#define mmDCIO_IMPCAL_CNTL_EF                                                   0x4845
+#define mmUNIPHY_IMPCAL_PSW_EF                                                  0x4846
+#define mmDCIO_WRCMD_DELAY                                                      0x4816
+#define mmDC_PINSTRAPS                                                          0x4818
+#define mmDC_DVODATA_CONFIG                                                     0x481a
+#define mmLVTMA_PWRSEQ_CNTL                                                     0x481b
+#define mmLVTMA_PWRSEQ_STATE                                                    0x481c
+#define mmLVTMA_PWRSEQ_REF_DIV                                                  0x481d
+#define mmLVTMA_PWRSEQ_DELAY1                                                   0x481e
+#define mmLVTMA_PWRSEQ_DELAY2                                                   0x481f
+#define mmBL_PWM_CNTL                                                           0x4820
+#define mmBL_PWM_CNTL2                                                          0x4821
+#define mmBL_PWM_PERIOD_CNTL                                                    0x4822
+#define mmBL_PWM_GRP1_REG_LOCK                                                  0x4823
+#define mmDCIO_GSL_GENLK_PAD_CNTL                                               0x4824
+#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL                                            0x4825
+#define mmDCIO_GSL0_CNTL                                                        0x4826
+#define mmDCIO_GSL1_CNTL                                                        0x4827
+#define mmDCIO_GSL2_CNTL                                                        0x4828
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE                                  0x4829
+#define mmDC_GPU_TIMER_START_POSITION_P_FLIP                                    0x482a
+#define mmDC_GPU_TIMER_READ                                                     0x482b
+#define mmDC_GPU_TIMER_READ_CNTL                                                0x482c
+#define mmDCIO_CLOCK_CNTL                                                       0x482d
+#define mmDCIO_DEBUG                                                            0x482f
+#define mmDCO_DCFE_EXT_VSYNC_CNTL                                               0x4830
+#define mmDBG_OUT_CNTL                                                          0x4834
+#define mmDCIO_DEBUG_CONFIG                                                     0x4835
+#define mmDCIO_SOFT_RESET                                                       0x4836
+#define mmDCIO_DPHY_SEL                                                         0x4837
+#define mmDCIO_DPCS_TX_INTERRUPT                                                0x484b
+#define mmDCIO_DPCS_RX_INTERRUPT                                                0x484c
+#define mmDCIO_SEMAPHORE0                                                       0x484d
+#define mmDCIO_SEMAPHORE1                                                       0x484e
+#define mmDCIO_SEMAPHORE2                                                       0x484f
+#define mmDCIO_SEMAPHORE3                                                       0x4850
+#define mmDCIO_SEMAPHORE4                                                       0x4851
+#define mmDCIO_SEMAPHORE5                                                       0x4852
+#define mmDCIO_SEMAPHORE6                                                       0x4853
+#define mmDCIO_SEMAPHORE7                                                       0x4854
+#define mmDCIO_TEST_DEBUG_INDEX                                                 0x4831
+#define mmDCIO_TEST_DEBUG_DATA                                                  0x4832
+#define ixDCIO_DEBUG1                                                           0x1
+#define ixDCIO_DEBUG2                                                           0x2
+#define ixDCIO_DEBUG3                                                           0x3
+#define ixDCIO_DEBUG4                                                           0x4
+#define ixDCIO_DEBUG5                                                           0x5
+#define ixDCIO_DEBUG6                                                           0x6
+#define ixDCIO_DEBUG7                                                           0x7
+#define ixDCIO_DEBUG8                                                           0x8
+#define ixDCIO_DEBUG9                                                           0x9
+#define ixDCIO_DEBUGA                                                           0xa
+#define ixDCIO_DEBUGB                                                           0xb
+#define ixDCIO_DEBUGC                                                           0xc
+#define ixDCIO_DEBUGD                                                           0xd
+#define ixDCIO_DEBUGE                                                           0xe
+#define ixDCIO_DEBUGF                                                           0xf
+#define ixDCIO_DEBUG10                                                          0x10
+#define ixDCIO_DEBUG11                                                          0x11
+#define ixDCIO_DEBUG12                                                          0x12
+#define ixDCIO_DEBUG13                                                          0x13
+#define ixDCIO_DEBUG14                                                          0x14
+#define ixDCIO_DEBUG15                                                          0x15
+#define ixDCIO_DEBUG16                                                          0x16
+#define ixDCIO_DEBUG17                                                          0x17
+#define ixDCIO_DEBUG18                                                          0x18
+#define ixDCIO_DEBUG19                                                          0x19
+#define ixDCIO_DEBUG1A                                                          0x1a
+#define ixDCIO_DEBUG1B                                                          0x1b
+#define ixDCIO_DEBUG1C                                                          0x1c
+#define ixDCIO_DEBUG1D                                                          0x1d
+#define ixDCIO_DEBUG1E                                                          0x1e
+#define ixDCIO_DEBUG1F                                                          0x1f
+#define ixDCIO_DEBUG20                                                          0x20
+#define ixDCIO_DEBUG21                                                          0x21
+#define ixDCIO_DEBUG22                                                          0x22
+#define ixDCIO_DEBUG23                                                          0x23
+#define ixDCIO_DEBUG24                                                          0x24
+#define ixDCIO_DEBUG25                                                          0x25
+#define ixDCIO_DEBUG26                                                          0x26
+#define ixDCIO_DEBUG27                                                          0x27
+#define ixDCIO_DEBUG28                                                          0x28
+#define ixDCIO_DEBUG_ID                                                         0x0
+#define mmDC_GPIO_GENERIC_MASK                                                  0x4860
+#define mmDC_GPIO_GENERIC_A                                                     0x4861
+#define mmDC_GPIO_GENERIC_EN                                                    0x4862
+#define mmDC_GPIO_GENERIC_Y                                                     0x4863
+#define mmDC_GPIO_DDC1_MASK                                                     0x4868
+#define mmDC_GPIO_DDC1_A                                                        0x4869
+#define mmDC_GPIO_DDC1_EN                                                       0x486a
+#define mmDC_GPIO_DDC1_Y                                                        0x486b
+#define mmDC_GPIO_DDC2_MASK                                                     0x486c
+#define mmDC_GPIO_DDC2_A                                                        0x486d
+#define mmDC_GPIO_DDC2_EN                                                       0x486e
+#define mmDC_GPIO_DDC2_Y                                                        0x486f
+#define mmDC_GPIO_DDC3_MASK                                                     0x4870
+#define mmDC_GPIO_DDC3_A                                                        0x4871
+#define mmDC_GPIO_DDC3_EN                                                       0x4872
+#define mmDC_GPIO_DDC3_Y                                                        0x4873
+#define mmDC_GPIO_DDC4_MASK                                                     0x4874
+#define mmDC_GPIO_DDC4_A                                                        0x4875
+#define mmDC_GPIO_DDC4_EN                                                       0x4876
+#define mmDC_GPIO_DDC4_Y                                                        0x4877
+#define mmDC_GPIO_DDC5_MASK                                                     0x4878
+#define mmDC_GPIO_DDC5_A                                                        0x4879
+#define mmDC_GPIO_DDC5_EN                                                       0x487a
+#define mmDC_GPIO_DDC5_Y                                                        0x487b
+#define mmDC_GPIO_DDC6_MASK                                                     0x487c
+#define mmDC_GPIO_DDC6_A                                                        0x487d
+#define mmDC_GPIO_DDC6_EN                                                       0x487e
+#define mmDC_GPIO_DDC6_Y                                                        0x487f
+#define mmDC_GPIO_DDCVGA_MASK                                                   0x4880
+#define mmDC_GPIO_DDCVGA_A                                                      0x4881
+#define mmDC_GPIO_DDCVGA_EN                                                     0x4882
+#define mmDC_GPIO_DDCVGA_Y                                                      0x4883
+#define mmDC_GPIO_SYNCA_MASK                                                    0x4884
+#define mmDC_GPIO_SYNCA_A                                                       0x4885
+#define mmDC_GPIO_SYNCA_EN                                                      0x4886
+#define mmDC_GPIO_SYNCA_Y                                                       0x4887
+#define mmDC_GPIO_GENLK_MASK                                                    0x4888
+#define mmDC_GPIO_GENLK_A                                                       0x4889
+#define mmDC_GPIO_GENLK_EN                                                      0x488a
+#define mmDC_GPIO_GENLK_Y                                                       0x488b
+#define mmDC_GPIO_HPD_MASK                                                      0x488c
+#define mmDC_GPIO_HPD_A                                                         0x488d
+#define mmDC_GPIO_HPD_EN                                                        0x488e
+#define mmDC_GPIO_HPD_Y                                                         0x488f
+#define mmDC_GPIO_PWRSEQ_MASK                                                   0x4890
+#define mmDC_GPIO_PWRSEQ_A                                                      0x4891
+#define mmDC_GPIO_PWRSEQ_EN                                                     0x4892
+#define mmDC_GPIO_PWRSEQ_Y                                                      0x4893
+#define mmDC_GPIO_PAD_STRENGTH_1                                                0x4894
+#define mmDC_GPIO_PAD_STRENGTH_2                                                0x4895
+#define mmPHY_AUX_CNTL                                                          0x4897
+#define mmDC_GPIO_I2CPAD_A                                                      0x4899
+#define mmDC_GPIO_I2CPAD_EN                                                     0x489a
+#define mmDC_GPIO_I2CPAD_Y                                                      0x489b
+#define mmDC_GPIO_I2CPAD_STRENGTH                                               0x489c
+#define mmDVO_VREF_CONTROL                                                      0x489e
+#define mmDVO_SKEW_ADJUST                                                       0x489f
+#define mmDC_GPIO_RECEIVER_EN0                                                  0x48a0
+#define mmDC_GPIO_RECEIVER_EN1                                                  0x48a1
+#define mmDC_GPIO_I2S_SPDIF_MASK                                                0x48a8
+#define mmDC_GPIO_I2S_SPDIF_A                                                   0x48a9
+#define mmDC_GPIO_I2S_SPDIF_EN                                                  0x48aa
+#define mmDC_GPIO_I2S_SPDIF_Y                                                   0x48ab
+#define mmDC_GPIO_I2S_SPDIF_STRENGTH                                            0x48ac
+#define mmDC_GPIO_TX12_EN                                                       0x48ad
+#define mmDC_GPIO_AUX_CTRL_0                                                    0x48ae
+#define mmDC_GPIO_AUX_CTRL_1                                                    0x48af
+#define mmDC_GPIO_AUX_CTRL_2                                                    0x48b0
+#define mmDC_GPIO_HPD_CTRL_0                                                    0x48b1
+#define mmDC_GPIO_HPD_CTRL_1                                                    0x48b2
+#define mmDAC_MACRO_CNTL_RESERVED0                                              0x48b8
+#define mmDAC_MACRO_CNTL_RESERVED1                                              0x48b9
+#define mmDAC_MACRO_CNTL_RESERVED2                                              0x48ba
+#define mmDAC_MACRO_CNTL_RESERVED3                                              0x48bb
+#define mmUNIPHY_MACRO_CNTL_RESERVED0                                           0x48c0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED0                              0x48c0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0                              0x4960
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0                              0x9a00
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0                              0x9aa0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0                              0x9b40
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED0                              0x9be0
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED0                              0x9c80
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED0                              0x9d20
+#define mmUNIPHY_MACRO_CNTL_RESERVED1                                           0x48c1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED1                              0x48c1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1                              0x4961
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1                              0x9a01
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1                              0x9aa1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1                              0x9b41
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED1                              0x9be1
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED1                              0x9c81
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED1                              0x9d21
+#define mmUNIPHY_MACRO_CNTL_RESERVED2                                           0x48c2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED2                              0x48c2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2                              0x4962
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2                              0x9a02
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2                              0x9aa2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2                              0x9b42
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED2                              0x9be2
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED2                              0x9c82
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED2                              0x9d22
+#define mmUNIPHY_MACRO_CNTL_RESERVED3                                           0x48c3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED3                              0x48c3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3                              0x4963
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3                              0x9a03
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3                              0x9aa3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3                              0x9b43
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED3                              0x9be3
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED3                              0x9c83
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED3                              0x9d23
+#define mmUNIPHY_MACRO_CNTL_RESERVED4                                           0x48c4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED4                              0x48c4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4                              0x4964
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4                              0x9a04
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4                              0x9aa4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4                              0x9b44
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED4                              0x9be4
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED4                              0x9c84
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED4                              0x9d24
+#define mmUNIPHY_MACRO_CNTL_RESERVED5                                           0x48c5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED5                              0x48c5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5                              0x4965
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5                              0x9a05
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5                              0x9aa5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5                              0x9b45
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED5                              0x9be5
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED5                              0x9c85
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED5                              0x9d25
+#define mmUNIPHY_MACRO_CNTL_RESERVED6                                           0x48c6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED6                              0x48c6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6                              0x4966
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6                              0x9a06
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6                              0x9aa6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6                              0x9b46
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED6                              0x9be6
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED6                              0x9c86
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED6                              0x9d26
+#define mmUNIPHY_MACRO_CNTL_RESERVED7                                           0x48c7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED7                              0x48c7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7                              0x4967
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7                              0x9a07
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7                              0x9aa7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7                              0x9b47
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED7                              0x9be7
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED7                              0x9c87
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED7                              0x9d27
+#define mmUNIPHY_MACRO_CNTL_RESERVED8                                           0x48c8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED8                              0x48c8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8                              0x4968
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8                              0x9a08
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8                              0x9aa8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8                              0x9b48
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED8                              0x9be8
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED8                              0x9c88
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED8                              0x9d28
+#define mmUNIPHY_MACRO_CNTL_RESERVED9                                           0x48c9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED9                              0x48c9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9                              0x4969
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9                              0x9a09
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9                              0x9aa9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9                              0x9b49
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED9                              0x9be9
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED9                              0x9c89
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED9                              0x9d29
+#define mmUNIPHY_MACRO_CNTL_RESERVED10                                          0x48ca
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED10                             0x48ca
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10                             0x496a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10                             0x9a0a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10                             0x9aaa
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10                             0x9b4a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED10                             0x9bea
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED10                             0x9c8a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED10                             0x9d2a
+#define mmUNIPHY_MACRO_CNTL_RESERVED11                                          0x48cb
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED11                             0x48cb
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11                             0x496b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11                             0x9a0b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11                             0x9aab
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11                             0x9b4b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED11                             0x9beb
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED11                             0x9c8b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED11                             0x9d2b
+#define mmUNIPHY_MACRO_CNTL_RESERVED12                                          0x48cc
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED12                             0x48cc
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12                             0x496c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12                             0x9a0c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12                             0x9aac
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12                             0x9b4c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED12                             0x9bec
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED12                             0x9c8c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED12                             0x9d2c
+#define mmUNIPHY_MACRO_CNTL_RESERVED13                                          0x48cd
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED13                             0x48cd
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13                             0x496d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13                             0x9a0d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13                             0x9aad
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13                             0x9b4d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED13                             0x9bed
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED13                             0x9c8d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED13                             0x9d2d
+#define mmUNIPHY_MACRO_CNTL_RESERVED14                                          0x48ce
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED14                             0x48ce
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14                             0x496e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14                             0x9a0e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14                             0x9aae
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14                             0x9b4e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED14                             0x9bee
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED14                             0x9c8e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED14                             0x9d2e
+#define mmUNIPHY_MACRO_CNTL_RESERVED15                                          0x48cf
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED15                             0x48cf
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15                             0x496f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15                             0x9a0f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15                             0x9aaf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15                             0x9b4f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED15                             0x9bef
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED15                             0x9c8f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED15                             0x9d2f
+#define mmUNIPHY_MACRO_CNTL_RESERVED16                                          0x48d0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED16                             0x48d0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16                             0x4970
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16                             0x9a10
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16                             0x9ab0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16                             0x9b50
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED16                             0x9bf0
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED16                             0x9c90
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED16                             0x9d30
+#define mmUNIPHY_MACRO_CNTL_RESERVED17                                          0x48d1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED17                             0x48d1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17                             0x4971
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17                             0x9a11
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17                             0x9ab1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17                             0x9b51
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED17                             0x9bf1
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED17                             0x9c91
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED17                             0x9d31
+#define mmUNIPHY_MACRO_CNTL_RESERVED18                                          0x48d2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED18                             0x48d2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18                             0x4972
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18                             0x9a12
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18                             0x9ab2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18                             0x9b52
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED18                             0x9bf2
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED18                             0x9c92
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED18                             0x9d32
+#define mmUNIPHY_MACRO_CNTL_RESERVED19                                          0x48d3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED19                             0x48d3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19                             0x4973
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19                             0x9a13
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19                             0x9ab3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19                             0x9b53
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED19                             0x9bf3
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED19                             0x9c93
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED19                             0x9d33
+#define mmUNIPHY_MACRO_CNTL_RESERVED20                                          0x48d4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED20                             0x48d4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20                             0x4974
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20                             0x9a14
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20                             0x9ab4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20                             0x9b54
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED20                             0x9bf4
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED20                             0x9c94
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED20                             0x9d34
+#define mmUNIPHY_MACRO_CNTL_RESERVED21                                          0x48d5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED21                             0x48d5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21                             0x4975
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21                             0x9a15
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21                             0x9ab5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21                             0x9b55
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED21                             0x9bf5
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED21                             0x9c95
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED21                             0x9d35
+#define mmUNIPHY_MACRO_CNTL_RESERVED22                                          0x48d6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED22                             0x48d6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22                             0x4976
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22                             0x9a16
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22                             0x9ab6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22                             0x9b56
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED22                             0x9bf6
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED22                             0x9c96
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED22                             0x9d36
+#define mmUNIPHY_MACRO_CNTL_RESERVED23                                          0x48d7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED23                             0x48d7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23                             0x4977
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23                             0x9a17
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23                             0x9ab7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23                             0x9b57
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED23                             0x9bf7
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED23                             0x9c97
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED23                             0x9d37
+#define mmUNIPHY_MACRO_CNTL_RESERVED24                                          0x48d8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED24                             0x48d8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24                             0x4978
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24                             0x9a18
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24                             0x9ab8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24                             0x9b58
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED24                             0x9bf8
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED24                             0x9c98
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED24                             0x9d38
+#define mmUNIPHY_MACRO_CNTL_RESERVED25                                          0x48d9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED25                             0x48d9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25                             0x4979
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25                             0x9a19
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25                             0x9ab9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25                             0x9b59
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED25                             0x9bf9
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED25                             0x9c99
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED25                             0x9d39
+#define mmUNIPHY_MACRO_CNTL_RESERVED26                                          0x48da
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED26                             0x48da
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26                             0x497a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26                             0x9a1a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26                             0x9aba
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26                             0x9b5a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED26                             0x9bfa
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED26                             0x9c9a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED26                             0x9d3a
+#define mmUNIPHY_MACRO_CNTL_RESERVED27                                          0x48db
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED27                             0x48db
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27                             0x497b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27                             0x9a1b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27                             0x9abb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27                             0x9b5b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED27                             0x9bfb
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED27                             0x9c9b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED27                             0x9d3b
+#define mmUNIPHY_MACRO_CNTL_RESERVED28                                          0x48dc
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED28                             0x48dc
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28                             0x497c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28                             0x9a1c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28                             0x9abc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28                             0x9b5c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED28                             0x9bfc
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED28                             0x9c9c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED28                             0x9d3c
+#define mmUNIPHY_MACRO_CNTL_RESERVED29                                          0x48dd
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED29                             0x48dd
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29                             0x497d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29                             0x9a1d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29                             0x9abd
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29                             0x9b5d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED29                             0x9bfd
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED29                             0x9c9d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED29                             0x9d3d
+#define mmUNIPHY_MACRO_CNTL_RESERVED30                                          0x48de
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED30                             0x48de
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30                             0x497e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30                             0x9a1e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30                             0x9abe
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30                             0x9b5e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED30                             0x9bfe
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED30                             0x9c9e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED30                             0x9d3e
+#define mmUNIPHY_MACRO_CNTL_RESERVED31                                          0x48df
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED31                             0x48df
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31                             0x497f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31                             0x9a1f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31                             0x9abf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31                             0x9b5f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED31                             0x9bff
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED31                             0x9c9f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED31                             0x9d3f
+#define mmUNIPHY_MACRO_CNTL_RESERVED32                                          0x48e0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED32                             0x48e0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32                             0x4980
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32                             0x9a20
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32                             0x9ac0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32                             0x9b60
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED32                             0x9c00
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED32                             0x9ca0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED32                             0x9d40
+#define mmUNIPHY_MACRO_CNTL_RESERVED33                                          0x48e1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED33                             0x48e1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33                             0x4981
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33                             0x9a21
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33                             0x9ac1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33                             0x9b61
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED33                             0x9c01
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED33                             0x9ca1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED33                             0x9d41
+#define mmUNIPHY_MACRO_CNTL_RESERVED34                                          0x48e2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED34                             0x48e2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34                             0x4982
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34                             0x9a22
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34                             0x9ac2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34                             0x9b62
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED34                             0x9c02
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED34                             0x9ca2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED34                             0x9d42
+#define mmUNIPHY_MACRO_CNTL_RESERVED35                                          0x48e3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED35                             0x48e3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35                             0x4983
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35                             0x9a23
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35                             0x9ac3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35                             0x9b63
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED35                             0x9c03
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED35                             0x9ca3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED35                             0x9d43
+#define mmUNIPHY_MACRO_CNTL_RESERVED36                                          0x48e4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED36                             0x48e4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36                             0x4984
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36                             0x9a24
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36                             0x9ac4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36                             0x9b64
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED36                             0x9c04
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED36                             0x9ca4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED36                             0x9d44
+#define mmUNIPHY_MACRO_CNTL_RESERVED37                                          0x48e5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED37                             0x48e5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37                             0x4985
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37                             0x9a25
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37                             0x9ac5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37                             0x9b65
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED37                             0x9c05
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED37                             0x9ca5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED37                             0x9d45
+#define mmUNIPHY_MACRO_CNTL_RESERVED38                                          0x48e6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED38                             0x48e6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38                             0x4986
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38                             0x9a26
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38                             0x9ac6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38                             0x9b66
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED38                             0x9c06
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED38                             0x9ca6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED38                             0x9d46
+#define mmUNIPHY_MACRO_CNTL_RESERVED39                                          0x48e7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED39                             0x48e7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39                             0x4987
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39                             0x9a27
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39                             0x9ac7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39                             0x9b67
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED39                             0x9c07
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED39                             0x9ca7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED39                             0x9d47
+#define mmUNIPHY_MACRO_CNTL_RESERVED40                                          0x48e8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED40                             0x48e8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40                             0x4988
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40                             0x9a28
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40                             0x9ac8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40                             0x9b68
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED40                             0x9c08
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED40                             0x9ca8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED40                             0x9d48
+#define mmUNIPHY_MACRO_CNTL_RESERVED41                                          0x48e9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED41                             0x48e9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41                             0x4989
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41                             0x9a29
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41                             0x9ac9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41                             0x9b69
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED41                             0x9c09
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED41                             0x9ca9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED41                             0x9d49
+#define mmUNIPHY_MACRO_CNTL_RESERVED42                                          0x48ea
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED42                             0x48ea
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42                             0x498a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42                             0x9a2a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42                             0x9aca
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42                             0x9b6a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED42                             0x9c0a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED42                             0x9caa
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED42                             0x9d4a
+#define mmUNIPHY_MACRO_CNTL_RESERVED43                                          0x48eb
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED43                             0x48eb
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43                             0x498b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43                             0x9a2b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43                             0x9acb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43                             0x9b6b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED43                             0x9c0b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED43                             0x9cab
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED43                             0x9d4b
+#define mmUNIPHY_MACRO_CNTL_RESERVED44                                          0x48ec
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED44                             0x48ec
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44                             0x498c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44                             0x9a2c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44                             0x9acc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44                             0x9b6c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED44                             0x9c0c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED44                             0x9cac
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED44                             0x9d4c
+#define mmUNIPHY_MACRO_CNTL_RESERVED45                                          0x48ed
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED45                             0x48ed
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45                             0x498d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45                             0x9a2d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45                             0x9acd
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45                             0x9b6d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED45                             0x9c0d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED45                             0x9cad
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED45                             0x9d4d
+#define mmUNIPHY_MACRO_CNTL_RESERVED46                                          0x48ee
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED46                             0x48ee
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46                             0x498e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46                             0x9a2e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46                             0x9ace
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46                             0x9b6e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED46                             0x9c0e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED46                             0x9cae
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED46                             0x9d4e
+#define mmUNIPHY_MACRO_CNTL_RESERVED47                                          0x48ef
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED47                             0x48ef
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47                             0x498f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47                             0x9a2f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47                             0x9acf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47                             0x9b6f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED47                             0x9c0f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED47                             0x9caf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED47                             0x9d4f
+#define mmUNIPHY_MACRO_CNTL_RESERVED48                                          0x48f0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED48                             0x48f0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48                             0x4990
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48                             0x9a30
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48                             0x9ad0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48                             0x9b70
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED48                             0x9c10
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED48                             0x9cb0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED48                             0x9d50
+#define mmUNIPHY_MACRO_CNTL_RESERVED49                                          0x48f1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED49                             0x48f1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49                             0x4991
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49                             0x9a31
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49                             0x9ad1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49                             0x9b71
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED49                             0x9c11
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED49                             0x9cb1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED49                             0x9d51
+#define mmUNIPHY_MACRO_CNTL_RESERVED50                                          0x48f2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED50                             0x48f2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50                             0x4992
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50                             0x9a32
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50                             0x9ad2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50                             0x9b72
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED50                             0x9c12
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED50                             0x9cb2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED50                             0x9d52
+#define mmUNIPHY_MACRO_CNTL_RESERVED51                                          0x48f3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED51                             0x48f3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51                             0x4993
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51                             0x9a33
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51                             0x9ad3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51                             0x9b73
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED51                             0x9c13
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED51                             0x9cb3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED51                             0x9d53
+#define mmUNIPHY_MACRO_CNTL_RESERVED52                                          0x48f4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED52                             0x48f4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52                             0x4994
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52                             0x9a34
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52                             0x9ad4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52                             0x9b74
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED52                             0x9c14
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED52                             0x9cb4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED52                             0x9d54
+#define mmUNIPHY_MACRO_CNTL_RESERVED53                                          0x48f5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED53                             0x48f5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53                             0x4995
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53                             0x9a35
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53                             0x9ad5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53                             0x9b75
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED53                             0x9c15
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED53                             0x9cb5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED53                             0x9d55
+#define mmUNIPHY_MACRO_CNTL_RESERVED54                                          0x48f6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED54                             0x48f6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54                             0x4996
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54                             0x9a36
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54                             0x9ad6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54                             0x9b76
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED54                             0x9c16
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED54                             0x9cb6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED54                             0x9d56
+#define mmUNIPHY_MACRO_CNTL_RESERVED55                                          0x48f7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED55                             0x48f7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55                             0x4997
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55                             0x9a37
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55                             0x9ad7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55                             0x9b77
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED55                             0x9c17
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED55                             0x9cb7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED55                             0x9d57
+#define mmUNIPHY_MACRO_CNTL_RESERVED56                                          0x48f8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED56                             0x48f8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56                             0x4998
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56                             0x9a38
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56                             0x9ad8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56                             0x9b78
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED56                             0x9c18
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED56                             0x9cb8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED56                             0x9d58
+#define mmUNIPHY_MACRO_CNTL_RESERVED57                                          0x48f9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED57                             0x48f9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57                             0x4999
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57                             0x9a39
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57                             0x9ad9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57                             0x9b79
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED57                             0x9c19
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED57                             0x9cb9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED57                             0x9d59
+#define mmUNIPHY_MACRO_CNTL_RESERVED58                                          0x48fa
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED58                             0x48fa
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED58                             0x499a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED58                             0x9a3a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED58                             0x9ada
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED58                             0x9b7a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED58                             0x9c1a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED58                             0x9cba
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED58                             0x9d5a
+#define mmUNIPHY_MACRO_CNTL_RESERVED59                                          0x48fb
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED59                             0x48fb
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED59                             0x499b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED59                             0x9a3b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED59                             0x9adb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED59                             0x9b7b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED59                             0x9c1b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED59                             0x9cbb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED59                             0x9d5b
+#define mmUNIPHY_MACRO_CNTL_RESERVED60                                          0x48fc
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED60                             0x48fc
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED60                             0x499c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED60                             0x9a3c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED60                             0x9adc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED60                             0x9b7c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED60                             0x9c1c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED60                             0x9cbc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED60                             0x9d5c
+#define mmUNIPHY_MACRO_CNTL_RESERVED61                                          0x48fd
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED61                             0x48fd
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED61                             0x499d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED61                             0x9a3d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED61                             0x9add
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED61                             0x9b7d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED61                             0x9c1d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED61                             0x9cbd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED61                             0x9d5d
+#define mmUNIPHY_MACRO_CNTL_RESERVED62                                          0x48fe
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED62                             0x48fe
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED62                             0x499e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED62                             0x9a3e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED62                             0x9ade
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED62                             0x9b7e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED62                             0x9c1e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED62                             0x9cbe
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED62                             0x9d5e
+#define mmUNIPHY_MACRO_CNTL_RESERVED63                                          0x48ff
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED63                             0x48ff
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED63                             0x499f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED63                             0x9a3f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED63                             0x9adf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED63                             0x9b7f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED63                             0x9c1f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED63                             0x9cbf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED63                             0x9d5f
+#define mmUNIPHY_MACRO_CNTL_RESERVED64                                          0x4900
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED64                             0x4900
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED64                             0x49a0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED64                             0x9a40
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED64                             0x9ae0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED64                             0x9b80
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED64                             0x9c20
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED64                             0x9cc0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED64                             0x9d60
+#define mmUNIPHY_MACRO_CNTL_RESERVED65                                          0x4901
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED65                             0x4901
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED65                             0x49a1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED65                             0x9a41
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED65                             0x9ae1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED65                             0x9b81
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED65                             0x9c21
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED65                             0x9cc1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED65                             0x9d61
+#define mmUNIPHY_MACRO_CNTL_RESERVED66                                          0x4902
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED66                             0x4902
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED66                             0x49a2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED66                             0x9a42
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED66                             0x9ae2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED66                             0x9b82
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED66                             0x9c22
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED66                             0x9cc2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED66                             0x9d62
+#define mmUNIPHY_MACRO_CNTL_RESERVED67                                          0x4903
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED67                             0x4903
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED67                             0x49a3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED67                             0x9a43
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED67                             0x9ae3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED67                             0x9b83
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED67                             0x9c23
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED67                             0x9cc3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED67                             0x9d63
+#define mmUNIPHY_MACRO_CNTL_RESERVED68                                          0x4904
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED68                             0x4904
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED68                             0x49a4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED68                             0x9a44
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED68                             0x9ae4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED68                             0x9b84
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED68                             0x9c24
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED68                             0x9cc4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED68                             0x9d64
+#define mmUNIPHY_MACRO_CNTL_RESERVED69                                          0x4905
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED69                             0x4905
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED69                             0x49a5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED69                             0x9a45
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED69                             0x9ae5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED69                             0x9b85
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED69                             0x9c25
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED69                             0x9cc5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED69                             0x9d65
+#define mmUNIPHY_MACRO_CNTL_RESERVED70                                          0x4906
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED70                             0x4906
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED70                             0x49a6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED70                             0x9a46
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED70                             0x9ae6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED70                             0x9b86
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED70                             0x9c26
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED70                             0x9cc6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED70                             0x9d66
+#define mmUNIPHY_MACRO_CNTL_RESERVED71                                          0x4907
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED71                             0x4907
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED71                             0x49a7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED71                             0x9a47
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED71                             0x9ae7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED71                             0x9b87
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED71                             0x9c27
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED71                             0x9cc7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED71                             0x9d67
+#define mmUNIPHY_MACRO_CNTL_RESERVED72                                          0x4908
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED72                             0x4908
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED72                             0x49a8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED72                             0x9a48
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED72                             0x9ae8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED72                             0x9b88
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED72                             0x9c28
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED72                             0x9cc8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED72                             0x9d68
+#define mmUNIPHY_MACRO_CNTL_RESERVED73                                          0x4909
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED73                             0x4909
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED73                             0x49a9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED73                             0x9a49
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED73                             0x9ae9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED73                             0x9b89
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED73                             0x9c29
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED73                             0x9cc9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED73                             0x9d69
+#define mmUNIPHY_MACRO_CNTL_RESERVED74                                          0x490a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED74                             0x490a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED74                             0x49aa
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED74                             0x9a4a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED74                             0x9aea
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED74                             0x9b8a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED74                             0x9c2a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED74                             0x9cca
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED74                             0x9d6a
+#define mmUNIPHY_MACRO_CNTL_RESERVED75                                          0x490b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED75                             0x490b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED75                             0x49ab
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED75                             0x9a4b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED75                             0x9aeb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED75                             0x9b8b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED75                             0x9c2b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED75                             0x9ccb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED75                             0x9d6b
+#define mmUNIPHY_MACRO_CNTL_RESERVED76                                          0x490c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED76                             0x490c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED76                             0x49ac
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED76                             0x9a4c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED76                             0x9aec
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED76                             0x9b8c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED76                             0x9c2c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED76                             0x9ccc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED76                             0x9d6c
+#define mmUNIPHY_MACRO_CNTL_RESERVED77                                          0x490d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED77                             0x490d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED77                             0x49ad
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED77                             0x9a4d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED77                             0x9aed
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED77                             0x9b8d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED77                             0x9c2d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED77                             0x9ccd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED77                             0x9d6d
+#define mmUNIPHY_MACRO_CNTL_RESERVED78                                          0x490e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED78                             0x490e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED78                             0x49ae
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED78                             0x9a4e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED78                             0x9aee
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED78                             0x9b8e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED78                             0x9c2e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED78                             0x9cce
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED78                             0x9d6e
+#define mmUNIPHY_MACRO_CNTL_RESERVED79                                          0x490f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED79                             0x490f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED79                             0x49af
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED79                             0x9a4f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED79                             0x9aef
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED79                             0x9b8f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED79                             0x9c2f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED79                             0x9ccf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED79                             0x9d6f
+#define mmUNIPHY_MACRO_CNTL_RESERVED80                                          0x4910
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED80                             0x4910
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED80                             0x49b0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED80                             0x9a50
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED80                             0x9af0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED80                             0x9b90
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED80                             0x9c30
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED80                             0x9cd0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED80                             0x9d70
+#define mmUNIPHY_MACRO_CNTL_RESERVED81                                          0x4911
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED81                             0x4911
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED81                             0x49b1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED81                             0x9a51
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED81                             0x9af1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED81                             0x9b91
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED81                             0x9c31
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED81                             0x9cd1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED81                             0x9d71
+#define mmUNIPHY_MACRO_CNTL_RESERVED82                                          0x4912
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED82                             0x4912
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED82                             0x49b2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED82                             0x9a52
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED82                             0x9af2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED82                             0x9b92
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED82                             0x9c32
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED82                             0x9cd2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED82                             0x9d72
+#define mmUNIPHY_MACRO_CNTL_RESERVED83                                          0x4913
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED83                             0x4913
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED83                             0x49b3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED83                             0x9a53
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED83                             0x9af3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED83                             0x9b93
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED83                             0x9c33
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED83                             0x9cd3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED83                             0x9d73
+#define mmUNIPHY_MACRO_CNTL_RESERVED84                                          0x4914
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED84                             0x4914
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED84                             0x49b4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED84                             0x9a54
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED84                             0x9af4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED84                             0x9b94
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED84                             0x9c34
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED84                             0x9cd4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED84                             0x9d74
+#define mmUNIPHY_MACRO_CNTL_RESERVED85                                          0x4915
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED85                             0x4915
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED85                             0x49b5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED85                             0x9a55
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED85                             0x9af5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED85                             0x9b95
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED85                             0x9c35
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED85                             0x9cd5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED85                             0x9d75
+#define mmUNIPHY_MACRO_CNTL_RESERVED86                                          0x4916
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED86                             0x4916
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED86                             0x49b6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED86                             0x9a56
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED86                             0x9af6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED86                             0x9b96
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED86                             0x9c36
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED86                             0x9cd6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED86                             0x9d76
+#define mmUNIPHY_MACRO_CNTL_RESERVED87                                          0x4917
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED87                             0x4917
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED87                             0x49b7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED87                             0x9a57
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED87                             0x9af7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED87                             0x9b97
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED87                             0x9c37
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED87                             0x9cd7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED87                             0x9d77
+#define mmUNIPHY_MACRO_CNTL_RESERVED88                                          0x4918
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED88                             0x4918
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED88                             0x49b8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED88                             0x9a58
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED88                             0x9af8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED88                             0x9b98
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED88                             0x9c38
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED88                             0x9cd8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED88                             0x9d78
+#define mmUNIPHY_MACRO_CNTL_RESERVED89                                          0x4919
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED89                             0x4919
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED89                             0x49b9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED89                             0x9a59
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED89                             0x9af9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED89                             0x9b99
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED89                             0x9c39
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED89                             0x9cd9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED89                             0x9d79
+#define mmUNIPHY_MACRO_CNTL_RESERVED90                                          0x491a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED90                             0x491a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED90                             0x49ba
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED90                             0x9a5a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED90                             0x9afa
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED90                             0x9b9a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED90                             0x9c3a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED90                             0x9cda
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED90                             0x9d7a
+#define mmUNIPHY_MACRO_CNTL_RESERVED91                                          0x491b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED91                             0x491b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED91                             0x49bb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED91                             0x9a5b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED91                             0x9afb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED91                             0x9b9b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED91                             0x9c3b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED91                             0x9cdb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED91                             0x9d7b
+#define mmUNIPHY_MACRO_CNTL_RESERVED92                                          0x491c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED92                             0x491c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED92                             0x49bc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED92                             0x9a5c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED92                             0x9afc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED92                             0x9b9c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED92                             0x9c3c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED92                             0x9cdc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED92                             0x9d7c
+#define mmUNIPHY_MACRO_CNTL_RESERVED93                                          0x491d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED93                             0x491d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED93                             0x49bd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED93                             0x9a5d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED93                             0x9afd
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED93                             0x9b9d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED93                             0x9c3d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED93                             0x9cdd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED93                             0x9d7d
+#define mmUNIPHY_MACRO_CNTL_RESERVED94                                          0x491e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED94                             0x491e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED94                             0x49be
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED94                             0x9a5e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED94                             0x9afe
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED94                             0x9b9e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED94                             0x9c3e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED94                             0x9cde
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED94                             0x9d7e
+#define mmUNIPHY_MACRO_CNTL_RESERVED95                                          0x491f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED95                             0x491f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED95                             0x49bf
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED95                             0x9a5f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED95                             0x9aff
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED95                             0x9b9f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED95                             0x9c3f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED95                             0x9cdf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED95                             0x9d7f
+#define mmUNIPHY_MACRO_CNTL_RESERVED96                                          0x4920
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED96                             0x4920
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED96                             0x49c0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED96                             0x9a60
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED96                             0x9b00
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED96                             0x9ba0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED96                             0x9c40
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED96                             0x9ce0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED96                             0x9d80
+#define mmUNIPHY_MACRO_CNTL_RESERVED97                                          0x4921
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED97                             0x4921
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED97                             0x49c1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED97                             0x9a61
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED97                             0x9b01
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED97                             0x9ba1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED97                             0x9c41
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED97                             0x9ce1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED97                             0x9d81
+#define mmUNIPHY_MACRO_CNTL_RESERVED98                                          0x4922
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED98                             0x4922
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED98                             0x49c2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED98                             0x9a62
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED98                             0x9b02
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED98                             0x9ba2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED98                             0x9c42
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED98                             0x9ce2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED98                             0x9d82
+#define mmUNIPHY_MACRO_CNTL_RESERVED99                                          0x4923
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED99                             0x4923
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED99                             0x49c3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED99                             0x9a63
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED99                             0x9b03
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED99                             0x9ba3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED99                             0x9c43
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED99                             0x9ce3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED99                             0x9d83
+#define mmUNIPHY_MACRO_CNTL_RESERVED100                                         0x4924
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED100                            0x4924
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED100                            0x49c4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED100                            0x9a64
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED100                            0x9b04
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED100                            0x9ba4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED100                            0x9c44
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED100                            0x9ce4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED100                            0x9d84
+#define mmUNIPHY_MACRO_CNTL_RESERVED101                                         0x4925
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED101                            0x4925
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED101                            0x49c5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED101                            0x9a65
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED101                            0x9b05
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED101                            0x9ba5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED101                            0x9c45
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED101                            0x9ce5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED101                            0x9d85
+#define mmUNIPHY_MACRO_CNTL_RESERVED102                                         0x4926
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED102                            0x4926
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED102                            0x49c6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED102                            0x9a66
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED102                            0x9b06
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED102                            0x9ba6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED102                            0x9c46
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED102                            0x9ce6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED102                            0x9d86
+#define mmUNIPHY_MACRO_CNTL_RESERVED103                                         0x4927
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED103                            0x4927
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED103                            0x49c7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED103                            0x9a67
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED103                            0x9b07
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED103                            0x9ba7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED103                            0x9c47
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED103                            0x9ce7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED103                            0x9d87
+#define mmUNIPHY_MACRO_CNTL_RESERVED104                                         0x4928
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED104                            0x4928
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED104                            0x49c8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED104                            0x9a68
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED104                            0x9b08
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED104                            0x9ba8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED104                            0x9c48
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED104                            0x9ce8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED104                            0x9d88
+#define mmUNIPHY_MACRO_CNTL_RESERVED105                                         0x4929
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED105                            0x4929
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED105                            0x49c9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED105                            0x9a69
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED105                            0x9b09
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED105                            0x9ba9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED105                            0x9c49
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED105                            0x9ce9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED105                            0x9d89
+#define mmUNIPHY_MACRO_CNTL_RESERVED106                                         0x492a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED106                            0x492a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED106                            0x49ca
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED106                            0x9a6a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED106                            0x9b0a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED106                            0x9baa
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED106                            0x9c4a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED106                            0x9cea
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED106                            0x9d8a
+#define mmUNIPHY_MACRO_CNTL_RESERVED107                                         0x492b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED107                            0x492b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED107                            0x49cb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED107                            0x9a6b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED107                            0x9b0b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED107                            0x9bab
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED107                            0x9c4b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED107                            0x9ceb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED107                            0x9d8b
+#define mmUNIPHY_MACRO_CNTL_RESERVED108                                         0x492c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED108                            0x492c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED108                            0x49cc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED108                            0x9a6c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED108                            0x9b0c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED108                            0x9bac
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED108                            0x9c4c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED108                            0x9cec
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED108                            0x9d8c
+#define mmUNIPHY_MACRO_CNTL_RESERVED109                                         0x492d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED109                            0x492d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED109                            0x49cd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED109                            0x9a6d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED109                            0x9b0d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED109                            0x9bad
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED109                            0x9c4d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED109                            0x9ced
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED109                            0x9d8d
+#define mmUNIPHY_MACRO_CNTL_RESERVED110                                         0x492e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED110                            0x492e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED110                            0x49ce
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED110                            0x9a6e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED110                            0x9b0e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED110                            0x9bae
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED110                            0x9c4e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED110                            0x9cee
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED110                            0x9d8e
+#define mmUNIPHY_MACRO_CNTL_RESERVED111                                         0x492f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED111                            0x492f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED111                            0x49cf
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED111                            0x9a6f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED111                            0x9b0f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED111                            0x9baf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED111                            0x9c4f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED111                            0x9cef
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED111                            0x9d8f
+#define mmUNIPHY_MACRO_CNTL_RESERVED112                                         0x4930
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED112                            0x4930
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED112                            0x49d0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED112                            0x9a70
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED112                            0x9b10
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED112                            0x9bb0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED112                            0x9c50
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED112                            0x9cf0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED112                            0x9d90
+#define mmUNIPHY_MACRO_CNTL_RESERVED113                                         0x4931
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED113                            0x4931
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED113                            0x49d1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED113                            0x9a71
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED113                            0x9b11
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED113                            0x9bb1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED113                            0x9c51
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED113                            0x9cf1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED113                            0x9d91
+#define mmUNIPHY_MACRO_CNTL_RESERVED114                                         0x4932
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED114                            0x4932
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED114                            0x49d2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED114                            0x9a72
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED114                            0x9b12
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED114                            0x9bb2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED114                            0x9c52
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED114                            0x9cf2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED114                            0x9d92
+#define mmUNIPHY_MACRO_CNTL_RESERVED115                                         0x4933
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED115                            0x4933
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED115                            0x49d3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED115                            0x9a73
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED115                            0x9b13
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED115                            0x9bb3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED115                            0x9c53
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED115                            0x9cf3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED115                            0x9d93
+#define mmUNIPHY_MACRO_CNTL_RESERVED116                                         0x4934
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED116                            0x4934
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED116                            0x49d4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED116                            0x9a74
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED116                            0x9b14
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED116                            0x9bb4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED116                            0x9c54
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED116                            0x9cf4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED116                            0x9d94
+#define mmUNIPHY_MACRO_CNTL_RESERVED117                                         0x4935
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED117                            0x4935
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED117                            0x49d5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED117                            0x9a75
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED117                            0x9b15
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED117                            0x9bb5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED117                            0x9c55
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED117                            0x9cf5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED117                            0x9d95
+#define mmUNIPHY_MACRO_CNTL_RESERVED118                                         0x4936
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED118                            0x4936
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED118                            0x49d6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED118                            0x9a76
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED118                            0x9b16
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED118                            0x9bb6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED118                            0x9c56
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED118                            0x9cf6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED118                            0x9d96
+#define mmUNIPHY_MACRO_CNTL_RESERVED119                                         0x4937
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED119                            0x4937
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED119                            0x49d7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED119                            0x9a77
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED119                            0x9b17
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED119                            0x9bb7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED119                            0x9c57
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED119                            0x9cf7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED119                            0x9d97
+#define mmUNIPHY_MACRO_CNTL_RESERVED120                                         0x4938
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED120                            0x4938
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED120                            0x49d8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED120                            0x9a78
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED120                            0x9b18
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED120                            0x9bb8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED120                            0x9c58
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED120                            0x9cf8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED120                            0x9d98
+#define mmUNIPHY_MACRO_CNTL_RESERVED121                                         0x4939
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED121                            0x4939
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED121                            0x49d9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED121                            0x9a79
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED121                            0x9b19
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED121                            0x9bb9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED121                            0x9c59
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED121                            0x9cf9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED121                            0x9d99
+#define mmUNIPHY_MACRO_CNTL_RESERVED122                                         0x493a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED122                            0x493a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED122                            0x49da
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED122                            0x9a7a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED122                            0x9b1a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED122                            0x9bba
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED122                            0x9c5a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED122                            0x9cfa
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED122                            0x9d9a
+#define mmUNIPHY_MACRO_CNTL_RESERVED123                                         0x493b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED123                            0x493b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED123                            0x49db
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED123                            0x9a7b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED123                            0x9b1b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED123                            0x9bbb
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED123                            0x9c5b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED123                            0x9cfb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED123                            0x9d9b
+#define mmUNIPHY_MACRO_CNTL_RESERVED124                                         0x493c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED124                            0x493c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED124                            0x49dc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED124                            0x9a7c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED124                            0x9b1c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED124                            0x9bbc
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED124                            0x9c5c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED124                            0x9cfc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED124                            0x9d9c
+#define mmUNIPHY_MACRO_CNTL_RESERVED125                                         0x493d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED125                            0x493d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED125                            0x49dd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED125                            0x9a7d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED125                            0x9b1d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED125                            0x9bbd
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED125                            0x9c5d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED125                            0x9cfd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED125                            0x9d9d
+#define mmUNIPHY_MACRO_CNTL_RESERVED126                                         0x493e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED126                            0x493e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED126                            0x49de
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED126                            0x9a7e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED126                            0x9b1e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED126                            0x9bbe
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED126                            0x9c5e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED126                            0x9cfe
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED126                            0x9d9e
+#define mmUNIPHY_MACRO_CNTL_RESERVED127                                         0x493f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED127                            0x493f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED127                            0x49df
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED127                            0x9a7f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED127                            0x9b1f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED127                            0x9bbf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED127                            0x9c5f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED127                            0x9cff
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED127                            0x9d9f
+#define mmUNIPHY_MACRO_CNTL_RESERVED128                                         0x4940
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED128                            0x4940
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED128                            0x49e0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED128                            0x9a80
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED128                            0x9b20
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED128                            0x9bc0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED128                            0x9c60
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED128                            0x9d00
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED128                            0x9da0
+#define mmUNIPHY_MACRO_CNTL_RESERVED129                                         0x4941
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED129                            0x4941
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED129                            0x49e1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED129                            0x9a81
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED129                            0x9b21
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED129                            0x9bc1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED129                            0x9c61
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED129                            0x9d01
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED129                            0x9da1
+#define mmUNIPHY_MACRO_CNTL_RESERVED130                                         0x4942
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED130                            0x4942
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED130                            0x49e2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED130                            0x9a82
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED130                            0x9b22
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED130                            0x9bc2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED130                            0x9c62
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED130                            0x9d02
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED130                            0x9da2
+#define mmUNIPHY_MACRO_CNTL_RESERVED131                                         0x4943
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED131                            0x4943
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED131                            0x49e3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED131                            0x9a83
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED131                            0x9b23
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED131                            0x9bc3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED131                            0x9c63
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED131                            0x9d03
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED131                            0x9da3
+#define mmUNIPHY_MACRO_CNTL_RESERVED132                                         0x4944
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED132                            0x4944
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED132                            0x49e4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED132                            0x9a84
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED132                            0x9b24
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED132                            0x9bc4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED132                            0x9c64
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED132                            0x9d04
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED132                            0x9da4
+#define mmUNIPHY_MACRO_CNTL_RESERVED133                                         0x4945
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED133                            0x4945
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED133                            0x49e5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED133                            0x9a85
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED133                            0x9b25
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED133                            0x9bc5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED133                            0x9c65
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED133                            0x9d05
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED133                            0x9da5
+#define mmUNIPHY_MACRO_CNTL_RESERVED134                                         0x4946
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED134                            0x4946
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED134                            0x49e6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED134                            0x9a86
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED134                            0x9b26
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED134                            0x9bc6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED134                            0x9c66
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED134                            0x9d06
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED134                            0x9da6
+#define mmUNIPHY_MACRO_CNTL_RESERVED135                                         0x4947
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED135                            0x4947
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED135                            0x49e7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED135                            0x9a87
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED135                            0x9b27
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED135                            0x9bc7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED135                            0x9c67
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED135                            0x9d07
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED135                            0x9da7
+#define mmUNIPHY_MACRO_CNTL_RESERVED136                                         0x4948
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED136                            0x4948
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED136                            0x49e8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED136                            0x9a88
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED136                            0x9b28
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED136                            0x9bc8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED136                            0x9c68
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED136                            0x9d08
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED136                            0x9da8
+#define mmUNIPHY_MACRO_CNTL_RESERVED137                                         0x4949
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED137                            0x4949
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED137                            0x49e9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED137                            0x9a89
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED137                            0x9b29
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED137                            0x9bc9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED137                            0x9c69
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED137                            0x9d09
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED137                            0x9da9
+#define mmUNIPHY_MACRO_CNTL_RESERVED138                                         0x494a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED138                            0x494a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED138                            0x49ea
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED138                            0x9a8a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED138                            0x9b2a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED138                            0x9bca
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED138                            0x9c6a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED138                            0x9d0a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED138                            0x9daa
+#define mmUNIPHY_MACRO_CNTL_RESERVED139                                         0x494b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED139                            0x494b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED139                            0x49eb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED139                            0x9a8b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED139                            0x9b2b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED139                            0x9bcb
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED139                            0x9c6b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED139                            0x9d0b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED139                            0x9dab
+#define mmUNIPHY_MACRO_CNTL_RESERVED140                                         0x494c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED140                            0x494c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED140                            0x49ec
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED140                            0x9a8c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED140                            0x9b2c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED140                            0x9bcc
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED140                            0x9c6c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED140                            0x9d0c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED140                            0x9dac
+#define mmUNIPHY_MACRO_CNTL_RESERVED141                                         0x494d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED141                            0x494d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED141                            0x49ed
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED141                            0x9a8d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED141                            0x9b2d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED141                            0x9bcd
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED141                            0x9c6d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED141                            0x9d0d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED141                            0x9dad
+#define mmUNIPHY_MACRO_CNTL_RESERVED142                                         0x494e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED142                            0x494e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED142                            0x49ee
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED142                            0x9a8e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED142                            0x9b2e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED142                            0x9bce
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED142                            0x9c6e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED142                            0x9d0e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED142                            0x9dae
+#define mmUNIPHY_MACRO_CNTL_RESERVED143                                         0x494f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED143                            0x494f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED143                            0x49ef
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED143                            0x9a8f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED143                            0x9b2f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED143                            0x9bcf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED143                            0x9c6f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED143                            0x9d0f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED143                            0x9daf
+#define mmUNIPHY_MACRO_CNTL_RESERVED144                                         0x4950
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED144                            0x4950
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED144                            0x49f0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED144                            0x9a90
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED144                            0x9b30
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED144                            0x9bd0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED144                            0x9c70
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED144                            0x9d10
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED144                            0x9db0
+#define mmUNIPHY_MACRO_CNTL_RESERVED145                                         0x4951
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED145                            0x4951
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED145                            0x49f1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED145                            0x9a91
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED145                            0x9b31
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED145                            0x9bd1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED145                            0x9c71
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED145                            0x9d11
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED145                            0x9db1
+#define mmUNIPHY_MACRO_CNTL_RESERVED146                                         0x4952
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED146                            0x4952
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED146                            0x49f2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED146                            0x9a92
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED146                            0x9b32
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED146                            0x9bd2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED146                            0x9c72
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED146                            0x9d12
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED146                            0x9db2
+#define mmUNIPHY_MACRO_CNTL_RESERVED147                                         0x4953
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED147                            0x4953
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED147                            0x49f3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED147                            0x9a93
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED147                            0x9b33
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED147                            0x9bd3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED147                            0x9c73
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED147                            0x9d13
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED147                            0x9db3
+#define mmUNIPHY_MACRO_CNTL_RESERVED148                                         0x4954
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED148                            0x4954
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED148                            0x49f4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED148                            0x9a94
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED148                            0x9b34
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED148                            0x9bd4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED148                            0x9c74
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED148                            0x9d14
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED148                            0x9db4
+#define mmUNIPHY_MACRO_CNTL_RESERVED149                                         0x4955
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED149                            0x4955
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED149                            0x49f5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED149                            0x9a95
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED149                            0x9b35
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED149                            0x9bd5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED149                            0x9c75
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED149                            0x9d15
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED149                            0x9db5
+#define mmUNIPHY_MACRO_CNTL_RESERVED150                                         0x4956
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED150                            0x4956
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED150                            0x49f6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED150                            0x9a96
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED150                            0x9b36
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED150                            0x9bd6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED150                            0x9c76
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED150                            0x9d16
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED150                            0x9db6
+#define mmUNIPHY_MACRO_CNTL_RESERVED151                                         0x4957
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED151                            0x4957
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED151                            0x49f7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED151                            0x9a97
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED151                            0x9b37
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED151                            0x9bd7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED151                            0x9c77
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED151                            0x9d17
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED151                            0x9db7
+#define mmUNIPHY_MACRO_CNTL_RESERVED152                                         0x4958
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED152                            0x4958
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED152                            0x49f8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED152                            0x9a98
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED152                            0x9b38
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED152                            0x9bd8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED152                            0x9c78
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED152                            0x9d18
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED152                            0x9db8
+#define mmUNIPHY_MACRO_CNTL_RESERVED153                                         0x4959
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED153                            0x4959
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED153                            0x49f9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED153                            0x9a99
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED153                            0x9b39
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED153                            0x9bd9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED153                            0x9c79
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED153                            0x9d19
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED153                            0x9db9
+#define mmUNIPHY_MACRO_CNTL_RESERVED154                                         0x495a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED154                            0x495a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED154                            0x49fa
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED154                            0x9a9a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED154                            0x9b3a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED154                            0x9bda
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED154                            0x9c7a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED154                            0x9d1a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED154                            0x9dba
+#define mmUNIPHY_MACRO_CNTL_RESERVED155                                         0x495b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED155                            0x495b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED155                            0x49fb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED155                            0x9a9b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED155                            0x9b3b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED155                            0x9bdb
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED155                            0x9c7b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED155                            0x9d1b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED155                            0x9dbb
+#define mmUNIPHY_MACRO_CNTL_RESERVED156                                         0x495c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED156                            0x495c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED156                            0x49fc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED156                            0x9a9c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED156                            0x9b3c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED156                            0x9bdc
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED156                            0x9c7c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED156                            0x9d1c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED156                            0x9dbc
+#define mmUNIPHY_MACRO_CNTL_RESERVED157                                         0x495d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED157                            0x495d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED157                            0x49fd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED157                            0x9a9d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED157                            0x9b3d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED157                            0x9bdd
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED157                            0x9c7d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED157                            0x9d1d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED157                            0x9dbd
+#define mmUNIPHY_MACRO_CNTL_RESERVED158                                         0x495e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED158                            0x495e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED158                            0x49fe
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED158                            0x9a9e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED158                            0x9b3e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED158                            0x9bde
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED158                            0x9c7e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED158                            0x9d1e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED158                            0x9dbe
+#define mmUNIPHY_MACRO_CNTL_RESERVED159                                         0x495f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED159                            0x495f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED159                            0x49ff
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED159                            0x9a9f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED159                            0x9b3f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED159                            0x9bdf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED159                            0x9c7f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED159                            0x9d1f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED159                            0x9dbf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED0                                         0x5a84
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED1                                         0x5a85
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED2                                         0x5a86
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED3                                         0x5a87
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED4                                         0x5a88
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED5                                         0x5a89
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED6                                         0x5a8a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED7                                         0x5a8b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED8                                         0x5a8c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED9                                         0x5a8d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED10                                        0x5a8e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED11                                        0x5a8f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED12                                        0x5a90
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED13                                        0x5a91
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED14                                        0x5a92
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED15                                        0x5a93
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED16                                        0x5a94
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED17                                        0x5a95
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED18                                        0x5a96
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED19                                        0x5a97
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED20                                        0x5a98
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED21                                        0x5a99
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED22                                        0x5a9a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED23                                        0x5a9b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED24                                        0x5a9c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED25                                        0x5a9d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED26                                        0x5a9e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED27                                        0x5a9f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED28                                        0x5aa0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED29                                        0x5aa1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED30                                        0x5aa2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED31                                        0x5aa3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED32                                        0x5aa4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED33                                        0x5aa5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED34                                        0x5aa6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED35                                        0x5aa7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED36                                        0x5aa8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED37                                        0x5aa9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED38                                        0x5aaa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED39                                        0x5aab
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED40                                        0x5aac
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED41                                        0x5aad
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED42                                        0x5aae
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED43                                        0x5aaf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED44                                        0x5ab0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED45                                        0x5ab1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED46                                        0x5ab2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED47                                        0x5ab3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED48                                        0x5ab4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED49                                        0x5ab5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED50                                        0x5ab6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED51                                        0x5ab7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED52                                        0x5ab8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED53                                        0x5ab9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED54                                        0x5aba
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED55                                        0x5abb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED56                                        0x5abc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED57                                        0x5abd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED58                                        0x5abe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED59                                        0x5abf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED60                                        0x5ac0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED61                                        0x5ac1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED62                                        0x5ac2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED63                                        0x5ac3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED64                                        0x5ac4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED65                                        0x5ac5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED66                                        0x5ac6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED67                                        0x5ac7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED68                                        0x5ac8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED69                                        0x5ac9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED70                                        0x5aca
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED71                                        0x5acb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED72                                        0x5acc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED73                                        0x5acd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED74                                        0x5ace
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED75                                        0x5acf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED76                                        0x5ad0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED77                                        0x5ad1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED78                                        0x5ad2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED79                                        0x5ad3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED80                                        0x5ad4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED81                                        0x5ad5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED82                                        0x5ad6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED83                                        0x5ad7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED84                                        0x5ad8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED85                                        0x5ad9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED86                                        0x5ada
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED87                                        0x5adb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED88                                        0x5adc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED89                                        0x5add
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED90                                        0x5ade
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED91                                        0x5adf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED92                                        0x5ae0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED93                                        0x5ae1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED94                                        0x5ae2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED95                                        0x5ae3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED96                                        0x5ae4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED97                                        0x5ae5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED98                                        0x5ae6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED99                                        0x5ae7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED100                                       0x5ae8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED101                                       0x5ae9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED102                                       0x5aea
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED103                                       0x5aeb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED104                                       0x5aec
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED105                                       0x5aed
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED106                                       0x5aee
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED107                                       0x5aef
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED108                                       0x5af0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED109                                       0x5af1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED110                                       0x5af2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED111                                       0x5af3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED112                                       0x5af4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED113                                       0x5af5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED114                                       0x5af6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED115                                       0x5af7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED116                                       0x5af8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED117                                       0x5af9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED118                                       0x5afa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED119                                       0x5afb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED120                                       0x5afc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED121                                       0x5afd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED122                                       0x5afe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED123                                       0x5aff
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED124                                       0x5b00
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED125                                       0x5b01
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED126                                       0x5b02
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED127                                       0x5b03
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED128                                       0x5b04
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED129                                       0x5b05
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED130                                       0x5b06
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED131                                       0x5b07
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED132                                       0x5b08
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED133                                       0x5b09
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED134                                       0x5b0a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED135                                       0x5b0b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED136                                       0x5b0c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED137                                       0x5b0d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED138                                       0x5b0e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED139                                       0x5b0f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED140                                       0x5b10
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED141                                       0x5b11
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED142                                       0x5b12
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED143                                       0x5b13
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED144                                       0x5b14
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED145                                       0x5b15
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED146                                       0x5b16
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED147                                       0x5b17
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED148                                       0x5b18
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED149                                       0x5b19
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED150                                       0x5b1a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED151                                       0x5b1b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED152                                       0x5b1c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED153                                       0x5b1d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED154                                       0x5b1e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED155                                       0x5b1f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED156                                       0x5b20
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED157                                       0x5b21
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED158                                       0x5b22
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED159                                       0x5b23
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED160                                       0x5b24
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED161                                       0x5b25
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED162                                       0x5b26
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED163                                       0x5b27
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED164                                       0x5b28
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED165                                       0x5b29
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED166                                       0x5b2a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED167                                       0x5b2b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED168                                       0x5b2c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED169                                       0x5b2d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED170                                       0x5b2e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED171                                       0x5b2f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED172                                       0x5b30
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED173                                       0x5b31
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED174                                       0x5b32
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED175                                       0x5b33
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED176                                       0x5b34
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED177                                       0x5b35
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED178                                       0x5b36
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED179                                       0x5b37
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED180                                       0x5b38
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED181                                       0x5b39
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED182                                       0x5b3a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED183                                       0x5b3b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED184                                       0x5b3c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED185                                       0x5b3d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED186                                       0x5b3e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED187                                       0x5b3f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED188                                       0x5b40
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED189                                       0x5b41
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED190                                       0x5b42
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED191                                       0x5b43
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED192                                       0x5b44
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED193                                       0x5b45
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED194                                       0x5b46
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED195                                       0x5b47
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED196                                       0x5b48
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED197                                       0x5b49
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED198                                       0x5b4a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED199                                       0x5b4b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED200                                       0x5b4c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED201                                       0x5b4d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED202                                       0x5b4e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED203                                       0x5b4f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED204                                       0x5b50
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED205                                       0x5b51
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED206                                       0x5b52
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED207                                       0x5b53
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED208                                       0x5b54
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED209                                       0x5b55
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED210                                       0x5b56
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED211                                       0x5b57
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED212                                       0x5b58
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED213                                       0x5b59
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED214                                       0x5b5a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED215                                       0x5b5b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED216                                       0x5b5c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED217                                       0x5b5d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED218                                       0x5b5e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED219                                       0x5b5f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED220                                       0x5b60
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED221                                       0x5b61
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED222                                       0x5b62
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED223                                       0x5b63
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED224                                       0x5b64
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED225                                       0x5b65
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED226                                       0x5b66
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED227                                       0x5b67
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED228                                       0x5b68
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED229                                       0x5b69
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED230                                       0x5b6a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED231                                       0x5b6b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED232                                       0x5b6c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED233                                       0x5b6d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED234                                       0x5b6e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED235                                       0x5b6f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED236                                       0x5b70
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED237                                       0x5b71
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED238                                       0x5b72
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED239                                       0x5b73
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED240                                       0x5b74
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED241                                       0x5b75
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED242                                       0x5b76
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED243                                       0x5b77
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED244                                       0x5b78
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED245                                       0x5b79
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED246                                       0x5b7a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED247                                       0x5b7b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED248                                       0x5b7c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED249                                       0x5b7d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED250                                       0x5b7e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED251                                       0x5b7f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED252                                       0x5b80
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED253                                       0x5b81
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED254                                       0x5b82
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED255                                       0x5b83
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED256                                       0x5b84
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED257                                       0x5b85
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED258                                       0x5b86
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED259                                       0x5b87
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED260                                       0x5b88
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED261                                       0x5b89
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED262                                       0x5b8a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED263                                       0x5b8b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED264                                       0x5b8c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED265                                       0x5b8d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED266                                       0x5b8e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED267                                       0x5b8f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED268                                       0x5b90
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED269                                       0x5b91
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED270                                       0x5b92
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED271                                       0x5b93
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED272                                       0x5b94
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED273                                       0x5b95
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED274                                       0x5b96
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED275                                       0x5b97
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED276                                       0x5b98
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED277                                       0x5b99
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED278                                       0x5b9a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED279                                       0x5b9b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED280                                       0x5b9c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED281                                       0x5b9d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED282                                       0x5b9e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED283                                       0x5b9f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED284                                       0x5ba0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED285                                       0x5ba1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED286                                       0x5ba2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED287                                       0x5ba3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED288                                       0x5ba4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED289                                       0x5ba5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED290                                       0x5ba6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED291                                       0x5ba7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED292                                       0x5ba8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED293                                       0x5ba9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED294                                       0x5baa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED295                                       0x5bab
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED296                                       0x5bac
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED297                                       0x5bad
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED298                                       0x5bae
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED299                                       0x5baf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED300                                       0x5bb0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED301                                       0x5bb1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED302                                       0x5bb2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED303                                       0x5bb3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED304                                       0x5bb4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED305                                       0x5bb5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED306                                       0x5bb6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED307                                       0x5bb7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED308                                       0x5bb8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED309                                       0x5bb9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED310                                       0x5bba
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED311                                       0x5bbb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED312                                       0x5bbc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED313                                       0x5bbd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED314                                       0x5bbe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED315                                       0x5bbf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED316                                       0x5bc0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED317                                       0x5bc1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED318                                       0x5bc2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED319                                       0x5bc3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED320                                       0x5bc4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED321                                       0x5bc5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED322                                       0x5bc6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED323                                       0x5bc7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED324                                       0x5bc8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED325                                       0x5bc9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED326                                       0x5bca
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED327                                       0x5bcb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED328                                       0x5bcc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED329                                       0x5bcd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED330                                       0x5bce
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED331                                       0x5bcf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED332                                       0x5bd0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED333                                       0x5bd1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED334                                       0x5bd2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED335                                       0x5bd3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED336                                       0x5bd4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED337                                       0x5bd5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED338                                       0x5bd6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED339                                       0x5bd7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED340                                       0x5bd8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED341                                       0x5bd9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED342                                       0x5bda
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED343                                       0x5bdb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED344                                       0x5bdc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED345                                       0x5bdd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED346                                       0x5bde
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED347                                       0x5bdf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED348                                       0x5be0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED349                                       0x5be1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED350                                       0x5be2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED351                                       0x5be3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED352                                       0x5be4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED353                                       0x5be5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED354                                       0x5be6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED355                                       0x5be7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED356                                       0x5be8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED357                                       0x5be9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED358                                       0x5bea
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED359                                       0x5beb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED360                                       0x5bec
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED361                                       0x5bed
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED362                                       0x5bee
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED363                                       0x5bef
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED364                                       0x5bf0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED365                                       0x5bf1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED366                                       0x5bf2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED367                                       0x5bf3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED368                                       0x5bf4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED369                                       0x5bf5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED370                                       0x5bf6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED371                                       0x5bf7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED372                                       0x5bf8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED373                                       0x5bf9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED374                                       0x5bfa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED375                                       0x5bfb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED376                                       0x5bfc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED377                                       0x5bfd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED378                                       0x5bfe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED379                                       0x5bff
+#define mmDPHY_MACRO_CNTL_RESERVED0                                             0x5d98
+#define mmDPHY_MACRO_CNTL_RESERVED1                                             0x5d99
+#define mmDPHY_MACRO_CNTL_RESERVED2                                             0x5d9a
+#define mmDPHY_MACRO_CNTL_RESERVED3                                             0x5d9b
+#define mmDPHY_MACRO_CNTL_RESERVED4                                             0x5d9c
+#define mmDPHY_MACRO_CNTL_RESERVED5                                             0x5d9d
+#define mmDPHY_MACRO_CNTL_RESERVED6                                             0x5d9e
+#define mmDPHY_MACRO_CNTL_RESERVED7                                             0x5d9f
+#define mmDPHY_MACRO_CNTL_RESERVED8                                             0x5da0
+#define mmDPHY_MACRO_CNTL_RESERVED9                                             0x5da1
+#define mmDPHY_MACRO_CNTL_RESERVED10                                            0x5da2
+#define mmDPHY_MACRO_CNTL_RESERVED11                                            0x5da3
+#define mmDPHY_MACRO_CNTL_RESERVED12                                            0x5da4
+#define mmDPHY_MACRO_CNTL_RESERVED13                                            0x5da5
+#define mmDPHY_MACRO_CNTL_RESERVED14                                            0x5da6
+#define mmDPHY_MACRO_CNTL_RESERVED15                                            0x5da7
+#define mmDPHY_MACRO_CNTL_RESERVED16                                            0x5da8
+#define mmDPHY_MACRO_CNTL_RESERVED17                                            0x5da9
+#define mmDPHY_MACRO_CNTL_RESERVED18                                            0x5daa
+#define mmDPHY_MACRO_CNTL_RESERVED19                                            0x5dab
+#define mmDPHY_MACRO_CNTL_RESERVED20                                            0x5dac
+#define mmDPHY_MACRO_CNTL_RESERVED21                                            0x5dad
+#define mmDPHY_MACRO_CNTL_RESERVED22                                            0x5dae
+#define mmDPHY_MACRO_CNTL_RESERVED23                                            0x5daf
+#define mmDPHY_MACRO_CNTL_RESERVED24                                            0x5db0
+#define mmDPHY_MACRO_CNTL_RESERVED25                                            0x5db1
+#define mmDPHY_MACRO_CNTL_RESERVED26                                            0x5db2
+#define mmDPHY_MACRO_CNTL_RESERVED27                                            0x5db3
+#define mmDPHY_MACRO_CNTL_RESERVED28                                            0x5db4
+#define mmDPHY_MACRO_CNTL_RESERVED29                                            0x5db5
+#define mmDPHY_MACRO_CNTL_RESERVED30                                            0x5db6
+#define mmDPHY_MACRO_CNTL_RESERVED31                                            0x5db7
+#define mmDPHY_MACRO_CNTL_RESERVED32                                            0x5db8
+#define mmDPHY_MACRO_CNTL_RESERVED33                                            0x5db9
+#define mmDPHY_MACRO_CNTL_RESERVED34                                            0x5dba
+#define mmDPHY_MACRO_CNTL_RESERVED35                                            0x5dbb
+#define mmDPHY_MACRO_CNTL_RESERVED36                                            0x5dbc
+#define mmDPHY_MACRO_CNTL_RESERVED37                                            0x5dbd
+#define mmDPHY_MACRO_CNTL_RESERVED38                                            0x5dbe
+#define mmDPHY_MACRO_CNTL_RESERVED39                                            0x5dbf
+#define mmDPHY_MACRO_CNTL_RESERVED40                                            0x5dc0
+#define mmDPHY_MACRO_CNTL_RESERVED41                                            0x5dc1
+#define mmDPHY_MACRO_CNTL_RESERVED42                                            0x5dc2
+#define mmDPHY_MACRO_CNTL_RESERVED43                                            0x5dc3
+#define mmDPHY_MACRO_CNTL_RESERVED44                                            0x5dc4
+#define mmDPHY_MACRO_CNTL_RESERVED45                                            0x5dc5
+#define mmDPHY_MACRO_CNTL_RESERVED46                                            0x5dc6
+#define mmDPHY_MACRO_CNTL_RESERVED47                                            0x5dc7
+#define mmDPHY_MACRO_CNTL_RESERVED48                                            0x5dc8
+#define mmDPHY_MACRO_CNTL_RESERVED49                                            0x5dc9
+#define mmDPHY_MACRO_CNTL_RESERVED50                                            0x5dca
+#define mmDPHY_MACRO_CNTL_RESERVED51                                            0x5dcb
+#define mmDPHY_MACRO_CNTL_RESERVED52                                            0x5dcc
+#define mmDPHY_MACRO_CNTL_RESERVED53                                            0x5dcd
+#define mmDPHY_MACRO_CNTL_RESERVED54                                            0x5dce
+#define mmDPHY_MACRO_CNTL_RESERVED55                                            0x5dcf
+#define mmDPHY_MACRO_CNTL_RESERVED56                                            0x5dd0
+#define mmDPHY_MACRO_CNTL_RESERVED57                                            0x5dd1
+#define mmDPHY_MACRO_CNTL_RESERVED58                                            0x5dd2
+#define mmDPHY_MACRO_CNTL_RESERVED59                                            0x5dd3
+#define mmDPHY_MACRO_CNTL_RESERVED60                                            0x5dd4
+#define mmDPHY_MACRO_CNTL_RESERVED61                                            0x5dd5
+#define mmDPHY_MACRO_CNTL_RESERVED62                                            0x5dd6
+#define mmDPHY_MACRO_CNTL_RESERVED63                                            0x5dd7
+#define mmGRPH_ENABLE                                                           0x1a00
+#define mmDCP0_GRPH_ENABLE                                                      0x1a00
+#define mmDCP1_GRPH_ENABLE                                                      0x1c00
+#define mmDCP2_GRPH_ENABLE                                                      0x1e00
+#define mmDCP3_GRPH_ENABLE                                                      0x4000
+#define mmDCP4_GRPH_ENABLE                                                      0x4200
+#define mmDCP5_GRPH_ENABLE                                                      0x4400
+#define mmGRPH_CONTROL                                                          0x1a01
+#define mmDCP0_GRPH_CONTROL                                                     0x1a01
+#define mmDCP1_GRPH_CONTROL                                                     0x1c01
+#define mmDCP2_GRPH_CONTROL                                                     0x1e01
+#define mmDCP3_GRPH_CONTROL                                                     0x4001
+#define mmDCP4_GRPH_CONTROL                                                     0x4201
+#define mmDCP5_GRPH_CONTROL                                                     0x4401
+#define mmGRPH_LUT_10BIT_BYPASS                                                 0x1a02
+#define mmDCP0_GRPH_LUT_10BIT_BYPASS                                            0x1a02
+#define mmDCP1_GRPH_LUT_10BIT_BYPASS                                            0x1c02
+#define mmDCP2_GRPH_LUT_10BIT_BYPASS                                            0x1e02
+#define mmDCP3_GRPH_LUT_10BIT_BYPASS                                            0x4002
+#define mmDCP4_GRPH_LUT_10BIT_BYPASS                                            0x4202
+#define mmDCP5_GRPH_LUT_10BIT_BYPASS                                            0x4402
+#define mmGRPH_SWAP_CNTL                                                        0x1a03
+#define mmDCP0_GRPH_SWAP_CNTL                                                   0x1a03
+#define mmDCP1_GRPH_SWAP_CNTL                                                   0x1c03
+#define mmDCP2_GRPH_SWAP_CNTL                                                   0x1e03
+#define mmDCP3_GRPH_SWAP_CNTL                                                   0x4003
+#define mmDCP4_GRPH_SWAP_CNTL                                                   0x4203
+#define mmDCP5_GRPH_SWAP_CNTL                                                   0x4403
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS                                          0x1a04
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS                                     0x1a04
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS                                     0x1c04
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS                                     0x1e04
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS                                     0x4004
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS                                     0x4204
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS                                     0x4404
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS                                        0x1a05
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS                                   0x1a05
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS                                   0x1c05
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS                                   0x1e05
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS                                   0x4005
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS                                   0x4205
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS                                   0x4405
+#define mmGRPH_PITCH                                                            0x1a06
+#define mmDCP0_GRPH_PITCH                                                       0x1a06
+#define mmDCP1_GRPH_PITCH                                                       0x1c06
+#define mmDCP2_GRPH_PITCH                                                       0x1e06
+#define mmDCP3_GRPH_PITCH                                                       0x4006
+#define mmDCP4_GRPH_PITCH                                                       0x4206
+#define mmDCP5_GRPH_PITCH                                                       0x4406
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                     0x1a07
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                0x1a07
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                0x1c07
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                0x1e07
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                0x4007
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                0x4207
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                                0x4407
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH                                   0x1a08
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH                              0x1a08
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH                              0x1c08
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH                              0x1e08
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH                              0x4008
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH                              0x4208
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH                              0x4408
+#define mmGRPH_SURFACE_OFFSET_X                                                 0x1a09
+#define mmDCP0_GRPH_SURFACE_OFFSET_X                                            0x1a09
+#define mmDCP1_GRPH_SURFACE_OFFSET_X                                            0x1c09
+#define mmDCP2_GRPH_SURFACE_OFFSET_X                                            0x1e09
+#define mmDCP3_GRPH_SURFACE_OFFSET_X                                            0x4009
+#define mmDCP4_GRPH_SURFACE_OFFSET_X                                            0x4209
+#define mmDCP5_GRPH_SURFACE_OFFSET_X                                            0x4409
+#define mmGRPH_SURFACE_OFFSET_Y                                                 0x1a0a
+#define mmDCP0_GRPH_SURFACE_OFFSET_Y                                            0x1a0a
+#define mmDCP1_GRPH_SURFACE_OFFSET_Y                                            0x1c0a
+#define mmDCP2_GRPH_SURFACE_OFFSET_Y                                            0x1e0a
+#define mmDCP3_GRPH_SURFACE_OFFSET_Y                                            0x400a
+#define mmDCP4_GRPH_SURFACE_OFFSET_Y                                            0x420a
+#define mmDCP5_GRPH_SURFACE_OFFSET_Y                                            0x440a
+#define mmGRPH_X_START                                                          0x1a0b
+#define mmDCP0_GRPH_X_START                                                     0x1a0b
+#define mmDCP1_GRPH_X_START                                                     0x1c0b
+#define mmDCP2_GRPH_X_START                                                     0x1e0b
+#define mmDCP3_GRPH_X_START                                                     0x400b
+#define mmDCP4_GRPH_X_START                                                     0x420b
+#define mmDCP5_GRPH_X_START                                                     0x440b
+#define mmGRPH_Y_START                                                          0x1a0c
+#define mmDCP0_GRPH_Y_START                                                     0x1a0c
+#define mmDCP1_GRPH_Y_START                                                     0x1c0c
+#define mmDCP2_GRPH_Y_START                                                     0x1e0c
+#define mmDCP3_GRPH_Y_START                                                     0x400c
+#define mmDCP4_GRPH_Y_START                                                     0x420c
+#define mmDCP5_GRPH_Y_START                                                     0x440c
+#define mmGRPH_X_END                                                            0x1a0d
+#define mmDCP0_GRPH_X_END                                                       0x1a0d
+#define mmDCP1_GRPH_X_END                                                       0x1c0d
+#define mmDCP2_GRPH_X_END                                                       0x1e0d
+#define mmDCP3_GRPH_X_END                                                       0x400d
+#define mmDCP4_GRPH_X_END                                                       0x420d
+#define mmDCP5_GRPH_X_END                                                       0x440d
+#define mmGRPH_Y_END                                                            0x1a0e
+#define mmDCP0_GRPH_Y_END                                                       0x1a0e
+#define mmDCP1_GRPH_Y_END                                                       0x1c0e
+#define mmDCP2_GRPH_Y_END                                                       0x1e0e
+#define mmDCP3_GRPH_Y_END                                                       0x400e
+#define mmDCP4_GRPH_Y_END                                                       0x420e
+#define mmDCP5_GRPH_Y_END                                                       0x440e
+#define mmINPUT_GAMMA_CONTROL                                                   0x1a10
+#define mmDCP0_INPUT_GAMMA_CONTROL                                              0x1a10
+#define mmDCP1_INPUT_GAMMA_CONTROL                                              0x1c10
+#define mmDCP2_INPUT_GAMMA_CONTROL                                              0x1e10
+#define mmDCP3_INPUT_GAMMA_CONTROL                                              0x4010
+#define mmDCP4_INPUT_GAMMA_CONTROL                                              0x4210
+#define mmDCP5_INPUT_GAMMA_CONTROL                                              0x4410
+#define mmGRPH_UPDATE                                                           0x1a11
+#define mmDCP0_GRPH_UPDATE                                                      0x1a11
+#define mmDCP1_GRPH_UPDATE                                                      0x1c11
+#define mmDCP2_GRPH_UPDATE                                                      0x1e11
+#define mmDCP3_GRPH_UPDATE                                                      0x4011
+#define mmDCP4_GRPH_UPDATE                                                      0x4211
+#define mmDCP5_GRPH_UPDATE                                                      0x4411
+#define mmGRPH_FLIP_CONTROL                                                     0x1a12
+#define mmDCP0_GRPH_FLIP_CONTROL                                                0x1a12
+#define mmDCP1_GRPH_FLIP_CONTROL                                                0x1c12
+#define mmDCP2_GRPH_FLIP_CONTROL                                                0x1e12
+#define mmDCP3_GRPH_FLIP_CONTROL                                                0x4012
+#define mmDCP4_GRPH_FLIP_CONTROL                                                0x4212
+#define mmDCP5_GRPH_FLIP_CONTROL                                                0x4412
+#define mmGRPH_SURFACE_ADDRESS_INUSE                                            0x1a13
+#define mmDCP0_GRPH_SURFACE_ADDRESS_INUSE                                       0x1a13
+#define mmDCP1_GRPH_SURFACE_ADDRESS_INUSE                                       0x1c13
+#define mmDCP2_GRPH_SURFACE_ADDRESS_INUSE                                       0x1e13
+#define mmDCP3_GRPH_SURFACE_ADDRESS_INUSE                                       0x4013
+#define mmDCP4_GRPH_SURFACE_ADDRESS_INUSE                                       0x4213
+#define mmDCP5_GRPH_SURFACE_ADDRESS_INUSE                                       0x4413
+#define mmGRPH_DFQ_CONTROL                                                      0x1a14
+#define mmDCP0_GRPH_DFQ_CONTROL                                                 0x1a14
+#define mmDCP1_GRPH_DFQ_CONTROL                                                 0x1c14
+#define mmDCP2_GRPH_DFQ_CONTROL                                                 0x1e14
+#define mmDCP3_GRPH_DFQ_CONTROL                                                 0x4014
+#define mmDCP4_GRPH_DFQ_CONTROL                                                 0x4214
+#define mmDCP5_GRPH_DFQ_CONTROL                                                 0x4414
+#define mmGRPH_DFQ_STATUS                                                       0x1a15
+#define mmDCP0_GRPH_DFQ_STATUS                                                  0x1a15
+#define mmDCP1_GRPH_DFQ_STATUS                                                  0x1c15
+#define mmDCP2_GRPH_DFQ_STATUS                                                  0x1e15
+#define mmDCP3_GRPH_DFQ_STATUS                                                  0x4015
+#define mmDCP4_GRPH_DFQ_STATUS                                                  0x4215
+#define mmDCP5_GRPH_DFQ_STATUS                                                  0x4415
+#define mmGRPH_INTERRUPT_STATUS                                                 0x1a16
+#define mmDCP0_GRPH_INTERRUPT_STATUS                                            0x1a16
+#define mmDCP1_GRPH_INTERRUPT_STATUS                                            0x1c16
+#define mmDCP2_GRPH_INTERRUPT_STATUS                                            0x1e16
+#define mmDCP3_GRPH_INTERRUPT_STATUS                                            0x4016
+#define mmDCP4_GRPH_INTERRUPT_STATUS                                            0x4216
+#define mmDCP5_GRPH_INTERRUPT_STATUS                                            0x4416
+#define mmGRPH_INTERRUPT_CONTROL                                                0x1a17
+#define mmDCP0_GRPH_INTERRUPT_CONTROL                                           0x1a17
+#define mmDCP1_GRPH_INTERRUPT_CONTROL                                           0x1c17
+#define mmDCP2_GRPH_INTERRUPT_CONTROL                                           0x1e17
+#define mmDCP3_GRPH_INTERRUPT_CONTROL                                           0x4017
+#define mmDCP4_GRPH_INTERRUPT_CONTROL                                           0x4217
+#define mmDCP5_GRPH_INTERRUPT_CONTROL                                           0x4417
+#define mmGRPH_SURFACE_ADDRESS_HIGH_INUSE                                       0x1a18
+#define mmDCP0_GRPH_SURFACE_ADDRESS_HIGH_INUSE                                  0x1a18
+#define mmDCP1_GRPH_SURFACE_ADDRESS_HIGH_INUSE                                  0x1c18
+#define mmDCP2_GRPH_SURFACE_ADDRESS_HIGH_INUSE                                  0x1e18
+#define mmDCP3_GRPH_SURFACE_ADDRESS_HIGH_INUSE                                  0x4018
+#define mmDCP4_GRPH_SURFACE_ADDRESS_HIGH_INUSE                                  0x4218
+#define mmDCP5_GRPH_SURFACE_ADDRESS_HIGH_INUSE                                  0x4418
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS                                         0x1a19
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS                                    0x1a19
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS                                    0x1c19
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS                                    0x1e19
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS                                    0x4019
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS                                    0x4219
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS                                    0x4419
+#define mmGRPH_COMPRESS_PITCH                                                   0x1a1a
+#define mmDCP0_GRPH_COMPRESS_PITCH                                              0x1a1a
+#define mmDCP1_GRPH_COMPRESS_PITCH                                              0x1c1a
+#define mmDCP2_GRPH_COMPRESS_PITCH                                              0x1e1a
+#define mmDCP3_GRPH_COMPRESS_PITCH                                              0x401a
+#define mmDCP4_GRPH_COMPRESS_PITCH                                              0x421a
+#define mmDCP5_GRPH_COMPRESS_PITCH                                              0x441a
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH                                    0x1a1b
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH                               0x1a1b
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH                               0x1c1b
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH                               0x1e1b
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH                               0x401b
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH                               0x421b
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH                               0x441b
+#define mmGRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                                   0x1a1c
+#define mmDCP0_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                              0x1a1c
+#define mmDCP1_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                              0x1c1c
+#define mmDCP2_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                              0x1e1c
+#define mmDCP3_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                              0x401c
+#define mmDCP4_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                              0x421c
+#define mmDCP5_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT                              0x441c
+#define mmPRESCALE_GRPH_CONTROL                                                 0x1a2d
+#define mmDCP0_PRESCALE_GRPH_CONTROL                                            0x1a2d
+#define mmDCP1_PRESCALE_GRPH_CONTROL                                            0x1c2d
+#define mmDCP2_PRESCALE_GRPH_CONTROL                                            0x1e2d
+#define mmDCP3_PRESCALE_GRPH_CONTROL                                            0x402d
+#define mmDCP4_PRESCALE_GRPH_CONTROL                                            0x422d
+#define mmDCP5_PRESCALE_GRPH_CONTROL                                            0x442d
+#define mmPRESCALE_VALUES_GRPH_R                                                0x1a2e
+#define mmDCP0_PRESCALE_VALUES_GRPH_R                                           0x1a2e
+#define mmDCP1_PRESCALE_VALUES_GRPH_R                                           0x1c2e
+#define mmDCP2_PRESCALE_VALUES_GRPH_R                                           0x1e2e
+#define mmDCP3_PRESCALE_VALUES_GRPH_R                                           0x402e
+#define mmDCP4_PRESCALE_VALUES_GRPH_R                                           0x422e
+#define mmDCP5_PRESCALE_VALUES_GRPH_R                                           0x442e
+#define mmPRESCALE_VALUES_GRPH_G                                                0x1a2f
+#define mmDCP0_PRESCALE_VALUES_GRPH_G                                           0x1a2f
+#define mmDCP1_PRESCALE_VALUES_GRPH_G                                           0x1c2f
+#define mmDCP2_PRESCALE_VALUES_GRPH_G                                           0x1e2f
+#define mmDCP3_PRESCALE_VALUES_GRPH_G                                           0x402f
+#define mmDCP4_PRESCALE_VALUES_GRPH_G                                           0x422f
+#define mmDCP5_PRESCALE_VALUES_GRPH_G                                           0x442f
+#define mmPRESCALE_VALUES_GRPH_B                                                0x1a30
+#define mmDCP0_PRESCALE_VALUES_GRPH_B                                           0x1a30
+#define mmDCP1_PRESCALE_VALUES_GRPH_B                                           0x1c30
+#define mmDCP2_PRESCALE_VALUES_GRPH_B                                           0x1e30
+#define mmDCP3_PRESCALE_VALUES_GRPH_B                                           0x4030
+#define mmDCP4_PRESCALE_VALUES_GRPH_B                                           0x4230
+#define mmDCP5_PRESCALE_VALUES_GRPH_B                                           0x4430
+#define mmINPUT_CSC_CONTROL                                                     0x1a35
+#define mmDCP0_INPUT_CSC_CONTROL                                                0x1a35
+#define mmDCP1_INPUT_CSC_CONTROL                                                0x1c35
+#define mmDCP2_INPUT_CSC_CONTROL                                                0x1e35
+#define mmDCP3_INPUT_CSC_CONTROL                                                0x4035
+#define mmDCP4_INPUT_CSC_CONTROL                                                0x4235
+#define mmDCP5_INPUT_CSC_CONTROL                                                0x4435
+#define mmINPUT_CSC_C11_C12                                                     0x1a36
+#define mmDCP0_INPUT_CSC_C11_C12                                                0x1a36
+#define mmDCP1_INPUT_CSC_C11_C12                                                0x1c36
+#define mmDCP2_INPUT_CSC_C11_C12                                                0x1e36
+#define mmDCP3_INPUT_CSC_C11_C12                                                0x4036
+#define mmDCP4_INPUT_CSC_C11_C12                                                0x4236
+#define mmDCP5_INPUT_CSC_C11_C12                                                0x4436
+#define mmINPUT_CSC_C13_C14                                                     0x1a37
+#define mmDCP0_INPUT_CSC_C13_C14                                                0x1a37
+#define mmDCP1_INPUT_CSC_C13_C14                                                0x1c37
+#define mmDCP2_INPUT_CSC_C13_C14                                                0x1e37
+#define mmDCP3_INPUT_CSC_C13_C14                                                0x4037
+#define mmDCP4_INPUT_CSC_C13_C14                                                0x4237
+#define mmDCP5_INPUT_CSC_C13_C14                                                0x4437
+#define mmINPUT_CSC_C21_C22                                                     0x1a38
+#define mmDCP0_INPUT_CSC_C21_C22                                                0x1a38
+#define mmDCP1_INPUT_CSC_C21_C22                                                0x1c38
+#define mmDCP2_INPUT_CSC_C21_C22                                                0x1e38
+#define mmDCP3_INPUT_CSC_C21_C22                                                0x4038
+#define mmDCP4_INPUT_CSC_C21_C22                                                0x4238
+#define mmDCP5_INPUT_CSC_C21_C22                                                0x4438
+#define mmINPUT_CSC_C23_C24                                                     0x1a39
+#define mmDCP0_INPUT_CSC_C23_C24                                                0x1a39
+#define mmDCP1_INPUT_CSC_C23_C24                                                0x1c39
+#define mmDCP2_INPUT_CSC_C23_C24                                                0x1e39
+#define mmDCP3_INPUT_CSC_C23_C24                                                0x4039
+#define mmDCP4_INPUT_CSC_C23_C24                                                0x4239
+#define mmDCP5_INPUT_CSC_C23_C24                                                0x4439
+#define mmINPUT_CSC_C31_C32                                                     0x1a3a
+#define mmDCP0_INPUT_CSC_C31_C32                                                0x1a3a
+#define mmDCP1_INPUT_CSC_C31_C32                                                0x1c3a
+#define mmDCP2_INPUT_CSC_C31_C32                                                0x1e3a
+#define mmDCP3_INPUT_CSC_C31_C32                                                0x403a
+#define mmDCP4_INPUT_CSC_C31_C32                                                0x423a
+#define mmDCP5_INPUT_CSC_C31_C32                                                0x443a
+#define mmINPUT_CSC_C33_C34                                                     0x1a3b
+#define mmDCP0_INPUT_CSC_C33_C34                                                0x1a3b
+#define mmDCP1_INPUT_CSC_C33_C34                                                0x1c3b
+#define mmDCP2_INPUT_CSC_C33_C34                                                0x1e3b
+#define mmDCP3_INPUT_CSC_C33_C34                                                0x403b
+#define mmDCP4_INPUT_CSC_C33_C34                                                0x423b
+#define mmDCP5_INPUT_CSC_C33_C34                                                0x443b
+#define mmOUTPUT_CSC_CONTROL                                                    0x1a3c
+#define mmDCP0_OUTPUT_CSC_CONTROL                                               0x1a3c
+#define mmDCP1_OUTPUT_CSC_CONTROL                                               0x1c3c
+#define mmDCP2_OUTPUT_CSC_CONTROL                                               0x1e3c
+#define mmDCP3_OUTPUT_CSC_CONTROL                                               0x403c
+#define mmDCP4_OUTPUT_CSC_CONTROL                                               0x423c
+#define mmDCP5_OUTPUT_CSC_CONTROL                                               0x443c
+#define mmOUTPUT_CSC_C11_C12                                                    0x1a3d
+#define mmDCP0_OUTPUT_CSC_C11_C12                                               0x1a3d
+#define mmDCP1_OUTPUT_CSC_C11_C12                                               0x1c3d
+#define mmDCP2_OUTPUT_CSC_C11_C12                                               0x1e3d
+#define mmDCP3_OUTPUT_CSC_C11_C12                                               0x403d
+#define mmDCP4_OUTPUT_CSC_C11_C12                                               0x423d
+#define mmDCP5_OUTPUT_CSC_C11_C12                                               0x443d
+#define mmOUTPUT_CSC_C13_C14                                                    0x1a3e
+#define mmDCP0_OUTPUT_CSC_C13_C14                                               0x1a3e
+#define mmDCP1_OUTPUT_CSC_C13_C14                                               0x1c3e
+#define mmDCP2_OUTPUT_CSC_C13_C14                                               0x1e3e
+#define mmDCP3_OUTPUT_CSC_C13_C14                                               0x403e
+#define mmDCP4_OUTPUT_CSC_C13_C14                                               0x423e
+#define mmDCP5_OUTPUT_CSC_C13_C14                                               0x443e
+#define mmOUTPUT_CSC_C21_C22                                                    0x1a3f
+#define mmDCP0_OUTPUT_CSC_C21_C22                                               0x1a3f
+#define mmDCP1_OUTPUT_CSC_C21_C22                                               0x1c3f
+#define mmDCP2_OUTPUT_CSC_C21_C22                                               0x1e3f
+#define mmDCP3_OUTPUT_CSC_C21_C22                                               0x403f
+#define mmDCP4_OUTPUT_CSC_C21_C22                                               0x423f
+#define mmDCP5_OUTPUT_CSC_C21_C22                                               0x443f
+#define mmOUTPUT_CSC_C23_C24                                                    0x1a40
+#define mmDCP0_OUTPUT_CSC_C23_C24                                               0x1a40
+#define mmDCP1_OUTPUT_CSC_C23_C24                                               0x1c40
+#define mmDCP2_OUTPUT_CSC_C23_C24                                               0x1e40
+#define mmDCP3_OUTPUT_CSC_C23_C24                                               0x4040
+#define mmDCP4_OUTPUT_CSC_C23_C24                                               0x4240
+#define mmDCP5_OUTPUT_CSC_C23_C24                                               0x4440
+#define mmOUTPUT_CSC_C31_C32                                                    0x1a41
+#define mmDCP0_OUTPUT_CSC_C31_C32                                               0x1a41
+#define mmDCP1_OUTPUT_CSC_C31_C32                                               0x1c41
+#define mmDCP2_OUTPUT_CSC_C31_C32                                               0x1e41
+#define mmDCP3_OUTPUT_CSC_C31_C32                                               0x4041
+#define mmDCP4_OUTPUT_CSC_C31_C32                                               0x4241
+#define mmDCP5_OUTPUT_CSC_C31_C32                                               0x4441
+#define mmOUTPUT_CSC_C33_C34                                                    0x1a42
+#define mmDCP0_OUTPUT_CSC_C33_C34                                               0x1a42
+#define mmDCP1_OUTPUT_CSC_C33_C34                                               0x1c42
+#define mmDCP2_OUTPUT_CSC_C33_C34                                               0x1e42
+#define mmDCP3_OUTPUT_CSC_C33_C34                                               0x4042
+#define mmDCP4_OUTPUT_CSC_C33_C34                                               0x4242
+#define mmDCP5_OUTPUT_CSC_C33_C34                                               0x4442
+#define mmCOMM_MATRIXA_TRANS_C11_C12                                            0x1a43
+#define mmDCP0_COMM_MATRIXA_TRANS_C11_C12                                       0x1a43
+#define mmDCP1_COMM_MATRIXA_TRANS_C11_C12                                       0x1c43
+#define mmDCP2_COMM_MATRIXA_TRANS_C11_C12                                       0x1e43
+#define mmDCP3_COMM_MATRIXA_TRANS_C11_C12                                       0x4043
+#define mmDCP4_COMM_MATRIXA_TRANS_C11_C12                                       0x4243
+#define mmDCP5_COMM_MATRIXA_TRANS_C11_C12                                       0x4443
+#define mmCOMM_MATRIXA_TRANS_C13_C14                                            0x1a44
+#define mmDCP0_COMM_MATRIXA_TRANS_C13_C14                                       0x1a44
+#define mmDCP1_COMM_MATRIXA_TRANS_C13_C14                                       0x1c44
+#define mmDCP2_COMM_MATRIXA_TRANS_C13_C14                                       0x1e44
+#define mmDCP3_COMM_MATRIXA_TRANS_C13_C14                                       0x4044
+#define mmDCP4_COMM_MATRIXA_TRANS_C13_C14                                       0x4244
+#define mmDCP5_COMM_MATRIXA_TRANS_C13_C14                                       0x4444
+#define mmCOMM_MATRIXA_TRANS_C21_C22                                            0x1a45
+#define mmDCP0_COMM_MATRIXA_TRANS_C21_C22                                       0x1a45
+#define mmDCP1_COMM_MATRIXA_TRANS_C21_C22                                       0x1c45
+#define mmDCP2_COMM_MATRIXA_TRANS_C21_C22                                       0x1e45
+#define mmDCP3_COMM_MATRIXA_TRANS_C21_C22                                       0x4045
+#define mmDCP4_COMM_MATRIXA_TRANS_C21_C22                                       0x4245
+#define mmDCP5_COMM_MATRIXA_TRANS_C21_C22                                       0x4445
+#define mmCOMM_MATRIXA_TRANS_C23_C24                                            0x1a46
+#define mmDCP0_COMM_MATRIXA_TRANS_C23_C24                                       0x1a46
+#define mmDCP1_COMM_MATRIXA_TRANS_C23_C24                                       0x1c46
+#define mmDCP2_COMM_MATRIXA_TRANS_C23_C24                                       0x1e46
+#define mmDCP3_COMM_MATRIXA_TRANS_C23_C24                                       0x4046
+#define mmDCP4_COMM_MATRIXA_TRANS_C23_C24                                       0x4246
+#define mmDCP5_COMM_MATRIXA_TRANS_C23_C24                                       0x4446
+#define mmCOMM_MATRIXA_TRANS_C31_C32                                            0x1a47
+#define mmDCP0_COMM_MATRIXA_TRANS_C31_C32                                       0x1a47
+#define mmDCP1_COMM_MATRIXA_TRANS_C31_C32                                       0x1c47
+#define mmDCP2_COMM_MATRIXA_TRANS_C31_C32                                       0x1e47
+#define mmDCP3_COMM_MATRIXA_TRANS_C31_C32                                       0x4047
+#define mmDCP4_COMM_MATRIXA_TRANS_C31_C32                                       0x4247
+#define mmDCP5_COMM_MATRIXA_TRANS_C31_C32                                       0x4447
+#define mmCOMM_MATRIXA_TRANS_C33_C34                                            0x1a48
+#define mmDCP0_COMM_MATRIXA_TRANS_C33_C34                                       0x1a48
+#define mmDCP1_COMM_MATRIXA_TRANS_C33_C34                                       0x1c48
+#define mmDCP2_COMM_MATRIXA_TRANS_C33_C34                                       0x1e48
+#define mmDCP3_COMM_MATRIXA_TRANS_C33_C34                                       0x4048
+#define mmDCP4_COMM_MATRIXA_TRANS_C33_C34                                       0x4248
+#define mmDCP5_COMM_MATRIXA_TRANS_C33_C34                                       0x4448
+#define mmCOMM_MATRIXB_TRANS_C11_C12                                            0x1a49
+#define mmDCP0_COMM_MATRIXB_TRANS_C11_C12                                       0x1a49
+#define mmDCP1_COMM_MATRIXB_TRANS_C11_C12                                       0x1c49
+#define mmDCP2_COMM_MATRIXB_TRANS_C11_C12                                       0x1e49
+#define mmDCP3_COMM_MATRIXB_TRANS_C11_C12                                       0x4049
+#define mmDCP4_COMM_MATRIXB_TRANS_C11_C12                                       0x4249
+#define mmDCP5_COMM_MATRIXB_TRANS_C11_C12                                       0x4449
+#define mmCOMM_MATRIXB_TRANS_C13_C14                                            0x1a4a
+#define mmDCP0_COMM_MATRIXB_TRANS_C13_C14                                       0x1a4a
+#define mmDCP1_COMM_MATRIXB_TRANS_C13_C14                                       0x1c4a
+#define mmDCP2_COMM_MATRIXB_TRANS_C13_C14                                       0x1e4a
+#define mmDCP3_COMM_MATRIXB_TRANS_C13_C14                                       0x404a
+#define mmDCP4_COMM_MATRIXB_TRANS_C13_C14                                       0x424a
+#define mmDCP5_COMM_MATRIXB_TRANS_C13_C14                                       0x444a
+#define mmCOMM_MATRIXB_TRANS_C21_C22                                            0x1a4b
+#define mmDCP0_COMM_MATRIXB_TRANS_C21_C22                                       0x1a4b
+#define mmDCP1_COMM_MATRIXB_TRANS_C21_C22                                       0x1c4b
+#define mmDCP2_COMM_MATRIXB_TRANS_C21_C22                                       0x1e4b
+#define mmDCP3_COMM_MATRIXB_TRANS_C21_C22                                       0x404b
+#define mmDCP4_COMM_MATRIXB_TRANS_C21_C22                                       0x424b
+#define mmDCP5_COMM_MATRIXB_TRANS_C21_C22                                       0x444b
+#define mmCOMM_MATRIXB_TRANS_C23_C24                                            0x1a4c
+#define mmDCP0_COMM_MATRIXB_TRANS_C23_C24                                       0x1a4c
+#define mmDCP1_COMM_MATRIXB_TRANS_C23_C24                                       0x1c4c
+#define mmDCP2_COMM_MATRIXB_TRANS_C23_C24                                       0x1e4c
+#define mmDCP3_COMM_MATRIXB_TRANS_C23_C24                                       0x404c
+#define mmDCP4_COMM_MATRIXB_TRANS_C23_C24                                       0x424c
+#define mmDCP5_COMM_MATRIXB_TRANS_C23_C24                                       0x444c
+#define mmCOMM_MATRIXB_TRANS_C31_C32                                            0x1a4d
+#define mmDCP0_COMM_MATRIXB_TRANS_C31_C32                                       0x1a4d
+#define mmDCP1_COMM_MATRIXB_TRANS_C31_C32                                       0x1c4d
+#define mmDCP2_COMM_MATRIXB_TRANS_C31_C32                                       0x1e4d
+#define mmDCP3_COMM_MATRIXB_TRANS_C31_C32                                       0x404d
+#define mmDCP4_COMM_MATRIXB_TRANS_C31_C32                                       0x424d
+#define mmDCP5_COMM_MATRIXB_TRANS_C31_C32                                       0x444d
+#define mmCOMM_MATRIXB_TRANS_C33_C34                                            0x1a4e
+#define mmDCP0_COMM_MATRIXB_TRANS_C33_C34                                       0x1a4e
+#define mmDCP1_COMM_MATRIXB_TRANS_C33_C34                                       0x1c4e
+#define mmDCP2_COMM_MATRIXB_TRANS_C33_C34                                       0x1e4e
+#define mmDCP3_COMM_MATRIXB_TRANS_C33_C34                                       0x404e
+#define mmDCP4_COMM_MATRIXB_TRANS_C33_C34                                       0x424e
+#define mmDCP5_COMM_MATRIXB_TRANS_C33_C34                                       0x444e
+#define mmDENORM_CONTROL                                                        0x1a50
+#define mmDCP0_DENORM_CONTROL                                                   0x1a50
+#define mmDCP1_DENORM_CONTROL                                                   0x1c50
+#define mmDCP2_DENORM_CONTROL                                                   0x1e50
+#define mmDCP3_DENORM_CONTROL                                                   0x4050
+#define mmDCP4_DENORM_CONTROL                                                   0x4250
+#define mmDCP5_DENORM_CONTROL                                                   0x4450
+#define mmOUT_ROUND_CONTROL                                                     0x1a51
+#define mmDCP0_OUT_ROUND_CONTROL                                                0x1a51
+#define mmDCP1_OUT_ROUND_CONTROL                                                0x1c51
+#define mmDCP2_OUT_ROUND_CONTROL                                                0x1e51
+#define mmDCP3_OUT_ROUND_CONTROL                                                0x4051
+#define mmDCP4_OUT_ROUND_CONTROL                                                0x4251
+#define mmDCP5_OUT_ROUND_CONTROL                                                0x4451
+#define mmOUT_CLAMP_CONTROL_R_CR                                                0x1a52
+#define mmDCP0_OUT_CLAMP_CONTROL_R_CR                                           0x1a52
+#define mmDCP1_OUT_CLAMP_CONTROL_R_CR                                           0x1c52
+#define mmDCP2_OUT_CLAMP_CONTROL_R_CR                                           0x1e52
+#define mmDCP3_OUT_CLAMP_CONTROL_R_CR                                           0x4052
+#define mmDCP4_OUT_CLAMP_CONTROL_R_CR                                           0x4252
+#define mmDCP5_OUT_CLAMP_CONTROL_R_CR                                           0x4452
+#define mmOUT_CLAMP_CONTROL_G_Y                                                 0x1a9c
+#define mmDCP0_OUT_CLAMP_CONTROL_G_Y                                            0x1a9c
+#define mmDCP1_OUT_CLAMP_CONTROL_G_Y                                            0x1c9c
+#define mmDCP2_OUT_CLAMP_CONTROL_G_Y                                            0x1e9c
+#define mmDCP3_OUT_CLAMP_CONTROL_G_Y                                            0x409c
+#define mmDCP4_OUT_CLAMP_CONTROL_G_Y                                            0x429c
+#define mmDCP5_OUT_CLAMP_CONTROL_G_Y                                            0x449c
+#define mmOUT_CLAMP_CONTROL_B_CB                                                0x1a9d
+#define mmDCP0_OUT_CLAMP_CONTROL_B_CB                                           0x1a9d
+#define mmDCP1_OUT_CLAMP_CONTROL_B_CB                                           0x1c9d
+#define mmDCP2_OUT_CLAMP_CONTROL_B_CB                                           0x1e9d
+#define mmDCP3_OUT_CLAMP_CONTROL_B_CB                                           0x409d
+#define mmDCP4_OUT_CLAMP_CONTROL_B_CB                                           0x429d
+#define mmDCP5_OUT_CLAMP_CONTROL_B_CB                                           0x449d
+#define mmKEY_CONTROL                                                           0x1a53
+#define mmDCP0_KEY_CONTROL                                                      0x1a53
+#define mmDCP1_KEY_CONTROL                                                      0x1c53
+#define mmDCP2_KEY_CONTROL                                                      0x1e53
+#define mmDCP3_KEY_CONTROL                                                      0x4053
+#define mmDCP4_KEY_CONTROL                                                      0x4253
+#define mmDCP5_KEY_CONTROL                                                      0x4453
+#define mmKEY_RANGE_ALPHA                                                       0x1a54
+#define mmDCP0_KEY_RANGE_ALPHA                                                  0x1a54
+#define mmDCP1_KEY_RANGE_ALPHA                                                  0x1c54
+#define mmDCP2_KEY_RANGE_ALPHA                                                  0x1e54
+#define mmDCP3_KEY_RANGE_ALPHA                                                  0x4054
+#define mmDCP4_KEY_RANGE_ALPHA                                                  0x4254
+#define mmDCP5_KEY_RANGE_ALPHA                                                  0x4454
+#define mmKEY_RANGE_RED                                                         0x1a55
+#define mmDCP0_KEY_RANGE_RED                                                    0x1a55
+#define mmDCP1_KEY_RANGE_RED                                                    0x1c55
+#define mmDCP2_KEY_RANGE_RED                                                    0x1e55
+#define mmDCP3_KEY_RANGE_RED                                                    0x4055
+#define mmDCP4_KEY_RANGE_RED                                                    0x4255
+#define mmDCP5_KEY_RANGE_RED                                                    0x4455
+#define mmKEY_RANGE_GREEN                                                       0x1a56
+#define mmDCP0_KEY_RANGE_GREEN                                                  0x1a56
+#define mmDCP1_KEY_RANGE_GREEN                                                  0x1c56
+#define mmDCP2_KEY_RANGE_GREEN                                                  0x1e56
+#define mmDCP3_KEY_RANGE_GREEN                                                  0x4056
+#define mmDCP4_KEY_RANGE_GREEN                                                  0x4256
+#define mmDCP5_KEY_RANGE_GREEN                                                  0x4456
+#define mmKEY_RANGE_BLUE                                                        0x1a57
+#define mmDCP0_KEY_RANGE_BLUE                                                   0x1a57
+#define mmDCP1_KEY_RANGE_BLUE                                                   0x1c57
+#define mmDCP2_KEY_RANGE_BLUE                                                   0x1e57
+#define mmDCP3_KEY_RANGE_BLUE                                                   0x4057
+#define mmDCP4_KEY_RANGE_BLUE                                                   0x4257
+#define mmDCP5_KEY_RANGE_BLUE                                                   0x4457
+#define mmDEGAMMA_CONTROL                                                       0x1a58
+#define mmDCP0_DEGAMMA_CONTROL                                                  0x1a58
+#define mmDCP1_DEGAMMA_CONTROL                                                  0x1c58
+#define mmDCP2_DEGAMMA_CONTROL                                                  0x1e58
+#define mmDCP3_DEGAMMA_CONTROL                                                  0x4058
+#define mmDCP4_DEGAMMA_CONTROL                                                  0x4258
+#define mmDCP5_DEGAMMA_CONTROL                                                  0x4458
+#define mmGAMUT_REMAP_CONTROL                                                   0x1a59
+#define mmDCP0_GAMUT_REMAP_CONTROL                                              0x1a59
+#define mmDCP1_GAMUT_REMAP_CONTROL                                              0x1c59
+#define mmDCP2_GAMUT_REMAP_CONTROL                                              0x1e59
+#define mmDCP3_GAMUT_REMAP_CONTROL                                              0x4059
+#define mmDCP4_GAMUT_REMAP_CONTROL                                              0x4259
+#define mmDCP5_GAMUT_REMAP_CONTROL                                              0x4459
+#define mmGAMUT_REMAP_C11_C12                                                   0x1a5a
+#define mmDCP0_GAMUT_REMAP_C11_C12                                              0x1a5a
+#define mmDCP1_GAMUT_REMAP_C11_C12                                              0x1c5a
+#define mmDCP2_GAMUT_REMAP_C11_C12                                              0x1e5a
+#define mmDCP3_GAMUT_REMAP_C11_C12                                              0x405a
+#define mmDCP4_GAMUT_REMAP_C11_C12                                              0x425a
+#define mmDCP5_GAMUT_REMAP_C11_C12                                              0x445a
+#define mmGAMUT_REMAP_C13_C14                                                   0x1a5b
+#define mmDCP0_GAMUT_REMAP_C13_C14                                              0x1a5b
+#define mmDCP1_GAMUT_REMAP_C13_C14                                              0x1c5b
+#define mmDCP2_GAMUT_REMAP_C13_C14                                              0x1e5b
+#define mmDCP3_GAMUT_REMAP_C13_C14                                              0x405b
+#define mmDCP4_GAMUT_REMAP_C13_C14                                              0x425b
+#define mmDCP5_GAMUT_REMAP_C13_C14                                              0x445b
+#define mmGAMUT_REMAP_C21_C22                                                   0x1a5c
+#define mmDCP0_GAMUT_REMAP_C21_C22                                              0x1a5c
+#define mmDCP1_GAMUT_REMAP_C21_C22                                              0x1c5c
+#define mmDCP2_GAMUT_REMAP_C21_C22                                              0x1e5c
+#define mmDCP3_GAMUT_REMAP_C21_C22                                              0x405c
+#define mmDCP4_GAMUT_REMAP_C21_C22                                              0x425c
+#define mmDCP5_GAMUT_REMAP_C21_C22                                              0x445c
+#define mmGAMUT_REMAP_C23_C24                                                   0x1a5d
+#define mmDCP0_GAMUT_REMAP_C23_C24                                              0x1a5d
+#define mmDCP1_GAMUT_REMAP_C23_C24                                              0x1c5d
+#define mmDCP2_GAMUT_REMAP_C23_C24                                              0x1e5d
+#define mmDCP3_GAMUT_REMAP_C23_C24                                              0x405d
+#define mmDCP4_GAMUT_REMAP_C23_C24                                              0x425d
+#define mmDCP5_GAMUT_REMAP_C23_C24                                              0x445d
+#define mmGAMUT_REMAP_C31_C32                                                   0x1a5e
+#define mmDCP0_GAMUT_REMAP_C31_C32                                              0x1a5e
+#define mmDCP1_GAMUT_REMAP_C31_C32                                              0x1c5e
+#define mmDCP2_GAMUT_REMAP_C31_C32                                              0x1e5e
+#define mmDCP3_GAMUT_REMAP_C31_C32                                              0x405e
+#define mmDCP4_GAMUT_REMAP_C31_C32                                              0x425e
+#define mmDCP5_GAMUT_REMAP_C31_C32                                              0x445e
+#define mmGAMUT_REMAP_C33_C34                                                   0x1a5f
+#define mmDCP0_GAMUT_REMAP_C33_C34                                              0x1a5f
+#define mmDCP1_GAMUT_REMAP_C33_C34                                              0x1c5f
+#define mmDCP2_GAMUT_REMAP_C33_C34                                              0x1e5f
+#define mmDCP3_GAMUT_REMAP_C33_C34                                              0x405f
+#define mmDCP4_GAMUT_REMAP_C33_C34                                              0x425f
+#define mmDCP5_GAMUT_REMAP_C33_C34                                              0x445f
+#define mmDCP_SPATIAL_DITHER_CNTL                                               0x1a60
+#define mmDCP0_DCP_SPATIAL_DITHER_CNTL                                          0x1a60
+#define mmDCP1_DCP_SPATIAL_DITHER_CNTL                                          0x1c60
+#define mmDCP2_DCP_SPATIAL_DITHER_CNTL                                          0x1e60
+#define mmDCP3_DCP_SPATIAL_DITHER_CNTL                                          0x4060
+#define mmDCP4_DCP_SPATIAL_DITHER_CNTL                                          0x4260
+#define mmDCP5_DCP_SPATIAL_DITHER_CNTL                                          0x4460
+#define mmDCP_RANDOM_SEEDS                                                      0x1a61
+#define mmDCP0_DCP_RANDOM_SEEDS                                                 0x1a61
+#define mmDCP1_DCP_RANDOM_SEEDS                                                 0x1c61
+#define mmDCP2_DCP_RANDOM_SEEDS                                                 0x1e61
+#define mmDCP3_DCP_RANDOM_SEEDS                                                 0x4061
+#define mmDCP4_DCP_RANDOM_SEEDS                                                 0x4261
+#define mmDCP5_DCP_RANDOM_SEEDS                                                 0x4461
+#define mmDCP_FP_CONVERTED_FIELD                                                0x1a65
+#define mmDCP0_DCP_FP_CONVERTED_FIELD                                           0x1a65
+#define mmDCP1_DCP_FP_CONVERTED_FIELD                                           0x1c65
+#define mmDCP2_DCP_FP_CONVERTED_FIELD                                           0x1e65
+#define mmDCP3_DCP_FP_CONVERTED_FIELD                                           0x4065
+#define mmDCP4_DCP_FP_CONVERTED_FIELD                                           0x4265
+#define mmDCP5_DCP_FP_CONVERTED_FIELD                                           0x4465
+#define mmCUR_CONTROL                                                           0x1a66
+#define mmDCP0_CUR_CONTROL                                                      0x1a66
+#define mmDCP1_CUR_CONTROL                                                      0x1c66
+#define mmDCP2_CUR_CONTROL                                                      0x1e66
+#define mmDCP3_CUR_CONTROL                                                      0x4066
+#define mmDCP4_CUR_CONTROL                                                      0x4266
+#define mmDCP5_CUR_CONTROL                                                      0x4466
+#define mmCUR_SURFACE_ADDRESS                                                   0x1a67
+#define mmDCP0_CUR_SURFACE_ADDRESS                                              0x1a67
+#define mmDCP1_CUR_SURFACE_ADDRESS                                              0x1c67
+#define mmDCP2_CUR_SURFACE_ADDRESS                                              0x1e67
+#define mmDCP3_CUR_SURFACE_ADDRESS                                              0x4067
+#define mmDCP4_CUR_SURFACE_ADDRESS                                              0x4267
+#define mmDCP5_CUR_SURFACE_ADDRESS                                              0x4467
+#define mmCUR_SIZE                                                              0x1a68
+#define mmDCP0_CUR_SIZE                                                         0x1a68
+#define mmDCP1_CUR_SIZE                                                         0x1c68
+#define mmDCP2_CUR_SIZE                                                         0x1e68
+#define mmDCP3_CUR_SIZE                                                         0x4068
+#define mmDCP4_CUR_SIZE                                                         0x4268
+#define mmDCP5_CUR_SIZE                                                         0x4468
+#define mmCUR_SURFACE_ADDRESS_HIGH                                              0x1a69
+#define mmDCP0_CUR_SURFACE_ADDRESS_HIGH                                         0x1a69
+#define mmDCP1_CUR_SURFACE_ADDRESS_HIGH                                         0x1c69
+#define mmDCP2_CUR_SURFACE_ADDRESS_HIGH                                         0x1e69
+#define mmDCP3_CUR_SURFACE_ADDRESS_HIGH                                         0x4069
+#define mmDCP4_CUR_SURFACE_ADDRESS_HIGH                                         0x4269
+#define mmDCP5_CUR_SURFACE_ADDRESS_HIGH                                         0x4469
+#define mmCUR_POSITION                                                          0x1a6a
+#define mmDCP0_CUR_POSITION                                                     0x1a6a
+#define mmDCP1_CUR_POSITION                                                     0x1c6a
+#define mmDCP2_CUR_POSITION                                                     0x1e6a
+#define mmDCP3_CUR_POSITION                                                     0x406a
+#define mmDCP4_CUR_POSITION                                                     0x426a
+#define mmDCP5_CUR_POSITION                                                     0x446a
+#define mmCUR_HOT_SPOT                                                          0x1a6b
+#define mmDCP0_CUR_HOT_SPOT                                                     0x1a6b
+#define mmDCP1_CUR_HOT_SPOT                                                     0x1c6b
+#define mmDCP2_CUR_HOT_SPOT                                                     0x1e6b
+#define mmDCP3_CUR_HOT_SPOT                                                     0x406b
+#define mmDCP4_CUR_HOT_SPOT                                                     0x426b
+#define mmDCP5_CUR_HOT_SPOT                                                     0x446b
+#define mmCUR_COLOR1                                                            0x1a6c
+#define mmDCP0_CUR_COLOR1                                                       0x1a6c
+#define mmDCP1_CUR_COLOR1                                                       0x1c6c
+#define mmDCP2_CUR_COLOR1                                                       0x1e6c
+#define mmDCP3_CUR_COLOR1                                                       0x406c
+#define mmDCP4_CUR_COLOR1                                                       0x426c
+#define mmDCP5_CUR_COLOR1                                                       0x446c
+#define mmCUR_COLOR2                                                            0x1a6d
+#define mmDCP0_CUR_COLOR2                                                       0x1a6d
+#define mmDCP1_CUR_COLOR2                                                       0x1c6d
+#define mmDCP2_CUR_COLOR2                                                       0x1e6d
+#define mmDCP3_CUR_COLOR2                                                       0x406d
+#define mmDCP4_CUR_COLOR2                                                       0x426d
+#define mmDCP5_CUR_COLOR2                                                       0x446d
+#define mmCUR_UPDATE                                                            0x1a6e
+#define mmDCP0_CUR_UPDATE                                                       0x1a6e
+#define mmDCP1_CUR_UPDATE                                                       0x1c6e
+#define mmDCP2_CUR_UPDATE                                                       0x1e6e
+#define mmDCP3_CUR_UPDATE                                                       0x406e
+#define mmDCP4_CUR_UPDATE                                                       0x426e
+#define mmDCP5_CUR_UPDATE                                                       0x446e
+#define mmCUR_REQUEST_FILTER_CNTL                                               0x1a99
+#define mmDCP0_CUR_REQUEST_FILTER_CNTL                                          0x1a99
+#define mmDCP1_CUR_REQUEST_FILTER_CNTL                                          0x1c99
+#define mmDCP2_CUR_REQUEST_FILTER_CNTL                                          0x1e99
+#define mmDCP3_CUR_REQUEST_FILTER_CNTL                                          0x4099
+#define mmDCP4_CUR_REQUEST_FILTER_CNTL                                          0x4299
+#define mmDCP5_CUR_REQUEST_FILTER_CNTL                                          0x4499
+#define mmCUR_STEREO_CONTROL                                                    0x1a9a
+#define mmDCP0_CUR_STEREO_CONTROL                                               0x1a9a
+#define mmDCP1_CUR_STEREO_CONTROL                                               0x1c9a
+#define mmDCP2_CUR_STEREO_CONTROL                                               0x1e9a
+#define mmDCP3_CUR_STEREO_CONTROL                                               0x409a
+#define mmDCP4_CUR_STEREO_CONTROL                                               0x429a
+#define mmDCP5_CUR_STEREO_CONTROL                                               0x449a
+#define mmDC_LUT_RW_MODE                                                        0x1a78
+#define mmDCP0_DC_LUT_RW_MODE                                                   0x1a78
+#define mmDCP1_DC_LUT_RW_MODE                                                   0x1c78
+#define mmDCP2_DC_LUT_RW_MODE                                                   0x1e78
+#define mmDCP3_DC_LUT_RW_MODE                                                   0x4078
+#define mmDCP4_DC_LUT_RW_MODE                                                   0x4278
+#define mmDCP5_DC_LUT_RW_MODE                                                   0x4478
+#define mmDC_LUT_RW_INDEX                                                       0x1a79
+#define mmDCP0_DC_LUT_RW_INDEX                                                  0x1a79
+#define mmDCP1_DC_LUT_RW_INDEX                                                  0x1c79
+#define mmDCP2_DC_LUT_RW_INDEX                                                  0x1e79
+#define mmDCP3_DC_LUT_RW_INDEX                                                  0x4079
+#define mmDCP4_DC_LUT_RW_INDEX                                                  0x4279
+#define mmDCP5_DC_LUT_RW_INDEX                                                  0x4479
+#define mmDC_LUT_SEQ_COLOR                                                      0x1a7a
+#define mmDCP0_DC_LUT_SEQ_COLOR                                                 0x1a7a
+#define mmDCP1_DC_LUT_SEQ_COLOR                                                 0x1c7a
+#define mmDCP2_DC_LUT_SEQ_COLOR                                                 0x1e7a
+#define mmDCP3_DC_LUT_SEQ_COLOR                                                 0x407a
+#define mmDCP4_DC_LUT_SEQ_COLOR                                                 0x427a
+#define mmDCP5_DC_LUT_SEQ_COLOR                                                 0x447a
+#define mmDC_LUT_PWL_DATA                                                       0x1a7b
+#define mmDCP0_DC_LUT_PWL_DATA                                                  0x1a7b
+#define mmDCP1_DC_LUT_PWL_DATA                                                  0x1c7b
+#define mmDCP2_DC_LUT_PWL_DATA                                                  0x1e7b
+#define mmDCP3_DC_LUT_PWL_DATA                                                  0x407b
+#define mmDCP4_DC_LUT_PWL_DATA                                                  0x427b
+#define mmDCP5_DC_LUT_PWL_DATA                                                  0x447b
+#define mmDC_LUT_30_COLOR                                                       0x1a7c
+#define mmDCP0_DC_LUT_30_COLOR                                                  0x1a7c
+#define mmDCP1_DC_LUT_30_COLOR                                                  0x1c7c
+#define mmDCP2_DC_LUT_30_COLOR                                                  0x1e7c
+#define mmDCP3_DC_LUT_30_COLOR                                                  0x407c
+#define mmDCP4_DC_LUT_30_COLOR                                                  0x427c
+#define mmDCP5_DC_LUT_30_COLOR                                                  0x447c
+#define mmDC_LUT_VGA_ACCESS_ENABLE                                              0x1a7d
+#define mmDCP0_DC_LUT_VGA_ACCESS_ENABLE                                         0x1a7d
+#define mmDCP1_DC_LUT_VGA_ACCESS_ENABLE                                         0x1c7d
+#define mmDCP2_DC_LUT_VGA_ACCESS_ENABLE                                         0x1e7d
+#define mmDCP3_DC_LUT_VGA_ACCESS_ENABLE                                         0x407d
+#define mmDCP4_DC_LUT_VGA_ACCESS_ENABLE                                         0x427d
+#define mmDCP5_DC_LUT_VGA_ACCESS_ENABLE                                         0x447d
+#define mmDC_LUT_WRITE_EN_MASK                                                  0x1a7e
+#define mmDCP0_DC_LUT_WRITE_EN_MASK                                             0x1a7e
+#define mmDCP1_DC_LUT_WRITE_EN_MASK                                             0x1c7e
+#define mmDCP2_DC_LUT_WRITE_EN_MASK                                             0x1e7e
+#define mmDCP3_DC_LUT_WRITE_EN_MASK                                             0x407e
+#define mmDCP4_DC_LUT_WRITE_EN_MASK                                             0x427e
+#define mmDCP5_DC_LUT_WRITE_EN_MASK                                             0x447e
+#define mmDC_LUT_AUTOFILL                                                       0x1a7f
+#define mmDCP0_DC_LUT_AUTOFILL                                                  0x1a7f
+#define mmDCP1_DC_LUT_AUTOFILL                                                  0x1c7f
+#define mmDCP2_DC_LUT_AUTOFILL                                                  0x1e7f
+#define mmDCP3_DC_LUT_AUTOFILL                                                  0x407f
+#define mmDCP4_DC_LUT_AUTOFILL                                                  0x427f
+#define mmDCP5_DC_LUT_AUTOFILL                                                  0x447f
+#define mmDC_LUT_CONTROL                                                        0x1a80
+#define mmDCP0_DC_LUT_CONTROL                                                   0x1a80
+#define mmDCP1_DC_LUT_CONTROL                                                   0x1c80
+#define mmDCP2_DC_LUT_CONTROL                                                   0x1e80
+#define mmDCP3_DC_LUT_CONTROL                                                   0x4080
+#define mmDCP4_DC_LUT_CONTROL                                                   0x4280
+#define mmDCP5_DC_LUT_CONTROL                                                   0x4480
+#define mmDC_LUT_BLACK_OFFSET_BLUE                                              0x1a81
+#define mmDCP0_DC_LUT_BLACK_OFFSET_BLUE                                         0x1a81
+#define mmDCP1_DC_LUT_BLACK_OFFSET_BLUE                                         0x1c81
+#define mmDCP2_DC_LUT_BLACK_OFFSET_BLUE                                         0x1e81
+#define mmDCP3_DC_LUT_BLACK_OFFSET_BLUE                                         0x4081
+#define mmDCP4_DC_LUT_BLACK_OFFSET_BLUE                                         0x4281
+#define mmDCP5_DC_LUT_BLACK_OFFSET_BLUE                                         0x4481
+#define mmDC_LUT_BLACK_OFFSET_GREEN                                             0x1a82
+#define mmDCP0_DC_LUT_BLACK_OFFSET_GREEN                                        0x1a82
+#define mmDCP1_DC_LUT_BLACK_OFFSET_GREEN                                        0x1c82
+#define mmDCP2_DC_LUT_BLACK_OFFSET_GREEN                                        0x1e82
+#define mmDCP3_DC_LUT_BLACK_OFFSET_GREEN                                        0x4082
+#define mmDCP4_DC_LUT_BLACK_OFFSET_GREEN                                        0x4282
+#define mmDCP5_DC_LUT_BLACK_OFFSET_GREEN                                        0x4482
+#define mmDC_LUT_BLACK_OFFSET_RED                                               0x1a83
+#define mmDCP0_DC_LUT_BLACK_OFFSET_RED                                          0x1a83
+#define mmDCP1_DC_LUT_BLACK_OFFSET_RED                                          0x1c83
+#define mmDCP2_DC_LUT_BLACK_OFFSET_RED                                          0x1e83
+#define mmDCP3_DC_LUT_BLACK_OFFSET_RED                                          0x4083
+#define mmDCP4_DC_LUT_BLACK_OFFSET_RED                                          0x4283
+#define mmDCP5_DC_LUT_BLACK_OFFSET_RED                                          0x4483
+#define mmDC_LUT_WHITE_OFFSET_BLUE                                              0x1a84
+#define mmDCP0_DC_LUT_WHITE_OFFSET_BLUE                                         0x1a84
+#define mmDCP1_DC_LUT_WHITE_OFFSET_BLUE                                         0x1c84
+#define mmDCP2_DC_LUT_WHITE_OFFSET_BLUE                                         0x1e84
+#define mmDCP3_DC_LUT_WHITE_OFFSET_BLUE                                         0x4084
+#define mmDCP4_DC_LUT_WHITE_OFFSET_BLUE                                         0x4284
+#define mmDCP5_DC_LUT_WHITE_OFFSET_BLUE                                         0x4484
+#define mmDC_LUT_WHITE_OFFSET_GREEN                                             0x1a85
+#define mmDCP0_DC_LUT_WHITE_OFFSET_GREEN                                        0x1a85
+#define mmDCP1_DC_LUT_WHITE_OFFSET_GREEN                                        0x1c85
+#define mmDCP2_DC_LUT_WHITE_OFFSET_GREEN                                        0x1e85
+#define mmDCP3_DC_LUT_WHITE_OFFSET_GREEN                                        0x4085
+#define mmDCP4_DC_LUT_WHITE_OFFSET_GREEN                                        0x4285
+#define mmDCP5_DC_LUT_WHITE_OFFSET_GREEN                                        0x4485
+#define mmDC_LUT_WHITE_OFFSET_RED                                               0x1a86
+#define mmDCP0_DC_LUT_WHITE_OFFSET_RED                                          0x1a86
+#define mmDCP1_DC_LUT_WHITE_OFFSET_RED                                          0x1c86
+#define mmDCP2_DC_LUT_WHITE_OFFSET_RED                                          0x1e86
+#define mmDCP3_DC_LUT_WHITE_OFFSET_RED                                          0x4086
+#define mmDCP4_DC_LUT_WHITE_OFFSET_RED                                          0x4286
+#define mmDCP5_DC_LUT_WHITE_OFFSET_RED                                          0x4486
+#define mmDCP_CRC_CONTROL                                                       0x1a87
+#define mmDCP0_DCP_CRC_CONTROL                                                  0x1a87
+#define mmDCP1_DCP_CRC_CONTROL                                                  0x1c87
+#define mmDCP2_DCP_CRC_CONTROL                                                  0x1e87
+#define mmDCP3_DCP_CRC_CONTROL                                                  0x4087
+#define mmDCP4_DCP_CRC_CONTROL                                                  0x4287
+#define mmDCP5_DCP_CRC_CONTROL                                                  0x4487
+#define mmDCP_CRC_MASK                                                          0x1a88
+#define mmDCP0_DCP_CRC_MASK                                                     0x1a88
+#define mmDCP1_DCP_CRC_MASK                                                     0x1c88
+#define mmDCP2_DCP_CRC_MASK                                                     0x1e88
+#define mmDCP3_DCP_CRC_MASK                                                     0x4088
+#define mmDCP4_DCP_CRC_MASK                                                     0x4288
+#define mmDCP5_DCP_CRC_MASK                                                     0x4488
+#define mmDCP_CRC_CURRENT                                                       0x1a89
+#define mmDCP0_DCP_CRC_CURRENT                                                  0x1a89
+#define mmDCP1_DCP_CRC_CURRENT                                                  0x1c89
+#define mmDCP2_DCP_CRC_CURRENT                                                  0x1e89
+#define mmDCP3_DCP_CRC_CURRENT                                                  0x4089
+#define mmDCP4_DCP_CRC_CURRENT                                                  0x4289
+#define mmDCP5_DCP_CRC_CURRENT                                                  0x4489
+#define mmDVMM_PTE_CONTROL                                                      0x1a8a
+#define mmDCP0_DVMM_PTE_CONTROL                                                 0x1a8a
+#define mmDCP1_DVMM_PTE_CONTROL                                                 0x1c8a
+#define mmDCP2_DVMM_PTE_CONTROL                                                 0x1e8a
+#define mmDCP3_DVMM_PTE_CONTROL                                                 0x408a
+#define mmDCP4_DVMM_PTE_CONTROL                                                 0x428a
+#define mmDCP5_DVMM_PTE_CONTROL                                                 0x448a
+#define mmDCP_CRC_LAST                                                          0x1a8b
+#define mmDCP0_DCP_CRC_LAST                                                     0x1a8b
+#define mmDCP1_DCP_CRC_LAST                                                     0x1c8b
+#define mmDCP2_DCP_CRC_LAST                                                     0x1e8b
+#define mmDCP3_DCP_CRC_LAST                                                     0x408b
+#define mmDCP4_DCP_CRC_LAST                                                     0x428b
+#define mmDCP5_DCP_CRC_LAST                                                     0x448b
+#define mmDCP_DEBUG                                                             0x1a8d
+#define mmDCP0_DCP_DEBUG                                                        0x1a8d
+#define mmDCP1_DCP_DEBUG                                                        0x1c8d
+#define mmDCP2_DCP_DEBUG                                                        0x1e8d
+#define mmDCP3_DCP_DEBUG                                                        0x408d
+#define mmDCP4_DCP_DEBUG                                                        0x428d
+#define mmDCP5_DCP_DEBUG                                                        0x448d
+#define mmGRPH_FLIP_RATE_CNTL                                                   0x1a8e
+#define mmDCP0_GRPH_FLIP_RATE_CNTL                                              0x1a8e
+#define mmDCP1_GRPH_FLIP_RATE_CNTL                                              0x1c8e
+#define mmDCP2_GRPH_FLIP_RATE_CNTL                                              0x1e8e
+#define mmDCP3_GRPH_FLIP_RATE_CNTL                                              0x408e
+#define mmDCP4_GRPH_FLIP_RATE_CNTL                                              0x428e
+#define mmDCP5_GRPH_FLIP_RATE_CNTL                                              0x448e
+#define mmDCP_GSL_CONTROL                                                       0x1a90
+#define mmDCP0_DCP_GSL_CONTROL                                                  0x1a90
+#define mmDCP1_DCP_GSL_CONTROL                                                  0x1c90
+#define mmDCP2_DCP_GSL_CONTROL                                                  0x1e90
+#define mmDCP3_DCP_GSL_CONTROL                                                  0x4090
+#define mmDCP4_DCP_GSL_CONTROL                                                  0x4290
+#define mmDCP5_DCP_GSL_CONTROL                                                  0x4490
+#define mmDCP_LB_DATA_GAP_BETWEEN_CHUNK                                         0x1a91
+#define mmDCP0_DCP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x1a91
+#define mmDCP1_DCP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x1c91
+#define mmDCP2_DCP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x1e91
+#define mmDCP3_DCP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x4091
+#define mmDCP4_DCP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x4291
+#define mmDCP5_DCP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x4491
+#define mmDCP_DEBUG_SG                                                          0x1a92
+#define mmDCP0_DCP_DEBUG_SG                                                     0x1a92
+#define mmDCP1_DCP_DEBUG_SG                                                     0x1c92
+#define mmDCP2_DCP_DEBUG_SG                                                     0x1e92
+#define mmDCP3_DCP_DEBUG_SG                                                     0x4092
+#define mmDCP4_DCP_DEBUG_SG                                                     0x4292
+#define mmDCP5_DCP_DEBUG_SG                                                     0x4492
+#define mmDCP_DEBUG_SG2                                                         0x1a94
+#define mmDCP0_DCP_DEBUG_SG2                                                    0x1a94
+#define mmDCP1_DCP_DEBUG_SG2                                                    0x1c94
+#define mmDCP2_DCP_DEBUG_SG2                                                    0x1e94
+#define mmDCP3_DCP_DEBUG_SG2                                                    0x4094
+#define mmDCP4_DCP_DEBUG_SG2                                                    0x4294
+#define mmDCP5_DCP_DEBUG_SG2                                                    0x4494
+#define mmDCP_DVMM_DEBUG                                                        0x1a93
+#define mmDCP0_DCP_DVMM_DEBUG                                                   0x1a93
+#define mmDCP1_DCP_DVMM_DEBUG                                                   0x1c93
+#define mmDCP2_DCP_DVMM_DEBUG                                                   0x1e93
+#define mmDCP3_DCP_DVMM_DEBUG                                                   0x4093
+#define mmDCP4_DCP_DVMM_DEBUG                                                   0x4293
+#define mmDCP5_DCP_DVMM_DEBUG                                                   0x4493
+#define mmDCP_TEST_DEBUG_INDEX                                                  0x1a95
+#define mmDCP0_DCP_TEST_DEBUG_INDEX                                             0x1a95
+#define mmDCP1_DCP_TEST_DEBUG_INDEX                                             0x1c95
+#define mmDCP2_DCP_TEST_DEBUG_INDEX                                             0x1e95
+#define mmDCP3_DCP_TEST_DEBUG_INDEX                                             0x4095
+#define mmDCP4_DCP_TEST_DEBUG_INDEX                                             0x4295
+#define mmDCP5_DCP_TEST_DEBUG_INDEX                                             0x4495
+#define mmDCP_TEST_DEBUG_DATA                                                   0x1a96
+#define mmDCP0_DCP_TEST_DEBUG_DATA                                              0x1a96
+#define mmDCP1_DCP_TEST_DEBUG_DATA                                              0x1c96
+#define mmDCP2_DCP_TEST_DEBUG_DATA                                              0x1e96
+#define mmDCP3_DCP_TEST_DEBUG_DATA                                              0x4096
+#define mmDCP4_DCP_TEST_DEBUG_DATA                                              0x4296
+#define mmDCP5_DCP_TEST_DEBUG_DATA                                              0x4496
+#define mmGRPH_STEREOSYNC_FLIP                                                  0x1a97
+#define mmDCP0_GRPH_STEREOSYNC_FLIP                                             0x1a97
+#define mmDCP1_GRPH_STEREOSYNC_FLIP                                             0x1c97
+#define mmDCP2_GRPH_STEREOSYNC_FLIP                                             0x1e97
+#define mmDCP3_GRPH_STEREOSYNC_FLIP                                             0x4097
+#define mmDCP4_GRPH_STEREOSYNC_FLIP                                             0x4297
+#define mmDCP5_GRPH_STEREOSYNC_FLIP                                             0x4497
+#define mmDCP_DEBUG2                                                            0x1a98
+#define mmDCP0_DCP_DEBUG2                                                       0x1a98
+#define mmDCP1_DCP_DEBUG2                                                       0x1c98
+#define mmDCP2_DCP_DEBUG2                                                       0x1e98
+#define mmDCP3_DCP_DEBUG2                                                       0x4098
+#define mmDCP4_DCP_DEBUG2                                                       0x4298
+#define mmDCP5_DCP_DEBUG2                                                       0x4498
+#define mmHW_ROTATION                                                           0x1a9e
+#define mmDCP0_HW_ROTATION                                                      0x1a9e
+#define mmDCP1_HW_ROTATION                                                      0x1c9e
+#define mmDCP2_HW_ROTATION                                                      0x1e9e
+#define mmDCP3_HW_ROTATION                                                      0x409e
+#define mmDCP4_HW_ROTATION                                                      0x429e
+#define mmDCP5_HW_ROTATION                                                      0x449e
+#define mmGRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                                    0x1a9f
+#define mmDCP0_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                               0x1a9f
+#define mmDCP1_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                               0x1c9f
+#define mmDCP2_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                               0x1e9f
+#define mmDCP3_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                               0x409f
+#define mmDCP4_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                               0x429f
+#define mmDCP5_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL                               0x449f
+#define mmREGAMMA_CONTROL                                                       0x1aa0
+#define mmDCP0_REGAMMA_CONTROL                                                  0x1aa0
+#define mmDCP1_REGAMMA_CONTROL                                                  0x1ca0
+#define mmDCP2_REGAMMA_CONTROL                                                  0x1ea0
+#define mmDCP3_REGAMMA_CONTROL                                                  0x40a0
+#define mmDCP4_REGAMMA_CONTROL                                                  0x42a0
+#define mmDCP5_REGAMMA_CONTROL                                                  0x44a0
+#define mmREGAMMA_LUT_INDEX                                                     0x1aa1
+#define mmDCP0_REGAMMA_LUT_INDEX                                                0x1aa1
+#define mmDCP1_REGAMMA_LUT_INDEX                                                0x1ca1
+#define mmDCP2_REGAMMA_LUT_INDEX                                                0x1ea1
+#define mmDCP3_REGAMMA_LUT_INDEX                                                0x40a1
+#define mmDCP4_REGAMMA_LUT_INDEX                                                0x42a1
+#define mmDCP5_REGAMMA_LUT_INDEX                                                0x44a1
+#define mmREGAMMA_LUT_DATA                                                      0x1aa2
+#define mmDCP0_REGAMMA_LUT_DATA                                                 0x1aa2
+#define mmDCP1_REGAMMA_LUT_DATA                                                 0x1ca2
+#define mmDCP2_REGAMMA_LUT_DATA                                                 0x1ea2
+#define mmDCP3_REGAMMA_LUT_DATA                                                 0x40a2
+#define mmDCP4_REGAMMA_LUT_DATA                                                 0x42a2
+#define mmDCP5_REGAMMA_LUT_DATA                                                 0x44a2
+#define mmREGAMMA_LUT_WRITE_EN_MASK                                             0x1aa3
+#define mmDCP0_REGAMMA_LUT_WRITE_EN_MASK                                        0x1aa3
+#define mmDCP1_REGAMMA_LUT_WRITE_EN_MASK                                        0x1ca3
+#define mmDCP2_REGAMMA_LUT_WRITE_EN_MASK                                        0x1ea3
+#define mmDCP3_REGAMMA_LUT_WRITE_EN_MASK                                        0x40a3
+#define mmDCP4_REGAMMA_LUT_WRITE_EN_MASK                                        0x42a3
+#define mmDCP5_REGAMMA_LUT_WRITE_EN_MASK                                        0x44a3
+#define mmREGAMMA_CNTLA_START_CNTL                                              0x1aa4
+#define mmDCP0_REGAMMA_CNTLA_START_CNTL                                         0x1aa4
+#define mmDCP1_REGAMMA_CNTLA_START_CNTL                                         0x1ca4
+#define mmDCP2_REGAMMA_CNTLA_START_CNTL                                         0x1ea4
+#define mmDCP3_REGAMMA_CNTLA_START_CNTL                                         0x40a4
+#define mmDCP4_REGAMMA_CNTLA_START_CNTL                                         0x42a4
+#define mmDCP5_REGAMMA_CNTLA_START_CNTL                                         0x44a4
+#define mmREGAMMA_CNTLA_SLOPE_CNTL                                              0x1aa5
+#define mmDCP0_REGAMMA_CNTLA_SLOPE_CNTL                                         0x1aa5
+#define mmDCP1_REGAMMA_CNTLA_SLOPE_CNTL                                         0x1ca5
+#define mmDCP2_REGAMMA_CNTLA_SLOPE_CNTL                                         0x1ea5
+#define mmDCP3_REGAMMA_CNTLA_SLOPE_CNTL                                         0x40a5
+#define mmDCP4_REGAMMA_CNTLA_SLOPE_CNTL                                         0x42a5
+#define mmDCP5_REGAMMA_CNTLA_SLOPE_CNTL                                         0x44a5
+#define mmREGAMMA_CNTLA_END_CNTL1                                               0x1aa6
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL1                                          0x1aa6
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL1                                          0x1ca6
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL1                                          0x1ea6
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL1                                          0x40a6
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL1                                          0x42a6
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL1                                          0x44a6
+#define mmREGAMMA_CNTLA_END_CNTL2                                               0x1aa7
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL2                                          0x1aa7
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL2                                          0x1ca7
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL2                                          0x1ea7
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL2                                          0x40a7
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL2                                          0x42a7
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL2                                          0x44a7
+#define mmREGAMMA_CNTLA_REGION_0_1                                              0x1aa8
+#define mmDCP0_REGAMMA_CNTLA_REGION_0_1                                         0x1aa8
+#define mmDCP1_REGAMMA_CNTLA_REGION_0_1                                         0x1ca8
+#define mmDCP2_REGAMMA_CNTLA_REGION_0_1                                         0x1ea8
+#define mmDCP3_REGAMMA_CNTLA_REGION_0_1                                         0x40a8
+#define mmDCP4_REGAMMA_CNTLA_REGION_0_1                                         0x42a8
+#define mmDCP5_REGAMMA_CNTLA_REGION_0_1                                         0x44a8
+#define mmREGAMMA_CNTLA_REGION_2_3                                              0x1aa9
+#define mmDCP0_REGAMMA_CNTLA_REGION_2_3                                         0x1aa9
+#define mmDCP1_REGAMMA_CNTLA_REGION_2_3                                         0x1ca9
+#define mmDCP2_REGAMMA_CNTLA_REGION_2_3                                         0x1ea9
+#define mmDCP3_REGAMMA_CNTLA_REGION_2_3                                         0x40a9
+#define mmDCP4_REGAMMA_CNTLA_REGION_2_3                                         0x42a9
+#define mmDCP5_REGAMMA_CNTLA_REGION_2_3                                         0x44a9
+#define mmREGAMMA_CNTLA_REGION_4_5                                              0x1aaa
+#define mmDCP0_REGAMMA_CNTLA_REGION_4_5                                         0x1aaa
+#define mmDCP1_REGAMMA_CNTLA_REGION_4_5                                         0x1caa
+#define mmDCP2_REGAMMA_CNTLA_REGION_4_5                                         0x1eaa
+#define mmDCP3_REGAMMA_CNTLA_REGION_4_5                                         0x40aa
+#define mmDCP4_REGAMMA_CNTLA_REGION_4_5                                         0x42aa
+#define mmDCP5_REGAMMA_CNTLA_REGION_4_5                                         0x44aa
+#define mmREGAMMA_CNTLA_REGION_6_7                                              0x1aab
+#define mmDCP0_REGAMMA_CNTLA_REGION_6_7                                         0x1aab
+#define mmDCP1_REGAMMA_CNTLA_REGION_6_7                                         0x1cab
+#define mmDCP2_REGAMMA_CNTLA_REGION_6_7                                         0x1eab
+#define mmDCP3_REGAMMA_CNTLA_REGION_6_7                                         0x40ab
+#define mmDCP4_REGAMMA_CNTLA_REGION_6_7                                         0x42ab
+#define mmDCP5_REGAMMA_CNTLA_REGION_6_7                                         0x44ab
+#define mmREGAMMA_CNTLA_REGION_8_9                                              0x1aac
+#define mmDCP0_REGAMMA_CNTLA_REGION_8_9                                         0x1aac
+#define mmDCP1_REGAMMA_CNTLA_REGION_8_9                                         0x1cac
+#define mmDCP2_REGAMMA_CNTLA_REGION_8_9                                         0x1eac
+#define mmDCP3_REGAMMA_CNTLA_REGION_8_9                                         0x40ac
+#define mmDCP4_REGAMMA_CNTLA_REGION_8_9                                         0x42ac
+#define mmDCP5_REGAMMA_CNTLA_REGION_8_9                                         0x44ac
+#define mmREGAMMA_CNTLA_REGION_10_11                                            0x1aad
+#define mmDCP0_REGAMMA_CNTLA_REGION_10_11                                       0x1aad
+#define mmDCP1_REGAMMA_CNTLA_REGION_10_11                                       0x1cad
+#define mmDCP2_REGAMMA_CNTLA_REGION_10_11                                       0x1ead
+#define mmDCP3_REGAMMA_CNTLA_REGION_10_11                                       0x40ad
+#define mmDCP4_REGAMMA_CNTLA_REGION_10_11                                       0x42ad
+#define mmDCP5_REGAMMA_CNTLA_REGION_10_11                                       0x44ad
+#define mmREGAMMA_CNTLA_REGION_12_13                                            0x1aae
+#define mmDCP0_REGAMMA_CNTLA_REGION_12_13                                       0x1aae
+#define mmDCP1_REGAMMA_CNTLA_REGION_12_13                                       0x1cae
+#define mmDCP2_REGAMMA_CNTLA_REGION_12_13                                       0x1eae
+#define mmDCP3_REGAMMA_CNTLA_REGION_12_13                                       0x40ae
+#define mmDCP4_REGAMMA_CNTLA_REGION_12_13                                       0x42ae
+#define mmDCP5_REGAMMA_CNTLA_REGION_12_13                                       0x44ae
+#define mmREGAMMA_CNTLA_REGION_14_15                                            0x1aaf
+#define mmDCP0_REGAMMA_CNTLA_REGION_14_15                                       0x1aaf
+#define mmDCP1_REGAMMA_CNTLA_REGION_14_15                                       0x1caf
+#define mmDCP2_REGAMMA_CNTLA_REGION_14_15                                       0x1eaf
+#define mmDCP3_REGAMMA_CNTLA_REGION_14_15                                       0x40af
+#define mmDCP4_REGAMMA_CNTLA_REGION_14_15                                       0x42af
+#define mmDCP5_REGAMMA_CNTLA_REGION_14_15                                       0x44af
+#define mmREGAMMA_CNTLB_START_CNTL                                              0x1ab0
+#define mmDCP0_REGAMMA_CNTLB_START_CNTL                                         0x1ab0
+#define mmDCP1_REGAMMA_CNTLB_START_CNTL                                         0x1cb0
+#define mmDCP2_REGAMMA_CNTLB_START_CNTL                                         0x1eb0
+#define mmDCP3_REGAMMA_CNTLB_START_CNTL                                         0x40b0
+#define mmDCP4_REGAMMA_CNTLB_START_CNTL                                         0x42b0
+#define mmDCP5_REGAMMA_CNTLB_START_CNTL                                         0x44b0
+#define mmREGAMMA_CNTLB_SLOPE_CNTL                                              0x1ab1
+#define mmDCP0_REGAMMA_CNTLB_SLOPE_CNTL                                         0x1ab1
+#define mmDCP1_REGAMMA_CNTLB_SLOPE_CNTL                                         0x1cb1
+#define mmDCP2_REGAMMA_CNTLB_SLOPE_CNTL                                         0x1eb1
+#define mmDCP3_REGAMMA_CNTLB_SLOPE_CNTL                                         0x40b1
+#define mmDCP4_REGAMMA_CNTLB_SLOPE_CNTL                                         0x42b1
+#define mmDCP5_REGAMMA_CNTLB_SLOPE_CNTL                                         0x44b1
+#define mmREGAMMA_CNTLB_END_CNTL1                                               0x1ab2
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL1                                          0x1ab2
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL1                                          0x1cb2
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL1                                          0x1eb2
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL1                                          0x40b2
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL1                                          0x42b2
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL1                                          0x44b2
+#define mmREGAMMA_CNTLB_END_CNTL2                                               0x1ab3
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL2                                          0x1ab3
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL2                                          0x1cb3
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL2                                          0x1eb3
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL2                                          0x40b3
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL2                                          0x42b3
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL2                                          0x44b3
+#define mmREGAMMA_CNTLB_REGION_0_1                                              0x1ab4
+#define mmDCP0_REGAMMA_CNTLB_REGION_0_1                                         0x1ab4
+#define mmDCP1_REGAMMA_CNTLB_REGION_0_1                                         0x1cb4
+#define mmDCP2_REGAMMA_CNTLB_REGION_0_1                                         0x1eb4
+#define mmDCP3_REGAMMA_CNTLB_REGION_0_1                                         0x40b4
+#define mmDCP4_REGAMMA_CNTLB_REGION_0_1                                         0x42b4
+#define mmDCP5_REGAMMA_CNTLB_REGION_0_1                                         0x44b4
+#define mmREGAMMA_CNTLB_REGION_2_3                                              0x1ab5
+#define mmDCP0_REGAMMA_CNTLB_REGION_2_3                                         0x1ab5
+#define mmDCP1_REGAMMA_CNTLB_REGION_2_3                                         0x1cb5
+#define mmDCP2_REGAMMA_CNTLB_REGION_2_3                                         0x1eb5
+#define mmDCP3_REGAMMA_CNTLB_REGION_2_3                                         0x40b5
+#define mmDCP4_REGAMMA_CNTLB_REGION_2_3                                         0x42b5
+#define mmDCP5_REGAMMA_CNTLB_REGION_2_3                                         0x44b5
+#define mmREGAMMA_CNTLB_REGION_4_5                                              0x1ab6
+#define mmDCP0_REGAMMA_CNTLB_REGION_4_5                                         0x1ab6
+#define mmDCP1_REGAMMA_CNTLB_REGION_4_5                                         0x1cb6
+#define mmDCP2_REGAMMA_CNTLB_REGION_4_5                                         0x1eb6
+#define mmDCP3_REGAMMA_CNTLB_REGION_4_5                                         0x40b6
+#define mmDCP4_REGAMMA_CNTLB_REGION_4_5                                         0x42b6
+#define mmDCP5_REGAMMA_CNTLB_REGION_4_5                                         0x44b6
+#define mmREGAMMA_CNTLB_REGION_6_7                                              0x1ab7
+#define mmDCP0_REGAMMA_CNTLB_REGION_6_7                                         0x1ab7
+#define mmDCP1_REGAMMA_CNTLB_REGION_6_7                                         0x1cb7
+#define mmDCP2_REGAMMA_CNTLB_REGION_6_7                                         0x1eb7
+#define mmDCP3_REGAMMA_CNTLB_REGION_6_7                                         0x40b7
+#define mmDCP4_REGAMMA_CNTLB_REGION_6_7                                         0x42b7
+#define mmDCP5_REGAMMA_CNTLB_REGION_6_7                                         0x44b7
+#define mmREGAMMA_CNTLB_REGION_8_9                                              0x1ab8
+#define mmDCP0_REGAMMA_CNTLB_REGION_8_9                                         0x1ab8
+#define mmDCP1_REGAMMA_CNTLB_REGION_8_9                                         0x1cb8
+#define mmDCP2_REGAMMA_CNTLB_REGION_8_9                                         0x1eb8
+#define mmDCP3_REGAMMA_CNTLB_REGION_8_9                                         0x40b8
+#define mmDCP4_REGAMMA_CNTLB_REGION_8_9                                         0x42b8
+#define mmDCP5_REGAMMA_CNTLB_REGION_8_9                                         0x44b8
+#define mmREGAMMA_CNTLB_REGION_10_11                                            0x1ab9
+#define mmDCP0_REGAMMA_CNTLB_REGION_10_11                                       0x1ab9
+#define mmDCP1_REGAMMA_CNTLB_REGION_10_11                                       0x1cb9
+#define mmDCP2_REGAMMA_CNTLB_REGION_10_11                                       0x1eb9
+#define mmDCP3_REGAMMA_CNTLB_REGION_10_11                                       0x40b9
+#define mmDCP4_REGAMMA_CNTLB_REGION_10_11                                       0x42b9
+#define mmDCP5_REGAMMA_CNTLB_REGION_10_11                                       0x44b9
+#define mmREGAMMA_CNTLB_REGION_12_13                                            0x1aba
+#define mmDCP0_REGAMMA_CNTLB_REGION_12_13                                       0x1aba
+#define mmDCP1_REGAMMA_CNTLB_REGION_12_13                                       0x1cba
+#define mmDCP2_REGAMMA_CNTLB_REGION_12_13                                       0x1eba
+#define mmDCP3_REGAMMA_CNTLB_REGION_12_13                                       0x40ba
+#define mmDCP4_REGAMMA_CNTLB_REGION_12_13                                       0x42ba
+#define mmDCP5_REGAMMA_CNTLB_REGION_12_13                                       0x44ba
+#define mmREGAMMA_CNTLB_REGION_14_15                                            0x1abb
+#define mmDCP0_REGAMMA_CNTLB_REGION_14_15                                       0x1abb
+#define mmDCP1_REGAMMA_CNTLB_REGION_14_15                                       0x1cbb
+#define mmDCP2_REGAMMA_CNTLB_REGION_14_15                                       0x1ebb
+#define mmDCP3_REGAMMA_CNTLB_REGION_14_15                                       0x40bb
+#define mmDCP4_REGAMMA_CNTLB_REGION_14_15                                       0x42bb
+#define mmDCP5_REGAMMA_CNTLB_REGION_14_15                                       0x44bb
+#define mmALPHA_CONTROL                                                         0x1abc
+#define mmDCP0_ALPHA_CONTROL                                                    0x1abc
+#define mmDCP1_ALPHA_CONTROL                                                    0x1cbc
+#define mmDCP2_ALPHA_CONTROL                                                    0x1ebc
+#define mmDCP3_ALPHA_CONTROL                                                    0x40bc
+#define mmDCP4_ALPHA_CONTROL                                                    0x42bc
+#define mmDCP5_ALPHA_CONTROL                                                    0x44bc
+#define mmGRPH_XDMA_RECOVERY_SURFACE_ADDRESS                                    0x1abd
+#define mmDCP0_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS                               0x1abd
+#define mmDCP1_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS                               0x1cbd
+#define mmDCP2_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS                               0x1ebd
+#define mmDCP3_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS                               0x40bd
+#define mmDCP4_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS                               0x42bd
+#define mmDCP5_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS                               0x44bd
+#define mmGRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                               0x1abe
+#define mmDCP0_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                          0x1abe
+#define mmDCP1_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                          0x1cbe
+#define mmDCP2_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                          0x1ebe
+#define mmDCP3_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                          0x40be
+#define mmDCP4_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                          0x42be
+#define mmDCP5_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH                          0x44be
+#define mmGRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                                  0x1abf
+#define mmDCP0_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                             0x1abf
+#define mmDCP1_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                             0x1cbf
+#define mmDCP2_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                             0x1ebf
+#define mmDCP3_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                             0x40bf
+#define mmDCP4_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                             0x42bf
+#define mmDCP5_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS                             0x44bf
+#define mmGRPH_SURFACE_COUNTER_CONTROL                                          0x1a0f
+#define mmDCP0_GRPH_SURFACE_COUNTER_CONTROL                                     0x1a0f
+#define mmDCP1_GRPH_SURFACE_COUNTER_CONTROL                                     0x1c0f
+#define mmDCP2_GRPH_SURFACE_COUNTER_CONTROL                                     0x1e0f
+#define mmDCP3_GRPH_SURFACE_COUNTER_CONTROL                                     0x400f
+#define mmDCP4_GRPH_SURFACE_COUNTER_CONTROL                                     0x420f
+#define mmDCP5_GRPH_SURFACE_COUNTER_CONTROL                                     0x440f
+#define mmGRPH_SURFACE_COUNTER_OUTPUT                                           0x1a1d
+#define mmDCP0_GRPH_SURFACE_COUNTER_OUTPUT                                      0x1a1d
+#define mmDCP1_GRPH_SURFACE_COUNTER_OUTPUT                                      0x1c1d
+#define mmDCP2_GRPH_SURFACE_COUNTER_OUTPUT                                      0x1e1d
+#define mmDCP3_GRPH_SURFACE_COUNTER_OUTPUT                                      0x401d
+#define mmDCP4_GRPH_SURFACE_COUNTER_OUTPUT                                      0x421d
+#define mmDCP5_GRPH_SURFACE_COUNTER_OUTPUT                                      0x441d
+#define mmDIG_FE_CNTL                                                           0x4a00
+#define mmDIG0_DIG_FE_CNTL                                                      0x4a00
+#define mmDIG1_DIG_FE_CNTL                                                      0x4b00
+#define mmDIG2_DIG_FE_CNTL                                                      0x4c00
+#define mmDIG3_DIG_FE_CNTL                                                      0x4d00
+#define mmDIG4_DIG_FE_CNTL                                                      0x4e00
+#define mmDIG5_DIG_FE_CNTL                                                      0x4f00
+#define mmDIG6_DIG_FE_CNTL                                                      0x5400
+#define mmDIG7_DIG_FE_CNTL                                                      0x5600
+#define mmDIG8_DIG_FE_CNTL                                                      0x5700
+#define mmDIG_OUTPUT_CRC_CNTL                                                   0x4a01
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL                                              0x4a01
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL                                              0x4b01
+#define mmDIG2_DIG_OUTPUT_CRC_CNTL                                              0x4c01
+#define mmDIG3_DIG_OUTPUT_CRC_CNTL                                              0x4d01
+#define mmDIG4_DIG_OUTPUT_CRC_CNTL                                              0x4e01
+#define mmDIG5_DIG_OUTPUT_CRC_CNTL                                              0x4f01
+#define mmDIG6_DIG_OUTPUT_CRC_CNTL                                              0x5401
+#define mmDIG7_DIG_OUTPUT_CRC_CNTL                                              0x5601
+#define mmDIG8_DIG_OUTPUT_CRC_CNTL                                              0x5701
+#define mmDIG_OUTPUT_CRC_RESULT                                                 0x4a02
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT                                            0x4a02
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT                                            0x4b02
+#define mmDIG2_DIG_OUTPUT_CRC_RESULT                                            0x4c02
+#define mmDIG3_DIG_OUTPUT_CRC_RESULT                                            0x4d02
+#define mmDIG4_DIG_OUTPUT_CRC_RESULT                                            0x4e02
+#define mmDIG5_DIG_OUTPUT_CRC_RESULT                                            0x4f02
+#define mmDIG6_DIG_OUTPUT_CRC_RESULT                                            0x5402
+#define mmDIG7_DIG_OUTPUT_CRC_RESULT                                            0x5602
+#define mmDIG8_DIG_OUTPUT_CRC_RESULT                                            0x5702
+#define mmDIG_CLOCK_PATTERN                                                     0x4a03
+#define mmDIG0_DIG_CLOCK_PATTERN                                                0x4a03
+#define mmDIG1_DIG_CLOCK_PATTERN                                                0x4b03
+#define mmDIG2_DIG_CLOCK_PATTERN                                                0x4c03
+#define mmDIG3_DIG_CLOCK_PATTERN                                                0x4d03
+#define mmDIG4_DIG_CLOCK_PATTERN                                                0x4e03
+#define mmDIG5_DIG_CLOCK_PATTERN                                                0x4f03
+#define mmDIG6_DIG_CLOCK_PATTERN                                                0x5403
+#define mmDIG7_DIG_CLOCK_PATTERN                                                0x5603
+#define mmDIG8_DIG_CLOCK_PATTERN                                                0x5703
+#define mmDIG_TEST_PATTERN                                                      0x4a04
+#define mmDIG0_DIG_TEST_PATTERN                                                 0x4a04
+#define mmDIG1_DIG_TEST_PATTERN                                                 0x4b04
+#define mmDIG2_DIG_TEST_PATTERN                                                 0x4c04
+#define mmDIG3_DIG_TEST_PATTERN                                                 0x4d04
+#define mmDIG4_DIG_TEST_PATTERN                                                 0x4e04
+#define mmDIG5_DIG_TEST_PATTERN                                                 0x4f04
+#define mmDIG6_DIG_TEST_PATTERN                                                 0x5404
+#define mmDIG7_DIG_TEST_PATTERN                                                 0x5604
+#define mmDIG8_DIG_TEST_PATTERN                                                 0x5704
+#define mmDIG_RANDOM_PATTERN_SEED                                               0x4a05
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED                                          0x4a05
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED                                          0x4b05
+#define mmDIG2_DIG_RANDOM_PATTERN_SEED                                          0x4c05
+#define mmDIG3_DIG_RANDOM_PATTERN_SEED                                          0x4d05
+#define mmDIG4_DIG_RANDOM_PATTERN_SEED                                          0x4e05
+#define mmDIG5_DIG_RANDOM_PATTERN_SEED                                          0x4f05
+#define mmDIG6_DIG_RANDOM_PATTERN_SEED                                          0x5405
+#define mmDIG7_DIG_RANDOM_PATTERN_SEED                                          0x5605
+#define mmDIG8_DIG_RANDOM_PATTERN_SEED                                          0x5705
+#define mmDIG_FIFO_STATUS                                                       0x4a06
+#define mmDIG0_DIG_FIFO_STATUS                                                  0x4a06
+#define mmDIG1_DIG_FIFO_STATUS                                                  0x4b06
+#define mmDIG2_DIG_FIFO_STATUS                                                  0x4c06
+#define mmDIG3_DIG_FIFO_STATUS                                                  0x4d06
+#define mmDIG4_DIG_FIFO_STATUS                                                  0x4e06
+#define mmDIG5_DIG_FIFO_STATUS                                                  0x4f06
+#define mmDIG6_DIG_FIFO_STATUS                                                  0x5406
+#define mmDIG7_DIG_FIFO_STATUS                                                  0x5606
+#define mmDIG8_DIG_FIFO_STATUS                                                  0x5706
+#define mmDIG_DISPCLK_SWITCH_CNTL                                               0x4a07
+#define mmDIG0_DIG_DISPCLK_SWITCH_CNTL                                          0x4a07
+#define mmDIG1_DIG_DISPCLK_SWITCH_CNTL                                          0x4b07
+#define mmDIG2_DIG_DISPCLK_SWITCH_CNTL                                          0x4c07
+#define mmDIG3_DIG_DISPCLK_SWITCH_CNTL                                          0x4d07
+#define mmDIG4_DIG_DISPCLK_SWITCH_CNTL                                          0x4e07
+#define mmDIG5_DIG_DISPCLK_SWITCH_CNTL                                          0x4f07
+#define mmDIG6_DIG_DISPCLK_SWITCH_CNTL                                          0x5407
+#define mmDIG7_DIG_DISPCLK_SWITCH_CNTL                                          0x5607
+#define mmDIG8_DIG_DISPCLK_SWITCH_CNTL                                          0x5707
+#define mmDIG_DISPCLK_SWITCH_STATUS                                             0x4a08
+#define mmDIG0_DIG_DISPCLK_SWITCH_STATUS                                        0x4a08
+#define mmDIG1_DIG_DISPCLK_SWITCH_STATUS                                        0x4b08
+#define mmDIG2_DIG_DISPCLK_SWITCH_STATUS                                        0x4c08
+#define mmDIG3_DIG_DISPCLK_SWITCH_STATUS                                        0x4d08
+#define mmDIG4_DIG_DISPCLK_SWITCH_STATUS                                        0x4e08
+#define mmDIG5_DIG_DISPCLK_SWITCH_STATUS                                        0x4f08
+#define mmDIG6_DIG_DISPCLK_SWITCH_STATUS                                        0x5408
+#define mmDIG7_DIG_DISPCLK_SWITCH_STATUS                                        0x5608
+#define mmDIG8_DIG_DISPCLK_SWITCH_STATUS                                        0x5708
+#define mmHDMI_CONTROL                                                          0x4a09
+#define mmDIG0_HDMI_CONTROL                                                     0x4a09
+#define mmDIG1_HDMI_CONTROL                                                     0x4b09
+#define mmDIG2_HDMI_CONTROL                                                     0x4c09
+#define mmDIG3_HDMI_CONTROL                                                     0x4d09
+#define mmDIG4_HDMI_CONTROL                                                     0x4e09
+#define mmDIG5_HDMI_CONTROL                                                     0x4f09
+#define mmDIG6_HDMI_CONTROL                                                     0x5409
+#define mmDIG7_HDMI_CONTROL                                                     0x5609
+#define mmDIG8_HDMI_CONTROL                                                     0x5709
+#define mmHDMI_STATUS                                                           0x4a0a
+#define mmDIG0_HDMI_STATUS                                                      0x4a0a
+#define mmDIG1_HDMI_STATUS                                                      0x4b0a
+#define mmDIG2_HDMI_STATUS                                                      0x4c0a
+#define mmDIG3_HDMI_STATUS                                                      0x4d0a
+#define mmDIG4_HDMI_STATUS                                                      0x4e0a
+#define mmDIG5_HDMI_STATUS                                                      0x4f0a
+#define mmDIG6_HDMI_STATUS                                                      0x540a
+#define mmDIG7_HDMI_STATUS                                                      0x560a
+#define mmDIG8_HDMI_STATUS                                                      0x570a
+#define mmHDMI_AUDIO_PACKET_CONTROL                                             0x4a0b
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL                                        0x4a0b
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL                                        0x4b0b
+#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL                                        0x4c0b
+#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL                                        0x4d0b
+#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL                                        0x4e0b
+#define mmDIG5_HDMI_AUDIO_PACKET_CONTROL                                        0x4f0b
+#define mmDIG6_HDMI_AUDIO_PACKET_CONTROL                                        0x540b
+#define mmDIG7_HDMI_AUDIO_PACKET_CONTROL                                        0x560b
+#define mmDIG8_HDMI_AUDIO_PACKET_CONTROL                                        0x570b
+#define mmHDMI_ACR_PACKET_CONTROL                                               0x4a0c
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL                                          0x4a0c
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL                                          0x4b0c
+#define mmDIG2_HDMI_ACR_PACKET_CONTROL                                          0x4c0c
+#define mmDIG3_HDMI_ACR_PACKET_CONTROL                                          0x4d0c
+#define mmDIG4_HDMI_ACR_PACKET_CONTROL                                          0x4e0c
+#define mmDIG5_HDMI_ACR_PACKET_CONTROL                                          0x4f0c
+#define mmDIG6_HDMI_ACR_PACKET_CONTROL                                          0x540c
+#define mmDIG7_HDMI_ACR_PACKET_CONTROL                                          0x560c
+#define mmDIG8_HDMI_ACR_PACKET_CONTROL                                          0x570c
+#define mmHDMI_VBI_PACKET_CONTROL                                               0x4a0d
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL                                          0x4a0d
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL                                          0x4b0d
+#define mmDIG2_HDMI_VBI_PACKET_CONTROL                                          0x4c0d
+#define mmDIG3_HDMI_VBI_PACKET_CONTROL                                          0x4d0d
+#define mmDIG4_HDMI_VBI_PACKET_CONTROL                                          0x4e0d
+#define mmDIG5_HDMI_VBI_PACKET_CONTROL                                          0x4f0d
+#define mmDIG6_HDMI_VBI_PACKET_CONTROL                                          0x540d
+#define mmDIG7_HDMI_VBI_PACKET_CONTROL                                          0x560d
+#define mmDIG8_HDMI_VBI_PACKET_CONTROL                                          0x570d
+#define mmHDMI_INFOFRAME_CONTROL0                                               0x4a0e
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0                                          0x4a0e
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0                                          0x4b0e
+#define mmDIG2_HDMI_INFOFRAME_CONTROL0                                          0x4c0e
+#define mmDIG3_HDMI_INFOFRAME_CONTROL0                                          0x4d0e
+#define mmDIG4_HDMI_INFOFRAME_CONTROL0                                          0x4e0e
+#define mmDIG5_HDMI_INFOFRAME_CONTROL0                                          0x4f0e
+#define mmDIG6_HDMI_INFOFRAME_CONTROL0                                          0x540e
+#define mmDIG7_HDMI_INFOFRAME_CONTROL0                                          0x560e
+#define mmDIG8_HDMI_INFOFRAME_CONTROL0                                          0x570e
+#define mmHDMI_INFOFRAME_CONTROL1                                               0x4a0f
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1                                          0x4a0f
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1                                          0x4b0f
+#define mmDIG2_HDMI_INFOFRAME_CONTROL1                                          0x4c0f
+#define mmDIG3_HDMI_INFOFRAME_CONTROL1                                          0x4d0f
+#define mmDIG4_HDMI_INFOFRAME_CONTROL1                                          0x4e0f
+#define mmDIG5_HDMI_INFOFRAME_CONTROL1                                          0x4f0f
+#define mmDIG6_HDMI_INFOFRAME_CONTROL1                                          0x540f
+#define mmDIG7_HDMI_INFOFRAME_CONTROL1                                          0x560f
+#define mmDIG8_HDMI_INFOFRAME_CONTROL1                                          0x570f
+#define mmHDMI_GENERIC_PACKET_CONTROL0                                          0x4a10
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0                                     0x4a10
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0                                     0x4b10
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0                                     0x4c10
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0                                     0x4d10
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0                                     0x4e10
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL0                                     0x4f10
+#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL0                                     0x5410
+#define mmDIG7_HDMI_GENERIC_PACKET_CONTROL0                                     0x5610
+#define mmDIG8_HDMI_GENERIC_PACKET_CONTROL0                                     0x5710
+#define mmAFMT_INTERRUPT_STATUS                                                 0x4a11
+#define mmDIG0_AFMT_INTERRUPT_STATUS                                            0x4a11
+#define mmDIG1_AFMT_INTERRUPT_STATUS                                            0x4b11
+#define mmDIG2_AFMT_INTERRUPT_STATUS                                            0x4c11
+#define mmDIG3_AFMT_INTERRUPT_STATUS                                            0x4d11
+#define mmDIG4_AFMT_INTERRUPT_STATUS                                            0x4e11
+#define mmDIG5_AFMT_INTERRUPT_STATUS                                            0x4f11
+#define mmDIG6_AFMT_INTERRUPT_STATUS                                            0x5411
+#define mmDIG7_AFMT_INTERRUPT_STATUS                                            0x5611
+#define mmDIG8_AFMT_INTERRUPT_STATUS                                            0x5711
+#define mmHDMI_GC                                                               0x4a13
+#define mmDIG0_HDMI_GC                                                          0x4a13
+#define mmDIG1_HDMI_GC                                                          0x4b13
+#define mmDIG2_HDMI_GC                                                          0x4c13
+#define mmDIG3_HDMI_GC                                                          0x4d13
+#define mmDIG4_HDMI_GC                                                          0x4e13
+#define mmDIG5_HDMI_GC                                                          0x4f13
+#define mmDIG6_HDMI_GC                                                          0x5413
+#define mmDIG7_HDMI_GC                                                          0x5613
+#define mmDIG8_HDMI_GC                                                          0x5713
+#define mmAFMT_AUDIO_PACKET_CONTROL2                                            0x4a14
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2                                       0x4a14
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2                                       0x4b14
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2                                       0x4c14
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2                                       0x4d14
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2                                       0x4e14
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL2                                       0x4f14
+#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL2                                       0x5414
+#define mmDIG7_AFMT_AUDIO_PACKET_CONTROL2                                       0x5614
+#define mmDIG8_AFMT_AUDIO_PACKET_CONTROL2                                       0x5714
+#define mmAFMT_ISRC1_0                                                          0x4a15
+#define mmDIG0_AFMT_ISRC1_0                                                     0x4a15
+#define mmDIG1_AFMT_ISRC1_0                                                     0x4b15
+#define mmDIG2_AFMT_ISRC1_0                                                     0x4c15
+#define mmDIG3_AFMT_ISRC1_0                                                     0x4d15
+#define mmDIG4_AFMT_ISRC1_0                                                     0x4e15
+#define mmDIG5_AFMT_ISRC1_0                                                     0x4f15
+#define mmDIG6_AFMT_ISRC1_0                                                     0x5415
+#define mmDIG7_AFMT_ISRC1_0                                                     0x5615
+#define mmDIG8_AFMT_ISRC1_0                                                     0x5715
+#define mmAFMT_ISRC1_1                                                          0x4a16
+#define mmDIG0_AFMT_ISRC1_1                                                     0x4a16
+#define mmDIG1_AFMT_ISRC1_1                                                     0x4b16
+#define mmDIG2_AFMT_ISRC1_1                                                     0x4c16
+#define mmDIG3_AFMT_ISRC1_1                                                     0x4d16
+#define mmDIG4_AFMT_ISRC1_1                                                     0x4e16
+#define mmDIG5_AFMT_ISRC1_1                                                     0x4f16
+#define mmDIG6_AFMT_ISRC1_1                                                     0x5416
+#define mmDIG7_AFMT_ISRC1_1                                                     0x5616
+#define mmDIG8_AFMT_ISRC1_1                                                     0x5716
+#define mmAFMT_ISRC1_2                                                          0x4a17
+#define mmDIG0_AFMT_ISRC1_2                                                     0x4a17
+#define mmDIG1_AFMT_ISRC1_2                                                     0x4b17
+#define mmDIG2_AFMT_ISRC1_2                                                     0x4c17
+#define mmDIG3_AFMT_ISRC1_2                                                     0x4d17
+#define mmDIG4_AFMT_ISRC1_2                                                     0x4e17
+#define mmDIG5_AFMT_ISRC1_2                                                     0x4f17
+#define mmDIG6_AFMT_ISRC1_2                                                     0x5417
+#define mmDIG7_AFMT_ISRC1_2                                                     0x5617
+#define mmDIG8_AFMT_ISRC1_2                                                     0x5717
+#define mmAFMT_ISRC1_3                                                          0x4a18
+#define mmDIG0_AFMT_ISRC1_3                                                     0x4a18
+#define mmDIG1_AFMT_ISRC1_3                                                     0x4b18
+#define mmDIG2_AFMT_ISRC1_3                                                     0x4c18
+#define mmDIG3_AFMT_ISRC1_3                                                     0x4d18
+#define mmDIG4_AFMT_ISRC1_3                                                     0x4e18
+#define mmDIG5_AFMT_ISRC1_3                                                     0x4f18
+#define mmDIG6_AFMT_ISRC1_3                                                     0x5418
+#define mmDIG7_AFMT_ISRC1_3                                                     0x5618
+#define mmDIG8_AFMT_ISRC1_3                                                     0x5718
+#define mmAFMT_ISRC1_4                                                          0x4a19
+#define mmDIG0_AFMT_ISRC1_4                                                     0x4a19
+#define mmDIG1_AFMT_ISRC1_4                                                     0x4b19
+#define mmDIG2_AFMT_ISRC1_4                                                     0x4c19
+#define mmDIG3_AFMT_ISRC1_4                                                     0x4d19
+#define mmDIG4_AFMT_ISRC1_4                                                     0x4e19
+#define mmDIG5_AFMT_ISRC1_4                                                     0x4f19
+#define mmDIG6_AFMT_ISRC1_4                                                     0x5419
+#define mmDIG7_AFMT_ISRC1_4                                                     0x5619
+#define mmDIG8_AFMT_ISRC1_4                                                     0x5719
+#define mmAFMT_ISRC2_0                                                          0x4a1a
+#define mmDIG0_AFMT_ISRC2_0                                                     0x4a1a
+#define mmDIG1_AFMT_ISRC2_0                                                     0x4b1a
+#define mmDIG2_AFMT_ISRC2_0                                                     0x4c1a
+#define mmDIG3_AFMT_ISRC2_0                                                     0x4d1a
+#define mmDIG4_AFMT_ISRC2_0                                                     0x4e1a
+#define mmDIG5_AFMT_ISRC2_0                                                     0x4f1a
+#define mmDIG6_AFMT_ISRC2_0                                                     0x541a
+#define mmDIG7_AFMT_ISRC2_0                                                     0x561a
+#define mmDIG8_AFMT_ISRC2_0                                                     0x571a
+#define mmAFMT_ISRC2_1                                                          0x4a1b
+#define mmDIG0_AFMT_ISRC2_1                                                     0x4a1b
+#define mmDIG1_AFMT_ISRC2_1                                                     0x4b1b
+#define mmDIG2_AFMT_ISRC2_1                                                     0x4c1b
+#define mmDIG3_AFMT_ISRC2_1                                                     0x4d1b
+#define mmDIG4_AFMT_ISRC2_1                                                     0x4e1b
+#define mmDIG5_AFMT_ISRC2_1                                                     0x4f1b
+#define mmDIG6_AFMT_ISRC2_1                                                     0x541b
+#define mmDIG7_AFMT_ISRC2_1                                                     0x561b
+#define mmDIG8_AFMT_ISRC2_1                                                     0x571b
+#define mmAFMT_ISRC2_2                                                          0x4a1c
+#define mmDIG0_AFMT_ISRC2_2                                                     0x4a1c
+#define mmDIG1_AFMT_ISRC2_2                                                     0x4b1c
+#define mmDIG2_AFMT_ISRC2_2                                                     0x4c1c
+#define mmDIG3_AFMT_ISRC2_2                                                     0x4d1c
+#define mmDIG4_AFMT_ISRC2_2                                                     0x4e1c
+#define mmDIG5_AFMT_ISRC2_2                                                     0x4f1c
+#define mmDIG6_AFMT_ISRC2_2                                                     0x541c
+#define mmDIG7_AFMT_ISRC2_2                                                     0x561c
+#define mmDIG8_AFMT_ISRC2_2                                                     0x571c
+#define mmAFMT_ISRC2_3                                                          0x4a1d
+#define mmDIG0_AFMT_ISRC2_3                                                     0x4a1d
+#define mmDIG1_AFMT_ISRC2_3                                                     0x4b1d
+#define mmDIG2_AFMT_ISRC2_3                                                     0x4c1d
+#define mmDIG3_AFMT_ISRC2_3                                                     0x4d1d
+#define mmDIG4_AFMT_ISRC2_3                                                     0x4e1d
+#define mmDIG5_AFMT_ISRC2_3                                                     0x4f1d
+#define mmDIG6_AFMT_ISRC2_3                                                     0x541d
+#define mmDIG7_AFMT_ISRC2_3                                                     0x561d
+#define mmDIG8_AFMT_ISRC2_3                                                     0x571d
+#define mmAFMT_AVI_INFO0                                                        0x4a1e
+#define mmDIG0_AFMT_AVI_INFO0                                                   0x4a1e
+#define mmDIG1_AFMT_AVI_INFO0                                                   0x4b1e
+#define mmDIG2_AFMT_AVI_INFO0                                                   0x4c1e
+#define mmDIG3_AFMT_AVI_INFO0                                                   0x4d1e
+#define mmDIG4_AFMT_AVI_INFO0                                                   0x4e1e
+#define mmDIG5_AFMT_AVI_INFO0                                                   0x4f1e
+#define mmDIG6_AFMT_AVI_INFO0                                                   0x541e
+#define mmDIG7_AFMT_AVI_INFO0                                                   0x561e
+#define mmDIG8_AFMT_AVI_INFO0                                                   0x571e
+#define mmAFMT_AVI_INFO1                                                        0x4a1f
+#define mmDIG0_AFMT_AVI_INFO1                                                   0x4a1f
+#define mmDIG1_AFMT_AVI_INFO1                                                   0x4b1f
+#define mmDIG2_AFMT_AVI_INFO1                                                   0x4c1f
+#define mmDIG3_AFMT_AVI_INFO1                                                   0x4d1f
+#define mmDIG4_AFMT_AVI_INFO1                                                   0x4e1f
+#define mmDIG5_AFMT_AVI_INFO1                                                   0x4f1f
+#define mmDIG6_AFMT_AVI_INFO1                                                   0x541f
+#define mmDIG7_AFMT_AVI_INFO1                                                   0x561f
+#define mmDIG8_AFMT_AVI_INFO1                                                   0x571f
+#define mmAFMT_AVI_INFO2                                                        0x4a20
+#define mmDIG0_AFMT_AVI_INFO2                                                   0x4a20
+#define mmDIG1_AFMT_AVI_INFO2                                                   0x4b20
+#define mmDIG2_AFMT_AVI_INFO2                                                   0x4c20
+#define mmDIG3_AFMT_AVI_INFO2                                                   0x4d20
+#define mmDIG4_AFMT_AVI_INFO2                                                   0x4e20
+#define mmDIG5_AFMT_AVI_INFO2                                                   0x4f20
+#define mmDIG6_AFMT_AVI_INFO2                                                   0x5420
+#define mmDIG7_AFMT_AVI_INFO2                                                   0x5620
+#define mmDIG8_AFMT_AVI_INFO2                                                   0x5720
+#define mmAFMT_AVI_INFO3                                                        0x4a21
+#define mmDIG0_AFMT_AVI_INFO3                                                   0x4a21
+#define mmDIG1_AFMT_AVI_INFO3                                                   0x4b21
+#define mmDIG2_AFMT_AVI_INFO3                                                   0x4c21
+#define mmDIG3_AFMT_AVI_INFO3                                                   0x4d21
+#define mmDIG4_AFMT_AVI_INFO3                                                   0x4e21
+#define mmDIG5_AFMT_AVI_INFO3                                                   0x4f21
+#define mmDIG6_AFMT_AVI_INFO3                                                   0x5421
+#define mmDIG7_AFMT_AVI_INFO3                                                   0x5621
+#define mmDIG8_AFMT_AVI_INFO3                                                   0x5721
+#define mmAFMT_MPEG_INFO0                                                       0x4a22
+#define mmDIG0_AFMT_MPEG_INFO0                                                  0x4a22
+#define mmDIG1_AFMT_MPEG_INFO0                                                  0x4b22
+#define mmDIG2_AFMT_MPEG_INFO0                                                  0x4c22
+#define mmDIG3_AFMT_MPEG_INFO0                                                  0x4d22
+#define mmDIG4_AFMT_MPEG_INFO0                                                  0x4e22
+#define mmDIG5_AFMT_MPEG_INFO0                                                  0x4f22
+#define mmDIG6_AFMT_MPEG_INFO0                                                  0x5422
+#define mmDIG7_AFMT_MPEG_INFO0                                                  0x5622
+#define mmDIG8_AFMT_MPEG_INFO0                                                  0x5722
+#define mmAFMT_MPEG_INFO1                                                       0x4a23
+#define mmDIG0_AFMT_MPEG_INFO1                                                  0x4a23
+#define mmDIG1_AFMT_MPEG_INFO1                                                  0x4b23
+#define mmDIG2_AFMT_MPEG_INFO1                                                  0x4c23
+#define mmDIG3_AFMT_MPEG_INFO1                                                  0x4d23
+#define mmDIG4_AFMT_MPEG_INFO1                                                  0x4e23
+#define mmDIG5_AFMT_MPEG_INFO1                                                  0x4f23
+#define mmDIG6_AFMT_MPEG_INFO1                                                  0x5423
+#define mmDIG7_AFMT_MPEG_INFO1                                                  0x5623
+#define mmDIG8_AFMT_MPEG_INFO1                                                  0x5723
+#define mmAFMT_GENERIC_HDR                                                      0x4a24
+#define mmDIG0_AFMT_GENERIC_HDR                                                 0x4a24
+#define mmDIG1_AFMT_GENERIC_HDR                                                 0x4b24
+#define mmDIG2_AFMT_GENERIC_HDR                                                 0x4c24
+#define mmDIG3_AFMT_GENERIC_HDR                                                 0x4d24
+#define mmDIG4_AFMT_GENERIC_HDR                                                 0x4e24
+#define mmDIG5_AFMT_GENERIC_HDR                                                 0x4f24
+#define mmDIG6_AFMT_GENERIC_HDR                                                 0x5424
+#define mmDIG7_AFMT_GENERIC_HDR                                                 0x5624
+#define mmDIG8_AFMT_GENERIC_HDR                                                 0x5724
+#define mmAFMT_GENERIC_0                                                        0x4a25
+#define mmDIG0_AFMT_GENERIC_0                                                   0x4a25
+#define mmDIG1_AFMT_GENERIC_0                                                   0x4b25
+#define mmDIG2_AFMT_GENERIC_0                                                   0x4c25
+#define mmDIG3_AFMT_GENERIC_0                                                   0x4d25
+#define mmDIG4_AFMT_GENERIC_0                                                   0x4e25
+#define mmDIG5_AFMT_GENERIC_0                                                   0x4f25
+#define mmDIG6_AFMT_GENERIC_0                                                   0x5425
+#define mmDIG7_AFMT_GENERIC_0                                                   0x5625
+#define mmDIG8_AFMT_GENERIC_0                                                   0x5725
+#define mmAFMT_GENERIC_1                                                        0x4a26
+#define mmDIG0_AFMT_GENERIC_1                                                   0x4a26
+#define mmDIG1_AFMT_GENERIC_1                                                   0x4b26
+#define mmDIG2_AFMT_GENERIC_1                                                   0x4c26
+#define mmDIG3_AFMT_GENERIC_1                                                   0x4d26
+#define mmDIG4_AFMT_GENERIC_1                                                   0x4e26
+#define mmDIG5_AFMT_GENERIC_1                                                   0x4f26
+#define mmDIG6_AFMT_GENERIC_1                                                   0x5426
+#define mmDIG7_AFMT_GENERIC_1                                                   0x5626
+#define mmDIG8_AFMT_GENERIC_1                                                   0x5726
+#define mmAFMT_GENERIC_2                                                        0x4a27
+#define mmDIG0_AFMT_GENERIC_2                                                   0x4a27
+#define mmDIG1_AFMT_GENERIC_2                                                   0x4b27
+#define mmDIG2_AFMT_GENERIC_2                                                   0x4c27
+#define mmDIG3_AFMT_GENERIC_2                                                   0x4d27
+#define mmDIG4_AFMT_GENERIC_2                                                   0x4e27
+#define mmDIG5_AFMT_GENERIC_2                                                   0x4f27
+#define mmDIG6_AFMT_GENERIC_2                                                   0x5427
+#define mmDIG7_AFMT_GENERIC_2                                                   0x5627
+#define mmDIG8_AFMT_GENERIC_2                                                   0x5727
+#define mmAFMT_GENERIC_3                                                        0x4a28
+#define mmDIG0_AFMT_GENERIC_3                                                   0x4a28
+#define mmDIG1_AFMT_GENERIC_3                                                   0x4b28
+#define mmDIG2_AFMT_GENERIC_3                                                   0x4c28
+#define mmDIG3_AFMT_GENERIC_3                                                   0x4d28
+#define mmDIG4_AFMT_GENERIC_3                                                   0x4e28
+#define mmDIG5_AFMT_GENERIC_3                                                   0x4f28
+#define mmDIG6_AFMT_GENERIC_3                                                   0x5428
+#define mmDIG7_AFMT_GENERIC_3                                                   0x5628
+#define mmDIG8_AFMT_GENERIC_3                                                   0x5728
+#define mmAFMT_GENERIC_4                                                        0x4a29
+#define mmDIG0_AFMT_GENERIC_4                                                   0x4a29
+#define mmDIG1_AFMT_GENERIC_4                                                   0x4b29
+#define mmDIG2_AFMT_GENERIC_4                                                   0x4c29
+#define mmDIG3_AFMT_GENERIC_4                                                   0x4d29
+#define mmDIG4_AFMT_GENERIC_4                                                   0x4e29
+#define mmDIG5_AFMT_GENERIC_4                                                   0x4f29
+#define mmDIG6_AFMT_GENERIC_4                                                   0x5429
+#define mmDIG7_AFMT_GENERIC_4                                                   0x5629
+#define mmDIG8_AFMT_GENERIC_4                                                   0x5729
+#define mmAFMT_GENERIC_5                                                        0x4a2a
+#define mmDIG0_AFMT_GENERIC_5                                                   0x4a2a
+#define mmDIG1_AFMT_GENERIC_5                                                   0x4b2a
+#define mmDIG2_AFMT_GENERIC_5                                                   0x4c2a
+#define mmDIG3_AFMT_GENERIC_5                                                   0x4d2a
+#define mmDIG4_AFMT_GENERIC_5                                                   0x4e2a
+#define mmDIG5_AFMT_GENERIC_5                                                   0x4f2a
+#define mmDIG6_AFMT_GENERIC_5                                                   0x542a
+#define mmDIG7_AFMT_GENERIC_5                                                   0x562a
+#define mmDIG8_AFMT_GENERIC_5                                                   0x572a
+#define mmAFMT_GENERIC_6                                                        0x4a2b
+#define mmDIG0_AFMT_GENERIC_6                                                   0x4a2b
+#define mmDIG1_AFMT_GENERIC_6                                                   0x4b2b
+#define mmDIG2_AFMT_GENERIC_6                                                   0x4c2b
+#define mmDIG3_AFMT_GENERIC_6                                                   0x4d2b
+#define mmDIG4_AFMT_GENERIC_6                                                   0x4e2b
+#define mmDIG5_AFMT_GENERIC_6                                                   0x4f2b
+#define mmDIG6_AFMT_GENERIC_6                                                   0x542b
+#define mmDIG7_AFMT_GENERIC_6                                                   0x562b
+#define mmDIG8_AFMT_GENERIC_6                                                   0x572b
+#define mmAFMT_GENERIC_7                                                        0x4a2c
+#define mmDIG0_AFMT_GENERIC_7                                                   0x4a2c
+#define mmDIG1_AFMT_GENERIC_7                                                   0x4b2c
+#define mmDIG2_AFMT_GENERIC_7                                                   0x4c2c
+#define mmDIG3_AFMT_GENERIC_7                                                   0x4d2c
+#define mmDIG4_AFMT_GENERIC_7                                                   0x4e2c
+#define mmDIG5_AFMT_GENERIC_7                                                   0x4f2c
+#define mmDIG6_AFMT_GENERIC_7                                                   0x542c
+#define mmDIG7_AFMT_GENERIC_7                                                   0x562c
+#define mmDIG8_AFMT_GENERIC_7                                                   0x572c
+#define mmHDMI_GENERIC_PACKET_CONTROL1                                          0x4a2d
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1                                     0x4a2d
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1                                     0x4b2d
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1                                     0x4c2d
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1                                     0x4d2d
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1                                     0x4e2d
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL1                                     0x4f2d
+#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL1                                     0x542d
+#define mmDIG7_HDMI_GENERIC_PACKET_CONTROL1                                     0x562d
+#define mmDIG8_HDMI_GENERIC_PACKET_CONTROL1                                     0x572d
+#define mmHDMI_ACR_32_0                                                         0x4a2e
+#define mmDIG0_HDMI_ACR_32_0                                                    0x4a2e
+#define mmDIG1_HDMI_ACR_32_0                                                    0x4b2e
+#define mmDIG2_HDMI_ACR_32_0                                                    0x4c2e
+#define mmDIG3_HDMI_ACR_32_0                                                    0x4d2e
+#define mmDIG4_HDMI_ACR_32_0                                                    0x4e2e
+#define mmDIG5_HDMI_ACR_32_0                                                    0x4f2e
+#define mmDIG6_HDMI_ACR_32_0                                                    0x542e
+#define mmDIG7_HDMI_ACR_32_0                                                    0x562e
+#define mmDIG8_HDMI_ACR_32_0                                                    0x572e
+#define mmHDMI_ACR_32_1                                                         0x4a2f
+#define mmDIG0_HDMI_ACR_32_1                                                    0x4a2f
+#define mmDIG1_HDMI_ACR_32_1                                                    0x4b2f
+#define mmDIG2_HDMI_ACR_32_1                                                    0x4c2f
+#define mmDIG3_HDMI_ACR_32_1                                                    0x4d2f
+#define mmDIG4_HDMI_ACR_32_1                                                    0x4e2f
+#define mmDIG5_HDMI_ACR_32_1                                                    0x4f2f
+#define mmDIG6_HDMI_ACR_32_1                                                    0x542f
+#define mmDIG7_HDMI_ACR_32_1                                                    0x562f
+#define mmDIG8_HDMI_ACR_32_1                                                    0x572f
+#define mmHDMI_ACR_44_0                                                         0x4a30
+#define mmDIG0_HDMI_ACR_44_0                                                    0x4a30
+#define mmDIG1_HDMI_ACR_44_0                                                    0x4b30
+#define mmDIG2_HDMI_ACR_44_0                                                    0x4c30
+#define mmDIG3_HDMI_ACR_44_0                                                    0x4d30
+#define mmDIG4_HDMI_ACR_44_0                                                    0x4e30
+#define mmDIG5_HDMI_ACR_44_0                                                    0x4f30
+#define mmDIG6_HDMI_ACR_44_0                                                    0x5430
+#define mmDIG7_HDMI_ACR_44_0                                                    0x5630
+#define mmDIG8_HDMI_ACR_44_0                                                    0x5730
+#define mmHDMI_ACR_44_1                                                         0x4a31
+#define mmDIG0_HDMI_ACR_44_1                                                    0x4a31
+#define mmDIG1_HDMI_ACR_44_1                                                    0x4b31
+#define mmDIG2_HDMI_ACR_44_1                                                    0x4c31
+#define mmDIG3_HDMI_ACR_44_1                                                    0x4d31
+#define mmDIG4_HDMI_ACR_44_1                                                    0x4e31
+#define mmDIG5_HDMI_ACR_44_1                                                    0x4f31
+#define mmDIG6_HDMI_ACR_44_1                                                    0x5431
+#define mmDIG7_HDMI_ACR_44_1                                                    0x5631
+#define mmDIG8_HDMI_ACR_44_1                                                    0x5731
+#define mmHDMI_ACR_48_0                                                         0x4a32
+#define mmDIG0_HDMI_ACR_48_0                                                    0x4a32
+#define mmDIG1_HDMI_ACR_48_0                                                    0x4b32
+#define mmDIG2_HDMI_ACR_48_0                                                    0x4c32
+#define mmDIG3_HDMI_ACR_48_0                                                    0x4d32
+#define mmDIG4_HDMI_ACR_48_0                                                    0x4e32
+#define mmDIG5_HDMI_ACR_48_0                                                    0x4f32
+#define mmDIG6_HDMI_ACR_48_0                                                    0x5432
+#define mmDIG7_HDMI_ACR_48_0                                                    0x5632
+#define mmDIG8_HDMI_ACR_48_0                                                    0x5732
+#define mmHDMI_ACR_48_1                                                         0x4a33
+#define mmDIG0_HDMI_ACR_48_1                                                    0x4a33
+#define mmDIG1_HDMI_ACR_48_1                                                    0x4b33
+#define mmDIG2_HDMI_ACR_48_1                                                    0x4c33
+#define mmDIG3_HDMI_ACR_48_1                                                    0x4d33
+#define mmDIG4_HDMI_ACR_48_1                                                    0x4e33
+#define mmDIG5_HDMI_ACR_48_1                                                    0x4f33
+#define mmDIG6_HDMI_ACR_48_1                                                    0x5433
+#define mmDIG7_HDMI_ACR_48_1                                                    0x5633
+#define mmDIG8_HDMI_ACR_48_1                                                    0x5733
+#define mmHDMI_ACR_STATUS_0                                                     0x4a34
+#define mmDIG0_HDMI_ACR_STATUS_0                                                0x4a34
+#define mmDIG1_HDMI_ACR_STATUS_0                                                0x4b34
+#define mmDIG2_HDMI_ACR_STATUS_0                                                0x4c34
+#define mmDIG3_HDMI_ACR_STATUS_0                                                0x4d34
+#define mmDIG4_HDMI_ACR_STATUS_0                                                0x4e34
+#define mmDIG5_HDMI_ACR_STATUS_0                                                0x4f34
+#define mmDIG6_HDMI_ACR_STATUS_0                                                0x5434
+#define mmDIG7_HDMI_ACR_STATUS_0                                                0x5634
+#define mmDIG8_HDMI_ACR_STATUS_0                                                0x5734
+#define mmHDMI_ACR_STATUS_1                                                     0x4a35
+#define mmDIG0_HDMI_ACR_STATUS_1                                                0x4a35
+#define mmDIG1_HDMI_ACR_STATUS_1                                                0x4b35
+#define mmDIG2_HDMI_ACR_STATUS_1                                                0x4c35
+#define mmDIG3_HDMI_ACR_STATUS_1                                                0x4d35
+#define mmDIG4_HDMI_ACR_STATUS_1                                                0x4e35
+#define mmDIG5_HDMI_ACR_STATUS_1                                                0x4f35
+#define mmDIG6_HDMI_ACR_STATUS_1                                                0x5435
+#define mmDIG7_HDMI_ACR_STATUS_1                                                0x5635
+#define mmDIG8_HDMI_ACR_STATUS_1                                                0x5735
+#define mmAFMT_AUDIO_INFO0                                                      0x4a36
+#define mmDIG0_AFMT_AUDIO_INFO0                                                 0x4a36
+#define mmDIG1_AFMT_AUDIO_INFO0                                                 0x4b36
+#define mmDIG2_AFMT_AUDIO_INFO0                                                 0x4c36
+#define mmDIG3_AFMT_AUDIO_INFO0                                                 0x4d36
+#define mmDIG4_AFMT_AUDIO_INFO0                                                 0x4e36
+#define mmDIG5_AFMT_AUDIO_INFO0                                                 0x4f36
+#define mmDIG6_AFMT_AUDIO_INFO0                                                 0x5436
+#define mmDIG7_AFMT_AUDIO_INFO0                                                 0x5636
+#define mmDIG8_AFMT_AUDIO_INFO0                                                 0x5736
+#define mmAFMT_AUDIO_INFO1                                                      0x4a37
+#define mmDIG0_AFMT_AUDIO_INFO1                                                 0x4a37
+#define mmDIG1_AFMT_AUDIO_INFO1                                                 0x4b37
+#define mmDIG2_AFMT_AUDIO_INFO1                                                 0x4c37
+#define mmDIG3_AFMT_AUDIO_INFO1                                                 0x4d37
+#define mmDIG4_AFMT_AUDIO_INFO1                                                 0x4e37
+#define mmDIG5_AFMT_AUDIO_INFO1                                                 0x4f37
+#define mmDIG6_AFMT_AUDIO_INFO1                                                 0x5437
+#define mmDIG7_AFMT_AUDIO_INFO1                                                 0x5637
+#define mmDIG8_AFMT_AUDIO_INFO1                                                 0x5737
+#define mmAFMT_60958_0                                                          0x4a38
+#define mmDIG0_AFMT_60958_0                                                     0x4a38
+#define mmDIG1_AFMT_60958_0                                                     0x4b38
+#define mmDIG2_AFMT_60958_0                                                     0x4c38
+#define mmDIG3_AFMT_60958_0                                                     0x4d38
+#define mmDIG4_AFMT_60958_0                                                     0x4e38
+#define mmDIG5_AFMT_60958_0                                                     0x4f38
+#define mmDIG6_AFMT_60958_0                                                     0x5438
+#define mmDIG7_AFMT_60958_0                                                     0x5638
+#define mmDIG8_AFMT_60958_0                                                     0x5738
+#define mmAFMT_60958_1                                                          0x4a39
+#define mmDIG0_AFMT_60958_1                                                     0x4a39
+#define mmDIG1_AFMT_60958_1                                                     0x4b39
+#define mmDIG2_AFMT_60958_1                                                     0x4c39
+#define mmDIG3_AFMT_60958_1                                                     0x4d39
+#define mmDIG4_AFMT_60958_1                                                     0x4e39
+#define mmDIG5_AFMT_60958_1                                                     0x4f39
+#define mmDIG6_AFMT_60958_1                                                     0x5439
+#define mmDIG7_AFMT_60958_1                                                     0x5639
+#define mmDIG8_AFMT_60958_1                                                     0x5739
+#define mmAFMT_AUDIO_CRC_CONTROL                                                0x4a3a
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL                                           0x4a3a
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL                                           0x4b3a
+#define mmDIG2_AFMT_AUDIO_CRC_CONTROL                                           0x4c3a
+#define mmDIG3_AFMT_AUDIO_CRC_CONTROL                                           0x4d3a
+#define mmDIG4_AFMT_AUDIO_CRC_CONTROL                                           0x4e3a
+#define mmDIG5_AFMT_AUDIO_CRC_CONTROL                                           0x4f3a
+#define mmDIG6_AFMT_AUDIO_CRC_CONTROL                                           0x543a
+#define mmDIG7_AFMT_AUDIO_CRC_CONTROL                                           0x563a
+#define mmDIG8_AFMT_AUDIO_CRC_CONTROL                                           0x573a
+#define mmAFMT_RAMP_CONTROL0                                                    0x4a3b
+#define mmDIG0_AFMT_RAMP_CONTROL0                                               0x4a3b
+#define mmDIG1_AFMT_RAMP_CONTROL0                                               0x4b3b
+#define mmDIG2_AFMT_RAMP_CONTROL0                                               0x4c3b
+#define mmDIG3_AFMT_RAMP_CONTROL0                                               0x4d3b
+#define mmDIG4_AFMT_RAMP_CONTROL0                                               0x4e3b
+#define mmDIG5_AFMT_RAMP_CONTROL0                                               0x4f3b
+#define mmDIG6_AFMT_RAMP_CONTROL0                                               0x543b
+#define mmDIG7_AFMT_RAMP_CONTROL0                                               0x563b
+#define mmDIG8_AFMT_RAMP_CONTROL0                                               0x573b
+#define mmAFMT_RAMP_CONTROL1                                                    0x4a3c
+#define mmDIG0_AFMT_RAMP_CONTROL1                                               0x4a3c
+#define mmDIG1_AFMT_RAMP_CONTROL1                                               0x4b3c
+#define mmDIG2_AFMT_RAMP_CONTROL1                                               0x4c3c
+#define mmDIG3_AFMT_RAMP_CONTROL1                                               0x4d3c
+#define mmDIG4_AFMT_RAMP_CONTROL1                                               0x4e3c
+#define mmDIG5_AFMT_RAMP_CONTROL1                                               0x4f3c
+#define mmDIG6_AFMT_RAMP_CONTROL1                                               0x543c
+#define mmDIG7_AFMT_RAMP_CONTROL1                                               0x563c
+#define mmDIG8_AFMT_RAMP_CONTROL1                                               0x573c
+#define mmAFMT_RAMP_CONTROL2                                                    0x4a3d
+#define mmDIG0_AFMT_RAMP_CONTROL2                                               0x4a3d
+#define mmDIG1_AFMT_RAMP_CONTROL2                                               0x4b3d
+#define mmDIG2_AFMT_RAMP_CONTROL2                                               0x4c3d
+#define mmDIG3_AFMT_RAMP_CONTROL2                                               0x4d3d
+#define mmDIG4_AFMT_RAMP_CONTROL2                                               0x4e3d
+#define mmDIG5_AFMT_RAMP_CONTROL2                                               0x4f3d
+#define mmDIG6_AFMT_RAMP_CONTROL2                                               0x543d
+#define mmDIG7_AFMT_RAMP_CONTROL2                                               0x563d
+#define mmDIG8_AFMT_RAMP_CONTROL2                                               0x573d
+#define mmAFMT_RAMP_CONTROL3                                                    0x4a3e
+#define mmDIG0_AFMT_RAMP_CONTROL3                                               0x4a3e
+#define mmDIG1_AFMT_RAMP_CONTROL3                                               0x4b3e
+#define mmDIG2_AFMT_RAMP_CONTROL3                                               0x4c3e
+#define mmDIG3_AFMT_RAMP_CONTROL3                                               0x4d3e
+#define mmDIG4_AFMT_RAMP_CONTROL3                                               0x4e3e
+#define mmDIG5_AFMT_RAMP_CONTROL3                                               0x4f3e
+#define mmDIG6_AFMT_RAMP_CONTROL3                                               0x543e
+#define mmDIG7_AFMT_RAMP_CONTROL3                                               0x563e
+#define mmDIG8_AFMT_RAMP_CONTROL3                                               0x573e
+#define mmAFMT_60958_2                                                          0x4a3f
+#define mmDIG0_AFMT_60958_2                                                     0x4a3f
+#define mmDIG1_AFMT_60958_2                                                     0x4b3f
+#define mmDIG2_AFMT_60958_2                                                     0x4c3f
+#define mmDIG3_AFMT_60958_2                                                     0x4d3f
+#define mmDIG4_AFMT_60958_2                                                     0x4e3f
+#define mmDIG5_AFMT_60958_2                                                     0x4f3f
+#define mmDIG6_AFMT_60958_2                                                     0x543f
+#define mmDIG7_AFMT_60958_2                                                     0x563f
+#define mmDIG8_AFMT_60958_2                                                     0x573f
+#define mmAFMT_AUDIO_CRC_RESULT                                                 0x4a40
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT                                            0x4a40
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT                                            0x4b40
+#define mmDIG2_AFMT_AUDIO_CRC_RESULT                                            0x4c40
+#define mmDIG3_AFMT_AUDIO_CRC_RESULT                                            0x4d40
+#define mmDIG4_AFMT_AUDIO_CRC_RESULT                                            0x4e40
+#define mmDIG5_AFMT_AUDIO_CRC_RESULT                                            0x4f40
+#define mmDIG6_AFMT_AUDIO_CRC_RESULT                                            0x5440
+#define mmDIG7_AFMT_AUDIO_CRC_RESULT                                            0x5640
+#define mmDIG8_AFMT_AUDIO_CRC_RESULT                                            0x5740
+#define mmAFMT_STATUS                                                           0x4a41
+#define mmDIG0_AFMT_STATUS                                                      0x4a41
+#define mmDIG1_AFMT_STATUS                                                      0x4b41
+#define mmDIG2_AFMT_STATUS                                                      0x4c41
+#define mmDIG3_AFMT_STATUS                                                      0x4d41
+#define mmDIG4_AFMT_STATUS                                                      0x4e41
+#define mmDIG5_AFMT_STATUS                                                      0x4f41
+#define mmDIG6_AFMT_STATUS                                                      0x5441
+#define mmDIG7_AFMT_STATUS                                                      0x5641
+#define mmDIG8_AFMT_STATUS                                                      0x5741
+#define mmAFMT_AUDIO_PACKET_CONTROL                                             0x4a42
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL                                        0x4a42
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL                                        0x4b42
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL                                        0x4c42
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL                                        0x4d42
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL                                        0x4e42
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL                                        0x4f42
+#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL                                        0x5442
+#define mmDIG7_AFMT_AUDIO_PACKET_CONTROL                                        0x5642
+#define mmDIG8_AFMT_AUDIO_PACKET_CONTROL                                        0x5742
+#define mmAFMT_VBI_PACKET_CONTROL                                               0x4a43
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL                                          0x4a43
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL                                          0x4b43
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL                                          0x4c43
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL                                          0x4d43
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL                                          0x4e43
+#define mmDIG5_AFMT_VBI_PACKET_CONTROL                                          0x4f43
+#define mmDIG6_AFMT_VBI_PACKET_CONTROL                                          0x5443
+#define mmDIG7_AFMT_VBI_PACKET_CONTROL                                          0x5643
+#define mmDIG8_AFMT_VBI_PACKET_CONTROL                                          0x5743
+#define mmAFMT_INFOFRAME_CONTROL0                                               0x4a44
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0                                          0x4a44
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0                                          0x4b44
+#define mmDIG2_AFMT_INFOFRAME_CONTROL0                                          0x4c44
+#define mmDIG3_AFMT_INFOFRAME_CONTROL0                                          0x4d44
+#define mmDIG4_AFMT_INFOFRAME_CONTROL0                                          0x4e44
+#define mmDIG5_AFMT_INFOFRAME_CONTROL0                                          0x4f44
+#define mmDIG6_AFMT_INFOFRAME_CONTROL0                                          0x5444
+#define mmDIG7_AFMT_INFOFRAME_CONTROL0                                          0x5644
+#define mmDIG8_AFMT_INFOFRAME_CONTROL0                                          0x5744
+#define mmAFMT_AUDIO_SRC_CONTROL                                                0x4a45
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL                                           0x4a45
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL                                           0x4b45
+#define mmDIG2_AFMT_AUDIO_SRC_CONTROL                                           0x4c45
+#define mmDIG3_AFMT_AUDIO_SRC_CONTROL                                           0x4d45
+#define mmDIG4_AFMT_AUDIO_SRC_CONTROL                                           0x4e45
+#define mmDIG5_AFMT_AUDIO_SRC_CONTROL                                           0x4f45
+#define mmDIG6_AFMT_AUDIO_SRC_CONTROL                                           0x5445
+#define mmDIG7_AFMT_AUDIO_SRC_CONTROL                                           0x5645
+#define mmDIG8_AFMT_AUDIO_SRC_CONTROL                                           0x5745
+#define mmAFMT_AUDIO_DBG_DTO_CNTL                                               0x4a46
+#define mmDIG0_AFMT_AUDIO_DBG_DTO_CNTL                                          0x4a46
+#define mmDIG1_AFMT_AUDIO_DBG_DTO_CNTL                                          0x4b46
+#define mmDIG2_AFMT_AUDIO_DBG_DTO_CNTL                                          0x4c46
+#define mmDIG3_AFMT_AUDIO_DBG_DTO_CNTL                                          0x4d46
+#define mmDIG4_AFMT_AUDIO_DBG_DTO_CNTL                                          0x4e46
+#define mmDIG5_AFMT_AUDIO_DBG_DTO_CNTL                                          0x4f46
+#define mmDIG6_AFMT_AUDIO_DBG_DTO_CNTL                                          0x5446
+#define mmDIG7_AFMT_AUDIO_DBG_DTO_CNTL                                          0x5646
+#define mmDIG8_AFMT_AUDIO_DBG_DTO_CNTL                                          0x5746
+#define mmAFMT_CNTL                                                             0x4a7e
+#define mmDIG0_AFMT_CNTL                                                        0x4a7e
+#define mmDIG1_AFMT_CNTL                                                        0x4b7e
+#define mmDIG2_AFMT_CNTL                                                        0x4c7e
+#define mmDIG3_AFMT_CNTL                                                        0x4d7e
+#define mmDIG4_AFMT_CNTL                                                        0x4e7e
+#define mmDIG5_AFMT_CNTL                                                        0x4f7e
+#define mmDIG6_AFMT_CNTL                                                        0x547e
+#define mmDIG7_AFMT_CNTL                                                        0x567e
+#define mmDIG8_AFMT_CNTL                                                        0x577e
+#define mmDIG_BE_CNTL                                                           0x4a47
+#define mmDIG0_DIG_BE_CNTL                                                      0x4a47
+#define mmDIG1_DIG_BE_CNTL                                                      0x4b47
+#define mmDIG2_DIG_BE_CNTL                                                      0x4c47
+#define mmDIG3_DIG_BE_CNTL                                                      0x4d47
+#define mmDIG4_DIG_BE_CNTL                                                      0x4e47
+#define mmDIG5_DIG_BE_CNTL                                                      0x4f47
+#define mmDIG6_DIG_BE_CNTL                                                      0x5447
+#define mmDIG7_DIG_BE_CNTL                                                      0x5647
+#define mmDIG8_DIG_BE_CNTL                                                      0x5747
+#define mmDIG_BE_EN_CNTL                                                        0x4a48
+#define mmDIG0_DIG_BE_EN_CNTL                                                   0x4a48
+#define mmDIG1_DIG_BE_EN_CNTL                                                   0x4b48
+#define mmDIG2_DIG_BE_EN_CNTL                                                   0x4c48
+#define mmDIG3_DIG_BE_EN_CNTL                                                   0x4d48
+#define mmDIG4_DIG_BE_EN_CNTL                                                   0x4e48
+#define mmDIG5_DIG_BE_EN_CNTL                                                   0x4f48
+#define mmDIG6_DIG_BE_EN_CNTL                                                   0x5448
+#define mmDIG7_DIG_BE_EN_CNTL                                                   0x5648
+#define mmDIG8_DIG_BE_EN_CNTL                                                   0x5748
+#define mmTMDS_CNTL                                                             0x4a6b
+#define mmDIG0_TMDS_CNTL                                                        0x4a6b
+#define mmDIG1_TMDS_CNTL                                                        0x4b6b
+#define mmDIG2_TMDS_CNTL                                                        0x4c6b
+#define mmDIG3_TMDS_CNTL                                                        0x4d6b
+#define mmDIG4_TMDS_CNTL                                                        0x4e6b
+#define mmDIG5_TMDS_CNTL                                                        0x4f6b
+#define mmDIG6_TMDS_CNTL                                                        0x546b
+#define mmDIG7_TMDS_CNTL                                                        0x566b
+#define mmDIG8_TMDS_CNTL                                                        0x576b
+#define mmTMDS_CONTROL_CHAR                                                     0x4a6c
+#define mmDIG0_TMDS_CONTROL_CHAR                                                0x4a6c
+#define mmDIG1_TMDS_CONTROL_CHAR                                                0x4b6c
+#define mmDIG2_TMDS_CONTROL_CHAR                                                0x4c6c
+#define mmDIG3_TMDS_CONTROL_CHAR                                                0x4d6c
+#define mmDIG4_TMDS_CONTROL_CHAR                                                0x4e6c
+#define mmDIG5_TMDS_CONTROL_CHAR                                                0x4f6c
+#define mmDIG6_TMDS_CONTROL_CHAR                                                0x546c
+#define mmDIG7_TMDS_CONTROL_CHAR                                                0x566c
+#define mmDIG8_TMDS_CONTROL_CHAR                                                0x576c
+#define mmTMDS_CONTROL0_FEEDBACK                                                0x4a6d
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK                                           0x4a6d
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK                                           0x4b6d
+#define mmDIG2_TMDS_CONTROL0_FEEDBACK                                           0x4c6d
+#define mmDIG3_TMDS_CONTROL0_FEEDBACK                                           0x4d6d
+#define mmDIG4_TMDS_CONTROL0_FEEDBACK                                           0x4e6d
+#define mmDIG5_TMDS_CONTROL0_FEEDBACK                                           0x4f6d
+#define mmDIG6_TMDS_CONTROL0_FEEDBACK                                           0x546d
+#define mmDIG7_TMDS_CONTROL0_FEEDBACK                                           0x566d
+#define mmDIG8_TMDS_CONTROL0_FEEDBACK                                           0x576d
+#define mmTMDS_STEREOSYNC_CTL_SEL                                               0x4a6e
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL                                          0x4a6e
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL                                          0x4b6e
+#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL                                          0x4c6e
+#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL                                          0x4d6e
+#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL                                          0x4e6e
+#define mmDIG5_TMDS_STEREOSYNC_CTL_SEL                                          0x4f6e
+#define mmDIG6_TMDS_STEREOSYNC_CTL_SEL                                          0x546e
+#define mmDIG7_TMDS_STEREOSYNC_CTL_SEL                                          0x566e
+#define mmDIG8_TMDS_STEREOSYNC_CTL_SEL                                          0x576e
+#define mmTMDS_SYNC_CHAR_PATTERN_0_1                                            0x4a6f
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x4a6f
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x4b6f
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x4c6f
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x4d6f
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x4e6f
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x4f6f
+#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x546f
+#define mmDIG7_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x566f
+#define mmDIG8_TMDS_SYNC_CHAR_PATTERN_0_1                                       0x576f
+#define mmTMDS_SYNC_CHAR_PATTERN_2_3                                            0x4a70
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x4a70
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x4b70
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x4c70
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x4d70
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x4e70
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x4f70
+#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x5470
+#define mmDIG7_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x5670
+#define mmDIG8_TMDS_SYNC_CHAR_PATTERN_2_3                                       0x5770
+#define mmTMDS_DEBUG                                                            0x4a71
+#define mmDIG0_TMDS_DEBUG                                                       0x4a71
+#define mmDIG1_TMDS_DEBUG                                                       0x4b71
+#define mmDIG2_TMDS_DEBUG                                                       0x4c71
+#define mmDIG3_TMDS_DEBUG                                                       0x4d71
+#define mmDIG4_TMDS_DEBUG                                                       0x4e71
+#define mmDIG5_TMDS_DEBUG                                                       0x4f71
+#define mmDIG6_TMDS_DEBUG                                                       0x5471
+#define mmDIG7_TMDS_DEBUG                                                       0x5671
+#define mmDIG8_TMDS_DEBUG                                                       0x5771
+#define mmTMDS_CTL_BITS                                                         0x4a72
+#define mmDIG0_TMDS_CTL_BITS                                                    0x4a72
+#define mmDIG1_TMDS_CTL_BITS                                                    0x4b72
+#define mmDIG2_TMDS_CTL_BITS                                                    0x4c72
+#define mmDIG3_TMDS_CTL_BITS                                                    0x4d72
+#define mmDIG4_TMDS_CTL_BITS                                                    0x4e72
+#define mmDIG5_TMDS_CTL_BITS                                                    0x4f72
+#define mmDIG6_TMDS_CTL_BITS                                                    0x5472
+#define mmDIG7_TMDS_CTL_BITS                                                    0x5672
+#define mmDIG8_TMDS_CTL_BITS                                                    0x5772
+#define mmTMDS_DCBALANCER_CONTROL                                               0x4a73
+#define mmDIG0_TMDS_DCBALANCER_CONTROL                                          0x4a73
+#define mmDIG1_TMDS_DCBALANCER_CONTROL                                          0x4b73
+#define mmDIG2_TMDS_DCBALANCER_CONTROL                                          0x4c73
+#define mmDIG3_TMDS_DCBALANCER_CONTROL                                          0x4d73
+#define mmDIG4_TMDS_DCBALANCER_CONTROL                                          0x4e73
+#define mmDIG5_TMDS_DCBALANCER_CONTROL                                          0x4f73
+#define mmDIG6_TMDS_DCBALANCER_CONTROL                                          0x5473
+#define mmDIG7_TMDS_DCBALANCER_CONTROL                                          0x5673
+#define mmDIG8_TMDS_DCBALANCER_CONTROL                                          0x5773
+#define mmTMDS_CTL0_1_GEN_CNTL                                                  0x4a75
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL                                             0x4a75
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL                                             0x4b75
+#define mmDIG2_TMDS_CTL0_1_GEN_CNTL                                             0x4c75
+#define mmDIG3_TMDS_CTL0_1_GEN_CNTL                                             0x4d75
+#define mmDIG4_TMDS_CTL0_1_GEN_CNTL                                             0x4e75
+#define mmDIG5_TMDS_CTL0_1_GEN_CNTL                                             0x4f75
+#define mmDIG6_TMDS_CTL0_1_GEN_CNTL                                             0x5475
+#define mmDIG7_TMDS_CTL0_1_GEN_CNTL                                             0x5675
+#define mmDIG8_TMDS_CTL0_1_GEN_CNTL                                             0x5775
+#define mmTMDS_CTL2_3_GEN_CNTL                                                  0x4a76
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL                                             0x4a76
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL                                             0x4b76
+#define mmDIG2_TMDS_CTL2_3_GEN_CNTL                                             0x4c76
+#define mmDIG3_TMDS_CTL2_3_GEN_CNTL                                             0x4d76
+#define mmDIG4_TMDS_CTL2_3_GEN_CNTL                                             0x4e76
+#define mmDIG5_TMDS_CTL2_3_GEN_CNTL                                             0x4f76
+#define mmDIG6_TMDS_CTL2_3_GEN_CNTL                                             0x5476
+#define mmDIG7_TMDS_CTL2_3_GEN_CNTL                                             0x5676
+#define mmDIG8_TMDS_CTL2_3_GEN_CNTL                                             0x5776
+#define mmDIG_VERSION                                                           0x4a78
+#define mmDIG0_DIG_VERSION                                                      0x4a78
+#define mmDIG1_DIG_VERSION                                                      0x4b78
+#define mmDIG2_DIG_VERSION                                                      0x4c78
+#define mmDIG3_DIG_VERSION                                                      0x4d78
+#define mmDIG4_DIG_VERSION                                                      0x4e78
+#define mmDIG5_DIG_VERSION                                                      0x4f78
+#define mmDIG6_DIG_VERSION                                                      0x5478
+#define mmDIG7_DIG_VERSION                                                      0x5678
+#define mmDIG8_DIG_VERSION                                                      0x5778
+#define mmDIG_LANE_ENABLE                                                       0x4a79
+#define mmDIG0_DIG_LANE_ENABLE                                                  0x4a79
+#define mmDIG1_DIG_LANE_ENABLE                                                  0x4b79
+#define mmDIG2_DIG_LANE_ENABLE                                                  0x4c79
+#define mmDIG3_DIG_LANE_ENABLE                                                  0x4d79
+#define mmDIG4_DIG_LANE_ENABLE                                                  0x4e79
+#define mmDIG5_DIG_LANE_ENABLE                                                  0x4f79
+#define mmDIG6_DIG_LANE_ENABLE                                                  0x5479
+#define mmDIG7_DIG_LANE_ENABLE                                                  0x5679
+#define mmDIG8_DIG_LANE_ENABLE                                                  0x5779
+#define mmDIG_TEST_DEBUG_INDEX                                                  0x4a7a
+#define mmDIG0_DIG_TEST_DEBUG_INDEX                                             0x4a7a
+#define mmDIG1_DIG_TEST_DEBUG_INDEX                                             0x4b7a
+#define mmDIG2_DIG_TEST_DEBUG_INDEX                                             0x4c7a
+#define mmDIG3_DIG_TEST_DEBUG_INDEX                                             0x4d7a
+#define mmDIG4_DIG_TEST_DEBUG_INDEX                                             0x4e7a
+#define mmDIG5_DIG_TEST_DEBUG_INDEX                                             0x4f7a
+#define mmDIG6_DIG_TEST_DEBUG_INDEX                                             0x547a
+#define mmDIG7_DIG_TEST_DEBUG_INDEX                                             0x567a
+#define mmDIG8_DIG_TEST_DEBUG_INDEX                                             0x577a
+#define mmDIG_TEST_DEBUG_DATA                                                   0x4a7b
+#define mmDIG0_DIG_TEST_DEBUG_DATA                                              0x4a7b
+#define mmDIG1_DIG_TEST_DEBUG_DATA                                              0x4b7b
+#define mmDIG2_DIG_TEST_DEBUG_DATA                                              0x4c7b
+#define mmDIG3_DIG_TEST_DEBUG_DATA                                              0x4d7b
+#define mmDIG4_DIG_TEST_DEBUG_DATA                                              0x4e7b
+#define mmDIG5_DIG_TEST_DEBUG_DATA                                              0x4f7b
+#define mmDIG6_DIG_TEST_DEBUG_DATA                                              0x547b
+#define mmDIG7_DIG_TEST_DEBUG_DATA                                              0x567b
+#define mmDIG8_DIG_TEST_DEBUG_DATA                                              0x577b
+#define mmDIG_FE_TEST_DEBUG_INDEX                                               0x4a7c
+#define mmDIG0_DIG_FE_TEST_DEBUG_INDEX                                          0x4a7c
+#define mmDIG1_DIG_FE_TEST_DEBUG_INDEX                                          0x4b7c
+#define mmDIG2_DIG_FE_TEST_DEBUG_INDEX                                          0x4c7c
+#define mmDIG3_DIG_FE_TEST_DEBUG_INDEX                                          0x4d7c
+#define mmDIG4_DIG_FE_TEST_DEBUG_INDEX                                          0x4e7c
+#define mmDIG5_DIG_FE_TEST_DEBUG_INDEX                                          0x4f7c
+#define mmDIG6_DIG_FE_TEST_DEBUG_INDEX                                          0x547c
+#define mmDIG7_DIG_FE_TEST_DEBUG_INDEX                                          0x567c
+#define mmDIG8_DIG_FE_TEST_DEBUG_INDEX                                          0x577c
+#define mmDIG_FE_TEST_DEBUG_DATA                                                0x4a7d
+#define mmDIG0_DIG_FE_TEST_DEBUG_DATA                                           0x4a7d
+#define mmDIG1_DIG_FE_TEST_DEBUG_DATA                                           0x4b7d
+#define mmDIG2_DIG_FE_TEST_DEBUG_DATA                                           0x4c7d
+#define mmDIG3_DIG_FE_TEST_DEBUG_DATA                                           0x4d7d
+#define mmDIG4_DIG_FE_TEST_DEBUG_DATA                                           0x4e7d
+#define mmDIG5_DIG_FE_TEST_DEBUG_DATA                                           0x4f7d
+#define mmDIG6_DIG_FE_TEST_DEBUG_DATA                                           0x547d
+#define mmDIG7_DIG_FE_TEST_DEBUG_DATA                                           0x567d
+#define mmDIG8_DIG_FE_TEST_DEBUG_DATA                                           0x577d
+#define mmDMCU_CTRL                                                             0x1600
+#define mmDMCU_STATUS                                                           0x1601
+#define mmDMCU_PC_START_ADDR                                                    0x1602
+#define mmDMCU_FW_START_ADDR                                                    0x1603
+#define mmDMCU_FW_END_ADDR                                                      0x1604
+#define mmDMCU_FW_ISR_START_ADDR                                                0x1605
+#define mmDMCU_FW_CS_HI                                                         0x1606
+#define mmDMCU_FW_CS_LO                                                         0x1607
+#define mmDMCU_RAM_ACCESS_CTRL                                                  0x1608
+#define mmDMCU_ERAM_WR_CTRL                                                     0x1609
+#define mmDMCU_ERAM_WR_DATA                                                     0x160a
+#define mmDMCU_ERAM_RD_CTRL                                                     0x160b
+#define mmDMCU_ERAM_RD_DATA                                                     0x160c
+#define mmDMCU_IRAM_WR_CTRL                                                     0x160d
+#define mmDMCU_IRAM_WR_DATA                                                     0x160e
+#define mmDMCU_IRAM_RD_CTRL                                                     0x160f
+#define mmDMCU_IRAM_RD_DATA                                                     0x1610
+#define mmDMCU_EVENT_TRIGGER                                                    0x1611
+#define mmDMCU_UC_INTERNAL_INT_STATUS                                           0x1612
+#define mmDMCU_SS_INTERRUPT_CNTL_STATUS                                         0x1613
+#define mmDMCU_INTERRUPT_STATUS                                                 0x1614
+#define mmDMCU_INTERRUPT_STATUS_1                                               0x1633
+#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK                                        0x1615
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK                                          0x1616
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_1                                        0x1631
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL                                     0x1617
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1                                   0x1632
+#define mmDC_DMCU_SCRATCH                                                       0x1618
+#define mmDMCU_INT_CNT                                                          0x1619
+#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS                                        0x161a
+#define mmDMCU_UC_CLK_GATING_CNTL                                               0x161b
+#define mmMASTER_COMM_DATA_REG1                                                 0x161c
+#define mmMASTER_COMM_DATA_REG2                                                 0x161d
+#define mmMASTER_COMM_DATA_REG3                                                 0x161e
+#define mmMASTER_COMM_CMD_REG                                                   0x161f
+#define mmMASTER_COMM_CNTL_REG                                                  0x1620
+#define mmSLAVE_COMM_DATA_REG1                                                  0x1621
+#define mmSLAVE_COMM_DATA_REG2                                                  0x1622
+#define mmSLAVE_COMM_DATA_REG3                                                  0x1623
+#define mmSLAVE_COMM_CMD_REG                                                    0x1624
+#define mmSLAVE_COMM_CNTL_REG                                                   0x1625
+#define mmDMCU_TEST_DEBUG_INDEX                                                 0x1626
+#define mmDMCU_TEST_DEBUG_DATA                                                  0x1627
+#define mmDMCU_PERFMON_INTERRUPT_STATUS1                                        0x1644
+#define mmDMCU_PERFMON_INTERRUPT_STATUS2                                        0x1645
+#define mmDMCU_PERFMON_INTERRUPT_STATUS3                                        0x1646
+#define mmDMCU_PERFMON_INTERRUPT_STATUS4                                        0x1647
+#define mmDMCU_PERFMON_INTERRUPT_STATUS5                                        0x1642
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1                                 0x1674
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2                                 0x1675
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3                                 0x1676
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4                                 0x1677
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5                                 0x1643
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1                            0x1678
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2                            0x1679
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3                            0x167a
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4                            0x167b
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5                            0x1673
+#define mmDMCU_DPRX_INTERRUPT_STATUS1                                           0x1634
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1                                    0x1635
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1                               0x1636
+#define mmDP_LINK_CNTL                                                          0x4aa0
+#define mmDP0_DP_LINK_CNTL                                                      0x4aa0
+#define mmDP1_DP_LINK_CNTL                                                      0x4ba0
+#define mmDP2_DP_LINK_CNTL                                                      0x4ca0
+#define mmDP3_DP_LINK_CNTL                                                      0x4da0
+#define mmDP4_DP_LINK_CNTL                                                      0x4ea0
+#define mmDP5_DP_LINK_CNTL                                                      0x4fa0
+#define mmDP6_DP_LINK_CNTL                                                      0x54a0
+#define mmDP7_DP_LINK_CNTL                                                      0x56a0
+#define mmDP8_DP_LINK_CNTL                                                      0x57a0
+#define mmDP_PIXEL_FORMAT                                                       0x4aa1
+#define mmDP0_DP_PIXEL_FORMAT                                                   0x4aa1
+#define mmDP1_DP_PIXEL_FORMAT                                                   0x4ba1
+#define mmDP2_DP_PIXEL_FORMAT                                                   0x4ca1
+#define mmDP3_DP_PIXEL_FORMAT                                                   0x4da1
+#define mmDP4_DP_PIXEL_FORMAT                                                   0x4ea1
+#define mmDP5_DP_PIXEL_FORMAT                                                   0x4fa1
+#define mmDP6_DP_PIXEL_FORMAT                                                   0x54a1
+#define mmDP7_DP_PIXEL_FORMAT                                                   0x56a1
+#define mmDP8_DP_PIXEL_FORMAT                                                   0x57a1
+#define mmDP_MSA_COLORIMETRY                                                    0x4aa2
+#define mmDP0_DP_MSA_COLORIMETRY                                                0x4aa2
+#define mmDP1_DP_MSA_COLORIMETRY                                                0x4ba2
+#define mmDP2_DP_MSA_COLORIMETRY                                                0x4ca2
+#define mmDP3_DP_MSA_COLORIMETRY                                                0x4da2
+#define mmDP4_DP_MSA_COLORIMETRY                                                0x4ea2
+#define mmDP5_DP_MSA_COLORIMETRY                                                0x4fa2
+#define mmDP6_DP_MSA_COLORIMETRY                                                0x54a2
+#define mmDP7_DP_MSA_COLORIMETRY                                                0x56a2
+#define mmDP8_DP_MSA_COLORIMETRY                                                0x57a2
+#define mmDP_CONFIG                                                             0x4aa3
+#define mmDP0_DP_CONFIG                                                         0x4aa3
+#define mmDP1_DP_CONFIG                                                         0x4ba3
+#define mmDP2_DP_CONFIG                                                         0x4ca3
+#define mmDP3_DP_CONFIG                                                         0x4da3
+#define mmDP4_DP_CONFIG                                                         0x4ea3
+#define mmDP5_DP_CONFIG                                                         0x4fa3
+#define mmDP6_DP_CONFIG                                                         0x54a3
+#define mmDP7_DP_CONFIG                                                         0x56a3
+#define mmDP8_DP_CONFIG                                                         0x57a3
+#define mmDP_VID_STREAM_CNTL                                                    0x4aa4
+#define mmDP0_DP_VID_STREAM_CNTL                                                0x4aa4
+#define mmDP1_DP_VID_STREAM_CNTL                                                0x4ba4
+#define mmDP2_DP_VID_STREAM_CNTL                                                0x4ca4
+#define mmDP3_DP_VID_STREAM_CNTL                                                0x4da4
+#define mmDP4_DP_VID_STREAM_CNTL                                                0x4ea4
+#define mmDP5_DP_VID_STREAM_CNTL                                                0x4fa4
+#define mmDP6_DP_VID_STREAM_CNTL                                                0x54a4
+#define mmDP7_DP_VID_STREAM_CNTL                                                0x56a4
+#define mmDP8_DP_VID_STREAM_CNTL                                                0x57a4
+#define mmDP_STEER_FIFO                                                         0x4aa5
+#define mmDP0_DP_STEER_FIFO                                                     0x4aa5
+#define mmDP1_DP_STEER_FIFO                                                     0x4ba5
+#define mmDP2_DP_STEER_FIFO                                                     0x4ca5
+#define mmDP3_DP_STEER_FIFO                                                     0x4da5
+#define mmDP4_DP_STEER_FIFO                                                     0x4ea5
+#define mmDP5_DP_STEER_FIFO                                                     0x4fa5
+#define mmDP6_DP_STEER_FIFO                                                     0x54a5
+#define mmDP7_DP_STEER_FIFO                                                     0x56a5
+#define mmDP8_DP_STEER_FIFO                                                     0x57a5
+#define mmDP_MSA_MISC                                                           0x4aa6
+#define mmDP0_DP_MSA_MISC                                                       0x4aa6
+#define mmDP1_DP_MSA_MISC                                                       0x4ba6
+#define mmDP2_DP_MSA_MISC                                                       0x4ca6
+#define mmDP3_DP_MSA_MISC                                                       0x4da6
+#define mmDP4_DP_MSA_MISC                                                       0x4ea6
+#define mmDP5_DP_MSA_MISC                                                       0x4fa6
+#define mmDP6_DP_MSA_MISC                                                       0x54a6
+#define mmDP7_DP_MSA_MISC                                                       0x56a6
+#define mmDP8_DP_MSA_MISC                                                       0x57a6
+#define mmDP_VID_TIMING                                                         0x4aa8
+#define mmDP0_DP_VID_TIMING                                                     0x4aa8
+#define mmDP1_DP_VID_TIMING                                                     0x4ba8
+#define mmDP2_DP_VID_TIMING                                                     0x4ca8
+#define mmDP3_DP_VID_TIMING                                                     0x4da8
+#define mmDP4_DP_VID_TIMING                                                     0x4ea8
+#define mmDP5_DP_VID_TIMING                                                     0x4fa8
+#define mmDP6_DP_VID_TIMING                                                     0x54a8
+#define mmDP7_DP_VID_TIMING                                                     0x56a8
+#define mmDP8_DP_VID_TIMING                                                     0x57a8
+#define mmDP_VID_N                                                              0x4aa9
+#define mmDP0_DP_VID_N                                                          0x4aa9
+#define mmDP1_DP_VID_N                                                          0x4ba9
+#define mmDP2_DP_VID_N                                                          0x4ca9
+#define mmDP3_DP_VID_N                                                          0x4da9
+#define mmDP4_DP_VID_N                                                          0x4ea9
+#define mmDP5_DP_VID_N                                                          0x4fa9
+#define mmDP6_DP_VID_N                                                          0x54a9
+#define mmDP7_DP_VID_N                                                          0x56a9
+#define mmDP8_DP_VID_N                                                          0x57a9
+#define mmDP_VID_M                                                              0x4aaa
+#define mmDP0_DP_VID_M                                                          0x4aaa
+#define mmDP1_DP_VID_M                                                          0x4baa
+#define mmDP2_DP_VID_M                                                          0x4caa
+#define mmDP3_DP_VID_M                                                          0x4daa
+#define mmDP4_DP_VID_M                                                          0x4eaa
+#define mmDP5_DP_VID_M                                                          0x4faa
+#define mmDP6_DP_VID_M                                                          0x54aa
+#define mmDP7_DP_VID_M                                                          0x56aa
+#define mmDP8_DP_VID_M                                                          0x57aa
+#define mmDP_LINK_FRAMING_CNTL                                                  0x4aab
+#define mmDP0_DP_LINK_FRAMING_CNTL                                              0x4aab
+#define mmDP1_DP_LINK_FRAMING_CNTL                                              0x4bab
+#define mmDP2_DP_LINK_FRAMING_CNTL                                              0x4cab
+#define mmDP3_DP_LINK_FRAMING_CNTL                                              0x4dab
+#define mmDP4_DP_LINK_FRAMING_CNTL                                              0x4eab
+#define mmDP5_DP_LINK_FRAMING_CNTL                                              0x4fab
+#define mmDP6_DP_LINK_FRAMING_CNTL                                              0x54ab
+#define mmDP7_DP_LINK_FRAMING_CNTL                                              0x56ab
+#define mmDP8_DP_LINK_FRAMING_CNTL                                              0x57ab
+#define mmDP_HBR2_EYE_PATTERN                                                   0x4aac
+#define mmDP0_DP_HBR2_EYE_PATTERN                                               0x4aac
+#define mmDP1_DP_HBR2_EYE_PATTERN                                               0x4bac
+#define mmDP2_DP_HBR2_EYE_PATTERN                                               0x4cac
+#define mmDP3_DP_HBR2_EYE_PATTERN                                               0x4dac
+#define mmDP4_DP_HBR2_EYE_PATTERN                                               0x4eac
+#define mmDP5_DP_HBR2_EYE_PATTERN                                               0x4fac
+#define mmDP6_DP_HBR2_EYE_PATTERN                                               0x54ac
+#define mmDP7_DP_HBR2_EYE_PATTERN                                               0x56ac
+#define mmDP8_DP_HBR2_EYE_PATTERN                                               0x57ac
+#define mmDP_VID_MSA_VBID                                                       0x4aad
+#define mmDP0_DP_VID_MSA_VBID                                                   0x4aad
+#define mmDP1_DP_VID_MSA_VBID                                                   0x4bad
+#define mmDP2_DP_VID_MSA_VBID                                                   0x4cad
+#define mmDP3_DP_VID_MSA_VBID                                                   0x4dad
+#define mmDP4_DP_VID_MSA_VBID                                                   0x4ead
+#define mmDP5_DP_VID_MSA_VBID                                                   0x4fad
+#define mmDP6_DP_VID_MSA_VBID                                                   0x54ad
+#define mmDP7_DP_VID_MSA_VBID                                                   0x56ad
+#define mmDP8_DP_VID_MSA_VBID                                                   0x57ad
+#define mmDP_VID_INTERRUPT_CNTL                                                 0x4aae
+#define mmDP0_DP_VID_INTERRUPT_CNTL                                             0x4aae
+#define mmDP1_DP_VID_INTERRUPT_CNTL                                             0x4bae
+#define mmDP2_DP_VID_INTERRUPT_CNTL                                             0x4cae
+#define mmDP3_DP_VID_INTERRUPT_CNTL                                             0x4dae
+#define mmDP4_DP_VID_INTERRUPT_CNTL                                             0x4eae
+#define mmDP5_DP_VID_INTERRUPT_CNTL                                             0x4fae
+#define mmDP6_DP_VID_INTERRUPT_CNTL                                             0x54ae
+#define mmDP7_DP_VID_INTERRUPT_CNTL                                             0x56ae
+#define mmDP8_DP_VID_INTERRUPT_CNTL                                             0x57ae
+#define mmDP_DPHY_CNTL                                                          0x4aaf
+#define mmDP0_DP_DPHY_CNTL                                                      0x4aaf
+#define mmDP1_DP_DPHY_CNTL                                                      0x4baf
+#define mmDP2_DP_DPHY_CNTL                                                      0x4caf
+#define mmDP3_DP_DPHY_CNTL                                                      0x4daf
+#define mmDP4_DP_DPHY_CNTL                                                      0x4eaf
+#define mmDP5_DP_DPHY_CNTL                                                      0x4faf
+#define mmDP6_DP_DPHY_CNTL                                                      0x54af
+#define mmDP7_DP_DPHY_CNTL                                                      0x56af
+#define mmDP8_DP_DPHY_CNTL                                                      0x57af
+#define mmDP_DPHY_TRAINING_PATTERN_SEL                                          0x4ab0
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL                                      0x4ab0
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL                                      0x4bb0
+#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL                                      0x4cb0
+#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL                                      0x4db0
+#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL                                      0x4eb0
+#define mmDP5_DP_DPHY_TRAINING_PATTERN_SEL                                      0x4fb0
+#define mmDP6_DP_DPHY_TRAINING_PATTERN_SEL                                      0x54b0
+#define mmDP7_DP_DPHY_TRAINING_PATTERN_SEL                                      0x56b0
+#define mmDP8_DP_DPHY_TRAINING_PATTERN_SEL                                      0x57b0
+#define mmDP_DPHY_SYM0                                                          0x4ab1
+#define mmDP0_DP_DPHY_SYM0                                                      0x4ab1
+#define mmDP1_DP_DPHY_SYM0                                                      0x4bb1
+#define mmDP2_DP_DPHY_SYM0                                                      0x4cb1
+#define mmDP3_DP_DPHY_SYM0                                                      0x4db1
+#define mmDP4_DP_DPHY_SYM0                                                      0x4eb1
+#define mmDP5_DP_DPHY_SYM0                                                      0x4fb1
+#define mmDP6_DP_DPHY_SYM0                                                      0x54b1
+#define mmDP7_DP_DPHY_SYM0                                                      0x56b1
+#define mmDP8_DP_DPHY_SYM0                                                      0x57b1
+#define mmDP_DPHY_SYM1                                                          0x4ab2
+#define mmDP0_DP_DPHY_SYM1                                                      0x4ab2
+#define mmDP1_DP_DPHY_SYM1                                                      0x4bb2
+#define mmDP2_DP_DPHY_SYM1                                                      0x4cb2
+#define mmDP3_DP_DPHY_SYM1                                                      0x4db2
+#define mmDP4_DP_DPHY_SYM1                                                      0x4eb2
+#define mmDP5_DP_DPHY_SYM1                                                      0x4fb2
+#define mmDP6_DP_DPHY_SYM1                                                      0x54b2
+#define mmDP7_DP_DPHY_SYM1                                                      0x56b2
+#define mmDP8_DP_DPHY_SYM1                                                      0x57b2
+#define mmDP_DPHY_SYM2                                                          0x4ab3
+#define mmDP0_DP_DPHY_SYM2                                                      0x4ab3
+#define mmDP1_DP_DPHY_SYM2                                                      0x4bb3
+#define mmDP2_DP_DPHY_SYM2                                                      0x4cb3
+#define mmDP3_DP_DPHY_SYM2                                                      0x4db3
+#define mmDP4_DP_DPHY_SYM2                                                      0x4eb3
+#define mmDP5_DP_DPHY_SYM2                                                      0x4fb3
+#define mmDP6_DP_DPHY_SYM2                                                      0x54b3
+#define mmDP7_DP_DPHY_SYM2                                                      0x56b3
+#define mmDP8_DP_DPHY_SYM2                                                      0x57b3
+#define mmDP_DPHY_8B10B_CNTL                                                    0x4ab4
+#define mmDP0_DP_DPHY_8B10B_CNTL                                                0x4ab4
+#define mmDP1_DP_DPHY_8B10B_CNTL                                                0x4bb4
+#define mmDP2_DP_DPHY_8B10B_CNTL                                                0x4cb4
+#define mmDP3_DP_DPHY_8B10B_CNTL                                                0x4db4
+#define mmDP4_DP_DPHY_8B10B_CNTL                                                0x4eb4
+#define mmDP5_DP_DPHY_8B10B_CNTL                                                0x4fb4
+#define mmDP6_DP_DPHY_8B10B_CNTL                                                0x54b4
+#define mmDP7_DP_DPHY_8B10B_CNTL                                                0x56b4
+#define mmDP8_DP_DPHY_8B10B_CNTL                                                0x57b4
+#define mmDP_DPHY_PRBS_CNTL                                                     0x4ab5
+#define mmDP0_DP_DPHY_PRBS_CNTL                                                 0x4ab5
+#define mmDP1_DP_DPHY_PRBS_CNTL                                                 0x4bb5
+#define mmDP2_DP_DPHY_PRBS_CNTL                                                 0x4cb5
+#define mmDP3_DP_DPHY_PRBS_CNTL                                                 0x4db5
+#define mmDP4_DP_DPHY_PRBS_CNTL                                                 0x4eb5
+#define mmDP5_DP_DPHY_PRBS_CNTL                                                 0x4fb5
+#define mmDP6_DP_DPHY_PRBS_CNTL                                                 0x54b5
+#define mmDP7_DP_DPHY_PRBS_CNTL                                                 0x56b5
+#define mmDP8_DP_DPHY_PRBS_CNTL                                                 0x57b5
+#define mmDP_DPHY_BS_SR_SWAP_CNTL                                               0x4adc
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL                                           0x4adc
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL                                           0x4bdc
+#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL                                           0x4cdc
+#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL                                           0x4ddc
+#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL                                           0x4edc
+#define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL                                           0x4fdc
+#define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL                                           0x54dc
+#define mmDP7_DP_DPHY_BS_SR_SWAP_CNTL                                           0x56dc
+#define mmDP8_DP_DPHY_BS_SR_SWAP_CNTL                                           0x57dc
+#define mmDP_DPHY_CRC_EN                                                        0x4ab7
+#define mmDP0_DP_DPHY_CRC_EN                                                    0x4ab7
+#define mmDP1_DP_DPHY_CRC_EN                                                    0x4bb7
+#define mmDP2_DP_DPHY_CRC_EN                                                    0x4cb7
+#define mmDP3_DP_DPHY_CRC_EN                                                    0x4db7
+#define mmDP4_DP_DPHY_CRC_EN                                                    0x4eb7
+#define mmDP5_DP_DPHY_CRC_EN                                                    0x4fb7
+#define mmDP6_DP_DPHY_CRC_EN                                                    0x54b7
+#define mmDP7_DP_DPHY_CRC_EN                                                    0x56b7
+#define mmDP8_DP_DPHY_CRC_EN                                                    0x57b7
+#define mmDP_DPHY_CRC_CNTL                                                      0x4ab8
+#define mmDP0_DP_DPHY_CRC_CNTL                                                  0x4ab8
+#define mmDP1_DP_DPHY_CRC_CNTL                                                  0x4bb8
+#define mmDP2_DP_DPHY_CRC_CNTL                                                  0x4cb8
+#define mmDP3_DP_DPHY_CRC_CNTL                                                  0x4db8
+#define mmDP4_DP_DPHY_CRC_CNTL                                                  0x4eb8
+#define mmDP5_DP_DPHY_CRC_CNTL                                                  0x4fb8
+#define mmDP6_DP_DPHY_CRC_CNTL                                                  0x54b8
+#define mmDP7_DP_DPHY_CRC_CNTL                                                  0x56b8
+#define mmDP8_DP_DPHY_CRC_CNTL                                                  0x57b8
+#define mmDP_DPHY_CRC_RESULT                                                    0x4ab9
+#define mmDP0_DP_DPHY_CRC_RESULT                                                0x4ab9
+#define mmDP1_DP_DPHY_CRC_RESULT                                                0x4bb9
+#define mmDP2_DP_DPHY_CRC_RESULT                                                0x4cb9
+#define mmDP3_DP_DPHY_CRC_RESULT                                                0x4db9
+#define mmDP4_DP_DPHY_CRC_RESULT                                                0x4eb9
+#define mmDP5_DP_DPHY_CRC_RESULT                                                0x4fb9
+#define mmDP6_DP_DPHY_CRC_RESULT                                                0x54b9
+#define mmDP7_DP_DPHY_CRC_RESULT                                                0x56b9
+#define mmDP8_DP_DPHY_CRC_RESULT                                                0x57b9
+#define mmDP_DPHY_CRC_MST_CNTL                                                  0x4aba
+#define mmDP0_DP_DPHY_CRC_MST_CNTL                                              0x4aba
+#define mmDP1_DP_DPHY_CRC_MST_CNTL                                              0x4bba
+#define mmDP2_DP_DPHY_CRC_MST_CNTL                                              0x4cba
+#define mmDP3_DP_DPHY_CRC_MST_CNTL                                              0x4dba
+#define mmDP4_DP_DPHY_CRC_MST_CNTL                                              0x4eba
+#define mmDP5_DP_DPHY_CRC_MST_CNTL                                              0x4fba
+#define mmDP6_DP_DPHY_CRC_MST_CNTL                                              0x54ba
+#define mmDP7_DP_DPHY_CRC_MST_CNTL                                              0x56ba
+#define mmDP8_DP_DPHY_CRC_MST_CNTL                                              0x57ba
+#define mmDP_DPHY_CRC_MST_STATUS                                                0x4abb
+#define mmDP0_DP_DPHY_CRC_MST_STATUS                                            0x4abb
+#define mmDP1_DP_DPHY_CRC_MST_STATUS                                            0x4bbb
+#define mmDP2_DP_DPHY_CRC_MST_STATUS                                            0x4cbb
+#define mmDP3_DP_DPHY_CRC_MST_STATUS                                            0x4dbb
+#define mmDP4_DP_DPHY_CRC_MST_STATUS                                            0x4ebb
+#define mmDP5_DP_DPHY_CRC_MST_STATUS                                            0x4fbb
+#define mmDP6_DP_DPHY_CRC_MST_STATUS                                            0x54bb
+#define mmDP7_DP_DPHY_CRC_MST_STATUS                                            0x56bb
+#define mmDP8_DP_DPHY_CRC_MST_STATUS                                            0x57bb
+#define mmDP_DPHY_FAST_TRAINING                                                 0x4abc
+#define mmDP0_DP_DPHY_FAST_TRAINING                                             0x4abc
+#define mmDP1_DP_DPHY_FAST_TRAINING                                             0x4bbc
+#define mmDP2_DP_DPHY_FAST_TRAINING                                             0x4cbc
+#define mmDP3_DP_DPHY_FAST_TRAINING                                             0x4dbc
+#define mmDP4_DP_DPHY_FAST_TRAINING                                             0x4ebc
+#define mmDP5_DP_DPHY_FAST_TRAINING                                             0x4fbc
+#define mmDP6_DP_DPHY_FAST_TRAINING                                             0x54bc
+#define mmDP7_DP_DPHY_FAST_TRAINING                                             0x56bc
+#define mmDP8_DP_DPHY_FAST_TRAINING                                             0x57bc
+#define mmDP_DPHY_FAST_TRAINING_STATUS                                          0x4abd
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS                                      0x4abd
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS                                      0x4bbd
+#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS                                      0x4cbd
+#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS                                      0x4dbd
+#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS                                      0x4ebd
+#define mmDP5_DP_DPHY_FAST_TRAINING_STATUS                                      0x4fbd
+#define mmDP6_DP_DPHY_FAST_TRAINING_STATUS                                      0x54bd
+#define mmDP7_DP_DPHY_FAST_TRAINING_STATUS                                      0x56bd
+#define mmDP8_DP_DPHY_FAST_TRAINING_STATUS                                      0x57bd
+#define mmDP_DPHY_HBR2_PATTERN_CONTROL                                          0x4add
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x4add
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x4bdd
+#define mmDP2_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x4cdd
+#define mmDP3_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x4ddd
+#define mmDP4_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x4edd
+#define mmDP5_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x4fdd
+#define mmDP6_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x54dd
+#define mmDP7_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x56dd
+#define mmDP8_DP_DPHY_HBR2_PATTERN_CONTROL                                      0x57dd
+#define mmDP_MSA_V_TIMING_OVERRIDE1                                             0x4abe
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE1                                         0x4abe
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE1                                         0x4bbe
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE1                                         0x4cbe
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE1                                         0x4dbe
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE1                                         0x4ebe
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE1                                         0x4fbe
+#define mmDP6_DP_MSA_V_TIMING_OVERRIDE1                                         0x54be
+#define mmDP7_DP_MSA_V_TIMING_OVERRIDE1                                         0x56be
+#define mmDP8_DP_MSA_V_TIMING_OVERRIDE1                                         0x57be
+#define mmDP_MSA_V_TIMING_OVERRIDE2                                             0x4abf
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE2                                         0x4abf
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE2                                         0x4bbf
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE2                                         0x4cbf
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE2                                         0x4dbf
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE2                                         0x4ebf
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE2                                         0x4fbf
+#define mmDP6_DP_MSA_V_TIMING_OVERRIDE2                                         0x54bf
+#define mmDP7_DP_MSA_V_TIMING_OVERRIDE2                                         0x56bf
+#define mmDP8_DP_MSA_V_TIMING_OVERRIDE2                                         0x57bf
+#define mmDP_SEC_CNTL                                                           0x4ac3
+#define mmDP0_DP_SEC_CNTL                                                       0x4ac3
+#define mmDP1_DP_SEC_CNTL                                                       0x4bc3
+#define mmDP2_DP_SEC_CNTL                                                       0x4cc3
+#define mmDP3_DP_SEC_CNTL                                                       0x4dc3
+#define mmDP4_DP_SEC_CNTL                                                       0x4ec3
+#define mmDP5_DP_SEC_CNTL                                                       0x4fc3
+#define mmDP6_DP_SEC_CNTL                                                       0x54c3
+#define mmDP7_DP_SEC_CNTL                                                       0x56c3
+#define mmDP8_DP_SEC_CNTL                                                       0x57c3
+#define mmDP_SEC_CNTL1                                                          0x4ac4
+#define mmDP0_DP_SEC_CNTL1                                                      0x4ac4
+#define mmDP1_DP_SEC_CNTL1                                                      0x4bc4
+#define mmDP2_DP_SEC_CNTL1                                                      0x4cc4
+#define mmDP3_DP_SEC_CNTL1                                                      0x4dc4
+#define mmDP4_DP_SEC_CNTL1                                                      0x4ec4
+#define mmDP5_DP_SEC_CNTL1                                                      0x4fc4
+#define mmDP6_DP_SEC_CNTL1                                                      0x54c4
+#define mmDP7_DP_SEC_CNTL1                                                      0x56c4
+#define mmDP8_DP_SEC_CNTL1                                                      0x57c4
+#define mmDP_SEC_FRAMING1                                                       0x4ac5
+#define mmDP0_DP_SEC_FRAMING1                                                   0x4ac5
+#define mmDP1_DP_SEC_FRAMING1                                                   0x4bc5
+#define mmDP2_DP_SEC_FRAMING1                                                   0x4cc5
+#define mmDP3_DP_SEC_FRAMING1                                                   0x4dc5
+#define mmDP4_DP_SEC_FRAMING1                                                   0x4ec5
+#define mmDP5_DP_SEC_FRAMING1                                                   0x4fc5
+#define mmDP6_DP_SEC_FRAMING1                                                   0x54c5
+#define mmDP7_DP_SEC_FRAMING1                                                   0x56c5
+#define mmDP8_DP_SEC_FRAMING1                                                   0x57c5
+#define mmDP_SEC_FRAMING2                                                       0x4ac6
+#define mmDP0_DP_SEC_FRAMING2                                                   0x4ac6
+#define mmDP1_DP_SEC_FRAMING2                                                   0x4bc6
+#define mmDP2_DP_SEC_FRAMING2                                                   0x4cc6
+#define mmDP3_DP_SEC_FRAMING2                                                   0x4dc6
+#define mmDP4_DP_SEC_FRAMING2                                                   0x4ec6
+#define mmDP5_DP_SEC_FRAMING2                                                   0x4fc6
+#define mmDP6_DP_SEC_FRAMING2                                                   0x54c6
+#define mmDP7_DP_SEC_FRAMING2                                                   0x56c6
+#define mmDP8_DP_SEC_FRAMING2                                                   0x57c6
+#define mmDP_SEC_FRAMING3                                                       0x4ac7
+#define mmDP0_DP_SEC_FRAMING3                                                   0x4ac7
+#define mmDP1_DP_SEC_FRAMING3                                                   0x4bc7
+#define mmDP2_DP_SEC_FRAMING3                                                   0x4cc7
+#define mmDP3_DP_SEC_FRAMING3                                                   0x4dc7
+#define mmDP4_DP_SEC_FRAMING3                                                   0x4ec7
+#define mmDP5_DP_SEC_FRAMING3                                                   0x4fc7
+#define mmDP6_DP_SEC_FRAMING3                                                   0x54c7
+#define mmDP7_DP_SEC_FRAMING3                                                   0x56c7
+#define mmDP8_DP_SEC_FRAMING3                                                   0x57c7
+#define mmDP_SEC_FRAMING4                                                       0x4ac8
+#define mmDP0_DP_SEC_FRAMING4                                                   0x4ac8
+#define mmDP1_DP_SEC_FRAMING4                                                   0x4bc8
+#define mmDP2_DP_SEC_FRAMING4                                                   0x4cc8
+#define mmDP3_DP_SEC_FRAMING4                                                   0x4dc8
+#define mmDP4_DP_SEC_FRAMING4                                                   0x4ec8
+#define mmDP5_DP_SEC_FRAMING4                                                   0x4fc8
+#define mmDP6_DP_SEC_FRAMING4                                                   0x54c8
+#define mmDP7_DP_SEC_FRAMING4                                                   0x56c8
+#define mmDP8_DP_SEC_FRAMING4                                                   0x57c8
+#define mmDP_SEC_AUD_N                                                          0x4ac9
+#define mmDP0_DP_SEC_AUD_N                                                      0x4ac9
+#define mmDP1_DP_SEC_AUD_N                                                      0x4bc9
+#define mmDP2_DP_SEC_AUD_N                                                      0x4cc9
+#define mmDP3_DP_SEC_AUD_N                                                      0x4dc9
+#define mmDP4_DP_SEC_AUD_N                                                      0x4ec9
+#define mmDP5_DP_SEC_AUD_N                                                      0x4fc9
+#define mmDP6_DP_SEC_AUD_N                                                      0x54c9
+#define mmDP7_DP_SEC_AUD_N                                                      0x56c9
+#define mmDP8_DP_SEC_AUD_N                                                      0x57c9
+#define mmDP_SEC_AUD_N_READBACK                                                 0x4aca
+#define mmDP0_DP_SEC_AUD_N_READBACK                                             0x4aca
+#define mmDP1_DP_SEC_AUD_N_READBACK                                             0x4bca
+#define mmDP2_DP_SEC_AUD_N_READBACK                                             0x4cca
+#define mmDP3_DP_SEC_AUD_N_READBACK                                             0x4dca
+#define mmDP4_DP_SEC_AUD_N_READBACK                                             0x4eca
+#define mmDP5_DP_SEC_AUD_N_READBACK                                             0x4fca
+#define mmDP6_DP_SEC_AUD_N_READBACK                                             0x54ca
+#define mmDP7_DP_SEC_AUD_N_READBACK                                             0x56ca
+#define mmDP8_DP_SEC_AUD_N_READBACK                                             0x57ca
+#define mmDP_SEC_AUD_M                                                          0x4acb
+#define mmDP0_DP_SEC_AUD_M                                                      0x4acb
+#define mmDP1_DP_SEC_AUD_M                                                      0x4bcb
+#define mmDP2_DP_SEC_AUD_M                                                      0x4ccb
+#define mmDP3_DP_SEC_AUD_M                                                      0x4dcb
+#define mmDP4_DP_SEC_AUD_M                                                      0x4ecb
+#define mmDP5_DP_SEC_AUD_M                                                      0x4fcb
+#define mmDP6_DP_SEC_AUD_M                                                      0x54cb
+#define mmDP7_DP_SEC_AUD_M                                                      0x56cb
+#define mmDP8_DP_SEC_AUD_M                                                      0x57cb
+#define mmDP_SEC_AUD_M_READBACK                                                 0x4acc
+#define mmDP0_DP_SEC_AUD_M_READBACK                                             0x4acc
+#define mmDP1_DP_SEC_AUD_M_READBACK                                             0x4bcc
+#define mmDP2_DP_SEC_AUD_M_READBACK                                             0x4ccc
+#define mmDP3_DP_SEC_AUD_M_READBACK                                             0x4dcc
+#define mmDP4_DP_SEC_AUD_M_READBACK                                             0x4ecc
+#define mmDP5_DP_SEC_AUD_M_READBACK                                             0x4fcc
+#define mmDP6_DP_SEC_AUD_M_READBACK                                             0x54cc
+#define mmDP7_DP_SEC_AUD_M_READBACK                                             0x56cc
+#define mmDP8_DP_SEC_AUD_M_READBACK                                             0x57cc
+#define mmDP_SEC_TIMESTAMP                                                      0x4acd
+#define mmDP0_DP_SEC_TIMESTAMP                                                  0x4acd
+#define mmDP1_DP_SEC_TIMESTAMP                                                  0x4bcd
+#define mmDP2_DP_SEC_TIMESTAMP                                                  0x4ccd
+#define mmDP3_DP_SEC_TIMESTAMP                                                  0x4dcd
+#define mmDP4_DP_SEC_TIMESTAMP                                                  0x4ecd
+#define mmDP5_DP_SEC_TIMESTAMP                                                  0x4fcd
+#define mmDP6_DP_SEC_TIMESTAMP                                                  0x54cd
+#define mmDP7_DP_SEC_TIMESTAMP                                                  0x56cd
+#define mmDP8_DP_SEC_TIMESTAMP                                                  0x57cd
+#define mmDP_SEC_PACKET_CNTL                                                    0x4ace
+#define mmDP0_DP_SEC_PACKET_CNTL                                                0x4ace
+#define mmDP1_DP_SEC_PACKET_CNTL                                                0x4bce
+#define mmDP2_DP_SEC_PACKET_CNTL                                                0x4cce
+#define mmDP3_DP_SEC_PACKET_CNTL                                                0x4dce
+#define mmDP4_DP_SEC_PACKET_CNTL                                                0x4ece
+#define mmDP5_DP_SEC_PACKET_CNTL                                                0x4fce
+#define mmDP6_DP_SEC_PACKET_CNTL                                                0x54ce
+#define mmDP7_DP_SEC_PACKET_CNTL                                                0x56ce
+#define mmDP8_DP_SEC_PACKET_CNTL                                                0x57ce
+#define mmDP_MSE_RATE_CNTL                                                      0x4acf
+#define mmDP0_DP_MSE_RATE_CNTL                                                  0x4acf
+#define mmDP1_DP_MSE_RATE_CNTL                                                  0x4bcf
+#define mmDP2_DP_MSE_RATE_CNTL                                                  0x4ccf
+#define mmDP3_DP_MSE_RATE_CNTL                                                  0x4dcf
+#define mmDP4_DP_MSE_RATE_CNTL                                                  0x4ecf
+#define mmDP5_DP_MSE_RATE_CNTL                                                  0x4fcf
+#define mmDP6_DP_MSE_RATE_CNTL                                                  0x54cf
+#define mmDP7_DP_MSE_RATE_CNTL                                                  0x56cf
+#define mmDP8_DP_MSE_RATE_CNTL                                                  0x57cf
+#define mmDP_MSE_RATE_UPDATE                                                    0x4ad1
+#define mmDP0_DP_MSE_RATE_UPDATE                                                0x4ad1
+#define mmDP1_DP_MSE_RATE_UPDATE                                                0x4bd1
+#define mmDP2_DP_MSE_RATE_UPDATE                                                0x4cd1
+#define mmDP3_DP_MSE_RATE_UPDATE                                                0x4dd1
+#define mmDP4_DP_MSE_RATE_UPDATE                                                0x4ed1
+#define mmDP5_DP_MSE_RATE_UPDATE                                                0x4fd1
+#define mmDP6_DP_MSE_RATE_UPDATE                                                0x54d1
+#define mmDP7_DP_MSE_RATE_UPDATE                                                0x56d1
+#define mmDP8_DP_MSE_RATE_UPDATE                                                0x57d1
+#define mmDP_MSE_SAT0                                                           0x4ad2
+#define mmDP0_DP_MSE_SAT0                                                       0x4ad2
+#define mmDP1_DP_MSE_SAT0                                                       0x4bd2
+#define mmDP2_DP_MSE_SAT0                                                       0x4cd2
+#define mmDP3_DP_MSE_SAT0                                                       0x4dd2
+#define mmDP4_DP_MSE_SAT0                                                       0x4ed2
+#define mmDP5_DP_MSE_SAT0                                                       0x4fd2
+#define mmDP6_DP_MSE_SAT0                                                       0x54d2
+#define mmDP7_DP_MSE_SAT0                                                       0x56d2
+#define mmDP8_DP_MSE_SAT0                                                       0x57d2
+#define mmDP_MSE_SAT1                                                           0x4ad3
+#define mmDP0_DP_MSE_SAT1                                                       0x4ad3
+#define mmDP1_DP_MSE_SAT1                                                       0x4bd3
+#define mmDP2_DP_MSE_SAT1                                                       0x4cd3
+#define mmDP3_DP_MSE_SAT1                                                       0x4dd3
+#define mmDP4_DP_MSE_SAT1                                                       0x4ed3
+#define mmDP5_DP_MSE_SAT1                                                       0x4fd3
+#define mmDP6_DP_MSE_SAT1                                                       0x54d3
+#define mmDP7_DP_MSE_SAT1                                                       0x56d3
+#define mmDP8_DP_MSE_SAT1                                                       0x57d3
+#define mmDP_MSE_SAT2                                                           0x4ad4
+#define mmDP0_DP_MSE_SAT2                                                       0x4ad4
+#define mmDP1_DP_MSE_SAT2                                                       0x4bd4
+#define mmDP2_DP_MSE_SAT2                                                       0x4cd4
+#define mmDP3_DP_MSE_SAT2                                                       0x4dd4
+#define mmDP4_DP_MSE_SAT2                                                       0x4ed4
+#define mmDP5_DP_MSE_SAT2                                                       0x4fd4
+#define mmDP6_DP_MSE_SAT2                                                       0x54d4
+#define mmDP7_DP_MSE_SAT2                                                       0x56d4
+#define mmDP8_DP_MSE_SAT2                                                       0x57d4
+#define mmDP_MSE_SAT_UPDATE                                                     0x4ad5
+#define mmDP0_DP_MSE_SAT_UPDATE                                                 0x4ad5
+#define mmDP1_DP_MSE_SAT_UPDATE                                                 0x4bd5
+#define mmDP2_DP_MSE_SAT_UPDATE                                                 0x4cd5
+#define mmDP3_DP_MSE_SAT_UPDATE                                                 0x4dd5
+#define mmDP4_DP_MSE_SAT_UPDATE                                                 0x4ed5
+#define mmDP5_DP_MSE_SAT_UPDATE                                                 0x4fd5
+#define mmDP6_DP_MSE_SAT_UPDATE                                                 0x54d5
+#define mmDP7_DP_MSE_SAT_UPDATE                                                 0x56d5
+#define mmDP8_DP_MSE_SAT_UPDATE                                                 0x57d5
+#define mmDP_MSE_LINK_TIMING                                                    0x4ad6
+#define mmDP0_DP_MSE_LINK_TIMING                                                0x4ad6
+#define mmDP1_DP_MSE_LINK_TIMING                                                0x4bd6
+#define mmDP2_DP_MSE_LINK_TIMING                                                0x4cd6
+#define mmDP3_DP_MSE_LINK_TIMING                                                0x4dd6
+#define mmDP4_DP_MSE_LINK_TIMING                                                0x4ed6
+#define mmDP5_DP_MSE_LINK_TIMING                                                0x4fd6
+#define mmDP6_DP_MSE_LINK_TIMING                                                0x54d6
+#define mmDP7_DP_MSE_LINK_TIMING                                                0x56d6
+#define mmDP8_DP_MSE_LINK_TIMING                                                0x57d6
+#define mmDP_MSE_MISC_CNTL                                                      0x4ad7
+#define mmDP0_DP_MSE_MISC_CNTL                                                  0x4ad7
+#define mmDP1_DP_MSE_MISC_CNTL                                                  0x4bd7
+#define mmDP2_DP_MSE_MISC_CNTL                                                  0x4cd7
+#define mmDP3_DP_MSE_MISC_CNTL                                                  0x4dd7
+#define mmDP4_DP_MSE_MISC_CNTL                                                  0x4ed7
+#define mmDP5_DP_MSE_MISC_CNTL                                                  0x4fd7
+#define mmDP6_DP_MSE_MISC_CNTL                                                  0x54d7
+#define mmDP7_DP_MSE_MISC_CNTL                                                  0x56d7
+#define mmDP8_DP_MSE_MISC_CNTL                                                  0x57d7
+#define mmDP_MSE_SAT0_STATUS                                                    0x4adf
+#define mmDP0_DP_MSE_SAT0_STATUS                                                0x4adf
+#define mmDP1_DP_MSE_SAT0_STATUS                                                0x4bdf
+#define mmDP2_DP_MSE_SAT0_STATUS                                                0x4cdf
+#define mmDP3_DP_MSE_SAT0_STATUS                                                0x4ddf
+#define mmDP4_DP_MSE_SAT0_STATUS                                                0x4edf
+#define mmDP5_DP_MSE_SAT0_STATUS                                                0x4fdf
+#define mmDP6_DP_MSE_SAT0_STATUS                                                0x54df
+#define mmDP7_DP_MSE_SAT0_STATUS                                                0x56df
+#define mmDP8_DP_MSE_SAT0_STATUS                                                0x57df
+#define mmDP_MSE_SAT1_STATUS                                                    0x4ae0
+#define mmDP0_DP_MSE_SAT1_STATUS                                                0x4ae0
+#define mmDP1_DP_MSE_SAT1_STATUS                                                0x4be0
+#define mmDP2_DP_MSE_SAT1_STATUS                                                0x4ce0
+#define mmDP3_DP_MSE_SAT1_STATUS                                                0x4de0
+#define mmDP4_DP_MSE_SAT1_STATUS                                                0x4ee0
+#define mmDP5_DP_MSE_SAT1_STATUS                                                0x4fe0
+#define mmDP6_DP_MSE_SAT1_STATUS                                                0x54e0
+#define mmDP7_DP_MSE_SAT1_STATUS                                                0x56e0
+#define mmDP8_DP_MSE_SAT1_STATUS                                                0x57e0
+#define mmDP_MSE_SAT2_STATUS                                                    0x4ae1
+#define mmDP0_DP_MSE_SAT2_STATUS                                                0x4ae1
+#define mmDP1_DP_MSE_SAT2_STATUS                                                0x4be1
+#define mmDP2_DP_MSE_SAT2_STATUS                                                0x4ce1
+#define mmDP3_DP_MSE_SAT2_STATUS                                                0x4de1
+#define mmDP4_DP_MSE_SAT2_STATUS                                                0x4ee1
+#define mmDP5_DP_MSE_SAT2_STATUS                                                0x4fe1
+#define mmDP6_DP_MSE_SAT2_STATUS                                                0x54e1
+#define mmDP7_DP_MSE_SAT2_STATUS                                                0x56e1
+#define mmDP8_DP_MSE_SAT2_STATUS                                                0x57e1
+#define mmDP_TEST_DEBUG_INDEX                                                   0x4ad8
+#define mmDP0_DP_TEST_DEBUG_INDEX                                               0x4ad8
+#define mmDP1_DP_TEST_DEBUG_INDEX                                               0x4bd8
+#define mmDP2_DP_TEST_DEBUG_INDEX                                               0x4cd8
+#define mmDP3_DP_TEST_DEBUG_INDEX                                               0x4dd8
+#define mmDP4_DP_TEST_DEBUG_INDEX                                               0x4ed8
+#define mmDP5_DP_TEST_DEBUG_INDEX                                               0x4fd8
+#define mmDP6_DP_TEST_DEBUG_INDEX                                               0x54d8
+#define mmDP7_DP_TEST_DEBUG_INDEX                                               0x56d8
+#define mmDP8_DP_TEST_DEBUG_INDEX                                               0x57d8
+#define mmDP_TEST_DEBUG_DATA                                                    0x4ad9
+#define mmDP0_DP_TEST_DEBUG_DATA                                                0x4ad9
+#define mmDP1_DP_TEST_DEBUG_DATA                                                0x4bd9
+#define mmDP2_DP_TEST_DEBUG_DATA                                                0x4cd9
+#define mmDP3_DP_TEST_DEBUG_DATA                                                0x4dd9
+#define mmDP4_DP_TEST_DEBUG_DATA                                                0x4ed9
+#define mmDP5_DP_TEST_DEBUG_DATA                                                0x4fd9
+#define mmDP6_DP_TEST_DEBUG_DATA                                                0x54d9
+#define mmDP7_DP_TEST_DEBUG_DATA                                                0x56d9
+#define mmDP8_DP_TEST_DEBUG_DATA                                                0x57d9
+#define mmDP_FE_TEST_DEBUG_INDEX                                                0x4ada
+#define mmDP0_DP_FE_TEST_DEBUG_INDEX                                            0x4ada
+#define mmDP1_DP_FE_TEST_DEBUG_INDEX                                            0x4bda
+#define mmDP2_DP_FE_TEST_DEBUG_INDEX                                            0x4cda
+#define mmDP3_DP_FE_TEST_DEBUG_INDEX                                            0x4dda
+#define mmDP4_DP_FE_TEST_DEBUG_INDEX                                            0x4eda
+#define mmDP5_DP_FE_TEST_DEBUG_INDEX                                            0x4fda
+#define mmDP6_DP_FE_TEST_DEBUG_INDEX                                            0x54da
+#define mmDP7_DP_FE_TEST_DEBUG_INDEX                                            0x56da
+#define mmDP8_DP_FE_TEST_DEBUG_INDEX                                            0x57da
+#define mmDP_FE_TEST_DEBUG_DATA                                                 0x4adb
+#define mmDP0_DP_FE_TEST_DEBUG_DATA                                             0x4adb
+#define mmDP1_DP_FE_TEST_DEBUG_DATA                                             0x4bdb
+#define mmDP2_DP_FE_TEST_DEBUG_DATA                                             0x4cdb
+#define mmDP3_DP_FE_TEST_DEBUG_DATA                                             0x4ddb
+#define mmDP4_DP_FE_TEST_DEBUG_DATA                                             0x4edb
+#define mmDP5_DP_FE_TEST_DEBUG_DATA                                             0x4fdb
+#define mmDP6_DP_FE_TEST_DEBUG_DATA                                             0x54db
+#define mmDP7_DP_FE_TEST_DEBUG_DATA                                             0x56db
+#define mmDP8_DP_FE_TEST_DEBUG_DATA                                             0x57db
+#define mmAUX_CONTROL                                                           0x5c00
+#define mmDP_AUX0_AUX_CONTROL                                                   0x5c00
+#define mmDP_AUX1_AUX_CONTROL                                                   0x5c1c
+#define mmDP_AUX2_AUX_CONTROL                                                   0x5c38
+#define mmDP_AUX3_AUX_CONTROL                                                   0x5c54
+#define mmDP_AUX4_AUX_CONTROL                                                   0x5c70
+#define mmDP_AUX5_AUX_CONTROL                                                   0x5c8c
+#define mmAUX_SW_CONTROL                                                        0x5c01
+#define mmDP_AUX0_AUX_SW_CONTROL                                                0x5c01
+#define mmDP_AUX1_AUX_SW_CONTROL                                                0x5c1d
+#define mmDP_AUX2_AUX_SW_CONTROL                                                0x5c39
+#define mmDP_AUX3_AUX_SW_CONTROL                                                0x5c55
+#define mmDP_AUX4_AUX_SW_CONTROL                                                0x5c71
+#define mmDP_AUX5_AUX_SW_CONTROL                                                0x5c8d
+#define mmAUX_ARB_CONTROL                                                       0x5c02
+#define mmDP_AUX0_AUX_ARB_CONTROL                                               0x5c02
+#define mmDP_AUX1_AUX_ARB_CONTROL                                               0x5c1e
+#define mmDP_AUX2_AUX_ARB_CONTROL                                               0x5c3a
+#define mmDP_AUX3_AUX_ARB_CONTROL                                               0x5c56
+#define mmDP_AUX4_AUX_ARB_CONTROL                                               0x5c72
+#define mmDP_AUX5_AUX_ARB_CONTROL                                               0x5c8e
+#define mmAUX_INTERRUPT_CONTROL                                                 0x5c03
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL                                         0x5c03
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL                                         0x5c1f
+#define mmDP_AUX2_AUX_INTERRUPT_CONTROL                                         0x5c3b
+#define mmDP_AUX3_AUX_INTERRUPT_CONTROL                                         0x5c57
+#define mmDP_AUX4_AUX_INTERRUPT_CONTROL                                         0x5c73
+#define mmDP_AUX5_AUX_INTERRUPT_CONTROL                                         0x5c8f
+#define mmAUX_SW_STATUS                                                         0x5c04
+#define mmDP_AUX0_AUX_SW_STATUS                                                 0x5c04
+#define mmDP_AUX1_AUX_SW_STATUS                                                 0x5c20
+#define mmDP_AUX2_AUX_SW_STATUS                                                 0x5c3c
+#define mmDP_AUX3_AUX_SW_STATUS                                                 0x5c58
+#define mmDP_AUX4_AUX_SW_STATUS                                                 0x5c74
+#define mmDP_AUX5_AUX_SW_STATUS                                                 0x5c90
+#define mmAUX_LS_STATUS                                                         0x5c05
+#define mmDP_AUX0_AUX_LS_STATUS                                                 0x5c05
+#define mmDP_AUX1_AUX_LS_STATUS                                                 0x5c21
+#define mmDP_AUX2_AUX_LS_STATUS                                                 0x5c3d
+#define mmDP_AUX3_AUX_LS_STATUS                                                 0x5c59
+#define mmDP_AUX4_AUX_LS_STATUS                                                 0x5c75
+#define mmDP_AUX5_AUX_LS_STATUS                                                 0x5c91
+#define mmAUX_SW_DATA                                                           0x5c06
+#define mmDP_AUX0_AUX_SW_DATA                                                   0x5c06
+#define mmDP_AUX1_AUX_SW_DATA                                                   0x5c22
+#define mmDP_AUX2_AUX_SW_DATA                                                   0x5c3e
+#define mmDP_AUX3_AUX_SW_DATA                                                   0x5c5a
+#define mmDP_AUX4_AUX_SW_DATA                                                   0x5c76
+#define mmDP_AUX5_AUX_SW_DATA                                                   0x5c92
+#define mmAUX_LS_DATA                                                           0x5c07
+#define mmDP_AUX0_AUX_LS_DATA                                                   0x5c07
+#define mmDP_AUX1_AUX_LS_DATA                                                   0x5c23
+#define mmDP_AUX2_AUX_LS_DATA                                                   0x5c3f
+#define mmDP_AUX3_AUX_LS_DATA                                                   0x5c5b
+#define mmDP_AUX4_AUX_LS_DATA                                                   0x5c77
+#define mmDP_AUX5_AUX_LS_DATA                                                   0x5c93
+#define mmAUX_DPHY_TX_REF_CONTROL                                               0x5c08
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL                                       0x5c08
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL                                       0x5c24
+#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL                                       0x5c40
+#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL                                       0x5c5c
+#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL                                       0x5c78
+#define mmDP_AUX5_AUX_DPHY_TX_REF_CONTROL                                       0x5c94
+#define mmAUX_DPHY_TX_CONTROL                                                   0x5c09
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL                                           0x5c09
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL                                           0x5c25
+#define mmDP_AUX2_AUX_DPHY_TX_CONTROL                                           0x5c41
+#define mmDP_AUX3_AUX_DPHY_TX_CONTROL                                           0x5c5d
+#define mmDP_AUX4_AUX_DPHY_TX_CONTROL                                           0x5c79
+#define mmDP_AUX5_AUX_DPHY_TX_CONTROL                                           0x5c95
+#define mmAUX_DPHY_RX_CONTROL0                                                  0x5c0a
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0                                          0x5c0a
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0                                          0x5c26
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0                                          0x5c42
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0                                          0x5c5e
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0                                          0x5c7a
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL0                                          0x5c96
+#define mmAUX_DPHY_RX_CONTROL1                                                  0x5c0b
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1                                          0x5c0b
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1                                          0x5c27
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1                                          0x5c43
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1                                          0x5c5f
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1                                          0x5c7b
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL1                                          0x5c97
+#define mmAUX_DPHY_TX_STATUS                                                    0x5c0c
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS                                            0x5c0c
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS                                            0x5c28
+#define mmDP_AUX2_AUX_DPHY_TX_STATUS                                            0x5c44
+#define mmDP_AUX3_AUX_DPHY_TX_STATUS                                            0x5c60
+#define mmDP_AUX4_AUX_DPHY_TX_STATUS                                            0x5c7c
+#define mmDP_AUX5_AUX_DPHY_TX_STATUS                                            0x5c98
+#define mmAUX_DPHY_RX_STATUS                                                    0x5c0d
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS                                            0x5c0d
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS                                            0x5c29
+#define mmDP_AUX2_AUX_DPHY_RX_STATUS                                            0x5c45
+#define mmDP_AUX3_AUX_DPHY_RX_STATUS                                            0x5c61
+#define mmDP_AUX4_AUX_DPHY_RX_STATUS                                            0x5c7d
+#define mmDP_AUX5_AUX_DPHY_RX_STATUS                                            0x5c99
+#define mmAUX_GTC_SYNC_ERROR_CONTROL                                            0x5c0f
+#define mmDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL                                    0x5c0f
+#define mmDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL                                    0x5c2b
+#define mmDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL                                    0x5c47
+#define mmDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL                                    0x5c63
+#define mmDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL                                    0x5c7f
+#define mmDP_AUX5_AUX_GTC_SYNC_ERROR_CONTROL                                    0x5c9b
+#define mmAUX_GTC_SYNC_CONTROLLER_STATUS                                        0x5c10
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS                                0x5c10
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS                                0x5c2c
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS                                0x5c48
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS                                0x5c64
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS                                0x5c80
+#define mmDP_AUX5_AUX_GTC_SYNC_CONTROLLER_STATUS                                0x5c9c
+#define mmAUX_GTC_SYNC_STATUS                                                   0x5c11
+#define mmDP_AUX0_AUX_GTC_SYNC_STATUS                                           0x5c11
+#define mmDP_AUX1_AUX_GTC_SYNC_STATUS                                           0x5c2d
+#define mmDP_AUX2_AUX_GTC_SYNC_STATUS                                           0x5c49
+#define mmDP_AUX3_AUX_GTC_SYNC_STATUS                                           0x5c65
+#define mmDP_AUX4_AUX_GTC_SYNC_STATUS                                           0x5c81
+#define mmDP_AUX5_AUX_GTC_SYNC_STATUS                                           0x5c9d
+#define mmAUX_TEST_DEBUG_INDEX                                                  0x5c14
+#define mmDP_AUX0_AUX_TEST_DEBUG_INDEX                                          0x5c14
+#define mmDP_AUX1_AUX_TEST_DEBUG_INDEX                                          0x5c30
+#define mmDP_AUX2_AUX_TEST_DEBUG_INDEX                                          0x5c4c
+#define mmDP_AUX3_AUX_TEST_DEBUG_INDEX                                          0x5c68
+#define mmDP_AUX4_AUX_TEST_DEBUG_INDEX                                          0x5c84
+#define mmDP_AUX5_AUX_TEST_DEBUG_INDEX                                          0x5ca0
+#define mmAUX_TEST_DEBUG_DATA                                                   0x5c15
+#define mmDP_AUX0_AUX_TEST_DEBUG_DATA                                           0x5c15
+#define mmDP_AUX1_AUX_TEST_DEBUG_DATA                                           0x5c31
+#define mmDP_AUX2_AUX_TEST_DEBUG_DATA                                           0x5c4d
+#define mmDP_AUX3_AUX_TEST_DEBUG_DATA                                           0x5c69
+#define mmDP_AUX4_AUX_TEST_DEBUG_DATA                                           0x5c85
+#define mmDP_AUX5_AUX_TEST_DEBUG_DATA                                           0x5ca1
+#define ixDP_AUX_DEBUG_A                                                        0x10
+#define ixDP_AUX_DEBUG_B                                                        0x11
+#define ixDP_AUX_DEBUG_C                                                        0x12
+#define ixDP_AUX_DEBUG_D                                                        0x13
+#define ixDP_AUX_DEBUG_E                                                        0x14
+#define ixDP_AUX_DEBUG_F                                                        0x15
+#define ixDP_AUX_DEBUG_G                                                        0x16
+#define ixDP_AUX_DEBUG_H                                                        0x17
+#define ixDP_AUX_DEBUG_I                                                        0x18
+#define ixDP_AUX_DEBUG_J                                                        0x19
+#define ixDP_AUX_DEBUG_K                                                        0x1a
+#define ixDP_AUX_DEBUG_L                                                        0x1b
+#define ixDP_AUX_DEBUG_M                                                        0x1c
+#define ixDP_AUX_DEBUG_N                                                        0x1d
+#define ixDP_AUX_DEBUG_O                                                        0x1e
+#define ixDP_AUX_DEBUG_P                                                        0x1f
+#define ixDP_AUX_DEBUG_Q                                                        0x20
+#define mmDVO_ENABLE                                                            0x16a0
+#define mmDVO_SOURCE_SELECT                                                     0x16a1
+#define mmDVO_OUTPUT                                                            0x16a2
+#define mmDVO_CONTROL                                                           0x16a3
+#define mmDVO_CRC_EN                                                            0x16a4
+#define mmDVO_CRC2_SIG_MASK                                                     0x16a5
+#define mmDVO_CRC2_SIG_RESULT                                                   0x16a6
+#define mmDVO_FIFO_ERROR_STATUS                                                 0x16a7
+#define mmDVO_TEST_DEBUG_INDEX                                                  0x16a8
+#define mmDVO_TEST_DEBUG_DATA                                                   0x16a9
+#define mmFBC_CNTL                                                              0x280
+#define mmFBC_IDLE_FORCE_CLEAR_MASK                                             0x282
+#define mmFBC_START_STOP_DELAY                                                  0x283
+#define mmFBC_COMP_CNTL                                                         0x284
+#define mmFBC_COMP_MODE                                                         0x285
+#define mmFBC_DEBUG0                                                            0x286
+#define mmFBC_DEBUG1                                                            0x287
+#define mmFBC_DEBUG2                                                            0x288
+#define mmFBC_IND_LUT0                                                          0x289
+#define mmFBC_IND_LUT1                                                          0x28a
+#define mmFBC_IND_LUT2                                                          0x28b
+#define mmFBC_IND_LUT3                                                          0x28c
+#define mmFBC_IND_LUT4                                                          0x28d
+#define mmFBC_IND_LUT5                                                          0x28e
+#define mmFBC_IND_LUT6                                                          0x28f
+#define mmFBC_IND_LUT7                                                          0x290
+#define mmFBC_IND_LUT8                                                          0x291
+#define mmFBC_IND_LUT9                                                          0x292
+#define mmFBC_IND_LUT10                                                         0x293
+#define mmFBC_IND_LUT11                                                         0x294
+#define mmFBC_IND_LUT12                                                         0x295
+#define mmFBC_IND_LUT13                                                         0x296
+#define mmFBC_IND_LUT14                                                         0x297
+#define mmFBC_IND_LUT15                                                         0x298
+#define mmFBC_CSM_REGION_OFFSET_01                                              0x299
+#define mmFBC_CSM_REGION_OFFSET_23                                              0x29a
+#define mmFBC_CLIENT_REGION_MASK                                                0x29b
+#define mmFBC_DEBUG_COMP                                                        0x29c
+#define mmFBC_DEBUG_CSR                                                         0x29d
+#define mmFBC_DEBUG_CSR_RDATA                                                   0x29e
+#define mmFBC_DEBUG_CSR_WDATA                                                   0x29f
+#define mmFBC_DEBUG_CSR_RDATA_HI                                                0x2a0
+#define mmFBC_DEBUG_CSR_WDATA_HI                                                0x2a1
+#define mmFBC_MISC                                                              0x2a2
+#define mmFBC_STATUS                                                            0x2a3
+#define mmFBC_ALPHA_CNTL                                                        0x2a6
+#define mmFBC_ALPHA_RGB_OVERRIDE                                                0x2a7
+#define mmFBC_TEST_DEBUG_INDEX                                                  0x2a4
+#define mmFBC_TEST_DEBUG_DATA                                                   0x2a5
+#define mmFMT_CLAMP_COMPONENT_R                                                 0x1be8
+#define mmFMT0_FMT_CLAMP_COMPONENT_R                                            0x1be8
+#define mmFMT1_FMT_CLAMP_COMPONENT_R                                            0x1de8
+#define mmFMT2_FMT_CLAMP_COMPONENT_R                                            0x1fe8
+#define mmFMT3_FMT_CLAMP_COMPONENT_R                                            0x41e8
+#define mmFMT4_FMT_CLAMP_COMPONENT_R                                            0x43e8
+#define mmFMT5_FMT_CLAMP_COMPONENT_R                                            0x45e8
+#define mmFMT_CLAMP_COMPONENT_G                                                 0x1be9
+#define mmFMT0_FMT_CLAMP_COMPONENT_G                                            0x1be9
+#define mmFMT1_FMT_CLAMP_COMPONENT_G                                            0x1de9
+#define mmFMT2_FMT_CLAMP_COMPONENT_G                                            0x1fe9
+#define mmFMT3_FMT_CLAMP_COMPONENT_G                                            0x41e9
+#define mmFMT4_FMT_CLAMP_COMPONENT_G                                            0x43e9
+#define mmFMT5_FMT_CLAMP_COMPONENT_G                                            0x45e9
+#define mmFMT_CLAMP_COMPONENT_B                                                 0x1bea
+#define mmFMT0_FMT_CLAMP_COMPONENT_B                                            0x1bea
+#define mmFMT1_FMT_CLAMP_COMPONENT_B                                            0x1dea
+#define mmFMT2_FMT_CLAMP_COMPONENT_B                                            0x1fea
+#define mmFMT3_FMT_CLAMP_COMPONENT_B                                            0x41ea
+#define mmFMT4_FMT_CLAMP_COMPONENT_B                                            0x43ea
+#define mmFMT5_FMT_CLAMP_COMPONENT_B                                            0x45ea
+#define mmFMT_DYNAMIC_EXP_CNTL                                                  0x1bed
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL                                             0x1bed
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL                                             0x1ded
+#define mmFMT2_FMT_DYNAMIC_EXP_CNTL                                             0x1fed
+#define mmFMT3_FMT_DYNAMIC_EXP_CNTL                                             0x41ed
+#define mmFMT4_FMT_DYNAMIC_EXP_CNTL                                             0x43ed
+#define mmFMT5_FMT_DYNAMIC_EXP_CNTL                                             0x45ed
+#define mmFMT_CONTROL                                                           0x1bee
+#define mmFMT0_FMT_CONTROL                                                      0x1bee
+#define mmFMT1_FMT_CONTROL                                                      0x1dee
+#define mmFMT2_FMT_CONTROL                                                      0x1fee
+#define mmFMT3_FMT_CONTROL                                                      0x41ee
+#define mmFMT4_FMT_CONTROL                                                      0x43ee
+#define mmFMT5_FMT_CONTROL                                                      0x45ee
+#define mmFMT_BIT_DEPTH_CONTROL                                                 0x1bf2
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL                                            0x1bf2
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL                                            0x1df2
+#define mmFMT2_FMT_BIT_DEPTH_CONTROL                                            0x1ff2
+#define mmFMT3_FMT_BIT_DEPTH_CONTROL                                            0x41f2
+#define mmFMT4_FMT_BIT_DEPTH_CONTROL                                            0x43f2
+#define mmFMT5_FMT_BIT_DEPTH_CONTROL                                            0x45f2
+#define mmFMT_DITHER_RAND_R_SEED                                                0x1bf3
+#define mmFMT0_FMT_DITHER_RAND_R_SEED                                           0x1bf3
+#define mmFMT1_FMT_DITHER_RAND_R_SEED                                           0x1df3
+#define mmFMT2_FMT_DITHER_RAND_R_SEED                                           0x1ff3
+#define mmFMT3_FMT_DITHER_RAND_R_SEED                                           0x41f3
+#define mmFMT4_FMT_DITHER_RAND_R_SEED                                           0x43f3
+#define mmFMT5_FMT_DITHER_RAND_R_SEED                                           0x45f3
+#define mmFMT_DITHER_RAND_G_SEED                                                0x1bf4
+#define mmFMT0_FMT_DITHER_RAND_G_SEED                                           0x1bf4
+#define mmFMT1_FMT_DITHER_RAND_G_SEED                                           0x1df4
+#define mmFMT2_FMT_DITHER_RAND_G_SEED                                           0x1ff4
+#define mmFMT3_FMT_DITHER_RAND_G_SEED                                           0x41f4
+#define mmFMT4_FMT_DITHER_RAND_G_SEED                                           0x43f4
+#define mmFMT5_FMT_DITHER_RAND_G_SEED                                           0x45f4
+#define mmFMT_DITHER_RAND_B_SEED                                                0x1bf5
+#define mmFMT0_FMT_DITHER_RAND_B_SEED                                           0x1bf5
+#define mmFMT1_FMT_DITHER_RAND_B_SEED                                           0x1df5
+#define mmFMT2_FMT_DITHER_RAND_B_SEED                                           0x1ff5
+#define mmFMT3_FMT_DITHER_RAND_B_SEED                                           0x41f5
+#define mmFMT4_FMT_DITHER_RAND_B_SEED                                           0x43f5
+#define mmFMT5_FMT_DITHER_RAND_B_SEED                                           0x45f5
+#define mmFMT_TEMPORAL_DITHER_PATTERN_CONTROL                                   0x1bf6
+#define mmFMT0_FMT_TEMPORAL_DITHER_PATTERN_CONTROL                              0x1bf6
+#define mmFMT1_FMT_TEMPORAL_DITHER_PATTERN_CONTROL                              0x1df6
+#define mmFMT2_FMT_TEMPORAL_DITHER_PATTERN_CONTROL                              0x1ff6
+#define mmFMT3_FMT_TEMPORAL_DITHER_PATTERN_CONTROL                              0x41f6
+#define mmFMT4_FMT_TEMPORAL_DITHER_PATTERN_CONTROL                              0x43f6
+#define mmFMT5_FMT_TEMPORAL_DITHER_PATTERN_CONTROL                              0x45f6
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                     0x1bf7
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                0x1bf7
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                0x1df7
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                0x1ff7
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                0x41f7
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                0x43f7
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX                0x45f7
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                     0x1bf8
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                0x1bf8
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                0x1df8
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                0x1ff8
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                0x41f8
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                0x43f8
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX                0x45f8
+#define mmFMT_CLAMP_CNTL                                                        0x1bf9
+#define mmFMT0_FMT_CLAMP_CNTL                                                   0x1bf9
+#define mmFMT1_FMT_CLAMP_CNTL                                                   0x1df9
+#define mmFMT2_FMT_CLAMP_CNTL                                                   0x1ff9
+#define mmFMT3_FMT_CLAMP_CNTL                                                   0x41f9
+#define mmFMT4_FMT_CLAMP_CNTL                                                   0x43f9
+#define mmFMT5_FMT_CLAMP_CNTL                                                   0x45f9
+#define mmFMT_CRC_CNTL                                                          0x1bfa
+#define mmFMT0_FMT_CRC_CNTL                                                     0x1bfa
+#define mmFMT1_FMT_CRC_CNTL                                                     0x1dfa
+#define mmFMT2_FMT_CRC_CNTL                                                     0x1ffa
+#define mmFMT3_FMT_CRC_CNTL                                                     0x41fa
+#define mmFMT4_FMT_CRC_CNTL                                                     0x43fa
+#define mmFMT5_FMT_CRC_CNTL                                                     0x45fa
+#define mmFMT_CRC_SIG_RED_GREEN_MASK                                            0x1bfb
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN_MASK                                       0x1bfb
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN_MASK                                       0x1dfb
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN_MASK                                       0x1ffb
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN_MASK                                       0x41fb
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN_MASK                                       0x43fb
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN_MASK                                       0x45fb
+#define mmFMT_CRC_SIG_BLUE_CONTROL_MASK                                         0x1bfc
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL_MASK                                    0x1bfc
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL_MASK                                    0x1dfc
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL_MASK                                    0x1ffc
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL_MASK                                    0x41fc
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL_MASK                                    0x43fc
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL_MASK                                    0x45fc
+#define mmFMT_CRC_SIG_RED_GREEN                                                 0x1bfd
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN                                            0x1bfd
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN                                            0x1dfd
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN                                            0x1ffd
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN                                            0x41fd
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN                                            0x43fd
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN                                            0x45fd
+#define mmFMT_CRC_SIG_BLUE_CONTROL                                              0x1bfe
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL                                         0x1bfe
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL                                         0x1dfe
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL                                         0x1ffe
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL                                         0x41fe
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL                                         0x43fe
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL                                         0x45fe
+#define mmFMT_DEBUG_CNTL                                                        0x1bff
+#define mmFMT0_FMT_DEBUG_CNTL                                                   0x1bff
+#define mmFMT1_FMT_DEBUG_CNTL                                                   0x1dff
+#define mmFMT2_FMT_DEBUG_CNTL                                                   0x1fff
+#define mmFMT3_FMT_DEBUG_CNTL                                                   0x41ff
+#define mmFMT4_FMT_DEBUG_CNTL                                                   0x43ff
+#define mmFMT5_FMT_DEBUG_CNTL                                                   0x45ff
+#define mmFMT_SIDE_BY_SIDE_STEREO_CONTROL                                       0x1bf0
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL                                  0x1bf0
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL                                  0x1df0
+#define mmFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL                                  0x1ff0
+#define mmFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL                                  0x41f0
+#define mmFMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL                                  0x43f0
+#define mmFMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL                                  0x45f0
+#define mmFMT_420_HBLANK_EARLY_START                                            0x1bf1
+#define mmFMT0_FMT_420_HBLANK_EARLY_START                                       0x1bf1
+#define mmFMT1_FMT_420_HBLANK_EARLY_START                                       0x1df1
+#define mmFMT2_FMT_420_HBLANK_EARLY_START                                       0x1ff1
+#define mmFMT3_FMT_420_HBLANK_EARLY_START                                       0x41f1
+#define mmFMT4_FMT_420_HBLANK_EARLY_START                                       0x43f1
+#define mmFMT5_FMT_420_HBLANK_EARLY_START                                       0x45f1
+#define mmFMT_TEST_DEBUG_INDEX                                                  0x1beb
+#define mmFMT0_FMT_TEST_DEBUG_INDEX                                             0x1beb
+#define mmFMT1_FMT_TEST_DEBUG_INDEX                                             0x1deb
+#define mmFMT2_FMT_TEST_DEBUG_INDEX                                             0x1feb
+#define mmFMT3_FMT_TEST_DEBUG_INDEX                                             0x41eb
+#define mmFMT4_FMT_TEST_DEBUG_INDEX                                             0x43eb
+#define mmFMT5_FMT_TEST_DEBUG_INDEX                                             0x45eb
+#define mmFMT_TEST_DEBUG_DATA                                                   0x1bec
+#define mmFMT0_FMT_TEST_DEBUG_DATA                                              0x1bec
+#define mmFMT1_FMT_TEST_DEBUG_DATA                                              0x1dec
+#define mmFMT2_FMT_TEST_DEBUG_DATA                                              0x1fec
+#define mmFMT3_FMT_TEST_DEBUG_DATA                                              0x41ec
+#define mmFMT4_FMT_TEST_DEBUG_DATA                                              0x43ec
+#define mmFMT5_FMT_TEST_DEBUG_DATA                                              0x45ec
+#define ixFMT_DEBUG0                                                            0x1
+#define ixFMT_DEBUG1                                                            0x2
+#define ixFMT_DEBUG2                                                            0x3
+#define ixFMT_DEBUG3                                                            0x4
+#define ixFMT_DEBUG_ID                                                          0x0
+#define mmLB_DATA_FORMAT                                                        0x1ac0
+#define mmLB0_LB_DATA_FORMAT                                                    0x1ac0
+#define mmLB1_LB_DATA_FORMAT                                                    0x1cc0
+#define mmLB2_LB_DATA_FORMAT                                                    0x1ec0
+#define mmLB3_LB_DATA_FORMAT                                                    0x40c0
+#define mmLB4_LB_DATA_FORMAT                                                    0x42c0
+#define mmLB5_LB_DATA_FORMAT                                                    0x44c0
+#define mmLB_MEMORY_CTRL                                                        0x1ac1
+#define mmLB0_LB_MEMORY_CTRL                                                    0x1ac1
+#define mmLB1_LB_MEMORY_CTRL                                                    0x1cc1
+#define mmLB2_LB_MEMORY_CTRL                                                    0x1ec1
+#define mmLB3_LB_MEMORY_CTRL                                                    0x40c1
+#define mmLB4_LB_MEMORY_CTRL                                                    0x42c1
+#define mmLB5_LB_MEMORY_CTRL                                                    0x44c1
+#define mmLB_MEMORY_SIZE_STATUS                                                 0x1ac2
+#define mmLB0_LB_MEMORY_SIZE_STATUS                                             0x1ac2
+#define mmLB1_LB_MEMORY_SIZE_STATUS                                             0x1cc2
+#define mmLB2_LB_MEMORY_SIZE_STATUS                                             0x1ec2
+#define mmLB3_LB_MEMORY_SIZE_STATUS                                             0x40c2
+#define mmLB4_LB_MEMORY_SIZE_STATUS                                             0x42c2
+#define mmLB5_LB_MEMORY_SIZE_STATUS                                             0x44c2
+#define mmLB_DESKTOP_HEIGHT                                                     0x1ac3
+#define mmLB0_LB_DESKTOP_HEIGHT                                                 0x1ac3
+#define mmLB1_LB_DESKTOP_HEIGHT                                                 0x1cc3
+#define mmLB2_LB_DESKTOP_HEIGHT                                                 0x1ec3
+#define mmLB3_LB_DESKTOP_HEIGHT                                                 0x40c3
+#define mmLB4_LB_DESKTOP_HEIGHT                                                 0x42c3
+#define mmLB5_LB_DESKTOP_HEIGHT                                                 0x44c3
+#define mmLB_VLINE_START_END                                                    0x1ac4
+#define mmLB0_LB_VLINE_START_END                                                0x1ac4
+#define mmLB1_LB_VLINE_START_END                                                0x1cc4
+#define mmLB2_LB_VLINE_START_END                                                0x1ec4
+#define mmLB3_LB_VLINE_START_END                                                0x40c4
+#define mmLB4_LB_VLINE_START_END                                                0x42c4
+#define mmLB5_LB_VLINE_START_END                                                0x44c4
+#define mmLB_VLINE2_START_END                                                   0x1ac5
+#define mmLB0_LB_VLINE2_START_END                                               0x1ac5
+#define mmLB1_LB_VLINE2_START_END                                               0x1cc5
+#define mmLB2_LB_VLINE2_START_END                                               0x1ec5
+#define mmLB3_LB_VLINE2_START_END                                               0x40c5
+#define mmLB4_LB_VLINE2_START_END                                               0x42c5
+#define mmLB5_LB_VLINE2_START_END                                               0x44c5
+#define mmLB_V_COUNTER                                                          0x1ac6
+#define mmLB0_LB_V_COUNTER                                                      0x1ac6
+#define mmLB1_LB_V_COUNTER                                                      0x1cc6
+#define mmLB2_LB_V_COUNTER                                                      0x1ec6
+#define mmLB3_LB_V_COUNTER                                                      0x40c6
+#define mmLB4_LB_V_COUNTER                                                      0x42c6
+#define mmLB5_LB_V_COUNTER                                                      0x44c6
+#define mmLB_SNAPSHOT_V_COUNTER                                                 0x1ac7
+#define mmLB0_LB_SNAPSHOT_V_COUNTER                                             0x1ac7
+#define mmLB1_LB_SNAPSHOT_V_COUNTER                                             0x1cc7
+#define mmLB2_LB_SNAPSHOT_V_COUNTER                                             0x1ec7
+#define mmLB3_LB_SNAPSHOT_V_COUNTER                                             0x40c7
+#define mmLB4_LB_SNAPSHOT_V_COUNTER                                             0x42c7
+#define mmLB5_LB_SNAPSHOT_V_COUNTER                                             0x44c7
+#define mmLB_INTERRUPT_MASK                                                     0x1ac8
+#define mmLB0_LB_INTERRUPT_MASK                                                 0x1ac8
+#define mmLB1_LB_INTERRUPT_MASK                                                 0x1cc8
+#define mmLB2_LB_INTERRUPT_MASK                                                 0x1ec8
+#define mmLB3_LB_INTERRUPT_MASK                                                 0x40c8
+#define mmLB4_LB_INTERRUPT_MASK                                                 0x42c8
+#define mmLB5_LB_INTERRUPT_MASK                                                 0x44c8
+#define mmLB_VLINE_STATUS                                                       0x1ac9
+#define mmLB0_LB_VLINE_STATUS                                                   0x1ac9
+#define mmLB1_LB_VLINE_STATUS                                                   0x1cc9
+#define mmLB2_LB_VLINE_STATUS                                                   0x1ec9
+#define mmLB3_LB_VLINE_STATUS                                                   0x40c9
+#define mmLB4_LB_VLINE_STATUS                                                   0x42c9
+#define mmLB5_LB_VLINE_STATUS                                                   0x44c9
+#define mmLB_VLINE2_STATUS                                                      0x1aca
+#define mmLB0_LB_VLINE2_STATUS                                                  0x1aca
+#define mmLB1_LB_VLINE2_STATUS                                                  0x1cca
+#define mmLB2_LB_VLINE2_STATUS                                                  0x1eca
+#define mmLB3_LB_VLINE2_STATUS                                                  0x40ca
+#define mmLB4_LB_VLINE2_STATUS                                                  0x42ca
+#define mmLB5_LB_VLINE2_STATUS                                                  0x44ca
+#define mmLB_VBLANK_STATUS                                                      0x1acb
+#define mmLB0_LB_VBLANK_STATUS                                                  0x1acb
+#define mmLB1_LB_VBLANK_STATUS                                                  0x1ccb
+#define mmLB2_LB_VBLANK_STATUS                                                  0x1ecb
+#define mmLB3_LB_VBLANK_STATUS                                                  0x40cb
+#define mmLB4_LB_VBLANK_STATUS                                                  0x42cb
+#define mmLB5_LB_VBLANK_STATUS                                                  0x44cb
+#define mmLB_SYNC_RESET_SEL                                                     0x1acc
+#define mmLB0_LB_SYNC_RESET_SEL                                                 0x1acc
+#define mmLB1_LB_SYNC_RESET_SEL                                                 0x1ccc
+#define mmLB2_LB_SYNC_RESET_SEL                                                 0x1ecc
+#define mmLB3_LB_SYNC_RESET_SEL                                                 0x40cc
+#define mmLB4_LB_SYNC_RESET_SEL                                                 0x42cc
+#define mmLB5_LB_SYNC_RESET_SEL                                                 0x44cc
+#define mmLB_BLACK_KEYER_R_CR                                                   0x1acd
+#define mmLB0_LB_BLACK_KEYER_R_CR                                               0x1acd
+#define mmLB1_LB_BLACK_KEYER_R_CR                                               0x1ccd
+#define mmLB2_LB_BLACK_KEYER_R_CR                                               0x1ecd
+#define mmLB3_LB_BLACK_KEYER_R_CR                                               0x40cd
+#define mmLB4_LB_BLACK_KEYER_R_CR                                               0x42cd
+#define mmLB5_LB_BLACK_KEYER_R_CR                                               0x44cd
+#define mmLB_BLACK_KEYER_G_Y                                                    0x1ace
+#define mmLB0_LB_BLACK_KEYER_G_Y                                                0x1ace
+#define mmLB1_LB_BLACK_KEYER_G_Y                                                0x1cce
+#define mmLB2_LB_BLACK_KEYER_G_Y                                                0x1ece
+#define mmLB3_LB_BLACK_KEYER_G_Y                                                0x40ce
+#define mmLB4_LB_BLACK_KEYER_G_Y                                                0x42ce
+#define mmLB5_LB_BLACK_KEYER_G_Y                                                0x44ce
+#define mmLB_BLACK_KEYER_B_CB                                                   0x1acf
+#define mmLB0_LB_BLACK_KEYER_B_CB                                               0x1acf
+#define mmLB1_LB_BLACK_KEYER_B_CB                                               0x1ccf
+#define mmLB2_LB_BLACK_KEYER_B_CB                                               0x1ecf
+#define mmLB3_LB_BLACK_KEYER_B_CB                                               0x40cf
+#define mmLB4_LB_BLACK_KEYER_B_CB                                               0x42cf
+#define mmLB5_LB_BLACK_KEYER_B_CB                                               0x44cf
+#define mmLB_KEYER_COLOR_CTRL                                                   0x1ad0
+#define mmLB0_LB_KEYER_COLOR_CTRL                                               0x1ad0
+#define mmLB1_LB_KEYER_COLOR_CTRL                                               0x1cd0
+#define mmLB2_LB_KEYER_COLOR_CTRL                                               0x1ed0
+#define mmLB3_LB_KEYER_COLOR_CTRL                                               0x40d0
+#define mmLB4_LB_KEYER_COLOR_CTRL                                               0x42d0
+#define mmLB5_LB_KEYER_COLOR_CTRL                                               0x44d0
+#define mmLB_KEYER_COLOR_R_CR                                                   0x1ad1
+#define mmLB0_LB_KEYER_COLOR_R_CR                                               0x1ad1
+#define mmLB1_LB_KEYER_COLOR_R_CR                                               0x1cd1
+#define mmLB2_LB_KEYER_COLOR_R_CR                                               0x1ed1
+#define mmLB3_LB_KEYER_COLOR_R_CR                                               0x40d1
+#define mmLB4_LB_KEYER_COLOR_R_CR                                               0x42d1
+#define mmLB5_LB_KEYER_COLOR_R_CR                                               0x44d1
+#define mmLB_KEYER_COLOR_G_Y                                                    0x1ad2
+#define mmLB0_LB_KEYER_COLOR_G_Y                                                0x1ad2
+#define mmLB1_LB_KEYER_COLOR_G_Y                                                0x1cd2
+#define mmLB2_LB_KEYER_COLOR_G_Y                                                0x1ed2
+#define mmLB3_LB_KEYER_COLOR_G_Y                                                0x40d2
+#define mmLB4_LB_KEYER_COLOR_G_Y                                                0x42d2
+#define mmLB5_LB_KEYER_COLOR_G_Y                                                0x44d2
+#define mmLB_KEYER_COLOR_B_CB                                                   0x1ad3
+#define mmLB0_LB_KEYER_COLOR_B_CB                                               0x1ad3
+#define mmLB1_LB_KEYER_COLOR_B_CB                                               0x1cd3
+#define mmLB2_LB_KEYER_COLOR_B_CB                                               0x1ed3
+#define mmLB3_LB_KEYER_COLOR_B_CB                                               0x40d3
+#define mmLB4_LB_KEYER_COLOR_B_CB                                               0x42d3
+#define mmLB5_LB_KEYER_COLOR_B_CB                                               0x44d3
+#define mmLB_KEYER_COLOR_REP_R_CR                                               0x1ad4
+#define mmLB0_LB_KEYER_COLOR_REP_R_CR                                           0x1ad4
+#define mmLB1_LB_KEYER_COLOR_REP_R_CR                                           0x1cd4
+#define mmLB2_LB_KEYER_COLOR_REP_R_CR                                           0x1ed4
+#define mmLB3_LB_KEYER_COLOR_REP_R_CR                                           0x40d4
+#define mmLB4_LB_KEYER_COLOR_REP_R_CR                                           0x42d4
+#define mmLB5_LB_KEYER_COLOR_REP_R_CR                                           0x44d4
+#define mmLB_KEYER_COLOR_REP_G_Y                                                0x1ad5
+#define mmLB0_LB_KEYER_COLOR_REP_G_Y                                            0x1ad5
+#define mmLB1_LB_KEYER_COLOR_REP_G_Y                                            0x1cd5
+#define mmLB2_LB_KEYER_COLOR_REP_G_Y                                            0x1ed5
+#define mmLB3_LB_KEYER_COLOR_REP_G_Y                                            0x40d5
+#define mmLB4_LB_KEYER_COLOR_REP_G_Y                                            0x42d5
+#define mmLB5_LB_KEYER_COLOR_REP_G_Y                                            0x44d5
+#define mmLB_KEYER_COLOR_REP_B_CB                                               0x1ad6
+#define mmLB0_LB_KEYER_COLOR_REP_B_CB                                           0x1ad6
+#define mmLB1_LB_KEYER_COLOR_REP_B_CB                                           0x1cd6
+#define mmLB2_LB_KEYER_COLOR_REP_B_CB                                           0x1ed6
+#define mmLB3_LB_KEYER_COLOR_REP_B_CB                                           0x40d6
+#define mmLB4_LB_KEYER_COLOR_REP_B_CB                                           0x42d6
+#define mmLB5_LB_KEYER_COLOR_REP_B_CB                                           0x44d6
+#define mmLB_BUFFER_LEVEL_STATUS                                                0x1ad7
+#define mmLB0_LB_BUFFER_LEVEL_STATUS                                            0x1ad7
+#define mmLB1_LB_BUFFER_LEVEL_STATUS                                            0x1cd7
+#define mmLB2_LB_BUFFER_LEVEL_STATUS                                            0x1ed7
+#define mmLB3_LB_BUFFER_LEVEL_STATUS                                            0x40d7
+#define mmLB4_LB_BUFFER_LEVEL_STATUS                                            0x42d7
+#define mmLB5_LB_BUFFER_LEVEL_STATUS                                            0x44d7
+#define mmLB_BUFFER_URGENCY_CTRL                                                0x1ad8
+#define mmLB0_LB_BUFFER_URGENCY_CTRL                                            0x1ad8
+#define mmLB1_LB_BUFFER_URGENCY_CTRL                                            0x1cd8
+#define mmLB2_LB_BUFFER_URGENCY_CTRL                                            0x1ed8
+#define mmLB3_LB_BUFFER_URGENCY_CTRL                                            0x40d8
+#define mmLB4_LB_BUFFER_URGENCY_CTRL                                            0x42d8
+#define mmLB5_LB_BUFFER_URGENCY_CTRL                                            0x44d8
+#define mmLB_BUFFER_URGENCY_STATUS                                              0x1ad9
+#define mmLB0_LB_BUFFER_URGENCY_STATUS                                          0x1ad9
+#define mmLB1_LB_BUFFER_URGENCY_STATUS                                          0x1cd9
+#define mmLB2_LB_BUFFER_URGENCY_STATUS                                          0x1ed9
+#define mmLB3_LB_BUFFER_URGENCY_STATUS                                          0x40d9
+#define mmLB4_LB_BUFFER_URGENCY_STATUS                                          0x42d9
+#define mmLB5_LB_BUFFER_URGENCY_STATUS                                          0x44d9
+#define mmLB_BUFFER_STATUS                                                      0x1ada
+#define mmLB0_LB_BUFFER_STATUS                                                  0x1ada
+#define mmLB1_LB_BUFFER_STATUS                                                  0x1cda
+#define mmLB2_LB_BUFFER_STATUS                                                  0x1eda
+#define mmLB3_LB_BUFFER_STATUS                                                  0x40da
+#define mmLB4_LB_BUFFER_STATUS                                                  0x42da
+#define mmLB5_LB_BUFFER_STATUS                                                  0x44da
+#define mmLB_NO_OUTSTANDING_REQ_STATUS                                          0x1adc
+#define mmLB0_LB_NO_OUTSTANDING_REQ_STATUS                                      0x1adc
+#define mmLB1_LB_NO_OUTSTANDING_REQ_STATUS                                      0x1cdc
+#define mmLB2_LB_NO_OUTSTANDING_REQ_STATUS                                      0x1edc
+#define mmLB3_LB_NO_OUTSTANDING_REQ_STATUS                                      0x40dc
+#define mmLB4_LB_NO_OUTSTANDING_REQ_STATUS                                      0x42dc
+#define mmLB5_LB_NO_OUTSTANDING_REQ_STATUS                                      0x44dc
+#define mmMVP_AFR_FLIP_MODE                                                     0x1ae0
+#define mmLB0_MVP_AFR_FLIP_MODE                                                 0x1ae0
+#define mmLB1_MVP_AFR_FLIP_MODE                                                 0x1ce0
+#define mmLB2_MVP_AFR_FLIP_MODE                                                 0x1ee0
+#define mmLB3_MVP_AFR_FLIP_MODE                                                 0x40e0
+#define mmLB4_MVP_AFR_FLIP_MODE                                                 0x42e0
+#define mmLB5_MVP_AFR_FLIP_MODE                                                 0x44e0
+#define mmMVP_AFR_FLIP_FIFO_CNTL                                                0x1ae1
+#define mmLB0_MVP_AFR_FLIP_FIFO_CNTL                                            0x1ae1
+#define mmLB1_MVP_AFR_FLIP_FIFO_CNTL                                            0x1ce1
+#define mmLB2_MVP_AFR_FLIP_FIFO_CNTL                                            0x1ee1
+#define mmLB3_MVP_AFR_FLIP_FIFO_CNTL                                            0x40e1
+#define mmLB4_MVP_AFR_FLIP_FIFO_CNTL                                            0x42e1
+#define mmLB5_MVP_AFR_FLIP_FIFO_CNTL                                            0x44e1
+#define mmMVP_FLIP_LINE_NUM_INSERT                                              0x1ae2
+#define mmLB0_MVP_FLIP_LINE_NUM_INSERT                                          0x1ae2
+#define mmLB1_MVP_FLIP_LINE_NUM_INSERT                                          0x1ce2
+#define mmLB2_MVP_FLIP_LINE_NUM_INSERT                                          0x1ee2
+#define mmLB3_MVP_FLIP_LINE_NUM_INSERT                                          0x40e2
+#define mmLB4_MVP_FLIP_LINE_NUM_INSERT                                          0x42e2
+#define mmLB5_MVP_FLIP_LINE_NUM_INSERT                                          0x44e2
+#define mmDC_MVP_LB_CONTROL                                                     0x1ae3
+#define mmLB0_DC_MVP_LB_CONTROL                                                 0x1ae3
+#define mmLB1_DC_MVP_LB_CONTROL                                                 0x1ce3
+#define mmLB2_DC_MVP_LB_CONTROL                                                 0x1ee3
+#define mmLB3_DC_MVP_LB_CONTROL                                                 0x40e3
+#define mmLB4_DC_MVP_LB_CONTROL                                                 0x42e3
+#define mmLB5_DC_MVP_LB_CONTROL                                                 0x44e3
+#define mmLB_DEBUG                                                              0x1ae4
+#define mmLB0_LB_DEBUG                                                          0x1ae4
+#define mmLB1_LB_DEBUG                                                          0x1ce4
+#define mmLB2_LB_DEBUG                                                          0x1ee4
+#define mmLB3_LB_DEBUG                                                          0x40e4
+#define mmLB4_LB_DEBUG                                                          0x42e4
+#define mmLB5_LB_DEBUG                                                          0x44e4
+#define mmLB_DEBUG2                                                             0x1ae5
+#define mmLB0_LB_DEBUG2                                                         0x1ae5
+#define mmLB1_LB_DEBUG2                                                         0x1ce5
+#define mmLB2_LB_DEBUG2                                                         0x1ee5
+#define mmLB3_LB_DEBUG2                                                         0x40e5
+#define mmLB4_LB_DEBUG2                                                         0x42e5
+#define mmLB5_LB_DEBUG2                                                         0x44e5
+#define mmLB_DEBUG3                                                             0x1ae6
+#define mmLB0_LB_DEBUG3                                                         0x1ae6
+#define mmLB1_LB_DEBUG3                                                         0x1ce6
+#define mmLB2_LB_DEBUG3                                                         0x1ee6
+#define mmLB3_LB_DEBUG3                                                         0x40e6
+#define mmLB4_LB_DEBUG3                                                         0x42e6
+#define mmLB5_LB_DEBUG3                                                         0x44e6
+#define mmLB_TEST_DEBUG_INDEX                                                   0x1afe
+#define mmLB0_LB_TEST_DEBUG_INDEX                                               0x1afe
+#define mmLB1_LB_TEST_DEBUG_INDEX                                               0x1cfe
+#define mmLB2_LB_TEST_DEBUG_INDEX                                               0x1efe
+#define mmLB3_LB_TEST_DEBUG_INDEX                                               0x40fe
+#define mmLB4_LB_TEST_DEBUG_INDEX                                               0x42fe
+#define mmLB5_LB_TEST_DEBUG_INDEX                                               0x44fe
+#define mmLB_TEST_DEBUG_DATA                                                    0x1aff
+#define mmLB0_LB_TEST_DEBUG_DATA                                                0x1aff
+#define mmLB1_LB_TEST_DEBUG_DATA                                                0x1cff
+#define mmLB2_LB_TEST_DEBUG_DATA                                                0x1eff
+#define mmLB3_LB_TEST_DEBUG_DATA                                                0x40ff
+#define mmLB4_LB_TEST_DEBUG_DATA                                                0x42ff
+#define mmLB5_LB_TEST_DEBUG_DATA                                                0x44ff
+#define mmLBV_DATA_FORMAT                                                       0x463c
+#define mmLBV0_LBV_DATA_FORMAT                                                  0x463c
+#define mmLBV1_LBV_DATA_FORMAT                                                  0x983c
+#define mmLBV_MEMORY_CTRL                                                       0x463d
+#define mmLBV0_LBV_MEMORY_CTRL                                                  0x463d
+#define mmLBV1_LBV_MEMORY_CTRL                                                  0x983d
+#define mmLBV_MEMORY_SIZE_STATUS                                                0x463e
+#define mmLBV0_LBV_MEMORY_SIZE_STATUS                                           0x463e
+#define mmLBV1_LBV_MEMORY_SIZE_STATUS                                           0x983e
+#define mmLBV_DESKTOP_HEIGHT                                                    0x463f
+#define mmLBV0_LBV_DESKTOP_HEIGHT                                               0x463f
+#define mmLBV1_LBV_DESKTOP_HEIGHT                                               0x983f
+#define mmLBV_VLINE_START_END                                                   0x4640
+#define mmLBV0_LBV_VLINE_START_END                                              0x4640
+#define mmLBV1_LBV_VLINE_START_END                                              0x9840
+#define mmLBV_VLINE2_START_END                                                  0x4641
+#define mmLBV0_LBV_VLINE2_START_END                                             0x4641
+#define mmLBV1_LBV_VLINE2_START_END                                             0x9841
+#define mmLBV_V_COUNTER                                                         0x4642
+#define mmLBV0_LBV_V_COUNTER                                                    0x4642
+#define mmLBV1_LBV_V_COUNTER                                                    0x9842
+#define mmLBV_SNAPSHOT_V_COUNTER                                                0x4643
+#define mmLBV0_LBV_SNAPSHOT_V_COUNTER                                           0x4643
+#define mmLBV1_LBV_SNAPSHOT_V_COUNTER                                           0x9843
+#define mmLBV_V_COUNTER_CHROMA                                                  0x4644
+#define mmLBV0_LBV_V_COUNTER_CHROMA                                             0x4644
+#define mmLBV1_LBV_V_COUNTER_CHROMA                                             0x9844
+#define mmLBV_SNAPSHOT_V_COUNTER_CHROMA                                         0x4645
+#define mmLBV0_LBV_SNAPSHOT_V_COUNTER_CHROMA                                    0x4645
+#define mmLBV1_LBV_SNAPSHOT_V_COUNTER_CHROMA                                    0x9845
+#define mmLBV_INTERRUPT_MASK                                                    0x4646
+#define mmLBV0_LBV_INTERRUPT_MASK                                               0x4646
+#define mmLBV1_LBV_INTERRUPT_MASK                                               0x9846
+#define mmLBV_VLINE_STATUS                                                      0x4647
+#define mmLBV0_LBV_VLINE_STATUS                                                 0x4647
+#define mmLBV1_LBV_VLINE_STATUS                                                 0x9847
+#define mmLBV_VLINE2_STATUS                                                     0x4648
+#define mmLBV0_LBV_VLINE2_STATUS                                                0x4648
+#define mmLBV1_LBV_VLINE2_STATUS                                                0x9848
+#define mmLBV_VBLANK_STATUS                                                     0x4649
+#define mmLBV0_LBV_VBLANK_STATUS                                                0x4649
+#define mmLBV1_LBV_VBLANK_STATUS                                                0x9849
+#define mmLBV_SYNC_RESET_SEL                                                    0x464a
+#define mmLBV0_LBV_SYNC_RESET_SEL                                               0x464a
+#define mmLBV1_LBV_SYNC_RESET_SEL                                               0x984a
+#define mmLBV_BLACK_KEYER_R_CR                                                  0x464b
+#define mmLBV0_LBV_BLACK_KEYER_R_CR                                             0x464b
+#define mmLBV1_LBV_BLACK_KEYER_R_CR                                             0x984b
+#define mmLBV_BLACK_KEYER_G_Y                                                   0x464c
+#define mmLBV0_LBV_BLACK_KEYER_G_Y                                              0x464c
+#define mmLBV1_LBV_BLACK_KEYER_G_Y                                              0x984c
+#define mmLBV_BLACK_KEYER_B_CB                                                  0x464d
+#define mmLBV0_LBV_BLACK_KEYER_B_CB                                             0x464d
+#define mmLBV1_LBV_BLACK_KEYER_B_CB                                             0x984d
+#define mmLBV_KEYER_COLOR_CTRL                                                  0x464e
+#define mmLBV0_LBV_KEYER_COLOR_CTRL                                             0x464e
+#define mmLBV1_LBV_KEYER_COLOR_CTRL                                             0x984e
+#define mmLBV_KEYER_COLOR_R_CR                                                  0x464f
+#define mmLBV0_LBV_KEYER_COLOR_R_CR                                             0x464f
+#define mmLBV1_LBV_KEYER_COLOR_R_CR                                             0x984f
+#define mmLBV_KEYER_COLOR_G_Y                                                   0x4650
+#define mmLBV0_LBV_KEYER_COLOR_G_Y                                              0x4650
+#define mmLBV1_LBV_KEYER_COLOR_G_Y                                              0x9850
+#define mmLBV_KEYER_COLOR_B_CB                                                  0x4651
+#define mmLBV0_LBV_KEYER_COLOR_B_CB                                             0x4651
+#define mmLBV1_LBV_KEYER_COLOR_B_CB                                             0x9851
+#define mmLBV_KEYER_COLOR_REP_R_CR                                              0x4652
+#define mmLBV0_LBV_KEYER_COLOR_REP_R_CR                                         0x4652
+#define mmLBV1_LBV_KEYER_COLOR_REP_R_CR                                         0x9852
+#define mmLBV_KEYER_COLOR_REP_G_Y                                               0x4653
+#define mmLBV0_LBV_KEYER_COLOR_REP_G_Y                                          0x4653
+#define mmLBV1_LBV_KEYER_COLOR_REP_G_Y                                          0x9853
+#define mmLBV_KEYER_COLOR_REP_B_CB                                              0x4654
+#define mmLBV0_LBV_KEYER_COLOR_REP_B_CB                                         0x4654
+#define mmLBV1_LBV_KEYER_COLOR_REP_B_CB                                         0x9854
+#define mmLBV_BUFFER_LEVEL_STATUS                                               0x4655
+#define mmLBV0_LBV_BUFFER_LEVEL_STATUS                                          0x4655
+#define mmLBV1_LBV_BUFFER_LEVEL_STATUS                                          0x9855
+#define mmLBV_BUFFER_URGENCY_CTRL                                               0x4656
+#define mmLBV0_LBV_BUFFER_URGENCY_CTRL                                          0x4656
+#define mmLBV1_LBV_BUFFER_URGENCY_CTRL                                          0x9856
+#define mmLBV_BUFFER_URGENCY_STATUS                                             0x4657
+#define mmLBV0_LBV_BUFFER_URGENCY_STATUS                                        0x4657
+#define mmLBV1_LBV_BUFFER_URGENCY_STATUS                                        0x9857
+#define mmLBV_BUFFER_STATUS                                                     0x4658
+#define mmLBV0_LBV_BUFFER_STATUS                                                0x4658
+#define mmLBV1_LBV_BUFFER_STATUS                                                0x9858
+#define mmLBV_NO_OUTSTANDING_REQ_STATUS                                         0x4659
+#define mmLBV0_LBV_NO_OUTSTANDING_REQ_STATUS                                    0x4659
+#define mmLBV1_LBV_NO_OUTSTANDING_REQ_STATUS                                    0x9859
+#define mmLBV_DEBUG                                                             0x465a
+#define mmLBV0_LBV_DEBUG                                                        0x465a
+#define mmLBV1_LBV_DEBUG                                                        0x985a
+#define mmLBV_DEBUG2                                                            0x465b
+#define mmLBV0_LBV_DEBUG2                                                       0x465b
+#define mmLBV1_LBV_DEBUG2                                                       0x985b
+#define mmLBV_DEBUG3                                                            0x465c
+#define mmLBV0_LBV_DEBUG3                                                       0x465c
+#define mmLBV1_LBV_DEBUG3                                                       0x985c
+#define mmLBV_TEST_DEBUG_INDEX                                                  0x4666
+#define mmLBV0_LBV_TEST_DEBUG_INDEX                                             0x4666
+#define mmLBV1_LBV_TEST_DEBUG_INDEX                                             0x9866
+#define mmLBV_TEST_DEBUG_DATA                                                   0x4667
+#define mmLBV0_LBV_TEST_DEBUG_DATA                                              0x4667
+#define mmLBV1_LBV_TEST_DEBUG_DATA                                              0x9867
+#define mmMVP_CONTROL1                                                          0x2ac
+#define mmMVP_CONTROL2                                                          0x2ad
+#define mmMVP_FIFO_CONTROL                                                      0x2ae
+#define mmMVP_FIFO_STATUS                                                       0x2af
+#define mmMVP_SLAVE_STATUS                                                      0x2b0
+#define mmMVP_INBAND_CNTL_CAP                                                   0x2b1
+#define mmMVP_BLACK_KEYER                                                       0x2b2
+#define mmMVP_CRC_CNTL                                                          0x2b3
+#define mmMVP_CRC_RESULT_BLUE_GREEN                                             0x2b4
+#define mmMVP_CRC_RESULT_RED                                                    0x2b5
+#define mmMVP_CONTROL3                                                          0x2b6
+#define mmMVP_RECEIVE_CNT_CNTL1                                                 0x2b7
+#define mmMVP_RECEIVE_CNT_CNTL2                                                 0x2b8
+#define mmMVP_DEBUG                                                             0x2bb
+#define mmMVP_TEST_DEBUG_INDEX                                                  0x2b9
+#define mmMVP_TEST_DEBUG_DATA                                                   0x2ba
+#define ixMVP_DEBUG_12                                                          0xc
+#define ixMVP_DEBUG_13                                                          0xd
+#define ixMVP_DEBUG_14                                                          0xe
+#define ixMVP_DEBUG_15                                                          0xf
+#define ixMVP_DEBUG_16                                                          0x10
+#define ixMVP_DEBUG_17                                                          0x11
+#define mmSCL_COEF_RAM_SELECT                                                   0x1b40
+#define mmSCL0_SCL_COEF_RAM_SELECT                                              0x1b40
+#define mmSCL1_SCL_COEF_RAM_SELECT                                              0x1d40
+#define mmSCL2_SCL_COEF_RAM_SELECT                                              0x1f40
+#define mmSCL3_SCL_COEF_RAM_SELECT                                              0x4140
+#define mmSCL4_SCL_COEF_RAM_SELECT                                              0x4340
+#define mmSCL5_SCL_COEF_RAM_SELECT                                              0x4540
+#define mmSCL_COEF_RAM_TAP_DATA                                                 0x1b41
+#define mmSCL0_SCL_COEF_RAM_TAP_DATA                                            0x1b41
+#define mmSCL1_SCL_COEF_RAM_TAP_DATA                                            0x1d41
+#define mmSCL2_SCL_COEF_RAM_TAP_DATA                                            0x1f41
+#define mmSCL3_SCL_COEF_RAM_TAP_DATA                                            0x4141
+#define mmSCL4_SCL_COEF_RAM_TAP_DATA                                            0x4341
+#define mmSCL5_SCL_COEF_RAM_TAP_DATA                                            0x4541
+#define mmSCL_MODE                                                              0x1b42
+#define mmSCL0_SCL_MODE                                                         0x1b42
+#define mmSCL1_SCL_MODE                                                         0x1d42
+#define mmSCL2_SCL_MODE                                                         0x1f42
+#define mmSCL3_SCL_MODE                                                         0x4142
+#define mmSCL4_SCL_MODE                                                         0x4342
+#define mmSCL5_SCL_MODE                                                         0x4542
+#define mmSCL_TAP_CONTROL                                                       0x1b43
+#define mmSCL0_SCL_TAP_CONTROL                                                  0x1b43
+#define mmSCL1_SCL_TAP_CONTROL                                                  0x1d43
+#define mmSCL2_SCL_TAP_CONTROL                                                  0x1f43
+#define mmSCL3_SCL_TAP_CONTROL                                                  0x4143
+#define mmSCL4_SCL_TAP_CONTROL                                                  0x4343
+#define mmSCL5_SCL_TAP_CONTROL                                                  0x4543
+#define mmSCL_CONTROL                                                           0x1b44
+#define mmSCL0_SCL_CONTROL                                                      0x1b44
+#define mmSCL1_SCL_CONTROL                                                      0x1d44
+#define mmSCL2_SCL_CONTROL                                                      0x1f44
+#define mmSCL3_SCL_CONTROL                                                      0x4144
+#define mmSCL4_SCL_CONTROL                                                      0x4344
+#define mmSCL5_SCL_CONTROL                                                      0x4544
+#define mmSCL_BYPASS_CONTROL                                                    0x1b45
+#define mmSCL0_SCL_BYPASS_CONTROL                                               0x1b45
+#define mmSCL1_SCL_BYPASS_CONTROL                                               0x1d45
+#define mmSCL2_SCL_BYPASS_CONTROL                                               0x1f45
+#define mmSCL3_SCL_BYPASS_CONTROL                                               0x4145
+#define mmSCL4_SCL_BYPASS_CONTROL                                               0x4345
+#define mmSCL5_SCL_BYPASS_CONTROL                                               0x4545
+#define mmSCL_MANUAL_REPLICATE_CONTROL                                          0x1b46
+#define mmSCL0_SCL_MANUAL_REPLICATE_CONTROL                                     0x1b46
+#define mmSCL1_SCL_MANUAL_REPLICATE_CONTROL                                     0x1d46
+#define mmSCL2_SCL_MANUAL_REPLICATE_CONTROL                                     0x1f46
+#define mmSCL3_SCL_MANUAL_REPLICATE_CONTROL                                     0x4146
+#define mmSCL4_SCL_MANUAL_REPLICATE_CONTROL                                     0x4346
+#define mmSCL5_SCL_MANUAL_REPLICATE_CONTROL                                     0x4546
+#define mmSCL_AUTOMATIC_MODE_CONTROL                                            0x1b47
+#define mmSCL0_SCL_AUTOMATIC_MODE_CONTROL                                       0x1b47
+#define mmSCL1_SCL_AUTOMATIC_MODE_CONTROL                                       0x1d47
+#define mmSCL2_SCL_AUTOMATIC_MODE_CONTROL                                       0x1f47
+#define mmSCL3_SCL_AUTOMATIC_MODE_CONTROL                                       0x4147
+#define mmSCL4_SCL_AUTOMATIC_MODE_CONTROL                                       0x4347
+#define mmSCL5_SCL_AUTOMATIC_MODE_CONTROL                                       0x4547
+#define mmSCL_HORZ_FILTER_CONTROL                                               0x1b48
+#define mmSCL0_SCL_HORZ_FILTER_CONTROL                                          0x1b48
+#define mmSCL1_SCL_HORZ_FILTER_CONTROL                                          0x1d48
+#define mmSCL2_SCL_HORZ_FILTER_CONTROL                                          0x1f48
+#define mmSCL3_SCL_HORZ_FILTER_CONTROL                                          0x4148
+#define mmSCL4_SCL_HORZ_FILTER_CONTROL                                          0x4348
+#define mmSCL5_SCL_HORZ_FILTER_CONTROL                                          0x4548
+#define mmSCL_HORZ_FILTER_SCALE_RATIO                                           0x1b49
+#define mmSCL0_SCL_HORZ_FILTER_SCALE_RATIO                                      0x1b49
+#define mmSCL1_SCL_HORZ_FILTER_SCALE_RATIO                                      0x1d49
+#define mmSCL2_SCL_HORZ_FILTER_SCALE_RATIO                                      0x1f49
+#define mmSCL3_SCL_HORZ_FILTER_SCALE_RATIO                                      0x4149
+#define mmSCL4_SCL_HORZ_FILTER_SCALE_RATIO                                      0x4349
+#define mmSCL5_SCL_HORZ_FILTER_SCALE_RATIO                                      0x4549
+#define mmSCL_HORZ_FILTER_INIT                                                  0x1b4a
+#define mmSCL0_SCL_HORZ_FILTER_INIT                                             0x1b4a
+#define mmSCL1_SCL_HORZ_FILTER_INIT                                             0x1d4a
+#define mmSCL2_SCL_HORZ_FILTER_INIT                                             0x1f4a
+#define mmSCL3_SCL_HORZ_FILTER_INIT                                             0x414a
+#define mmSCL4_SCL_HORZ_FILTER_INIT                                             0x434a
+#define mmSCL5_SCL_HORZ_FILTER_INIT                                             0x454a
+#define mmSCL_VERT_FILTER_CONTROL                                               0x1b4b
+#define mmSCL0_SCL_VERT_FILTER_CONTROL                                          0x1b4b
+#define mmSCL1_SCL_VERT_FILTER_CONTROL                                          0x1d4b
+#define mmSCL2_SCL_VERT_FILTER_CONTROL                                          0x1f4b
+#define mmSCL3_SCL_VERT_FILTER_CONTROL                                          0x414b
+#define mmSCL4_SCL_VERT_FILTER_CONTROL                                          0x434b
+#define mmSCL5_SCL_VERT_FILTER_CONTROL                                          0x454b
+#define mmSCL_VERT_FILTER_SCALE_RATIO                                           0x1b4c
+#define mmSCL0_SCL_VERT_FILTER_SCALE_RATIO                                      0x1b4c
+#define mmSCL1_SCL_VERT_FILTER_SCALE_RATIO                                      0x1d4c
+#define mmSCL2_SCL_VERT_FILTER_SCALE_RATIO                                      0x1f4c
+#define mmSCL3_SCL_VERT_FILTER_SCALE_RATIO                                      0x414c
+#define mmSCL4_SCL_VERT_FILTER_SCALE_RATIO                                      0x434c
+#define mmSCL5_SCL_VERT_FILTER_SCALE_RATIO                                      0x454c
+#define mmSCL_VERT_FILTER_INIT                                                  0x1b4d
+#define mmSCL0_SCL_VERT_FILTER_INIT                                             0x1b4d
+#define mmSCL1_SCL_VERT_FILTER_INIT                                             0x1d4d
+#define mmSCL2_SCL_VERT_FILTER_INIT                                             0x1f4d
+#define mmSCL3_SCL_VERT_FILTER_INIT                                             0x414d
+#define mmSCL4_SCL_VERT_FILTER_INIT                                             0x434d
+#define mmSCL5_SCL_VERT_FILTER_INIT                                             0x454d
+#define mmSCL_VERT_FILTER_INIT_BOT                                              0x1b4e
+#define mmSCL0_SCL_VERT_FILTER_INIT_BOT                                         0x1b4e
+#define mmSCL1_SCL_VERT_FILTER_INIT_BOT                                         0x1d4e
+#define mmSCL2_SCL_VERT_FILTER_INIT_BOT                                         0x1f4e
+#define mmSCL3_SCL_VERT_FILTER_INIT_BOT                                         0x414e
+#define mmSCL4_SCL_VERT_FILTER_INIT_BOT                                         0x434e
+#define mmSCL5_SCL_VERT_FILTER_INIT_BOT                                         0x454e
+#define mmSCL_ROUND_OFFSET                                                      0x1b4f
+#define mmSCL0_SCL_ROUND_OFFSET                                                 0x1b4f
+#define mmSCL1_SCL_ROUND_OFFSET                                                 0x1d4f
+#define mmSCL2_SCL_ROUND_OFFSET                                                 0x1f4f
+#define mmSCL3_SCL_ROUND_OFFSET                                                 0x414f
+#define mmSCL4_SCL_ROUND_OFFSET                                                 0x434f
+#define mmSCL5_SCL_ROUND_OFFSET                                                 0x454f
+#define mmSCL_UPDATE                                                            0x1b51
+#define mmSCL0_SCL_UPDATE                                                       0x1b51
+#define mmSCL1_SCL_UPDATE                                                       0x1d51
+#define mmSCL2_SCL_UPDATE                                                       0x1f51
+#define mmSCL3_SCL_UPDATE                                                       0x4151
+#define mmSCL4_SCL_UPDATE                                                       0x4351
+#define mmSCL5_SCL_UPDATE                                                       0x4551
+#define mmSCL_F_SHARP_CONTROL                                                   0x1b53
+#define mmSCL0_SCL_F_SHARP_CONTROL                                              0x1b53
+#define mmSCL1_SCL_F_SHARP_CONTROL                                              0x1d53
+#define mmSCL2_SCL_F_SHARP_CONTROL                                              0x1f53
+#define mmSCL3_SCL_F_SHARP_CONTROL                                              0x4153
+#define mmSCL4_SCL_F_SHARP_CONTROL                                              0x4353
+#define mmSCL5_SCL_F_SHARP_CONTROL                                              0x4553
+#define mmSCL_ALU_CONTROL                                                       0x1b54
+#define mmSCL0_SCL_ALU_CONTROL                                                  0x1b54
+#define mmSCL1_SCL_ALU_CONTROL                                                  0x1d54
+#define mmSCL2_SCL_ALU_CONTROL                                                  0x1f54
+#define mmSCL3_SCL_ALU_CONTROL                                                  0x4154
+#define mmSCL4_SCL_ALU_CONTROL                                                  0x4354
+#define mmSCL5_SCL_ALU_CONTROL                                                  0x4554
+#define mmSCL_COEF_RAM_CONFLICT_STATUS                                          0x1b55
+#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS                                     0x1b55
+#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS                                     0x1d55
+#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS                                     0x1f55
+#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS                                     0x4155
+#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS                                     0x4355
+#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS                                     0x4555
+#define mmVIEWPORT_START_SECONDARY                                              0x1b5b
+#define mmSCL0_VIEWPORT_START_SECONDARY                                         0x1b5b
+#define mmSCL1_VIEWPORT_START_SECONDARY                                         0x1d5b
+#define mmSCL2_VIEWPORT_START_SECONDARY                                         0x1f5b
+#define mmSCL3_VIEWPORT_START_SECONDARY                                         0x415b
+#define mmSCL4_VIEWPORT_START_SECONDARY                                         0x435b
+#define mmSCL5_VIEWPORT_START_SECONDARY                                         0x455b
+#define mmVIEWPORT_START                                                        0x1b5c
+#define mmSCL0_VIEWPORT_START                                                   0x1b5c
+#define mmSCL1_VIEWPORT_START                                                   0x1d5c
+#define mmSCL2_VIEWPORT_START                                                   0x1f5c
+#define mmSCL3_VIEWPORT_START                                                   0x415c
+#define mmSCL4_VIEWPORT_START                                                   0x435c
+#define mmSCL5_VIEWPORT_START                                                   0x455c
+#define mmVIEWPORT_SIZE                                                         0x1b5d
+#define mmSCL0_VIEWPORT_SIZE                                                    0x1b5d
+#define mmSCL1_VIEWPORT_SIZE                                                    0x1d5d
+#define mmSCL2_VIEWPORT_SIZE                                                    0x1f5d
+#define mmSCL3_VIEWPORT_SIZE                                                    0x415d
+#define mmSCL4_VIEWPORT_SIZE                                                    0x435d
+#define mmSCL5_VIEWPORT_SIZE                                                    0x455d
+#define mmEXT_OVERSCAN_LEFT_RIGHT                                               0x1b5e
+#define mmSCL0_EXT_OVERSCAN_LEFT_RIGHT                                          0x1b5e
+#define mmSCL1_EXT_OVERSCAN_LEFT_RIGHT                                          0x1d5e
+#define mmSCL2_EXT_OVERSCAN_LEFT_RIGHT                                          0x1f5e
+#define mmSCL3_EXT_OVERSCAN_LEFT_RIGHT                                          0x415e
+#define mmSCL4_EXT_OVERSCAN_LEFT_RIGHT                                          0x435e
+#define mmSCL5_EXT_OVERSCAN_LEFT_RIGHT                                          0x455e
+#define mmEXT_OVERSCAN_TOP_BOTTOM                                               0x1b5f
+#define mmSCL0_EXT_OVERSCAN_TOP_BOTTOM                                          0x1b5f
+#define mmSCL1_EXT_OVERSCAN_TOP_BOTTOM                                          0x1d5f
+#define mmSCL2_EXT_OVERSCAN_TOP_BOTTOM                                          0x1f5f
+#define mmSCL3_EXT_OVERSCAN_TOP_BOTTOM                                          0x415f
+#define mmSCL4_EXT_OVERSCAN_TOP_BOTTOM                                          0x435f
+#define mmSCL5_EXT_OVERSCAN_TOP_BOTTOM                                          0x455f
+#define mmSCL_MODE_CHANGE_DET1                                                  0x1b60
+#define mmSCL0_SCL_MODE_CHANGE_DET1                                             0x1b60
+#define mmSCL1_SCL_MODE_CHANGE_DET1                                             0x1d60
+#define mmSCL2_SCL_MODE_CHANGE_DET1                                             0x1f60
+#define mmSCL3_SCL_MODE_CHANGE_DET1                                             0x4160
+#define mmSCL4_SCL_MODE_CHANGE_DET1                                             0x4360
+#define mmSCL5_SCL_MODE_CHANGE_DET1                                             0x4560
+#define mmSCL_MODE_CHANGE_DET2                                                  0x1b61
+#define mmSCL0_SCL_MODE_CHANGE_DET2                                             0x1b61
+#define mmSCL1_SCL_MODE_CHANGE_DET2                                             0x1d61
+#define mmSCL2_SCL_MODE_CHANGE_DET2                                             0x1f61
+#define mmSCL3_SCL_MODE_CHANGE_DET2                                             0x4161
+#define mmSCL4_SCL_MODE_CHANGE_DET2                                             0x4361
+#define mmSCL5_SCL_MODE_CHANGE_DET2                                             0x4561
+#define mmSCL_MODE_CHANGE_DET3                                                  0x1b62
+#define mmSCL0_SCL_MODE_CHANGE_DET3                                             0x1b62
+#define mmSCL1_SCL_MODE_CHANGE_DET3                                             0x1d62
+#define mmSCL2_SCL_MODE_CHANGE_DET3                                             0x1f62
+#define mmSCL3_SCL_MODE_CHANGE_DET3                                             0x4162
+#define mmSCL4_SCL_MODE_CHANGE_DET3                                             0x4362
+#define mmSCL5_SCL_MODE_CHANGE_DET3                                             0x4562
+#define mmSCL_MODE_CHANGE_MASK                                                  0x1b63
+#define mmSCL0_SCL_MODE_CHANGE_MASK                                             0x1b63
+#define mmSCL1_SCL_MODE_CHANGE_MASK                                             0x1d63
+#define mmSCL2_SCL_MODE_CHANGE_MASK                                             0x1f63
+#define mmSCL3_SCL_MODE_CHANGE_MASK                                             0x4163
+#define mmSCL4_SCL_MODE_CHANGE_MASK                                             0x4363
+#define mmSCL5_SCL_MODE_CHANGE_MASK                                             0x4563
+#define mmSCL_DEBUG2                                                            0x1b69
+#define mmSCL0_SCL_DEBUG2                                                       0x1b69
+#define mmSCL1_SCL_DEBUG2                                                       0x1d69
+#define mmSCL2_SCL_DEBUG2                                                       0x1f69
+#define mmSCL3_SCL_DEBUG2                                                       0x4169
+#define mmSCL4_SCL_DEBUG2                                                       0x4369
+#define mmSCL5_SCL_DEBUG2                                                       0x4569
+#define mmSCL_DEBUG                                                             0x1b6a
+#define mmSCL0_SCL_DEBUG                                                        0x1b6a
+#define mmSCL1_SCL_DEBUG                                                        0x1d6a
+#define mmSCL2_SCL_DEBUG                                                        0x1f6a
+#define mmSCL3_SCL_DEBUG                                                        0x416a
+#define mmSCL4_SCL_DEBUG                                                        0x436a
+#define mmSCL5_SCL_DEBUG                                                        0x456a
+#define mmSCL_TEST_DEBUG_INDEX                                                  0x1b6b
+#define mmSCL0_SCL_TEST_DEBUG_INDEX                                             0x1b6b
+#define mmSCL1_SCL_TEST_DEBUG_INDEX                                             0x1d6b
+#define mmSCL2_SCL_TEST_DEBUG_INDEX                                             0x1f6b
+#define mmSCL3_SCL_TEST_DEBUG_INDEX                                             0x416b
+#define mmSCL4_SCL_TEST_DEBUG_INDEX                                             0x436b
+#define mmSCL5_SCL_TEST_DEBUG_INDEX                                             0x456b
+#define mmSCL_TEST_DEBUG_DATA                                                   0x1b6c
+#define mmSCL0_SCL_TEST_DEBUG_DATA                                              0x1b6c
+#define mmSCL1_SCL_TEST_DEBUG_DATA                                              0x1d6c
+#define mmSCL2_SCL_TEST_DEBUG_DATA                                              0x1f6c
+#define mmSCL3_SCL_TEST_DEBUG_DATA                                              0x416c
+#define mmSCL4_SCL_TEST_DEBUG_DATA                                              0x436c
+#define mmSCL5_SCL_TEST_DEBUG_DATA                                              0x456c
+#define mmSCLV_COEF_RAM_SELECT                                                  0x4670
+#define mmSCLV0_SCLV_COEF_RAM_SELECT                                            0x4670
+#define mmSCLV1_SCLV_COEF_RAM_SELECT                                            0x9870
+#define mmSCLV_COEF_RAM_TAP_DATA                                                0x4671
+#define mmSCLV0_SCLV_COEF_RAM_TAP_DATA                                          0x4671
+#define mmSCLV1_SCLV_COEF_RAM_TAP_DATA                                          0x9871
+#define mmSCLV_MODE                                                             0x4672
+#define mmSCLV0_SCLV_MODE                                                       0x4672
+#define mmSCLV1_SCLV_MODE                                                       0x9872
+#define mmSCLV_TAP_CONTROL                                                      0x4673
+#define mmSCLV0_SCLV_TAP_CONTROL                                                0x4673
+#define mmSCLV1_SCLV_TAP_CONTROL                                                0x9873
+#define mmSCLV_CONTROL                                                          0x4674
+#define mmSCLV0_SCLV_CONTROL                                                    0x4674
+#define mmSCLV1_SCLV_CONTROL                                                    0x9874
+#define mmSCLV_MANUAL_REPLICATE_CONTROL                                         0x4675
+#define mmSCLV0_SCLV_MANUAL_REPLICATE_CONTROL                                   0x4675
+#define mmSCLV1_SCLV_MANUAL_REPLICATE_CONTROL                                   0x9875
+#define mmSCLV_AUTOMATIC_MODE_CONTROL                                           0x4676
+#define mmSCLV0_SCLV_AUTOMATIC_MODE_CONTROL                                     0x4676
+#define mmSCLV1_SCLV_AUTOMATIC_MODE_CONTROL                                     0x9876
+#define mmSCLV_HORZ_FILTER_CONTROL                                              0x4677
+#define mmSCLV0_SCLV_HORZ_FILTER_CONTROL                                        0x4677
+#define mmSCLV1_SCLV_HORZ_FILTER_CONTROL                                        0x9877
+#define mmSCLV_HORZ_FILTER_SCALE_RATIO                                          0x4678
+#define mmSCLV0_SCLV_HORZ_FILTER_SCALE_RATIO                                    0x4678
+#define mmSCLV1_SCLV_HORZ_FILTER_SCALE_RATIO                                    0x9878
+#define mmSCLV_HORZ_FILTER_INIT                                                 0x4679
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT                                           0x4679
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT                                           0x9879
+#define mmSCLV_HORZ_FILTER_SCALE_RATIO_C                                        0x467a
+#define mmSCLV0_SCLV_HORZ_FILTER_SCALE_RATIO_C                                  0x467a
+#define mmSCLV1_SCLV_HORZ_FILTER_SCALE_RATIO_C                                  0x987a
+#define mmSCLV_HORZ_FILTER_INIT_C                                               0x467b
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT_C                                         0x467b
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT_C                                         0x987b
+#define mmSCLV_VERT_FILTER_CONTROL                                              0x467c
+#define mmSCLV0_SCLV_VERT_FILTER_CONTROL                                        0x467c
+#define mmSCLV1_SCLV_VERT_FILTER_CONTROL                                        0x987c
+#define mmSCLV_VERT_FILTER_SCALE_RATIO                                          0x467d
+#define mmSCLV0_SCLV_VERT_FILTER_SCALE_RATIO                                    0x467d
+#define mmSCLV1_SCLV_VERT_FILTER_SCALE_RATIO                                    0x987d
+#define mmSCLV_VERT_FILTER_INIT                                                 0x467e
+#define mmSCLV0_SCLV_VERT_FILTER_INIT                                           0x467e
+#define mmSCLV1_SCLV_VERT_FILTER_INIT                                           0x987e
+#define mmSCLV_VERT_FILTER_INIT_BOT                                             0x467f
+#define mmSCLV0_SCLV_VERT_FILTER_INIT_BOT                                       0x467f
+#define mmSCLV1_SCLV_VERT_FILTER_INIT_BOT                                       0x987f
+#define mmSCLV_VERT_FILTER_SCALE_RATIO_C                                        0x4680
+#define mmSCLV0_SCLV_VERT_FILTER_SCALE_RATIO_C                                  0x4680
+#define mmSCLV1_SCLV_VERT_FILTER_SCALE_RATIO_C                                  0x9880
+#define mmSCLV_VERT_FILTER_INIT_C                                               0x4681
+#define mmSCLV0_SCLV_VERT_FILTER_INIT_C                                         0x4681
+#define mmSCLV1_SCLV_VERT_FILTER_INIT_C                                         0x9881
+#define mmSCLV_VERT_FILTER_INIT_BOT_C                                           0x4682
+#define mmSCLV0_SCLV_VERT_FILTER_INIT_BOT_C                                     0x4682
+#define mmSCLV1_SCLV_VERT_FILTER_INIT_BOT_C                                     0x9882
+#define mmSCLV_ROUND_OFFSET                                                     0x4683
+#define mmSCLV0_SCLV_ROUND_OFFSET                                               0x4683
+#define mmSCLV1_SCLV_ROUND_OFFSET                                               0x9883
+#define mmSCLV_UPDATE                                                           0x4684
+#define mmSCLV0_SCLV_UPDATE                                                     0x4684
+#define mmSCLV1_SCLV_UPDATE                                                     0x9884
+#define mmSCLV_ALU_CONTROL                                                      0x4685
+#define mmSCLV0_SCLV_ALU_CONTROL                                                0x4685
+#define mmSCLV1_SCLV_ALU_CONTROL                                                0x9885
+#define mmSCLV_VIEWPORT_START                                                   0x4686
+#define mmSCLV0_SCLV_VIEWPORT_START                                             0x4686
+#define mmSCLV1_SCLV_VIEWPORT_START                                             0x9886
+#define mmSCLV_VIEWPORT_START_SECONDARY                                         0x4687
+#define mmSCLV0_SCLV_VIEWPORT_START_SECONDARY                                   0x4687
+#define mmSCLV1_SCLV_VIEWPORT_START_SECONDARY                                   0x9887
+#define mmSCLV_VIEWPORT_SIZE                                                    0x4688
+#define mmSCLV0_SCLV_VIEWPORT_SIZE                                              0x4688
+#define mmSCLV1_SCLV_VIEWPORT_SIZE                                              0x9888
+#define mmSCLV_VIEWPORT_START_C                                                 0x4689
+#define mmSCLV0_SCLV_VIEWPORT_START_C                                           0x4689
+#define mmSCLV1_SCLV_VIEWPORT_START_C                                           0x9889
+#define mmSCLV_VIEWPORT_START_SECONDARY_C                                       0x468a
+#define mmSCLV0_SCLV_VIEWPORT_START_SECONDARY_C                                 0x468a
+#define mmSCLV1_SCLV_VIEWPORT_START_SECONDARY_C                                 0x988a
+#define mmSCLV_VIEWPORT_SIZE_C                                                  0x468b
+#define mmSCLV0_SCLV_VIEWPORT_SIZE_C                                            0x468b
+#define mmSCLV1_SCLV_VIEWPORT_SIZE_C                                            0x988b
+#define mmSCLV_EXT_OVERSCAN_LEFT_RIGHT                                          0x468c
+#define mmSCLV0_SCLV_EXT_OVERSCAN_LEFT_RIGHT                                    0x468c
+#define mmSCLV1_SCLV_EXT_OVERSCAN_LEFT_RIGHT                                    0x988c
+#define mmSCLV_EXT_OVERSCAN_TOP_BOTTOM                                          0x468d
+#define mmSCLV0_SCLV_EXT_OVERSCAN_TOP_BOTTOM                                    0x468d
+#define mmSCLV1_SCLV_EXT_OVERSCAN_TOP_BOTTOM                                    0x988d
+#define mmSCLV_MODE_CHANGE_DET1                                                 0x468e
+#define mmSCLV0_SCLV_MODE_CHANGE_DET1                                           0x468e
+#define mmSCLV1_SCLV_MODE_CHANGE_DET1                                           0x988e
+#define mmSCLV_MODE_CHANGE_DET2                                                 0x468f
+#define mmSCLV0_SCLV_MODE_CHANGE_DET2                                           0x468f
+#define mmSCLV1_SCLV_MODE_CHANGE_DET2                                           0x988f
+#define mmSCLV_MODE_CHANGE_DET3                                                 0x4690
+#define mmSCLV0_SCLV_MODE_CHANGE_DET3                                           0x4690
+#define mmSCLV1_SCLV_MODE_CHANGE_DET3                                           0x9890
+#define mmSCLV_MODE_CHANGE_MASK                                                 0x4691
+#define mmSCLV0_SCLV_MODE_CHANGE_MASK                                           0x4691
+#define mmSCLV1_SCLV_MODE_CHANGE_MASK                                           0x9891
+#define mmSCLV_HORZ_FILTER_INIT_BOT                                             0x4692
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT_BOT                                       0x4692
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT_BOT                                       0x9892
+#define mmSCLV_HORZ_FILTER_INIT_BOT_C                                           0x4693
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT_BOT_C                                     0x4693
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT_BOT_C                                     0x9893
+#define mmSCLV_DEBUG2                                                           0x4694
+#define mmSCLV0_SCLV_DEBUG2                                                     0x4694
+#define mmSCLV1_SCLV_DEBUG2                                                     0x9894
+#define mmSCLV_DEBUG                                                            0x4695
+#define mmSCLV0_SCLV_DEBUG                                                      0x4695
+#define mmSCLV1_SCLV_DEBUG                                                      0x9895
+#define mmSCLV_TEST_DEBUG_INDEX                                                 0x4696
+#define mmSCLV0_SCLV_TEST_DEBUG_INDEX                                           0x4696
+#define mmSCLV1_SCLV_TEST_DEBUG_INDEX                                           0x9896
+#define mmSCLV_TEST_DEBUG_DATA                                                  0x4697
+#define mmSCLV0_SCLV_TEST_DEBUG_DATA                                            0x4697
+#define mmSCLV1_SCLV_TEST_DEBUG_DATA                                            0x9897
+#define mmCOL_MAN_UPDATE                                                        0x46a4
+#define mmCOL_MAN0_COL_MAN_UPDATE                                               0x46a4
+#define mmCOL_MAN1_COL_MAN_UPDATE                                               0x98a4
+#define mmCOL_MAN_INPUT_CSC_CONTROL                                             0x46a5
+#define mmCOL_MAN0_COL_MAN_INPUT_CSC_CONTROL                                    0x46a5
+#define mmCOL_MAN1_COL_MAN_INPUT_CSC_CONTROL                                    0x98a5
+#define mmINPUT_CSC_C11_C12_A                                                   0x46a6
+#define mmCOL_MAN0_INPUT_CSC_C11_C12_A                                          0x46a6
+#define mmCOL_MAN1_INPUT_CSC_C11_C12_A                                          0x98a6
+#define mmINPUT_CSC_C13_C14_A                                                   0x46a7
+#define mmCOL_MAN0_INPUT_CSC_C13_C14_A                                          0x46a7
+#define mmCOL_MAN1_INPUT_CSC_C13_C14_A                                          0x98a7
+#define mmINPUT_CSC_C21_C22_A                                                   0x46a8
+#define mmCOL_MAN0_INPUT_CSC_C21_C22_A                                          0x46a8
+#define mmCOL_MAN1_INPUT_CSC_C21_C22_A                                          0x98a8
+#define mmINPUT_CSC_C23_C24_A                                                   0x46a9
+#define mmCOL_MAN0_INPUT_CSC_C23_C24_A                                          0x46a9
+#define mmCOL_MAN1_INPUT_CSC_C23_C24_A                                          0x98a9
+#define mmINPUT_CSC_C31_C32_A                                                   0x46aa
+#define mmCOL_MAN0_INPUT_CSC_C31_C32_A                                          0x46aa
+#define mmCOL_MAN1_INPUT_CSC_C31_C32_A                                          0x98aa
+#define mmINPUT_CSC_C33_C34_A                                                   0x46ab
+#define mmCOL_MAN0_INPUT_CSC_C33_C34_A                                          0x46ab
+#define mmCOL_MAN1_INPUT_CSC_C33_C34_A                                          0x98ab
+#define mmINPUT_CSC_C11_C12_B                                                   0x46ac
+#define mmCOL_MAN0_INPUT_CSC_C11_C12_B                                          0x46ac
+#define mmCOL_MAN1_INPUT_CSC_C11_C12_B                                          0x98ac
+#define mmINPUT_CSC_C13_C14_B                                                   0x46ad
+#define mmCOL_MAN0_INPUT_CSC_C13_C14_B                                          0x46ad
+#define mmCOL_MAN1_INPUT_CSC_C13_C14_B                                          0x98ad
+#define mmINPUT_CSC_C21_C22_B                                                   0x46ae
+#define mmCOL_MAN0_INPUT_CSC_C21_C22_B                                          0x46ae
+#define mmCOL_MAN1_INPUT_CSC_C21_C22_B                                          0x98ae
+#define mmINPUT_CSC_C23_C24_B                                                   0x46af
+#define mmCOL_MAN0_INPUT_CSC_C23_C24_B                                          0x46af
+#define mmCOL_MAN1_INPUT_CSC_C23_C24_B                                          0x98af
+#define mmINPUT_CSC_C31_C32_B                                                   0x46b0
+#define mmCOL_MAN0_INPUT_CSC_C31_C32_B                                          0x46b0
+#define mmCOL_MAN1_INPUT_CSC_C31_C32_B                                          0x98b0
+#define mmINPUT_CSC_C33_C34_B                                                   0x46b1
+#define mmCOL_MAN0_INPUT_CSC_C33_C34_B                                          0x46b1
+#define mmCOL_MAN1_INPUT_CSC_C33_C34_B                                          0x98b1
+#define mmPRESCALE_CONTROL                                                      0x46b2
+#define mmCOL_MAN0_PRESCALE_CONTROL                                             0x46b2
+#define mmCOL_MAN1_PRESCALE_CONTROL                                             0x98b2
+#define mmPRESCALE_VALUES_R                                                     0x46b3
+#define mmCOL_MAN0_PRESCALE_VALUES_R                                            0x46b3
+#define mmCOL_MAN1_PRESCALE_VALUES_R                                            0x98b3
+#define mmPRESCALE_VALUES_G                                                     0x46b4
+#define mmCOL_MAN0_PRESCALE_VALUES_G                                            0x46b4
+#define mmCOL_MAN1_PRESCALE_VALUES_G                                            0x98b4
+#define mmPRESCALE_VALUES_B                                                     0x46b5
+#define mmCOL_MAN0_PRESCALE_VALUES_B                                            0x46b5
+#define mmCOL_MAN1_PRESCALE_VALUES_B                                            0x98b5
+#define mmCOL_MAN_OUTPUT_CSC_CONTROL                                            0x46b6
+#define mmCOL_MAN0_COL_MAN_OUTPUT_CSC_CONTROL                                   0x46b6
+#define mmCOL_MAN1_COL_MAN_OUTPUT_CSC_CONTROL                                   0x98b6
+#define mmOUTPUT_CSC_C11_C12_A                                                  0x46b7
+#define mmCOL_MAN0_OUTPUT_CSC_C11_C12_A                                         0x46b7
+#define mmCOL_MAN1_OUTPUT_CSC_C11_C12_A                                         0x98b7
+#define mmOUTPUT_CSC_C13_C14_A                                                  0x46b8
+#define mmCOL_MAN0_OUTPUT_CSC_C13_C14_A                                         0x46b8
+#define mmCOL_MAN1_OUTPUT_CSC_C13_C14_A                                         0x98b8
+#define mmOUTPUT_CSC_C21_C22_A                                                  0x46b9
+#define mmCOL_MAN0_OUTPUT_CSC_C21_C22_A                                         0x46b9
+#define mmCOL_MAN1_OUTPUT_CSC_C21_C22_A                                         0x98b9
+#define mmOUTPUT_CSC_C23_C24_A                                                  0x46ba
+#define mmCOL_MAN0_OUTPUT_CSC_C23_C24_A                                         0x46ba
+#define mmCOL_MAN1_OUTPUT_CSC_C23_C24_A                                         0x98ba
+#define mmOUTPUT_CSC_C31_C32_A                                                  0x46bb
+#define mmCOL_MAN0_OUTPUT_CSC_C31_C32_A                                         0x46bb
+#define mmCOL_MAN1_OUTPUT_CSC_C31_C32_A                                         0x98bb
+#define mmOUTPUT_CSC_C33_C34_A                                                  0x46bc
+#define mmCOL_MAN0_OUTPUT_CSC_C33_C34_A                                         0x46bc
+#define mmCOL_MAN1_OUTPUT_CSC_C33_C34_A                                         0x98bc
+#define mmOUTPUT_CSC_C11_C12_B                                                  0x46bd
+#define mmCOL_MAN0_OUTPUT_CSC_C11_C12_B                                         0x46bd
+#define mmCOL_MAN1_OUTPUT_CSC_C11_C12_B                                         0x98bd
+#define mmOUTPUT_CSC_C13_C14_B                                                  0x46be
+#define mmCOL_MAN0_OUTPUT_CSC_C13_C14_B                                         0x46be
+#define mmCOL_MAN1_OUTPUT_CSC_C13_C14_B                                         0x98be
+#define mmOUTPUT_CSC_C21_C22_B                                                  0x46bf
+#define mmCOL_MAN0_OUTPUT_CSC_C21_C22_B                                         0x46bf
+#define mmCOL_MAN1_OUTPUT_CSC_C21_C22_B                                         0x98bf
+#define mmOUTPUT_CSC_C23_C24_B                                                  0x46c0
+#define mmCOL_MAN0_OUTPUT_CSC_C23_C24_B                                         0x46c0
+#define mmCOL_MAN1_OUTPUT_CSC_C23_C24_B                                         0x98c0
+#define mmOUTPUT_CSC_C31_C32_B                                                  0x46c1
+#define mmCOL_MAN0_OUTPUT_CSC_C31_C32_B                                         0x46c1
+#define mmCOL_MAN1_OUTPUT_CSC_C31_C32_B                                         0x98c1
+#define mmOUTPUT_CSC_C33_C34_B                                                  0x46c2
+#define mmCOL_MAN0_OUTPUT_CSC_C33_C34_B                                         0x46c2
+#define mmCOL_MAN1_OUTPUT_CSC_C33_C34_B                                         0x98c2
+#define mmDENORM_CLAMP_CONTROL                                                  0x46c3
+#define mmCOL_MAN0_DENORM_CLAMP_CONTROL                                         0x46c3
+#define mmCOL_MAN1_DENORM_CLAMP_CONTROL                                         0x98c3
+#define mmDENORM_CLAMP_RANGE_R_CR                                               0x46c4
+#define mmCOL_MAN0_DENORM_CLAMP_RANGE_R_CR                                      0x46c4
+#define mmCOL_MAN1_DENORM_CLAMP_RANGE_R_CR                                      0x98c4
+#define mmDENORM_CLAMP_RANGE_G_Y                                                0x46c5
+#define mmCOL_MAN0_DENORM_CLAMP_RANGE_G_Y                                       0x46c5
+#define mmCOL_MAN1_DENORM_CLAMP_RANGE_G_Y                                       0x98c5
+#define mmDENORM_CLAMP_RANGE_B_CB                                               0x46c6
+#define mmCOL_MAN0_DENORM_CLAMP_RANGE_B_CB                                      0x46c6
+#define mmCOL_MAN1_DENORM_CLAMP_RANGE_B_CB                                      0x98c6
+#define mmCOL_MAN_FP_CONVERTED_FIELD                                            0x46c7
+#define mmCOL_MAN0_COL_MAN_FP_CONVERTED_FIELD                                   0x46c7
+#define mmCOL_MAN1_COL_MAN_FP_CONVERTED_FIELD                                   0x98c7
+#define mmGAMMA_CORR_CONTROL                                                    0x46c8
+#define mmCOL_MAN0_GAMMA_CORR_CONTROL                                           0x46c8
+#define mmCOL_MAN1_GAMMA_CORR_CONTROL                                           0x98c8
+#define mmGAMMA_CORR_LUT_INDEX                                                  0x46c9
+#define mmCOL_MAN0_GAMMA_CORR_LUT_INDEX                                         0x46c9
+#define mmCOL_MAN1_GAMMA_CORR_LUT_INDEX                                         0x98c9
+#define mmGAMMA_CORR_LUT_DATA                                                   0x46ca
+#define mmCOL_MAN0_GAMMA_CORR_LUT_DATA                                          0x46ca
+#define mmCOL_MAN1_GAMMA_CORR_LUT_DATA                                          0x98ca
+#define mmGAMMA_CORR_LUT_WRITE_EN_MASK                                          0x46cb
+#define mmCOL_MAN0_GAMMA_CORR_LUT_WRITE_EN_MASK                                 0x46cb
+#define mmCOL_MAN1_GAMMA_CORR_LUT_WRITE_EN_MASK                                 0x98cb
+#define mmGAMMA_CORR_CNTLA_START_CNTL                                           0x46cc
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_START_CNTL                                  0x46cc
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_START_CNTL                                  0x98cc
+#define mmGAMMA_CORR_CNTLA_SLOPE_CNTL                                           0x46cd
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_SLOPE_CNTL                                  0x46cd
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_SLOPE_CNTL                                  0x98cd
+#define mmGAMMA_CORR_CNTLA_END_CNTL1                                            0x46ce
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_END_CNTL1                                   0x46ce
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_END_CNTL1                                   0x98ce
+#define mmGAMMA_CORR_CNTLA_END_CNTL2                                            0x46cf
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_END_CNTL2                                   0x46cf
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_END_CNTL2                                   0x98cf
+#define mmGAMMA_CORR_CNTLA_REGION_0_1                                           0x46d0
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_0_1                                  0x46d0
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_0_1                                  0x98d0
+#define mmGAMMA_CORR_CNTLA_REGION_2_3                                           0x46d1
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_2_3                                  0x46d1
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_2_3                                  0x98d1
+#define mmGAMMA_CORR_CNTLA_REGION_4_5                                           0x46d2
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_4_5                                  0x46d2
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_4_5                                  0x98d2
+#define mmGAMMA_CORR_CNTLA_REGION_6_7                                           0x46d3
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_6_7                                  0x46d3
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_6_7                                  0x98d3
+#define mmGAMMA_CORR_CNTLA_REGION_8_9                                           0x46d4
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_8_9                                  0x46d4
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_8_9                                  0x98d4
+#define mmGAMMA_CORR_CNTLA_REGION_10_11                                         0x46d5
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_10_11                                0x46d5
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_10_11                                0x98d5
+#define mmGAMMA_CORR_CNTLA_REGION_12_13                                         0x46d6
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_12_13                                0x46d6
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_12_13                                0x98d6
+#define mmGAMMA_CORR_CNTLA_REGION_14_15                                         0x46d7
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_14_15                                0x46d7
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_14_15                                0x98d7
+#define mmGAMMA_CORR_CNTLB_START_CNTL                                           0x46d8
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_START_CNTL                                  0x46d8
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_START_CNTL                                  0x98d8
+#define mmGAMMA_CORR_CNTLB_SLOPE_CNTL                                           0x46d9
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_SLOPE_CNTL                                  0x46d9
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_SLOPE_CNTL                                  0x98d9
+#define mmGAMMA_CORR_CNTLB_END_CNTL1                                            0x46da
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_END_CNTL1                                   0x46da
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_END_CNTL1                                   0x98da
+#define mmGAMMA_CORR_CNTLB_END_CNTL2                                            0x46db
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_END_CNTL2                                   0x46db
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_END_CNTL2                                   0x98db
+#define mmGAMMA_CORR_CNTLB_REGION_0_1                                           0x46dc
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_0_1                                  0x46dc
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_0_1                                  0x98dc
+#define mmGAMMA_CORR_CNTLB_REGION_2_3                                           0x46dd
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_2_3                                  0x46dd
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_2_3                                  0x98dd
+#define mmGAMMA_CORR_CNTLB_REGION_4_5                                           0x46de
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_4_5                                  0x46de
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_4_5                                  0x98de
+#define mmGAMMA_CORR_CNTLB_REGION_6_7                                           0x46df
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_6_7                                  0x46df
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_6_7                                  0x98df
+#define mmGAMMA_CORR_CNTLB_REGION_8_9                                           0x46e0
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_8_9                                  0x46e0
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_8_9                                  0x98e0
+#define mmGAMMA_CORR_CNTLB_REGION_10_11                                         0x46e1
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_10_11                                0x46e1
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_10_11                                0x98e1
+#define mmGAMMA_CORR_CNTLB_REGION_12_13                                         0x46e2
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_12_13                                0x46e2
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_12_13                                0x98e2
+#define mmGAMMA_CORR_CNTLB_REGION_14_15                                         0x46e3
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_14_15                                0x46e3
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_14_15                                0x98e3
+#define mmPACK_FIFO_ERROR                                                       0x46e4
+#define mmCOL_MAN0_PACK_FIFO_ERROR                                              0x46e4
+#define mmCOL_MAN1_PACK_FIFO_ERROR                                              0x98e4
+#define mmOUTPUT_FIFO_ERROR                                                     0x46e5
+#define mmCOL_MAN0_OUTPUT_FIFO_ERROR                                            0x46e5
+#define mmCOL_MAN1_OUTPUT_FIFO_ERROR                                            0x98e5
+#define mmINPUT_GAMMA_LUT_AUTOFILL                                              0x46e6
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_AUTOFILL                                     0x46e6
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_AUTOFILL                                     0x98e6
+#define mmINPUT_GAMMA_LUT_RW_INDEX                                              0x46e7
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_RW_INDEX                                     0x46e7
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_RW_INDEX                                     0x98e7
+#define mmINPUT_GAMMA_LUT_SEQ_COLOR                                             0x46e8
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_SEQ_COLOR                                    0x46e8
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_SEQ_COLOR                                    0x98e8
+#define mmINPUT_GAMMA_LUT_PWL_DATA                                              0x46e9
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_PWL_DATA                                     0x46e9
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_PWL_DATA                                     0x98e9
+#define mmINPUT_GAMMA_LUT_30_COLOR                                              0x46ea
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_30_COLOR                                     0x46ea
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_30_COLOR                                     0x98ea
+#define mmCOL_MAN_INPUT_GAMMA_CONTROL1                                          0x46eb
+#define mmCOL_MAN0_COL_MAN_INPUT_GAMMA_CONTROL1                                 0x46eb
+#define mmCOL_MAN1_COL_MAN_INPUT_GAMMA_CONTROL1                                 0x98eb
+#define mmCOL_MAN_INPUT_GAMMA_CONTROL2                                          0x46ec
+#define mmCOL_MAN0_COL_MAN_INPUT_GAMMA_CONTROL2                                 0x46ec
+#define mmCOL_MAN1_COL_MAN_INPUT_GAMMA_CONTROL2                                 0x98ec
+#define mmINPUT_GAMMA_BW_OFFSETS_B                                              0x46ed
+#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_B                                     0x46ed
+#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_B                                     0x98ed
+#define mmINPUT_GAMMA_BW_OFFSETS_G                                              0x46ee
+#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_G                                     0x46ee
+#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_G                                     0x98ee
+#define mmINPUT_GAMMA_BW_OFFSETS_R                                              0x46ef
+#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_R                                     0x46ef
+#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_R                                     0x98ef
+#define mmCOL_MAN_DEBUG_CONTROL                                                 0x46f0
+#define mmCOL_MAN0_COL_MAN_DEBUG_CONTROL                                        0x46f0
+#define mmCOL_MAN1_COL_MAN_DEBUG_CONTROL                                        0x98f0
+#define mmCOL_MAN_TEST_DEBUG_INDEX                                              0x46f1
+#define mmCOL_MAN0_COL_MAN_TEST_DEBUG_INDEX                                     0x46f1
+#define mmCOL_MAN1_COL_MAN_TEST_DEBUG_INDEX                                     0x98f1
+#define mmCOL_MAN_TEST_DEBUG_DATA                                               0x46f3
+#define mmCOL_MAN0_COL_MAN_TEST_DEBUG_DATA                                      0x46f3
+#define mmCOL_MAN1_COL_MAN_TEST_DEBUG_DATA                                      0x98f3
+#define mmUNP_GRPH_ENABLE                                                       0x4600
+#define mmUNP0_UNP_GRPH_ENABLE                                                  0x4600
+#define mmUNP1_UNP_GRPH_ENABLE                                                  0x9800
+#define mmUNP_GRPH_CONTROL                                                      0x4601
+#define mmUNP0_UNP_GRPH_CONTROL                                                 0x4601
+#define mmUNP1_UNP_GRPH_CONTROL                                                 0x9801
+#define mmUNP_GRPH_CONTROL_C                                                    0x4602
+#define mmUNP0_UNP_GRPH_CONTROL_C                                               0x4602
+#define mmUNP1_UNP_GRPH_CONTROL_C                                               0x9802
+#define mmUNP_GRPH_CONTROL_EXP                                                  0x4603
+#define mmUNP0_UNP_GRPH_CONTROL_EXP                                             0x4603
+#define mmUNP1_UNP_GRPH_CONTROL_EXP                                             0x9803
+#define mmUNP_GRPH_SWAP_CNTL                                                    0x4605
+#define mmUNP0_UNP_GRPH_SWAP_CNTL                                               0x4605
+#define mmUNP1_UNP_GRPH_SWAP_CNTL                                               0x9805
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L                                    0x4606
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L                               0x4606
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L                               0x9806
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C                                    0x4607
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C                               0x4607
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C                               0x9807
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L                               0x4608
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L                          0x4608
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L                          0x9808
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C                               0x4609
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C                          0x4609
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C                          0x9809
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L                             0x460a
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L                        0x460a
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L                        0x980a
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C                             0x460b
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C                        0x460b
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C                        0x980b
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L                        0x460c
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L                   0x460c
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L                   0x980c
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C                        0x460d
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C                   0x460d
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C                   0x980d
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_L                                  0x460e
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L                             0x460e
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L                             0x980e
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_C                                  0x460f
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C                             0x460f
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C                             0x980f
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L                             0x4610
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L                        0x4610
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L                        0x9810
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C                             0x4611
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C                        0x4611
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C                        0x9811
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L                           0x4612
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L                      0x4612
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L                      0x9812
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C                           0x4613
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C                      0x4613
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C                      0x9813
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L                      0x4614
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L                 0x4614
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L                 0x9814
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C                      0x4615
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C                 0x4615
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C                 0x9815
+#define mmUNP_GRPH_PITCH_L                                                      0x4616
+#define mmUNP0_UNP_GRPH_PITCH_L                                                 0x4616
+#define mmUNP1_UNP_GRPH_PITCH_L                                                 0x9816
+#define mmUNP_GRPH_PITCH_C                                                      0x4617
+#define mmUNP0_UNP_GRPH_PITCH_C                                                 0x4617
+#define mmUNP1_UNP_GRPH_PITCH_C                                                 0x9817
+#define mmUNP_GRPH_SURFACE_OFFSET_X_L                                           0x4618
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_X_L                                      0x4618
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_X_L                                      0x9818
+#define mmUNP_GRPH_SURFACE_OFFSET_X_C                                           0x4619
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_X_C                                      0x4619
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_X_C                                      0x9819
+#define mmUNP_GRPH_SURFACE_OFFSET_Y_L                                           0x461a
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_Y_L                                      0x461a
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_Y_L                                      0x981a
+#define mmUNP_GRPH_SURFACE_OFFSET_Y_C                                           0x461b
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_Y_C                                      0x461b
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_Y_C                                      0x981b
+#define mmUNP_GRPH_X_START_L                                                    0x461c
+#define mmUNP0_UNP_GRPH_X_START_L                                               0x461c
+#define mmUNP1_UNP_GRPH_X_START_L                                               0x981c
+#define mmUNP_GRPH_X_START_C                                                    0x461d
+#define mmUNP0_UNP_GRPH_X_START_C                                               0x461d
+#define mmUNP1_UNP_GRPH_X_START_C                                               0x981d
+#define mmUNP_GRPH_Y_START_L                                                    0x461e
+#define mmUNP0_UNP_GRPH_Y_START_L                                               0x461e
+#define mmUNP1_UNP_GRPH_Y_START_L                                               0x981e
+#define mmUNP_GRPH_Y_START_C                                                    0x461f
+#define mmUNP0_UNP_GRPH_Y_START_C                                               0x461f
+#define mmUNP1_UNP_GRPH_Y_START_C                                               0x981f
+#define mmUNP_GRPH_X_END_L                                                      0x4620
+#define mmUNP0_UNP_GRPH_X_END_L                                                 0x4620
+#define mmUNP1_UNP_GRPH_X_END_L                                                 0x9820
+#define mmUNP_GRPH_X_END_C                                                      0x4621
+#define mmUNP0_UNP_GRPH_X_END_C                                                 0x4621
+#define mmUNP1_UNP_GRPH_X_END_C                                                 0x9821
+#define mmUNP_GRPH_Y_END_L                                                      0x4622
+#define mmUNP0_UNP_GRPH_Y_END_L                                                 0x4622
+#define mmUNP1_UNP_GRPH_Y_END_L                                                 0x9822
+#define mmUNP_GRPH_Y_END_C                                                      0x4623
+#define mmUNP0_UNP_GRPH_Y_END_C                                                 0x4623
+#define mmUNP1_UNP_GRPH_Y_END_C                                                 0x9823
+#define mmUNP_GRPH_UPDATE                                                       0x4624
+#define mmUNP0_UNP_GRPH_UPDATE                                                  0x4624
+#define mmUNP1_UNP_GRPH_UPDATE                                                  0x9824
+#define mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT                                    0x463a
+#define mmUNP0_UNP_PIPE_OUTSTANDING_REQUEST_LIMIT                               0x463a
+#define mmUNP1_UNP_PIPE_OUTSTANDING_REQUEST_LIMIT                               0x983a
+#define mmUNP_GRPH_SURFACE_ADDRESS_INUSE_L                                      0x4625
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_INUSE_L                                 0x4625
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_INUSE_L                                 0x9825
+#define mmUNP_GRPH_SURFACE_ADDRESS_INUSE_C                                      0x4626
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_INUSE_C                                 0x4626
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_INUSE_C                                 0x9826
+#define mmUNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L                                 0x4627
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L                            0x4627
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L                            0x9827
+#define mmUNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C                                 0x4628
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C                            0x4628
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C                            0x9828
+#define mmUNP_DVMM_PTE_CONTROL                                                  0x4629
+#define mmUNP_GRPH_INTERRUPT_STATUS                                             0x462b
+#define mmUNP0_UNP_GRPH_INTERRUPT_STATUS                                        0x462b
+#define mmUNP1_UNP_GRPH_INTERRUPT_STATUS                                        0x982b
+#define mmUNP_GRPH_INTERRUPT_CONTROL                                            0x462c
+#define mmUNP0_UNP_GRPH_INTERRUPT_CONTROL                                       0x462c
+#define mmUNP1_UNP_GRPH_INTERRUPT_CONTROL                                       0x982c
+#define mmUNP_GRPH_STEREOSYNC_FLIP                                              0x462e
+#define mmUNP0_UNP_GRPH_STEREOSYNC_FLIP                                         0x462e
+#define mmUNP1_UNP_GRPH_STEREOSYNC_FLIP                                         0x982e
+#define mmUNP_FLIP_CONTROL                                                      0x462f
+#define mmUNP0_UNP_FLIP_CONTROL                                                 0x462f
+#define mmUNP1_UNP_FLIP_CONTROL                                                 0x982f
+#define mmUNP_CRC_CONTROL                                                       0x4630
+#define mmUNP0_UNP_CRC_CONTROL                                                  0x4630
+#define mmUNP1_UNP_CRC_CONTROL                                                  0x9830
+#define mmUNP_CRC_MASK                                                          0x4631
+#define mmUNP0_UNP_CRC_MASK                                                     0x4631
+#define mmUNP1_UNP_CRC_MASK                                                     0x9831
+#define mmUNP_CRC_CURRENT                                                       0x4632
+#define mmUNP0_UNP_CRC_CURRENT                                                  0x4632
+#define mmUNP1_UNP_CRC_CURRENT                                                  0x9832
+#define mmUNP_CRC_LAST                                                          0x4633
+#define mmUNP0_UNP_CRC_LAST                                                     0x4633
+#define mmUNP1_UNP_CRC_LAST                                                     0x9833
+#define mmUNP_LB_DATA_GAP_BETWEEN_CHUNK                                         0x4634
+#define mmUNP0_UNP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x4634
+#define mmUNP1_UNP_LB_DATA_GAP_BETWEEN_CHUNK                                    0x9834
+#define mmUNP_HW_ROTATION                                                       0x4635
+#define mmUNP0_UNP_HW_ROTATION                                                  0x4635
+#define mmUNP1_UNP_HW_ROTATION                                                  0x9835
+#define mmUNP_DEBUG                                                             0x4636
+#define mmUNP0_UNP_DEBUG                                                        0x4636
+#define mmUNP1_UNP_DEBUG                                                        0x9836
+#define mmUNP_DEBUG2                                                            0x4637
+#define mmUNP0_UNP_DEBUG2                                                       0x4637
+#define mmUNP1_UNP_DEBUG2                                                       0x9837
+#define mmUNP_DVMM_DEBUG                                                        0x463b
+#define mmUNP0_UNP_DVMM_DEBUG                                                   0x463b
+#define mmUNP1_UNP_DVMM_DEBUG                                                   0x983b
+#define mmUNP_TEST_DEBUG_INDEX                                                  0x4638
+#define mmUNP0_UNP_TEST_DEBUG_INDEX                                             0x4638
+#define mmUNP1_UNP_TEST_DEBUG_INDEX                                             0x9838
+#define mmUNP_TEST_DEBUG_DATA                                                   0x4639
+#define mmUNP0_UNP_TEST_DEBUG_DATA                                              0x4639
+#define mmUNP1_UNP_TEST_DEBUG_DATA                                              0x9839
+#define mmGENMO_WT                                                              0xf0
+#define mmGENMO_RD                                                              0xf3
+#define mmGENENB                                                                0xf0
+#define mmGENFC_WT                                                              0xee
+#define mmVGA0_GENFC_WT                                                         0xee
+#define mmVGA1_GENFC_WT                                                         0xf6
+#define mmGENFC_RD                                                              0xf2
+#define mmGENS0                                                                 0xf0
+#define mmGENS1                                                                 0xee
+#define mmVGA0_GENS1                                                            0xee
+#define mmVGA1_GENS1                                                            0xf6
+#define mmDAC_DATA                                                              0xf2
+#define mmDAC_MASK                                                              0xf1
+#define mmDAC_R_INDEX                                                           0xf1
+#define mmDAC_W_INDEX                                                           0xf2
+#define mmSEQ8_IDX                                                              0xf1
+#define mmSEQ8_DATA                                                             0xf1
+#define ixSEQ00                                                                 0x0
+#define ixSEQ01                                                                 0x1
+#define ixSEQ02                                                                 0x2
+#define ixSEQ03                                                                 0x3
+#define ixSEQ04                                                                 0x4
+#define mmCRTC8_IDX                                                             0xed
+#define mmVGA0_CRTC8_IDX                                                        0xed
+#define mmVGA1_CRTC8_IDX                                                        0xf5
+#define mmCRTC8_DATA                                                            0xed
+#define mmVGA0_CRTC8_DATA                                                       0xed
+#define mmVGA1_CRTC8_DATA                                                       0xf5
+#define ixCRT00                                                                 0x0
+#define ixCRT01                                                                 0x1
+#define ixCRT02                                                                 0x2
+#define ixCRT03                                                                 0x3
+#define ixCRT04                                                                 0x4
+#define ixCRT05                                                                 0x5
+#define ixCRT06                                                                 0x6
+#define ixCRT07                                                                 0x7
+#define ixCRT08                                                                 0x8
+#define ixCRT09                                                                 0x9
+#define ixCRT0A                                                                 0xa
+#define ixCRT0B                                                                 0xb
+#define ixCRT0C                                                                 0xc
+#define ixCRT0D                                                                 0xd
+#define ixCRT0E                                                                 0xe
+#define ixCRT0F                                                                 0xf
+#define ixCRT10                                                                 0x10
+#define ixCRT11                                                                 0x11
+#define ixCRT12                                                                 0x12
+#define ixCRT13                                                                 0x13
+#define ixCRT14                                                                 0x14
+#define ixCRT15                                                                 0x15
+#define ixCRT16                                                                 0x16
+#define ixCRT17                                                                 0x17
+#define ixCRT18                                                                 0x18
+#define ixCRT1E                                                                 0x1e
+#define ixCRT1F                                                                 0x1f
+#define ixCRT22                                                                 0x22
+#define mmGRPH8_IDX                                                             0xf3
+#define mmGRPH8_DATA                                                            0xf3
+#define ixGRA00                                                                 0x0
+#define ixGRA01                                                                 0x1
+#define ixGRA02                                                                 0x2
+#define ixGRA03                                                                 0x3
+#define ixGRA04                                                                 0x4
+#define ixGRA05                                                                 0x5
+#define ixGRA06                                                                 0x6
+#define ixGRA07                                                                 0x7
+#define ixGRA08                                                                 0x8
+#define mmATTRX                                                                 0xf0
+#define mmATTRDW                                                                0xf0
+#define mmATTRDR                                                                0xf0
+#define ixATTR00                                                                0x0
+#define ixATTR01                                                                0x1
+#define ixATTR02                                                                0x2
+#define ixATTR03                                                                0x3
+#define ixATTR04                                                                0x4
+#define ixATTR05                                                                0x5
+#define ixATTR06                                                                0x6
+#define ixATTR07                                                                0x7
+#define ixATTR08                                                                0x8
+#define ixATTR09                                                                0x9
+#define ixATTR0A                                                                0xa
+#define ixATTR0B                                                                0xb
+#define ixATTR0C                                                                0xc
+#define ixATTR0D                                                                0xd
+#define ixATTR0E                                                                0xe
+#define ixATTR0F                                                                0xf
+#define ixATTR10                                                                0x10
+#define ixATTR11                                                                0x11
+#define ixATTR12                                                                0x12
+#define ixATTR13                                                                0x13
+#define ixATTR14                                                                0x14
+#define mmVGA_RENDER_CONTROL                                                    0xc0
+#define mmVGA_SOURCE_SELECT                                                     0xfc
+#define mmVGA_SEQUENCER_RESET_CONTROL                                           0xc1
+#define mmVGA_MODE_CONTROL                                                      0xc2
+#define mmVGA_SURFACE_PITCH_SELECT                                              0xc3
+#define mmVGA_MEMORY_BASE_ADDRESS                                               0xc4
+#define mmVGA_MEMORY_BASE_ADDRESS_HIGH                                          0xc9
+#define mmVGA_DISPBUF1_SURFACE_ADDR                                             0xc6
+#define mmVGA_DISPBUF2_SURFACE_ADDR                                             0xc8
+#define mmVGA_HDP_CONTROL                                                       0xca
+#define mmVGA_CACHE_CONTROL                                                     0xcb
+#define mmD1VGA_CONTROL                                                         0xcc
+#define mmD2VGA_CONTROL                                                         0xce
+#define mmD3VGA_CONTROL                                                         0xf8
+#define mmD4VGA_CONTROL                                                         0xf9
+#define mmD5VGA_CONTROL                                                         0xfa
+#define mmD6VGA_CONTROL                                                         0xfb
+#define mmVGA_HW_DEBUG                                                          0xcf
+#define mmVGA_STATUS                                                            0xd0
+#define mmVGA_INTERRUPT_CONTROL                                                 0xd1
+#define mmVGA_STATUS_CLEAR                                                      0xd2
+#define mmVGA_INTERRUPT_STATUS                                                  0xd3
+#define mmVGA_MAIN_CONTROL                                                      0xd4
+#define mmVGA_TEST_CONTROL                                                      0xd5
+#define mmVGA_DEBUG_READBACK_INDEX                                              0xd6
+#define mmVGA_DEBUG_READBACK_DATA                                               0xd7
+#define mmVGA_MEM_WRITE_PAGE_ADDR                                               0x12
+#define mmVGA_MEM_READ_PAGE_ADDR                                                0x13
+#define mmVGA_TEST_DEBUG_INDEX                                                  0xc5
+#define mmVGA_TEST_DEBUG_DATA                                                   0xc7
+#define ixVGADCC_DBG_DCCIF_C                                                    0x7e
+#define mmBPHYC_DAC_MACRO_CNTL                                                  0x48b9
+#define mmBPHYC_DAC_AUTO_CALIB_CONTROL                                          0x48ba
+#define mmDPG_PIPE_ARBITRATION_CONTROL1                                         0x1b30
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1                                0x1b30
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL1                                0x1d30
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL1                                0x1f30
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL1                                0x4130
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL1                                0x4330
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL1                                0x4530
+#define mmDPG_PIPE_ARBITRATION_CONTROL2                                         0x1b31
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL2                                0x1b31
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL2                                0x1d31
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL2                                0x1f31
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL2                                0x4131
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL2                                0x4331
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL2                                0x4531
+#define mmDPG_WATERMARK_MASK_CONTROL                                            0x1b32
+#define mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL                                   0x1b32
+#define mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL                                   0x1d32
+#define mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL                                   0x1f32
+#define mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL                                   0x4132
+#define mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL                                   0x4332
+#define mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL                                   0x4532
+#define mmDPG_PIPE_URGENCY_CONTROL                                              0x1b33
+#define mmDMIF_PG0_DPG_PIPE_URGENCY_CONTROL                                     0x1b33
+#define mmDMIF_PG1_DPG_PIPE_URGENCY_CONTROL                                     0x1d33
+#define mmDMIF_PG2_DPG_PIPE_URGENCY_CONTROL                                     0x1f33
+#define mmDMIF_PG3_DPG_PIPE_URGENCY_CONTROL                                     0x4133
+#define mmDMIF_PG4_DPG_PIPE_URGENCY_CONTROL                                     0x4333
+#define mmDMIF_PG5_DPG_PIPE_URGENCY_CONTROL                                     0x4533
+#define mmDPG_PIPE_DPM_CONTROL                                                  0x1b34
+#define mmDMIF_PG0_DPG_PIPE_DPM_CONTROL                                         0x1b34
+#define mmDMIF_PG1_DPG_PIPE_DPM_CONTROL                                         0x1d34
+#define mmDMIF_PG2_DPG_PIPE_DPM_CONTROL                                         0x1f34
+#define mmDMIF_PG3_DPG_PIPE_DPM_CONTROL                                         0x4134
+#define mmDMIF_PG4_DPG_PIPE_DPM_CONTROL                                         0x4334
+#define mmDMIF_PG5_DPG_PIPE_DPM_CONTROL                                         0x4534
+#define mmDPG_PIPE_STUTTER_CONTROL                                              0x1b35
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL                                     0x1b35
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL                                     0x1d35
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL                                     0x1f35
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL                                     0x4135
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL                                     0x4335
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL                                     0x4535
+#define mmDPG_PIPE_NB_PSTATE_CHANGE_CONTROL                                     0x1b36
+#define mmDMIF_PG0_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL                            0x1b36
+#define mmDMIF_PG1_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL                            0x1d36
+#define mmDMIF_PG2_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL                            0x1f36
+#define mmDMIF_PG3_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL                            0x4136
+#define mmDMIF_PG4_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL                            0x4336
+#define mmDMIF_PG5_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL                            0x4536
+#define mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH                                     0x1b37
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL_NONLPTCH                            0x1b37
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL_NONLPTCH                            0x1d37
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL_NONLPTCH                            0x1f37
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL_NONLPTCH                            0x4137
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL_NONLPTCH                            0x4337
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL_NONLPTCH                            0x4537
+#define mmDPG_REPEATER_PROGRAM                                                  0x1b3a
+#define mmDMIF_PG0_DPG_REPEATER_PROGRAM                                         0x1b3a
+#define mmDMIF_PG1_DPG_REPEATER_PROGRAM                                         0x1d3a
+#define mmDMIF_PG2_DPG_REPEATER_PROGRAM                                         0x1f3a
+#define mmDMIF_PG3_DPG_REPEATER_PROGRAM                                         0x413a
+#define mmDMIF_PG4_DPG_REPEATER_PROGRAM                                         0x433a
+#define mmDMIF_PG5_DPG_REPEATER_PROGRAM                                         0x453a
+#define mmDPG_HW_DEBUG_A                                                        0x1b3b
+#define mmDMIF_PG0_DPG_HW_DEBUG_A                                               0x1b3b
+#define mmDMIF_PG1_DPG_HW_DEBUG_A                                               0x1d3b
+#define mmDMIF_PG2_DPG_HW_DEBUG_A                                               0x1f3b
+#define mmDMIF_PG3_DPG_HW_DEBUG_A                                               0x413b
+#define mmDMIF_PG4_DPG_HW_DEBUG_A                                               0x433b
+#define mmDMIF_PG5_DPG_HW_DEBUG_A                                               0x453b
+#define mmDPG_HW_DEBUG_B                                                        0x1b3c
+#define mmDMIF_PG0_DPG_HW_DEBUG_B                                               0x1b3c
+#define mmDMIF_PG1_DPG_HW_DEBUG_B                                               0x1d3c
+#define mmDMIF_PG2_DPG_HW_DEBUG_B                                               0x1f3c
+#define mmDMIF_PG3_DPG_HW_DEBUG_B                                               0x413c
+#define mmDMIF_PG4_DPG_HW_DEBUG_B                                               0x433c
+#define mmDMIF_PG5_DPG_HW_DEBUG_B                                               0x453c
+#define mmDPG_HW_DEBUG_11                                                       0x1b3d
+#define mmDMIF_PG0_DPG_HW_DEBUG_11                                              0x1b3d
+#define mmDMIF_PG1_DPG_HW_DEBUG_11                                              0x1d3d
+#define mmDMIF_PG2_DPG_HW_DEBUG_11                                              0x1f3d
+#define mmDMIF_PG3_DPG_HW_DEBUG_11                                              0x413d
+#define mmDMIF_PG4_DPG_HW_DEBUG_11                                              0x433d
+#define mmDMIF_PG5_DPG_HW_DEBUG_11                                              0x453d
+#define mmDPG_CHK_PRE_PROC_CNTL                                                 0x1b3e
+#define mmDMIF_PG0_DPG_CHK_PRE_PROC_CNTL                                        0x1b3e
+#define mmDMIF_PG1_DPG_CHK_PRE_PROC_CNTL                                        0x1d3e
+#define mmDMIF_PG2_DPG_CHK_PRE_PROC_CNTL                                        0x1f3e
+#define mmDMIF_PG3_DPG_CHK_PRE_PROC_CNTL                                        0x413e
+#define mmDMIF_PG4_DPG_CHK_PRE_PROC_CNTL                                        0x433e
+#define mmDMIF_PG5_DPG_CHK_PRE_PROC_CNTL                                        0x453e
+#define mmDPG_DVMM_STATUS                                                       0x1b3f
+#define mmDMIF_PG0_DPG_DVMM_STATUS                                              0x1b3f
+#define mmDMIF_PG1_DPG_DVMM_STATUS                                              0x1d3f
+#define mmDMIF_PG2_DPG_DVMM_STATUS                                              0x1f3f
+#define mmDMIF_PG3_DPG_DVMM_STATUS                                              0x413f
+#define mmDMIF_PG4_DPG_DVMM_STATUS                                              0x433f
+#define mmDMIF_PG5_DPG_DVMM_STATUS                                              0x453f
+#define mmDPG_TEST_DEBUG_INDEX                                                  0x1b38
+#define mmDMIF_PG0_DPG_TEST_DEBUG_INDEX                                         0x1b38
+#define mmDMIF_PG1_DPG_TEST_DEBUG_INDEX                                         0x1d38
+#define mmDMIF_PG2_DPG_TEST_DEBUG_INDEX                                         0x1f38
+#define mmDMIF_PG3_DPG_TEST_DEBUG_INDEX                                         0x4138
+#define mmDMIF_PG4_DPG_TEST_DEBUG_INDEX                                         0x4338
+#define mmDMIF_PG5_DPG_TEST_DEBUG_INDEX                                         0x4538
+#define mmDPG_TEST_DEBUG_DATA                                                   0x1b39
+#define mmDMIF_PG0_DPG_TEST_DEBUG_DATA                                          0x1b39
+#define mmDMIF_PG1_DPG_TEST_DEBUG_DATA                                          0x1d39
+#define mmDMIF_PG2_DPG_TEST_DEBUG_DATA                                          0x1f39
+#define mmDMIF_PG3_DPG_TEST_DEBUG_DATA                                          0x4139
+#define mmDMIF_PG4_DPG_TEST_DEBUG_DATA                                          0x4339
+#define mmDMIF_PG5_DPG_TEST_DEBUG_DATA                                          0x4539
+#define mmDPGV0_PIPE_ARBITRATION_CONTROL1                                       0x4730
+#define mmDMIFV_PG0_DPGV0_PIPE_ARBITRATION_CONTROL1                             0x4730
+#define mmDMIFV_PG1_DPGV0_PIPE_ARBITRATION_CONTROL1                             0x9930
+#define mmDPGV1_PIPE_ARBITRATION_CONTROL1                                       0x473d
+#define mmDMIFV_PG0_DPGV1_PIPE_ARBITRATION_CONTROL1                             0x473d
+#define mmDMIFV_PG1_DPGV1_PIPE_ARBITRATION_CONTROL1                             0x993d
+#define mmDPGV0_PIPE_ARBITRATION_CONTROL2                                       0x4731
+#define mmDMIFV_PG0_DPGV0_PIPE_ARBITRATION_CONTROL2                             0x4731
+#define mmDMIFV_PG1_DPGV0_PIPE_ARBITRATION_CONTROL2                             0x9931
+#define mmDPGV1_PIPE_ARBITRATION_CONTROL2                                       0x473e
+#define mmDMIFV_PG0_DPGV1_PIPE_ARBITRATION_CONTROL2                             0x473e
+#define mmDMIFV_PG1_DPGV1_PIPE_ARBITRATION_CONTROL2                             0x993e
+#define mmDPGV0_WATERMARK_MASK_CONTROL                                          0x4732
+#define mmDMIFV_PG0_DPGV0_WATERMARK_MASK_CONTROL                                0x4732
+#define mmDMIFV_PG1_DPGV0_WATERMARK_MASK_CONTROL                                0x9932
+#define mmDPGV1_WATERMARK_MASK_CONTROL                                          0x473f
+#define mmDMIFV_PG0_DPGV1_WATERMARK_MASK_CONTROL                                0x473f
+#define mmDMIFV_PG1_DPGV1_WATERMARK_MASK_CONTROL                                0x993f
+#define mmDPGV0_PIPE_URGENCY_CONTROL                                            0x4733
+#define mmDMIFV_PG0_DPGV0_PIPE_URGENCY_CONTROL                                  0x4733
+#define mmDMIFV_PG1_DPGV0_PIPE_URGENCY_CONTROL                                  0x9933
+#define mmDPGV1_PIPE_URGENCY_CONTROL                                            0x4740
+#define mmDMIFV_PG0_DPGV1_PIPE_URGENCY_CONTROL                                  0x4740
+#define mmDMIFV_PG1_DPGV1_PIPE_URGENCY_CONTROL                                  0x9940
+#define mmDPGV0_PIPE_DPM_CONTROL                                                0x4734
+#define mmDMIFV_PG0_DPGV0_PIPE_DPM_CONTROL                                      0x4734
+#define mmDMIFV_PG1_DPGV0_PIPE_DPM_CONTROL                                      0x9934
+#define mmDPGV1_PIPE_DPM_CONTROL                                                0x4741
+#define mmDMIFV_PG0_DPGV1_PIPE_DPM_CONTROL                                      0x4741
+#define mmDMIFV_PG1_DPGV1_PIPE_DPM_CONTROL                                      0x9941
+#define mmDPGV0_PIPE_STUTTER_CONTROL                                            0x4735
+#define mmDMIFV_PG0_DPGV0_PIPE_STUTTER_CONTROL                                  0x4735
+#define mmDMIFV_PG1_DPGV0_PIPE_STUTTER_CONTROL                                  0x9935
+#define mmDPGV1_PIPE_STUTTER_CONTROL                                            0x4742
+#define mmDMIFV_PG0_DPGV1_PIPE_STUTTER_CONTROL                                  0x4742
+#define mmDMIFV_PG1_DPGV1_PIPE_STUTTER_CONTROL                                  0x9942
+#define mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL                                   0x4736
+#define mmDMIFV_PG0_DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL                         0x4736
+#define mmDMIFV_PG1_DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL                         0x9936
+#define mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL                                   0x4743
+#define mmDMIFV_PG0_DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL                         0x4743
+#define mmDMIFV_PG1_DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL                         0x9943
+#define mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH                                   0x4737
+#define mmDMIFV_PG0_DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH                         0x4737
+#define mmDMIFV_PG1_DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH                         0x9937
+#define mmDPGV1_PIPE_STUTTER_CONTROL_NONLPTCH                                   0x4744
+#define mmDMIFV_PG0_DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH                         0x4744
+#define mmDMIFV_PG1_DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH                         0x9944
+#define mmDPGV0_REPEATER_PROGRAM                                                0x4738
+#define mmDMIFV_PG0_DPGV0_REPEATER_PROGRAM                                      0x4738
+#define mmDMIFV_PG1_DPGV0_REPEATER_PROGRAM                                      0x9938
+#define mmDPGV1_REPEATER_PROGRAM                                                0x4745
+#define mmDMIFV_PG0_DPGV1_REPEATER_PROGRAM                                      0x4745
+#define mmDMIFV_PG1_DPGV1_REPEATER_PROGRAM                                      0x9945
+#define mmDPGV0_HW_DEBUG_A                                                      0x4739
+#define mmDMIFV_PG0_DPGV0_HW_DEBUG_A                                            0x4739
+#define mmDMIFV_PG1_DPGV0_HW_DEBUG_A                                            0x9939
+#define mmDPGV1_HW_DEBUG_A                                                      0x4746
+#define mmDMIFV_PG0_DPGV1_HW_DEBUG_A                                            0x4746
+#define mmDMIFV_PG1_DPGV1_HW_DEBUG_A                                            0x9946
+#define mmDPGV0_HW_DEBUG_B                                                      0x473a
+#define mmDMIFV_PG0_DPGV0_HW_DEBUG_B                                            0x473a
+#define mmDMIFV_PG1_DPGV0_HW_DEBUG_B                                            0x993a
+#define mmDPGV1_HW_DEBUG_B                                                      0x4747
+#define mmDMIFV_PG0_DPGV1_HW_DEBUG_B                                            0x4747
+#define mmDMIFV_PG1_DPGV1_HW_DEBUG_B                                            0x9947
+#define mmDPGV0_HW_DEBUG_11                                                     0x473b
+#define mmDMIFV_PG0_DPGV0_HW_DEBUG_11                                           0x473b
+#define mmDMIFV_PG1_DPGV0_HW_DEBUG_11                                           0x993b
+#define mmDPGV1_HW_DEBUG_11                                                     0x4748
+#define mmDMIFV_PG0_DPGV1_HW_DEBUG_11                                           0x4748
+#define mmDMIFV_PG1_DPGV1_HW_DEBUG_11                                           0x9948
+#define mmDPGV0_CHK_PRE_PROC_CNTL                                               0x473c
+#define mmDMIFV_PG0_DPGV0_CHK_PRE_PROC_CNTL                                     0x473c
+#define mmDMIFV_PG1_DPGV0_CHK_PRE_PROC_CNTL                                     0x993c
+#define mmDPGV1_CHK_PRE_PROC_CNTL                                               0x4749
+#define mmDMIFV_PG0_DPGV1_CHK_PRE_PROC_CNTL                                     0x4749
+#define mmDMIFV_PG1_DPGV1_CHK_PRE_PROC_CNTL                                     0x9949
+#define mmDPGV_TEST_DEBUG_INDEX                                                 0x474e
+#define mmDMIFV_PG0_DPGV_TEST_DEBUG_INDEX                                       0x474e
+#define mmDMIFV_PG1_DPGV_TEST_DEBUG_INDEX                                       0x994e
+#define mmDPGV_TEST_DEBUG_DATA                                                  0x474f
+#define mmDMIFV_PG0_DPGV_TEST_DEBUG_DATA                                        0x474f
+#define mmDMIFV_PG1_DPGV_TEST_DEBUG_DATA                                        0x994f
+#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX                       0x18
+#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA                        0x18
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID                   0xf00
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID                            0xf02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT                 0xf04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT             0x1f04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE                         0x1f05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES               0x1f0a
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS                     0x1f0b
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES                       0x1f0f
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE                          0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET                                0x17ff
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID                0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2              0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3              0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4              0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION            0x1770
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID                   0x1828
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID                            0x1829
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL                                 0x182a
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL                                   0x182b
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE                         0x182c
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES               0x182d
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS                     0x182e
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES                       0x182f
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE                          0x1830
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET                                0x1831
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID                0x1832
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION            0x1833
+#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY                                     0x1834
+#define mmCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY                               0x1835
+#define mmAZALIA_F0_CODEC_DEBUG                                                 0x1836
+#define mmAZALIA_F0_GTC_GROUP_OFFSET0                                           0x1837
+#define mmAZALIA_F0_GTC_GROUP_OFFSET1                                           0x1838
+#define mmAZALIA_F0_GTC_GROUP_OFFSET2                                           0x1839
+#define mmAZALIA_F0_GTC_GROUP_OFFSET3                                           0x183a
+#define mmAZALIA_F0_GTC_GROUP_OFFSET4                                           0x183b
+#define mmAZALIA_F0_GTC_GROUP_OFFSET5                                           0x183c
+#define mmAZALIA_F0_GTC_GROUP_OFFSET6                                           0x183d
+#define mmGLOBAL_CAPABILITIES                                                   0x0
+#define mmMINOR_VERSION                                                         0x0
+#define mmMAJOR_VERSION                                                         0x0
+#define mmOUTPUT_PAYLOAD_CAPABILITY                                             0x1
+#define mmINPUT_PAYLOAD_CAPABILITY                                              0x1
+#define mmGLOBAL_CONTROL                                                        0x2
+#define mmWAKE_ENABLE                                                           0x3
+#define mmSTATE_CHANGE_STATUS                                                   0x3
+#define mmGLOBAL_STATUS                                                         0x4
+#define mmOUTPUT_STREAM_PAYLOAD_CAPABILITY                                      0x6
+#define mmINPUT_STREAM_PAYLOAD_CAPABILITY                                       0x6
+#define mmINTERRUPT_CONTROL                                                     0x8
+#define mmINTERRUPT_STATUS                                                      0x9
+#define mmWALL_CLOCK_COUNTER                                                    0xc
+#define mmSTREAM_SYNCHRONIZATION                                                0xe
+#define mmCORB_LOWER_BASE_ADDRESS                                               0x10
+#define mmCORB_UPPER_BASE_ADDRESS                                               0x11
+#define mmCORB_WRITE_POINTER                                                    0x12
+#define mmCORB_READ_POINTER                                                     0x12
+#define mmCORB_CONTROL                                                          0x13
+#define mmCORB_STATUS                                                           0x13
+#define mmCORB_SIZE                                                             0x13
+#define mmRIRB_LOWER_BASE_ADDRESS                                               0x14
+#define mmRIRB_UPPER_BASE_ADDRESS                                               0x15
+#define mmRIRB_WRITE_POINTER                                                    0x16
+#define mmRESPONSE_INTERRUPT_COUNT                                              0x16
+#define mmRIRB_CONTROL                                                          0x17
+#define mmRIRB_STATUS                                                           0x17
+#define mmRIRB_SIZE                                                             0x17
+#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE                                    0x18
+#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX                              0x18
+#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA                               0x18
+#define mmIMMEDIATE_RESPONSE_INPUT_INTERFACE                                    0x19
+#define mmIMMEDIATE_COMMAND_STATUS                                              0x1a
+#define mmDMA_POSITION_LOWER_BASE_ADDRESS                                       0x1c
+#define mmDMA_POSITION_UPPER_BASE_ADDRESS                                       0x1d
+#define mmWALL_CLOCK_COUNTER_ALIAS                                              0x80c
+#define mmOUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS                           0x20
+#define mmOUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER              0x21
+#define mmOUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH                         0x22
+#define mmOUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX                             0x23
+#define mmOUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE                                    0x24
+#define mmOUTPUT_STREAM_DESCRIPTOR_FORMAT                                       0x24
+#define mmOUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS               0x26
+#define mmOUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS               0x27
+#define mmOUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS        0x821
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX                   0x18
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA                    0x18
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES         0x2f09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES              0x2f0a
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS                    0x2f0b
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT                    0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID                   0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER                   0x270d
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2                 0x270e
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3                 0x273e
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL                              0x2724
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE                           0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING                       0x2771
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES               0x3f09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES                            0x3f0c
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH                  0x3f0e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY            0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL                            0x3707
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE                      0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE                        0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT            0x371c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2          0x371d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3          0x371e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4          0x371f
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION               0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION                        0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO                             0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR                          0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA                     0x3776
+#define ixAUDIO_DESCRIPTOR0                                                     0x1
+#define ixAUDIO_DESCRIPTOR1                                                     0x2
+#define ixAUDIO_DESCRIPTOR2                                                     0x3
+#define ixAUDIO_DESCRIPTOR3                                                     0x4
+#define ixAUDIO_DESCRIPTOR4                                                     0x5
+#define ixAUDIO_DESCRIPTOR5                                                     0x6
+#define ixAUDIO_DESCRIPTOR6                                                     0x7
+#define ixAUDIO_DESCRIPTOR7                                                     0x8
+#define ixAUDIO_DESCRIPTOR8                                                     0x9
+#define ixAUDIO_DESCRIPTOR9                                                     0xa
+#define ixAUDIO_DESCRIPTOR10                                                    0xb
+#define ixAUDIO_DESCRIPTOR11                                                    0xc
+#define ixAUDIO_DESCRIPTOR12                                                    0xd
+#define ixAUDIO_DESCRIPTOR13                                                    0xe
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE                     0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE                     0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE                     0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE                     0x377a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC                                   0x377b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR                                       0x377c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX                     0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA                      0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID                           0x0
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID                                0x1
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN                      0x2
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0                                   0x3
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1                                   0x4
+#define ixSINK_DESCRIPTION0                                                     0x5
+#define ixSINK_DESCRIPTION1                                                     0x6
+#define ixSINK_DESCRIPTION2                                                     0x7
+#define ixSINK_DESCRIPTION3                                                     0x8
+#define ixSINK_DESCRIPTION4                                                     0x9
+#define ixSINK_DESCRIPTION5                                                     0xa
+#define ixSINK_DESCRIPTION6                                                     0xb
+#define ixSINK_DESCRIPTION7                                                     0xc
+#define ixSINK_DESCRIPTION8                                                     0xd
+#define ixSINK_DESCRIPTION9                                                     0xe
+#define ixSINK_DESCRIPTION10                                                    0xf
+#define ixSINK_DESCRIPTION11                                                    0x10
+#define ixSINK_DESCRIPTION12                                                    0x11
+#define ixSINK_DESCRIPTION13                                                    0x12
+#define ixSINK_DESCRIPTION14                                                    0x13
+#define ixSINK_DESCRIPTION15                                                    0x14
+#define ixSINK_DESCRIPTION16                                                    0x15
+#define ixSINK_DESCRIPTION17                                                    0x16
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE                      0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE                      0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE                      0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE                      0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE                         0x3789
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0                             0x378a
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1                             0x378b
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2                             0x378c
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3                             0x378d
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4                             0x378e
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5                             0x378f
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6                             0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7                             0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8                             0x3792
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO                                  0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS                     0x3797
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL                     0x3798
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB                                      0x3799
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT                       0x379a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE                               0x379b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED                            0x379c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION           0x379d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE                          0x379e
+#define mmAZALIA_CONTROLLER_CLOCK_GATING                                        0x17e4
+#define mmAZALIA_AUDIO_DTO                                                      0x17e5
+#define mmAZALIA_AUDIO_DTO_CONTROL                                              0x17e6
+#define mmAZALIA_SCLK_CONTROL                                                   0x17e7
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE                                        0x17e8
+#define mmAZALIA_DATA_DMA_CONTROL                                               0x17e9
+#define mmAZALIA_BDL_DMA_CONTROL                                                0x17ea
+#define mmAZALIA_RIRB_AND_DP_CONTROL                                            0x17eb
+#define mmAZALIA_CORB_DMA_CONTROL                                               0x17ec
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER                          0x17f3
+#define mmAZALIA_CYCLIC_BUFFER_SYNC                                             0x17f4
+#define mmAZALIA_GLOBAL_CAPABILITIES                                            0x17f5
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY                                      0x17f6
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL                                  0x17f7
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY                                       0x17f8
+#define mmAZALIA_CONTROLLER_DEBUG                                               0x17f9
+#define mmAZALIA_MEM_PWR_CTRL                                                   0x1810
+#define mmAZALIA_MEM_PWR_STATUS                                                 0x1811
+#define mmDCI_PG_DEBUG_CONFIG                                                   0x1812
+#define mmAZALIA_INPUT_CRC0_CONTROL0                                            0x17fb
+#define mmAZALIA_INPUT_CRC0_CONTROL1                                            0x17fc
+#define mmAZALIA_INPUT_CRC0_CONTROL2                                            0x17fd
+#define mmAZALIA_INPUT_CRC0_CONTROL3                                            0x17fe
+#define mmAZALIA_INPUT_CRC0_RESULT                                              0x17ff
+#define ixAZALIA_INPUT_CRC0_CHANNEL0                                            0x0
+#define ixAZALIA_INPUT_CRC0_CHANNEL1                                            0x1
+#define ixAZALIA_INPUT_CRC0_CHANNEL2                                            0x2
+#define ixAZALIA_INPUT_CRC0_CHANNEL3                                            0x3
+#define ixAZALIA_INPUT_CRC0_CHANNEL4                                            0x4
+#define ixAZALIA_INPUT_CRC0_CHANNEL5                                            0x5
+#define ixAZALIA_INPUT_CRC0_CHANNEL6                                            0x6
+#define ixAZALIA_INPUT_CRC0_CHANNEL7                                            0x7
+#define mmAZALIA_INPUT_CRC1_CONTROL0                                            0x1800
+#define mmAZALIA_INPUT_CRC1_CONTROL1                                            0x1801
+#define mmAZALIA_INPUT_CRC1_CONTROL2                                            0x1802
+#define mmAZALIA_INPUT_CRC1_CONTROL3                                            0x1803
+#define mmAZALIA_INPUT_CRC1_RESULT                                              0x1804
+#define ixAZALIA_INPUT_CRC1_CHANNEL0                                            0x0
+#define ixAZALIA_INPUT_CRC1_CHANNEL1                                            0x1
+#define ixAZALIA_INPUT_CRC1_CHANNEL2                                            0x2
+#define ixAZALIA_INPUT_CRC1_CHANNEL3                                            0x3
+#define ixAZALIA_INPUT_CRC1_CHANNEL4                                            0x4
+#define ixAZALIA_INPUT_CRC1_CHANNEL5                                            0x5
+#define ixAZALIA_INPUT_CRC1_CHANNEL6                                            0x6
+#define ixAZALIA_INPUT_CRC1_CHANNEL7                                            0x7
+#define mmAZALIA_CRC0_CONTROL0                                                  0x1805
+#define mmAZALIA_CRC0_CONTROL1                                                  0x1806
+#define mmAZALIA_CRC0_CONTROL2                                                  0x1807
+#define mmAZALIA_CRC0_CONTROL3                                                  0x1808
+#define mmAZALIA_CRC0_RESULT                                                    0x1809
+#define ixAZALIA_CRC0_CHANNEL0                                                  0x0
+#define ixAZALIA_CRC0_CHANNEL1                                                  0x1
+#define ixAZALIA_CRC0_CHANNEL2                                                  0x2
+#define ixAZALIA_CRC0_CHANNEL3                                                  0x3
+#define ixAZALIA_CRC0_CHANNEL4                                                  0x4
+#define ixAZALIA_CRC0_CHANNEL5                                                  0x5
+#define ixAZALIA_CRC0_CHANNEL6                                                  0x6
+#define ixAZALIA_CRC0_CHANNEL7                                                  0x7
+#define mmAZALIA_CRC1_CONTROL0                                                  0x180a
+#define mmAZALIA_CRC1_CONTROL1                                                  0x180b
+#define mmAZALIA_CRC1_CONTROL2                                                  0x180c
+#define mmAZALIA_CRC1_CONTROL3                                                  0x180d
+#define mmAZALIA_CRC1_RESULT                                                    0x180e
+#define ixAZALIA_CRC1_CHANNEL0                                                  0x0
+#define ixAZALIA_CRC1_CHANNEL1                                                  0x1
+#define ixAZALIA_CRC1_CHANNEL2                                                  0x2
+#define ixAZALIA_CRC1_CHANNEL3                                                  0x3
+#define ixAZALIA_CRC1_CHANNEL4                                                  0x4
+#define ixAZALIA_CRC1_CHANNEL5                                                  0x5
+#define ixAZALIA_CRC1_CHANNEL6                                                  0x6
+#define ixAZALIA_CRC1_CHANNEL7                                                  0x7
+#define mmAZ_TEST_DEBUG_INDEX                                                   0x181f
+#define mmAZ_TEST_DEBUG_DATA                                                    0x1820
+#define mmAZALIA_STREAM_INDEX                                                   0x1780
+#define mmAZF0STREAM0_AZALIA_STREAM_INDEX                                       0x1780
+#define mmAZF0STREAM1_AZALIA_STREAM_INDEX                                       0x1782
+#define mmAZF0STREAM2_AZALIA_STREAM_INDEX                                       0x1784
+#define mmAZF0STREAM3_AZALIA_STREAM_INDEX                                       0x1786
+#define mmAZF0STREAM4_AZALIA_STREAM_INDEX                                       0x1788
+#define mmAZF0STREAM5_AZALIA_STREAM_INDEX                                       0x178a
+#define mmAZF0STREAM6_AZALIA_STREAM_INDEX                                       0x178c
+#define mmAZF0STREAM7_AZALIA_STREAM_INDEX                                       0x178e
+#define mmAZF0STREAM8_AZALIA_STREAM_INDEX                                       0x59c0
+#define mmAZF0STREAM9_AZALIA_STREAM_INDEX                                       0x59c2
+#define mmAZF0STREAM10_AZALIA_STREAM_INDEX                                      0x59c4
+#define mmAZF0STREAM11_AZALIA_STREAM_INDEX                                      0x59c6
+#define mmAZF0STREAM12_AZALIA_STREAM_INDEX                                      0x59c8
+#define mmAZF0STREAM13_AZALIA_STREAM_INDEX                                      0x59ca
+#define mmAZF0STREAM14_AZALIA_STREAM_INDEX                                      0x59cc
+#define mmAZF0STREAM15_AZALIA_STREAM_INDEX                                      0x59ce
+#define mmAZALIA_STREAM_DATA                                                    0x1781
+#define mmAZF0STREAM0_AZALIA_STREAM_DATA                                        0x1781
+#define mmAZF0STREAM1_AZALIA_STREAM_DATA                                        0x1783
+#define mmAZF0STREAM2_AZALIA_STREAM_DATA                                        0x1785
+#define mmAZF0STREAM3_AZALIA_STREAM_DATA                                        0x1787
+#define mmAZF0STREAM4_AZALIA_STREAM_DATA                                        0x1789
+#define mmAZF0STREAM5_AZALIA_STREAM_DATA                                        0x178b
+#define mmAZF0STREAM6_AZALIA_STREAM_DATA                                        0x178d
+#define mmAZF0STREAM7_AZALIA_STREAM_DATA                                        0x178f
+#define mmAZF0STREAM8_AZALIA_STREAM_DATA                                        0x59c1
+#define mmAZF0STREAM9_AZALIA_STREAM_DATA                                        0x59c3
+#define mmAZF0STREAM10_AZALIA_STREAM_DATA                                       0x59c5
+#define mmAZF0STREAM11_AZALIA_STREAM_DATA                                       0x59c7
+#define mmAZF0STREAM12_AZALIA_STREAM_DATA                                       0x59c9
+#define mmAZF0STREAM13_AZALIA_STREAM_DATA                                       0x59cb
+#define mmAZF0STREAM14_AZALIA_STREAM_DATA                                       0x59cd
+#define mmAZF0STREAM15_AZALIA_STREAM_DATA                                       0x59cf
+#define ixAZALIA_FIFO_SIZE_CONTROL                                              0x0
+#define ixAZALIA_LATENCY_COUNTER_CONTROL                                        0x1
+#define ixAZALIA_WORSTCASE_LATENCY_COUNT                                        0x2
+#define ixAZALIA_CUMULATIVE_LATENCY_COUNT                                       0x3
+#define ixAZALIA_CUMULATIVE_REQUEST_COUNT                                       0x4
+#define ixAZALIA_STREAM_DEBUG                                                   0x5
+#define mmAZALIA_F0_CODEC_ENDPOINT_INDEX                                        0x17a8
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17a8
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17ac
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17b0
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17b4
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17b8
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17bc
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17c0
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX                          0x17c4
+#define mmAZALIA_F0_CODEC_ENDPOINT_DATA                                         0x17a9
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17a9
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17ad
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17b1
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17b5
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17b9
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17bd
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17c1
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA                           0x17c5
+#define ixAZALIA_F0_CODEC_CONVERTER_PIN_DEBUG                                   0x0
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES         0x1
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT                    0x2
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID                   0x3
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER                   0x4
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS                    0x5
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES              0x6
+#define ixAZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL                              0x7
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE                           0x8
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING                       0x9
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG                    0xa
+#define ixAZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA                           0xc
+#define ixAZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN                       0xd
+#define ixAZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX                       0xe
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES               0x20
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES                            0x21
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE                      0x22
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE                        0x23
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL                            0x24
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER                           0x25
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0                         0x28
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1                         0x29
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2                         0x2a
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3                         0x2b
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4                         0x2c
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5                         0x2d
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6                         0x2e
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7                         0x2f
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8                         0x30
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9                         0x31
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10                        0x32
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11                        0x33
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12                        0x34
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13                        0x35
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE                       0x36
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2                      0x57
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE                         0x58
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC                          0x37
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR                              0x38
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0                                0x3a
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1                                0x3b
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2                                0x3c
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3                                0x3d
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4                                0x3e
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5                                0x3f
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6                                0x40
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7                                0x41
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8                                0x42
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL                          0x54
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE                0x55
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT            0x56
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0                             0x59
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1                             0x5a
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2                             0x5b
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3                             0x5c
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4                             0x5d
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5                             0x5e
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6                             0x5f
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7                             0x60
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8                             0x61
+#define ixAZALIA_F0_CODEC_PIN_ASSOCIATION_INFO                                  0x62
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS                     0x63
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL                     0x64
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_LPIB                                      0x65
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT                       0x66
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE                               0x67
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED                            0x68
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION           0x69
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE                          0x6a
+#define ixAZALIA_F0_AUDIO_ENABLE_STATUS                                         0x6b
+#define ixAZALIA_F0_AUDIO_ENABLED_INT_STATUS                                    0x6c
+#define ixAZALIA_F0_AUDIO_DISABLED_INT_STATUS                                   0x6d
+#define ixAZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS                             0x6e
+#define mmAZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX                                  0x59d4
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59d4
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59d8
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59dc
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59e0
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59e4
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59e8
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59ec
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX               0x59f0
+#define mmAZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                                   0x59d5
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59d5
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59d9
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59dd
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59e1
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59e5
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59e9
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59ed
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA                0x59f1
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PIN_DEBUG                             0x0
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES   0x1
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT              0x2
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID             0x3
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER             0x4
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS              0x5
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES        0x6
+#define ixAZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES         0x20
+#define ixAZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES                      0x21
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE                0x22
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE            0x23
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL                      0x24
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE                 0x36
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2                0x37
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR                        0x38
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION                  0x53
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL                    0x54
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE          0x55
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT      0x56
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL                0x67
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME                           0x68
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL               0x64
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB                                0x65
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT                 0x66
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX                    0x18
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA                     0x18
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES   0x6f09
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES        0x6f0a
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS              0x6f0b
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT              0x6200
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID             0x6706
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER             0x670d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES         0x7f09
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES                      0x7f0c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL                      0x7707
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE                0x7708
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE                  0x7709
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT      0x771c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2    0x771d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3    0x771e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4    0x771f
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE                0x7777
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE                0x7785
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE                0x7778
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE                0x7786
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR                                 0x777c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE                0x7779
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE                0x7787
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE                0x777a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE                0x7788
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION                  0x7771
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL                0x779b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME                           0x779c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L                    0x779d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H                    0x779e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL               0x7798
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB                                0x7799
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT                 0x779a
+#define mmBLND_CONTROL                                                          0x1b6d
+#define mmBLND0_BLND_CONTROL                                                    0x1b6d
+#define mmBLND1_BLND_CONTROL                                                    0x1d6d
+#define mmBLND2_BLND_CONTROL                                                    0x1f6d
+#define mmBLND3_BLND_CONTROL                                                    0x416d
+#define mmBLND4_BLND_CONTROL                                                    0x436d
+#define mmBLND5_BLND_CONTROL                                                    0x456d
+#define mmBLND_SM_CONTROL2                                                      0x1b6e
+#define mmBLND0_BLND_SM_CONTROL2                                                0x1b6e
+#define mmBLND1_BLND_SM_CONTROL2                                                0x1d6e
+#define mmBLND2_BLND_SM_CONTROL2                                                0x1f6e
+#define mmBLND3_BLND_SM_CONTROL2                                                0x416e
+#define mmBLND4_BLND_SM_CONTROL2                                                0x436e
+#define mmBLND5_BLND_SM_CONTROL2                                                0x456e
+#define mmBLND_CONTROL2                                                         0x1b6f
+#define mmBLND0_BLND_CONTROL2                                                   0x1b6f
+#define mmBLND1_BLND_CONTROL2                                                   0x1d6f
+#define mmBLND2_BLND_CONTROL2                                                   0x1f6f
+#define mmBLND3_BLND_CONTROL2                                                   0x416f
+#define mmBLND4_BLND_CONTROL2                                                   0x436f
+#define mmBLND5_BLND_CONTROL2                                                   0x456f
+#define mmBLND_UPDATE                                                           0x1b70
+#define mmBLND0_BLND_UPDATE                                                     0x1b70
+#define mmBLND1_BLND_UPDATE                                                     0x1d70
+#define mmBLND2_BLND_UPDATE                                                     0x1f70
+#define mmBLND3_BLND_UPDATE                                                     0x4170
+#define mmBLND4_BLND_UPDATE                                                     0x4370
+#define mmBLND5_BLND_UPDATE                                                     0x4570
+#define mmBLND_UNDERFLOW_INTERRUPT                                              0x1b71
+#define mmBLND0_BLND_UNDERFLOW_INTERRUPT                                        0x1b71
+#define mmBLND1_BLND_UNDERFLOW_INTERRUPT                                        0x1d71
+#define mmBLND2_BLND_UNDERFLOW_INTERRUPT                                        0x1f71
+#define mmBLND3_BLND_UNDERFLOW_INTERRUPT                                        0x4171
+#define mmBLND4_BLND_UNDERFLOW_INTERRUPT                                        0x4371
+#define mmBLND5_BLND_UNDERFLOW_INTERRUPT                                        0x4571
+#define mmBLND_V_UPDATE_LOCK                                                    0x1b73
+#define mmBLND0_BLND_V_UPDATE_LOCK                                              0x1b73
+#define mmBLND1_BLND_V_UPDATE_LOCK                                              0x1d73
+#define mmBLND2_BLND_V_UPDATE_LOCK                                              0x1f73
+#define mmBLND3_BLND_V_UPDATE_LOCK                                              0x4173
+#define mmBLND4_BLND_V_UPDATE_LOCK                                              0x4373
+#define mmBLND5_BLND_V_UPDATE_LOCK                                              0x4573
+#define mmBLND_REG_UPDATE_STATUS                                                0x1b77
+#define mmBLND0_BLND_REG_UPDATE_STATUS                                          0x1b77
+#define mmBLND1_BLND_REG_UPDATE_STATUS                                          0x1d77
+#define mmBLND2_BLND_REG_UPDATE_STATUS                                          0x1f77
+#define mmBLND3_BLND_REG_UPDATE_STATUS                                          0x4177
+#define mmBLND4_BLND_REG_UPDATE_STATUS                                          0x4377
+#define mmBLND5_BLND_REG_UPDATE_STATUS                                          0x4577
+#define mmBLND_DEBUG                                                            0x1b74
+#define mmBLND0_BLND_DEBUG                                                      0x1b74
+#define mmBLND1_BLND_DEBUG                                                      0x1d74
+#define mmBLND2_BLND_DEBUG                                                      0x1f74
+#define mmBLND3_BLND_DEBUG                                                      0x4174
+#define mmBLND4_BLND_DEBUG                                                      0x4374
+#define mmBLND5_BLND_DEBUG                                                      0x4574
+#define mmBLND_TEST_DEBUG_INDEX                                                 0x1b75
+#define mmBLND0_BLND_TEST_DEBUG_INDEX                                           0x1b75
+#define mmBLND1_BLND_TEST_DEBUG_INDEX                                           0x1d75
+#define mmBLND2_BLND_TEST_DEBUG_INDEX                                           0x1f75
+#define mmBLND3_BLND_TEST_DEBUG_INDEX                                           0x4175
+#define mmBLND4_BLND_TEST_DEBUG_INDEX                                           0x4375
+#define mmBLND5_BLND_TEST_DEBUG_INDEX                                           0x4575
+#define mmBLND_TEST_DEBUG_DATA                                                  0x1b76
+#define mmBLND0_BLND_TEST_DEBUG_DATA                                            0x1b76
+#define mmBLND1_BLND_TEST_DEBUG_DATA                                            0x1d76
+#define mmBLND2_BLND_TEST_DEBUG_DATA                                            0x1f76
+#define mmBLND3_BLND_TEST_DEBUG_DATA                                            0x4176
+#define mmBLND4_BLND_TEST_DEBUG_DATA                                            0x4376
+#define mmBLND5_BLND_TEST_DEBUG_DATA                                            0x4576
+#define mmWB_ENABLE                                                             0x5e18
+#define mmWB_EC_CONFIG                                                          0x5e19
+#define mmCNV_MODE                                                              0x5e1a
+#define mmCNV_WINDOW_START                                                      0x5e1b
+#define mmCNV_WINDOW_SIZE                                                       0x5e1c
+#define mmCNV_UPDATE                                                            0x5e1d
+#define mmCNV_SOURCE_SIZE                                                       0x5e1e
+#define mmCNV_CSC_CONTROL                                                       0x5e1f
+#define mmCNV_CSC_C11_C12                                                       0x5e20
+#define mmCNV_CSC_C13_C14                                                       0x5e21
+#define mmCNV_CSC_C21_C22                                                       0x5e22
+#define mmCNV_CSC_C23_C24                                                       0x5e23
+#define mmCNV_CSC_C31_C32                                                       0x5e24
+#define mmCNV_CSC_C33_C34                                                       0x5e25
+#define mmCNV_CSC_ROUND_OFFSET_R                                                0x5e26
+#define mmCNV_CSC_ROUND_OFFSET_G                                                0x5e27
+#define mmCNV_CSC_ROUND_OFFSET_B                                                0x5e28
+#define mmCNV_CSC_CLAMP_R                                                       0x5e29
+#define mmCNV_CSC_CLAMP_G                                                       0x5e2a
+#define mmCNV_CSC_CLAMP_B                                                       0x5e2b
+#define mmCNV_TEST_CNTL                                                         0x5e2c
+#define mmCNV_TEST_CRC_RED                                                      0x5e2d
+#define mmCNV_TEST_CRC_GREEN                                                    0x5e2e
+#define mmCNV_TEST_CRC_BLUE                                                     0x5e2f
+#define mmWB_DEBUG_CTRL                                                         0x5e30
+#define mmWB_DBG_MODE                                                           0x5e31
+#define mmWB_HW_DEBUG                                                           0x5e32
+#define mmCNV_INPUT_SELECT                                                      0x5e33
+#define mmWB_SOFT_RESET                                                         0x5e36
+#define mmWB_WARM_UP_MODE_CTL1                                                  0x5e37
+#define mmWB_WARM_UP_MODE_CTL2                                                  0x5e38
+#define mmCNV_TEST_DEBUG_INDEX                                                  0x5e34
+#define mmCNV_TEST_DEBUG_DATA                                                   0x5e35
+#define mmDCFE_CLOCK_CONTROL                                                    0x1b00
+#define mmDCFE0_DCFE_CLOCK_CONTROL                                              0x1b00
+#define mmDCFE1_DCFE_CLOCK_CONTROL                                              0x1d00
+#define mmDCFE2_DCFE_CLOCK_CONTROL                                              0x1f00
+#define mmDCFE3_DCFE_CLOCK_CONTROL                                              0x4100
+#define mmDCFE4_DCFE_CLOCK_CONTROL                                              0x4300
+#define mmDCFE5_DCFE_CLOCK_CONTROL                                              0x4500
+#define mmDCFE_SOFT_RESET                                                       0x1b01
+#define mmDCFE0_DCFE_SOFT_RESET                                                 0x1b01
+#define mmDCFE1_DCFE_SOFT_RESET                                                 0x1d01
+#define mmDCFE2_DCFE_SOFT_RESET                                                 0x1f01
+#define mmDCFE3_DCFE_SOFT_RESET                                                 0x4101
+#define mmDCFE4_DCFE_SOFT_RESET                                                 0x4301
+#define mmDCFE5_DCFE_SOFT_RESET                                                 0x4501
+#define mmDCFE_DBG_CONFIG                                                       0x1b02
+#define mmDCFE0_DCFE_DBG_CONFIG                                                 0x1b02
+#define mmDCFE1_DCFE_DBG_CONFIG                                                 0x1d02
+#define mmDCFE2_DCFE_DBG_CONFIG                                                 0x1f02
+#define mmDCFE3_DCFE_DBG_CONFIG                                                 0x4102
+#define mmDCFE4_DCFE_DBG_CONFIG                                                 0x4302
+#define mmDCFE5_DCFE_DBG_CONFIG                                                 0x4502
+#define mmDCFE_MEM_PWR_CTRL                                                     0x1b03
+#define mmDCFE0_DCFE_MEM_PWR_CTRL                                               0x1b03
+#define mmDCFE1_DCFE_MEM_PWR_CTRL                                               0x1d03
+#define mmDCFE2_DCFE_MEM_PWR_CTRL                                               0x1f03
+#define mmDCFE3_DCFE_MEM_PWR_CTRL                                               0x4103
+#define mmDCFE4_DCFE_MEM_PWR_CTRL                                               0x4303
+#define mmDCFE5_DCFE_MEM_PWR_CTRL                                               0x4503
+#define mmDCFE_MEM_PWR_CTRL2                                                    0x1b04
+#define mmDCFE0_DCFE_MEM_PWR_CTRL2                                              0x1b04
+#define mmDCFE1_DCFE_MEM_PWR_CTRL2                                              0x1d04
+#define mmDCFE2_DCFE_MEM_PWR_CTRL2                                              0x1f04
+#define mmDCFE3_DCFE_MEM_PWR_CTRL2                                              0x4104
+#define mmDCFE4_DCFE_MEM_PWR_CTRL2                                              0x4304
+#define mmDCFE5_DCFE_MEM_PWR_CTRL2                                              0x4504
+#define mmDCFE_MEM_PWR_STATUS                                                   0x1b05
+#define mmDCFE0_DCFE_MEM_PWR_STATUS                                             0x1b05
+#define mmDCFE1_DCFE_MEM_PWR_STATUS                                             0x1d05
+#define mmDCFE2_DCFE_MEM_PWR_STATUS                                             0x1f05
+#define mmDCFE3_DCFE_MEM_PWR_STATUS                                             0x4105
+#define mmDCFE4_DCFE_MEM_PWR_STATUS                                             0x4305
+#define mmDCFE5_DCFE_MEM_PWR_STATUS                                             0x4505
+#define mmDCFE_MISC                                                             0x1b06
+#define mmDCFE0_DCFE_MISC                                                       0x1b06
+#define mmDCFE1_DCFE_MISC                                                       0x1d06
+#define mmDCFE2_DCFE_MISC                                                       0x1f06
+#define mmDCFE3_DCFE_MISC                                                       0x4106
+#define mmDCFE4_DCFE_MISC                                                       0x4306
+#define mmDCFE5_DCFE_MISC                                                       0x4506
+#define mmDCFE_FLUSH                                                            0x1b07
+#define mmDCFE0_DCFE_FLUSH                                                      0x1b07
+#define mmDCFE1_DCFE_FLUSH                                                      0x1d07
+#define mmDCFE2_DCFE_FLUSH                                                      0x1f07
+#define mmDCFE3_DCFE_FLUSH                                                      0x4107
+#define mmDCFE4_DCFE_FLUSH                                                      0x4307
+#define mmDCFE5_DCFE_FLUSH                                                      0x4507
+#define mmDCFEV_CLOCK_CONTROL                                                   0x46f4
+#define mmDCFEV0_DCFEV_CLOCK_CONTROL                                            0x46f4
+#define mmDCFEV1_DCFEV_CLOCK_CONTROL                                            0x98f4
+#define mmDCFEV_SOFT_RESET                                                      0x46f5
+#define mmDCFEV0_DCFEV_SOFT_RESET                                               0x46f5
+#define mmDCFEV1_DCFEV_SOFT_RESET                                               0x98f5
+#define mmDCFEV_DMIFV_CLOCK_CONTROL                                             0x46f6
+#define mmDCFEV0_DCFEV_DMIFV_CLOCK_CONTROL                                      0x46f6
+#define mmDCFEV1_DCFEV_DMIFV_CLOCK_CONTROL                                      0x98f6
+#define mmDCFEV_DBG_CONFIG                                                      0x46f7
+#define mmDCFEV0_DCFEV_DBG_CONFIG                                               0x46f7
+#define mmDCFEV1_DCFEV_DBG_CONFIG                                               0x98f7
+#define mmDCFEV_DMIFV_MEM_PWR_CTRL                                              0x46f8
+#define mmDCFEV0_DCFEV_DMIFV_MEM_PWR_CTRL                                       0x46f8
+#define mmDCFEV1_DCFEV_DMIFV_MEM_PWR_CTRL                                       0x98f8
+#define mmDCFEV_DMIFV_MEM_PWR_STATUS                                            0x46f9
+#define mmDCFEV0_DCFEV_DMIFV_MEM_PWR_STATUS                                     0x46f9
+#define mmDCFEV1_DCFEV_DMIFV_MEM_PWR_STATUS                                     0x98f9
+#define mmDCFEV_MEM_PWR_CTRL                                                    0x46fa
+#define mmDCFEV0_DCFEV_MEM_PWR_CTRL                                             0x46fa
+#define mmDCFEV1_DCFEV_MEM_PWR_CTRL                                             0x98fa
+#define mmDCFEV_MEM_PWR_CTRL2                                                   0x46fb
+#define mmDCFEV0_DCFEV_MEM_PWR_CTRL2                                            0x46fb
+#define mmDCFEV1_DCFEV_MEM_PWR_CTRL2                                            0x98fb
+#define mmDCFEV_MEM_PWR_STATUS                                                  0x46fc
+#define mmDCFEV0_DCFEV_MEM_PWR_STATUS                                           0x46fc
+#define mmDCFEV1_DCFEV_MEM_PWR_STATUS                                           0x98fc
+#define mmDCFEV_L_FLUSH                                                         0x46ff
+#define mmDCFEV0_DCFEV_L_FLUSH                                                  0x46ff
+#define mmDCFEV1_DCFEV_L_FLUSH                                                  0x98ff
+#define mmDCFEV_C_FLUSH                                                         0x4700
+#define mmDCFEV0_DCFEV_C_FLUSH                                                  0x4700
+#define mmDCFEV1_DCFEV_C_FLUSH                                                  0x9900
+#define mmDCFEV_DMIFV_DEBUG                                                     0x46fd
+#define mmDCFEV0_DCFEV_DMIFV_DEBUG                                              0x46fd
+#define mmDCFEV1_DCFEV_DMIFV_DEBUG                                              0x98fd
+#define mmDCFEV_MISC                                                            0x46fe
+#define mmDCFEV0_DCFEV_MISC                                                     0x46fe
+#define mmDCFEV1_DCFEV_MISC                                                     0x98fe
+#define mmDC_HPD_INT_STATUS                                                     0x1898
+#define mmHPD0_DC_HPD_INT_STATUS                                                0x1898
+#define mmHPD1_DC_HPD_INT_STATUS                                                0x18a0
+#define mmHPD2_DC_HPD_INT_STATUS                                                0x18a8
+#define mmHPD3_DC_HPD_INT_STATUS                                                0x18b0
+#define mmHPD4_DC_HPD_INT_STATUS                                                0x18b8
+#define mmHPD5_DC_HPD_INT_STATUS                                                0x18c0
+#define mmDC_HPD_INT_CONTROL                                                    0x1899
+#define mmHPD0_DC_HPD_INT_CONTROL                                               0x1899
+#define mmHPD1_DC_HPD_INT_CONTROL                                               0x18a1
+#define mmHPD2_DC_HPD_INT_CONTROL                                               0x18a9
+#define mmHPD3_DC_HPD_INT_CONTROL                                               0x18b1
+#define mmHPD4_DC_HPD_INT_CONTROL                                               0x18b9
+#define mmHPD5_DC_HPD_INT_CONTROL                                               0x18c1
+#define mmDC_HPD_CONTROL                                                        0x189a
+#define mmHPD0_DC_HPD_CONTROL                                                   0x189a
+#define mmHPD1_DC_HPD_CONTROL                                                   0x18a2
+#define mmHPD2_DC_HPD_CONTROL                                                   0x18aa
+#define mmHPD3_DC_HPD_CONTROL                                                   0x18b2
+#define mmHPD4_DC_HPD_CONTROL                                                   0x18ba
+#define mmHPD5_DC_HPD_CONTROL                                                   0x18c2
+#define mmDC_HPD_FAST_TRAIN_CNTL                                                0x189b
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL                                           0x189b
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL                                           0x18a3
+#define mmHPD2_DC_HPD_FAST_TRAIN_CNTL                                           0x18ab
+#define mmHPD3_DC_HPD_FAST_TRAIN_CNTL                                           0x18b3
+#define mmHPD4_DC_HPD_FAST_TRAIN_CNTL                                           0x18bb
+#define mmHPD5_DC_HPD_FAST_TRAIN_CNTL                                           0x18c3
+#define mmDC_HPD_TOGGLE_FILT_CNTL                                               0x189c
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL                                          0x189c
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL                                          0x18a4
+#define mmHPD2_DC_HPD_TOGGLE_FILT_CNTL                                          0x18ac
+#define mmHPD3_DC_HPD_TOGGLE_FILT_CNTL                                          0x18b4
+#define mmHPD4_DC_HPD_TOGGLE_FILT_CNTL                                          0x18bc
+#define mmHPD5_DC_HPD_TOGGLE_FILT_CNTL                                          0x18c4
+#define mmDCO_SCRATCH0                                                          0x184e
+#define mmDCO_SCRATCH1                                                          0x184f
+#define mmDCO_SCRATCH2                                                          0x1850
+#define mmDCO_SCRATCH3                                                          0x1851
+#define mmDCO_SCRATCH4                                                          0x1852
+#define mmDCO_SCRATCH5                                                          0x1853
+#define mmDCO_SCRATCH6                                                          0x1854
+#define mmDCO_SCRATCH7                                                          0x1855
+#define mmDCE_VCE_CONTROL                                                       0x1856
+#define mmDISP_INTERRUPT_STATUS                                                 0x1857
+#define mmDISP_INTERRUPT_STATUS_CONTINUE                                        0x1858
+#define mmDISP_INTERRUPT_STATUS_CONTINUE2                                       0x1859
+#define mmDISP_INTERRUPT_STATUS_CONTINUE3                                       0x185a
+#define mmDISP_INTERRUPT_STATUS_CONTINUE4                                       0x185b
+#define mmDISP_INTERRUPT_STATUS_CONTINUE5                                       0x185c
+#define mmDISP_INTERRUPT_STATUS_CONTINUE6                                       0x185d
+#define mmDISP_INTERRUPT_STATUS_CONTINUE7                                       0x185e
+#define mmDISP_INTERRUPT_STATUS_CONTINUE8                                       0x185f
+#define mmDISP_INTERRUPT_STATUS_CONTINUE9                                       0x1860
+#define mmDISP_INTERRUPT_STATUS_CONTINUE10                                      0x1875
+#define mmDCO_MEM_PWR_STATUS                                                    0x1861
+#define mmDCO_MEM_PWR_STATUS1                                                   0x1874
+#define mmDCO_MEM_PWR_CTRL                                                      0x1862
+#define mmDCO_MEM_PWR_CTRL2                                                     0x1863
+#define mmFMT_MEMORY0_CONTROL                                                   0x1888
+#define mmFMT_MEMORY1_CONTROL                                                   0x1889
+#define mmFMT_MEMORY2_CONTROL                                                   0x188a
+#define mmFMT_MEMORY3_CONTROL                                                   0x188b
+#define mmFMT_MEMORY4_CONTROL                                                   0x188c
+#define mmFMT_MEMORY5_CONTROL                                                   0x188d
+#define mmDCO_CLK_CNTL                                                          0x1864
+#define mmDCO_CLK_CNTL2                                                         0x1876
+#define mmDCO_CLK_CNTL3                                                         0x1877
+#define mmDPDBG_CNTL                                                            0x1866
+#define mmDPDBG_INTERRUPT                                                       0x1867
+#define mmDCO_POWER_MANAGEMENT_CNTL                                             0x1868
+#define mmDCO_SOFT_RESET                                                        0x1871
+#define mmDIG_SOFT_RESET                                                        0x1872
+#define mmDIG_SOFT_RESET_2                                                      0x186a
+#define mmDCO_STEREOSYNC_SEL                                                    0x186e
+#define mmDCO_HDMI_RXSTATUS_TIMER_CONTROL                                       0x1883
+#define mmDCO_PSP_INTERRUPT_STATUS                                              0x1884
+#define mmDCO_PSP_INTERRUPT_CLEAR                                               0x1885
+#define mmDCO_GENERIC_INTERRUPT_MESSAGE                                         0x1886
+#define mmDCO_GENERIC_INTERRUPT_CLEAR                                           0x1887
+#define mmDCO_TEST_DEBUG_INDEX                                                  0x186f
+#define mmDCO_TEST_DEBUG_DATA                                                   0x1870
+#define mmDC_I2C_CONTROL                                                        0x16d4
+#define mmDC_I2C_ARBITRATION                                                    0x16d5
+#define mmDC_I2C_INTERRUPT_CONTROL                                              0x16d6
+#define mmDC_I2C_SW_STATUS                                                      0x16d7
+#define mmDC_I2C_DDC1_HW_STATUS                                                 0x16d8
+#define mmDC_I2C_DDC2_HW_STATUS                                                 0x16d9
+#define mmDC_I2C_DDC3_HW_STATUS                                                 0x16da
+#define mmDC_I2C_DDC4_HW_STATUS                                                 0x16db
+#define mmDC_I2C_DDC5_HW_STATUS                                                 0x16dc
+#define mmDC_I2C_DDC6_HW_STATUS                                                 0x16dd
+#define mmDC_I2C_DDC1_SPEED                                                     0x16de
+#define mmDC_I2C_DDC1_SETUP                                                     0x16df
+#define mmDC_I2C_DDC2_SPEED                                                     0x16e0
+#define mmDC_I2C_DDC2_SETUP                                                     0x16e1
+#define mmDC_I2C_DDC3_SPEED                                                     0x16e2
+#define mmDC_I2C_DDC3_SETUP                                                     0x16e3
+#define mmDC_I2C_DDC4_SPEED                                                     0x16e4
+#define mmDC_I2C_DDC4_SETUP                                                     0x16e5
+#define mmDC_I2C_DDC5_SPEED                                                     0x16e6
+#define mmDC_I2C_DDC5_SETUP                                                     0x16e7
+#define mmDC_I2C_DDC6_SPEED                                                     0x16e8
+#define mmDC_I2C_DDC6_SETUP                                                     0x16e9
+#define mmDC_I2C_TRANSACTION0                                                   0x16ea
+#define mmDC_I2C_TRANSACTION1                                                   0x16eb
+#define mmDC_I2C_TRANSACTION2                                                   0x16ec
+#define mmDC_I2C_TRANSACTION3                                                   0x16ed
+#define mmDC_I2C_DATA                                                           0x16ee
+#define mmDC_I2C_DDCVGA_HW_STATUS                                               0x16ef
+#define mmDC_I2C_DDCVGA_SPEED                                                   0x16f0
+#define mmDC_I2C_DDCVGA_SETUP                                                   0x16f1
+#define mmDC_I2C_EDID_DETECT_CTRL                                               0x16f2
+#define mmDC_I2C_READ_REQUEST_INTERRUPT                                         0x16f3
+#define mmGENERIC_I2C_CONTROL                                                   0x16f4
+#define mmGENERIC_I2C_INTERRUPT_CONTROL                                         0x16f5
+#define mmGENERIC_I2C_STATUS                                                    0x16f6
+#define mmGENERIC_I2C_SPEED                                                     0x16f7
+#define mmGENERIC_I2C_SETUP                                                     0x16f8
+#define mmGENERIC_I2C_TRANSACTION                                               0x16f9
+#define mmGENERIC_I2C_DATA                                                      0x16fa
+#define mmGENERIC_I2C_PIN_SELECTION                                             0x16fb
+#define mmGENERIC_I2C_PIN_DEBUG                                                 0x16fc
+#define mmBLNDV_CONTROL                                                         0x476d
+#define mmBLNDV0_BLNDV_CONTROL                                                  0x476d
+#define mmBLNDV1_BLNDV_CONTROL                                                  0x996d
+#define mmBLNDV_SM_CONTROL2                                                     0x476e
+#define mmBLNDV0_BLNDV_SM_CONTROL2                                              0x476e
+#define mmBLNDV1_BLNDV_SM_CONTROL2                                              0x996e
+#define mmBLNDV_CONTROL2                                                        0x476f
+#define mmBLNDV0_BLNDV_CONTROL2                                                 0x476f
+#define mmBLNDV1_BLNDV_CONTROL2                                                 0x996f
+#define mmBLNDV_UPDATE                                                          0x4770
+#define mmBLNDV0_BLNDV_UPDATE                                                   0x4770
+#define mmBLNDV1_BLNDV_UPDATE                                                   0x9970
+#define mmBLNDV_UNDERFLOW_INTERRUPT                                             0x4771
+#define mmBLNDV0_BLNDV_UNDERFLOW_INTERRUPT                                      0x4771
+#define mmBLNDV1_BLNDV_UNDERFLOW_INTERRUPT                                      0x9971
+#define mmBLNDV_V_UPDATE_LOCK                                                   0x4773
+#define mmBLNDV0_BLNDV_V_UPDATE_LOCK                                            0x4773
+#define mmBLNDV1_BLNDV_V_UPDATE_LOCK                                            0x9973
+#define mmBLNDV_REG_UPDATE_STATUS                                               0x4777
+#define mmBLNDV0_BLNDV_REG_UPDATE_STATUS                                        0x4777
+#define mmBLNDV1_BLNDV_REG_UPDATE_STATUS                                        0x9977
+#define mmBLNDV_DEBUG                                                           0x4774
+#define mmBLNDV0_BLNDV_DEBUG                                                    0x4774
+#define mmBLNDV1_BLNDV_DEBUG                                                    0x9974
+#define mmBLNDV_TEST_DEBUG_INDEX                                                0x4775
+#define mmBLNDV0_BLNDV_TEST_DEBUG_INDEX                                         0x4775
+#define mmBLNDV1_BLNDV_TEST_DEBUG_INDEX                                         0x9975
+#define mmBLNDV_TEST_DEBUG_DATA                                                 0x4776
+#define mmBLNDV0_BLNDV_TEST_DEBUG_DATA                                          0x4776
+#define mmBLNDV1_BLNDV_TEST_DEBUG_DATA                                          0x9976
+#define mmCRTCV_H_TOTAL                                                         0x4780
+#define mmCRTCV0_CRTCV_H_TOTAL                                                  0x4780
+#define mmCRTCV1_CRTCV_H_TOTAL                                                  0x9980
+#define mmCRTCV_H_BLANK_START_END                                               0x4781
+#define mmCRTCV0_CRTCV_H_BLANK_START_END                                        0x4781
+#define mmCRTCV1_CRTCV_H_BLANK_START_END                                        0x9981
+#define mmCRTCV_H_SYNC_A                                                        0x4782
+#define mmCRTCV0_CRTCV_H_SYNC_A                                                 0x4782
+#define mmCRTCV1_CRTCV_H_SYNC_A                                                 0x9982
+#define mmCRTCV_V_TOTAL                                                         0x4787
+#define mmCRTCV0_CRTCV_V_TOTAL                                                  0x4787
+#define mmCRTCV1_CRTCV_V_TOTAL                                                  0x9987
+#define mmCRTCV_V_BLANK_START_END                                               0x478d
+#define mmCRTCV0_CRTCV_V_BLANK_START_END                                        0x478d
+#define mmCRTCV1_CRTCV_V_BLANK_START_END                                        0x998d
+#define mmCRTCV_V_SYNC_A                                                        0x478e
+#define mmCRTCV0_CRTCV_V_SYNC_A                                                 0x478e
+#define mmCRTCV1_CRTCV_V_SYNC_A                                                 0x998e
+#define mmCRTCV_CONTROL                                                         0x479c
+#define mmCRTCV0_CRTCV_CONTROL                                                  0x479c
+#define mmCRTCV1_CRTCV_CONTROL                                                  0x999c
+#define mmCRTCV_START_LINE_CONTROL                                              0x47b3
+#define mmCRTCV0_CRTCV_START_LINE_CONTROL                                       0x47b3
+#define mmCRTCV1_CRTCV_START_LINE_CONTROL                                       0x99b3
+#define mmCRTCV_OVERSCAN_COLOR                                                  0x47c8
+#define mmCRTCV0_CRTCV_OVERSCAN_COLOR                                           0x47c8
+#define mmCRTCV1_CRTCV_OVERSCAN_COLOR                                           0x99c8
+#define mmCRTCV_OVERSCAN_COLOR_EXT                                              0x47c9
+#define mmCRTCV0_CRTCV_OVERSCAN_COLOR_EXT                                       0x47c9
+#define mmCRTCV1_CRTCV_OVERSCAN_COLOR_EXT                                       0x99c9
+#define mmCRTCV_BLACK_COLOR                                                     0x47cc
+#define mmCRTCV0_CRTCV_BLACK_COLOR                                              0x47cc
+#define mmCRTCV1_CRTCV_BLACK_COLOR                                              0x99cc
+#define mmCRTCV_BLACK_COLOR_EXT                                                 0x47cd
+#define mmCRTCV0_CRTCV_BLACK_COLOR_EXT                                          0x47cd
+#define mmCRTCV1_CRTCV_BLACK_COLOR_EXT                                          0x99cd
+#define mmCRTCV_CRC_CNTL                                                        0x47d4
+#define mmCRTCV0_CRTCV_CRC_CNTL                                                 0x47d4
+#define mmCRTCV1_CRTCV_CRC_CNTL                                                 0x99d4
+#define mmCRTCV_CRC0_WINDOWA_X_CONTROL                                          0x47d5
+#define mmCRTCV0_CRTCV_CRC0_WINDOWA_X_CONTROL                                   0x47d5
+#define mmCRTCV1_CRTCV_CRC0_WINDOWA_X_CONTROL                                   0x99d5
+#define mmCRTCV_CRC0_WINDOWA_Y_CONTROL                                          0x47d6
+#define mmCRTCV0_CRTCV_CRC0_WINDOWA_Y_CONTROL                                   0x47d6
+#define mmCRTCV1_CRTCV_CRC0_WINDOWA_Y_CONTROL                                   0x99d6
+#define mmCRTCV_CRC0_WINDOWB_X_CONTROL                                          0x47d7
+#define mmCRTCV0_CRTCV_CRC0_WINDOWB_X_CONTROL                                   0x47d7
+#define mmCRTCV1_CRTCV_CRC0_WINDOWB_X_CONTROL                                   0x99d7
+#define mmCRTCV_CRC0_WINDOWB_Y_CONTROL                                          0x47d8
+#define mmCRTCV0_CRTCV_CRC0_WINDOWB_Y_CONTROL                                   0x47d8
+#define mmCRTCV1_CRTCV_CRC0_WINDOWB_Y_CONTROL                                   0x99d8
+#define mmCRTCV_CRC0_DATA_RG                                                    0x47d9
+#define mmCRTCV0_CRTCV_CRC0_DATA_RG                                             0x47d9
+#define mmCRTCV1_CRTCV_CRC0_DATA_RG                                             0x99d9
+#define mmCRTCV_CRC0_DATA_B                                                     0x47da
+#define mmCRTCV0_CRTCV_CRC0_DATA_B                                              0x47da
+#define mmCRTCV1_CRTCV_CRC0_DATA_B                                              0x99da
+#define mmCRTCV_CRC1_WINDOWA_X_CONTROL                                          0x47db
+#define mmCRTCV0_CRTCV_CRC1_WINDOWA_X_CONTROL                                   0x47db
+#define mmCRTCV1_CRTCV_CRC1_WINDOWA_X_CONTROL                                   0x99db
+#define mmCRTCV_CRC1_WINDOWA_Y_CONTROL                                          0x47dc
+#define mmCRTCV0_CRTCV_CRC1_WINDOWA_Y_CONTROL                                   0x47dc
+#define mmCRTCV1_CRTCV_CRC1_WINDOWA_Y_CONTROL                                   0x99dc
+#define mmCRTCV_CRC1_WINDOWB_X_CONTROL                                          0x47dd
+#define mmCRTCV0_CRTCV_CRC1_WINDOWB_X_CONTROL                                   0x47dd
+#define mmCRTCV1_CRTCV_CRC1_WINDOWB_X_CONTROL                                   0x99dd
+#define mmCRTCV_CRC1_WINDOWB_Y_CONTROL                                          0x47de
+#define mmCRTCV0_CRTCV_CRC1_WINDOWB_Y_CONTROL                                   0x47de
+#define mmCRTCV1_CRTCV_CRC1_WINDOWB_Y_CONTROL                                   0x99de
+#define mmCRTCV_CRC1_DATA_RG                                                    0x47df
+#define mmCRTCV0_CRTCV_CRC1_DATA_RG                                             0x47df
+#define mmCRTCV1_CRTCV_CRC1_DATA_RG                                             0x99df
+#define mmCRTCV_CRC1_DATA_B                                                     0x47e0
+#define mmCRTCV0_CRTCV_CRC1_DATA_B                                              0x47e0
+#define mmCRTCV1_CRTCV_CRC1_DATA_B                                              0x99e0
+#define mmCRTCV_TEST_DEBUG_INDEX                                                0x47c6
+#define mmCRTCV0_CRTCV_TEST_DEBUG_INDEX                                         0x47c6
+#define mmCRTCV1_CRTCV_TEST_DEBUG_INDEX                                         0x99c6
+#define mmCRTCV_TEST_DEBUG_DATA                                                 0x47c7
+#define mmCRTCV0_CRTCV_TEST_DEBUG_DATA                                          0x47c7
+#define mmCRTCV1_CRTCV_TEST_DEBUG_DATA                                          0x99c7
+#define mmXDMA_MC_PCIE_CLIENT_CONFIG                                            0x3e0
+#define mmXDMA_LOCAL_SURFACE_TILING1                                            0x3e1
+#define mmXDMA_LOCAL_SURFACE_TILING2                                            0x3e2
+#define mmXDMA_INTERRUPT                                                        0x3e3
+#define mmXDMA_CLOCK_GATING_CNTL                                                0x3e4
+#define mmXDMA_MEM_POWER_CNTL                                                   0x3e6
+#define mmXDMA_IF_BIF_STATUS                                                    0x3e7
+#define mmXDMA_PERF_MEAS_STATUS                                                 0x3e8
+#define mmXDMA_IF_STATUS                                                        0x3e9
+#define mmXDMA_TEST_DEBUG_INDEX                                                 0x3ea
+#define mmXDMA_TEST_DEBUG_DATA                                                  0x3eb
+#define mmXDMA_RBBMIF_RDWR_CNTL                                                 0x3f8
+#define mmXDMA_PG_CONTROL                                                       0x3f9
+#define mmXDMA_PG_WDATA                                                         0x3fa
+#define mmXDMA_PG_STATUS                                                        0x3fb
+#define mmXDMA_AON_TEST_DEBUG_INDEX                                             0x3fc
+#define mmXDMA_AON_TEST_DEBUG_DATA                                              0x3fd
+#define mmXDMA_MSTR_CNTL                                                        0x3ec
+#define mmXDMA_MSTR_STATUS                                                      0x3ed
+#define mmXDMA_MSTR_MEM_CLIENT_CONFIG                                           0x3ee
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR                                     0x3ef
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH                                0x3f0
+#define mmXDMA_MSTR_LOCAL_SURFACE_PITCH                                         0x3f1
+#define mmXDMA_MSTR_CMD_URGENT_CNTL                                             0x3f2
+#define mmXDMA_MSTR_MEM_URGENT_CNTL                                             0x3f3
+#define mmXDMA_MSTR_PCIE_NACK_STATUS                                            0x3f5
+#define mmXDMA_MSTR_MEM_NACK_STATUS                                             0x3f6
+#define mmXDMA_MSTR_VSYNC_GSL_CHECK                                             0x3f7
+#define mmXDMA_MSTR_PIPE_CNTL                                                   0x400
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_PIPE_CNTL                                   0x400
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_PIPE_CNTL                                   0x410
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_PIPE_CNTL                                   0x420
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_PIPE_CNTL                                   0x430
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_PIPE_CNTL                                   0x440
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_PIPE_CNTL                                   0x450
+#define mmXDMA_MSTR_READ_COMMAND                                                0x401
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_READ_COMMAND                                0x401
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_READ_COMMAND                                0x411
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_READ_COMMAND                                0x421
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_READ_COMMAND                                0x431
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_READ_COMMAND                                0x441
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_READ_COMMAND                                0x451
+#define mmXDMA_MSTR_CHANNEL_DIM                                                 0x402
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CHANNEL_DIM                                 0x402
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CHANNEL_DIM                                 0x412
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CHANNEL_DIM                                 0x422
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CHANNEL_DIM                                 0x432
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CHANNEL_DIM                                 0x442
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CHANNEL_DIM                                 0x452
+#define mmXDMA_MSTR_HEIGHT                                                      0x403
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_HEIGHT                                      0x403
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_HEIGHT                                      0x413
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_HEIGHT                                      0x423
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_HEIGHT                                      0x433
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_HEIGHT                                      0x443
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_HEIGHT                                      0x453
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE                                         0x404
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_SURFACE_BASE                         0x404
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_SURFACE_BASE                         0x414
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_SURFACE_BASE                         0x424
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_SURFACE_BASE                         0x434
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_SURFACE_BASE                         0x444
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_SURFACE_BASE                         0x454
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                                    0x405
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                    0x405
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                    0x415
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                    0x425
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                    0x435
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                    0x445
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH                    0x455
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS                                          0x406
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_GPU_ADDRESS                          0x406
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_GPU_ADDRESS                          0x416
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_GPU_ADDRESS                          0x426
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_GPU_ADDRESS                          0x436
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_GPU_ADDRESS                          0x446
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_GPU_ADDRESS                          0x456
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                                     0x407
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                     0x407
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                     0x417
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                     0x427
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                     0x437
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                     0x447
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH                     0x457
+#define mmXDMA_MSTR_CACHE_BASE_ADDR                                             0x408
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CACHE_BASE_ADDR                             0x408
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CACHE_BASE_ADDR                             0x418
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CACHE_BASE_ADDR                             0x428
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CACHE_BASE_ADDR                             0x438
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CACHE_BASE_ADDR                             0x448
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CACHE_BASE_ADDR                             0x458
+#define mmXDMA_MSTR_CACHE_BASE_ADDR_HIGH                                        0x409
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CACHE_BASE_ADDR_HIGH                        0x409
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CACHE_BASE_ADDR_HIGH                        0x419
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CACHE_BASE_ADDR_HIGH                        0x429
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CACHE_BASE_ADDR_HIGH                        0x439
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CACHE_BASE_ADDR_HIGH                        0x449
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CACHE_BASE_ADDR_HIGH                        0x459
+#define mmXDMA_MSTR_CACHE                                                       0x40a
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CACHE                                       0x40a
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CACHE                                       0x41a
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CACHE                                       0x42a
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CACHE                                       0x43a
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CACHE                                       0x44a
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CACHE                                       0x45a
+#define mmXDMA_MSTR_CHANNEL_START                                               0x40b
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CHANNEL_START                               0x40b
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CHANNEL_START                               0x41b
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CHANNEL_START                               0x42b
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CHANNEL_START                               0x43b
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CHANNEL_START                               0x44b
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CHANNEL_START                               0x45b
+#define mmXDMA_MSTR_PERFMEAS_STATUS                                             0x40e
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_PERFMEAS_STATUS                             0x40e
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_PERFMEAS_STATUS                             0x41e
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_PERFMEAS_STATUS                             0x42e
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_PERFMEAS_STATUS                             0x43e
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_PERFMEAS_STATUS                             0x44e
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_PERFMEAS_STATUS                             0x45e
+#define mmXDMA_MSTR_PERFMEAS_CNTL                                               0x40f
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_PERFMEAS_CNTL                               0x40f
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_PERFMEAS_CNTL                               0x41f
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_PERFMEAS_CNTL                               0x42f
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_PERFMEAS_CNTL                               0x43f
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_PERFMEAS_CNTL                               0x44f
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_PERFMEAS_CNTL                               0x45f
+#define mmXDMA_SLV_CNTL                                                         0x460
+#define mmXDMA_SLV_MEM_CLIENT_CONFIG                                            0x461
+#define mmXDMA_SLV_SLS_PITCH                                                    0x462
+#define mmXDMA_SLV_READ_URGENT_CNTL                                             0x463
+#define mmXDMA_SLV_WRITE_URGENT_CNTL                                            0x464
+#define mmXDMA_SLV_WB_RATE_CNTL                                                 0x465
+#define mmXDMA_SLV_READ_LATENCY_MINMAX                                          0x466
+#define mmXDMA_SLV_READ_LATENCY_AVE                                             0x467
+#define mmXDMA_SLV_PCIE_NACK_STATUS                                             0x468
+#define mmXDMA_SLV_MEM_NACK_STATUS                                              0x469
+#define mmXDMA_SLV_RDRET_BUF_STATUS                                             0x46a
+#define mmXDMA_SLV_READ_LATENCY_TIMER                                           0x46b
+#define mmXDMA_SLV_FLIP_PENDING                                                 0x46c
+#define mmXDMA_SLV_CHANNEL_CNTL                                                 0x470
+#define mmXDMA_SLV_CHANNEL0_XDMA_SLV_CHANNEL_CNTL                               0x470
+#define mmXDMA_SLV_CHANNEL1_XDMA_SLV_CHANNEL_CNTL                               0x478
+#define mmXDMA_SLV_CHANNEL2_XDMA_SLV_CHANNEL_CNTL                               0x480
+#define mmXDMA_SLV_CHANNEL3_XDMA_SLV_CHANNEL_CNTL                               0x488
+#define mmXDMA_SLV_CHANNEL4_XDMA_SLV_CHANNEL_CNTL                               0x490
+#define mmXDMA_SLV_CHANNEL5_XDMA_SLV_CHANNEL_CNTL                               0x498
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS                                           0x471
+#define mmXDMA_SLV_CHANNEL0_XDMA_SLV_REMOTE_GPU_ADDRESS                         0x471
+#define mmXDMA_SLV_CHANNEL1_XDMA_SLV_REMOTE_GPU_ADDRESS                         0x479
+#define mmXDMA_SLV_CHANNEL2_XDMA_SLV_REMOTE_GPU_ADDRESS                         0x481
+#define mmXDMA_SLV_CHANNEL3_XDMA_SLV_REMOTE_GPU_ADDRESS                         0x489
+#define mmXDMA_SLV_CHANNEL4_XDMA_SLV_REMOTE_GPU_ADDRESS                         0x491
+#define mmXDMA_SLV_CHANNEL5_XDMA_SLV_REMOTE_GPU_ADDRESS                         0x499
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                                      0x472
+#define mmXDMA_SLV_CHANNEL0_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                    0x472
+#define mmXDMA_SLV_CHANNEL1_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                    0x47a
+#define mmXDMA_SLV_CHANNEL2_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                    0x482
+#define mmXDMA_SLV_CHANNEL3_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                    0x48a
+#define mmXDMA_SLV_CHANNEL4_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                    0x492
+#define mmXDMA_SLV_CHANNEL5_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH                    0x49a
+#define mmCMD_BUS_TX_CONTROL_LANE0                                              0x48e0
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE0                           0x48e0
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE0                           0x4980
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE0                           0x9a20
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE0                           0x9ac0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE0                           0x9b60
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE0                           0x9c00
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE0                           0x9ca0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE0                           0x9d40
+#define mmCMD_BUS_TX_CONTROL_LANE1                                              0x48f0
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE1                           0x48f0
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE1                           0x4990
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE1                           0x9a30
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE1                           0x9ad0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE1                           0x9b70
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE1                           0x9c10
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE1                           0x9cb0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE1                           0x9d50
+#define mmCMD_BUS_TX_CONTROL_LANE2                                              0x4900
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE2                           0x4900
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE2                           0x49a0
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE2                           0x9a40
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE2                           0x9ae0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE2                           0x9b80
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE2                           0x9c20
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE2                           0x9cc0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE2                           0x9d60
+#define mmCMD_BUS_TX_CONTROL_LANE3                                              0x4910
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE3                           0x4910
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE3                           0x49b0
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE3                           0x9a50
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE3                           0x9af0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE3                           0x9b90
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE3                           0x9c30
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE3                           0x9cd0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE3                           0x9d70
+#define mmMARGIN_DEEMPH_LANE0                                                   0x48e1
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE0                                0x48e1
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE0                                0x4981
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE0                                0x9a21
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE0                                0x9ac1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE0                                0x9b61
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE0                                0x9c01
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE0                                0x9ca1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE0                                0x9d41
+#define mmMARGIN_DEEMPH_LANE1                                                   0x48f1
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE1                                0x48f1
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE1                                0x4991
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE1                                0x9a31
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE1                                0x9ad1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE1                                0x9b71
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE1                                0x9c11
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE1                                0x9cb1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE1                                0x9d51
+#define mmMARGIN_DEEMPH_LANE2                                                   0x4901
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE2                                0x4901
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE2                                0x49a1
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE2                                0x9a41
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE2                                0x9ae1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE2                                0x9b81
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE2                                0x9c21
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE2                                0x9cc1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE2                                0x9d61
+#define mmMARGIN_DEEMPH_LANE3                                                   0x4911
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE3                                0x4911
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE3                                0x49b1
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE3                                0x9a51
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE3                                0x9af1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE3                                0x9b91
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE3                                0x9c31
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE3                                0x9cd1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE3                                0x9d71
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE0                                           0x48e2
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x48e2
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x4982
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x9a22
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x9ac2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x9b62
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x9c02
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x9ca2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE0                        0x9d42
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE1                                           0x48f2
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x48f2
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x4992
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x9a32
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x9ad2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x9b72
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x9c12
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x9cb2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE1                        0x9d52
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE2                                           0x4902
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x4902
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x49a2
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x9a42
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x9ae2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x9b82
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x9c22
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x9cc2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE2                        0x9d62
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE3                                           0x4912
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x4912
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x49b2
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x9a52
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x9af2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x9b92
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x9c32
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x9cd2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE3                        0x9d72
+#define mmTX_DISP_RFU0_LANE0                                                    0x48e3
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE0                                 0x48e3
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE0                                 0x4983
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE0                                 0x9a23
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE0                                 0x9ac3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE0                                 0x9b63
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE0                                 0x9c03
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE0                                 0x9ca3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE0                                 0x9d43
+#define mmTX_DISP_RFU0_LANE1                                                    0x48f3
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE1                                 0x48f3
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE1                                 0x4993
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE1                                 0x9a33
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE1                                 0x9ad3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE1                                 0x9b73
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE1                                 0x9c13
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE1                                 0x9cb3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE1                                 0x9d53
+#define mmTX_DISP_RFU0_LANE2                                                    0x4903
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE2                                 0x4903
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE2                                 0x49a3
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE2                                 0x9a43
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE2                                 0x9ae3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE2                                 0x9b83
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE2                                 0x9c23
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE2                                 0x9cc3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE2                                 0x9d63
+#define mmTX_DISP_RFU0_LANE3                                                    0x4913
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE3                                 0x4913
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE3                                 0x49b3
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE3                                 0x9a53
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE3                                 0x9af3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE3                                 0x9b93
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE3                                 0x9c33
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE3                                 0x9cd3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE3                                 0x9d73
+#define mmTX_DISP_RFU1_LANE0                                                    0x48e4
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE0                                 0x48e4
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE0                                 0x4984
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE0                                 0x9a24
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE0                                 0x9ac4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE0                                 0x9b64
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE0                                 0x9c04
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE0                                 0x9ca4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE0                                 0x9d44
+#define mmTX_DISP_RFU1_LANE1                                                    0x48f4
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE1                                 0x48f4
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE1                                 0x4994
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE1                                 0x9a34
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE1                                 0x9ad4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE1                                 0x9b74
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE1                                 0x9c14
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE1                                 0x9cb4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE1                                 0x9d54
+#define mmTX_DISP_RFU1_LANE2                                                    0x4904
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE2                                 0x4904
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE2                                 0x49a4
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE2                                 0x9a44
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE2                                 0x9ae4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE2                                 0x9b84
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE2                                 0x9c24
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE2                                 0x9cc4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE2                                 0x9d64
+#define mmTX_DISP_RFU1_LANE3                                                    0x4914
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE3                                 0x4914
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE3                                 0x49b4
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE3                                 0x9a54
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE3                                 0x9af4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE3                                 0x9b94
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE3                                 0x9c34
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE3                                 0x9cd4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE3                                 0x9d74
+#define mmTX_DISP_RFU2_LANE0                                                    0x48e5
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE0                                 0x48e5
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE0                                 0x4985
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE0                                 0x9a25
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE0                                 0x9ac5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE0                                 0x9b65
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE0                                 0x9c05
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE0                                 0x9ca5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE0                                 0x9d45
+#define mmTX_DISP_RFU2_LANE1                                                    0x48f5
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE1                                 0x48f5
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE1                                 0x4995
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE1                                 0x9a35
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE1                                 0x9ad5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE1                                 0x9b75
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE1                                 0x9c15
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE1                                 0x9cb5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE1                                 0x9d55
+#define mmTX_DISP_RFU2_LANE2                                                    0x4905
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE2                                 0x4905
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE2                                 0x49a5
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE2                                 0x9a45
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE2                                 0x9ae5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE2                                 0x9b85
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE2                                 0x9c25
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE2                                 0x9cc5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE2                                 0x9d65
+#define mmTX_DISP_RFU2_LANE3                                                    0x4915
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE3                                 0x4915
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE3                                 0x49b5
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE3                                 0x9a55
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE3                                 0x9af5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE3                                 0x9b95
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE3                                 0x9c35
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE3                                 0x9cd5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE3                                 0x9d75
+#define mmTX_DISP_RFU3_LANE0                                                    0x48e6
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE0                                 0x48e6
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE0                                 0x4986
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE0                                 0x9a26
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE0                                 0x9ac6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE0                                 0x9b66
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE0                                 0x9c06
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE0                                 0x9ca6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE0                                 0x9d46
+#define mmTX_DISP_RFU3_LANE1                                                    0x48f6
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE1                                 0x48f6
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE1                                 0x4996
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE1                                 0x9a36
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE1                                 0x9ad6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE1                                 0x9b76
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE1                                 0x9c16
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE1                                 0x9cb6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE1                                 0x9d56
+#define mmTX_DISP_RFU3_LANE2                                                    0x4906
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE2                                 0x4906
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE2                                 0x49a6
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE2                                 0x9a46
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE2                                 0x9ae6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE2                                 0x9b86
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE2                                 0x9c26
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE2                                 0x9cc6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE2                                 0x9d66
+#define mmTX_DISP_RFU3_LANE3                                                    0x4916
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE3                                 0x4916
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE3                                 0x49b6
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE3                                 0x9a56
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE3                                 0x9af6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE3                                 0x9b96
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE3                                 0x9c36
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE3                                 0x9cd6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE3                                 0x9d76
+#define mmTX_DISP_RFU4_LANE0                                                    0x48e7
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE0                                 0x48e7
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE0                                 0x4987
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE0                                 0x9a27
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE0                                 0x9ac7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE0                                 0x9b67
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE0                                 0x9c07
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE0                                 0x9ca7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE0                                 0x9d47
+#define mmTX_DISP_RFU4_LANE1                                                    0x48f7
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE1                                 0x48f7
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE1                                 0x4997
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE1                                 0x9a37
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE1                                 0x9ad7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE1                                 0x9b77
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE1                                 0x9c17
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE1                                 0x9cb7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE1                                 0x9d57
+#define mmTX_DISP_RFU4_LANE2                                                    0x4907
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE2                                 0x4907
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE2                                 0x49a7
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE2                                 0x9a47
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE2                                 0x9ae7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE2                                 0x9b87
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE2                                 0x9c27
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE2                                 0x9cc7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE2                                 0x9d67
+#define mmTX_DISP_RFU4_LANE3                                                    0x4917
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE3                                 0x4917
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE3                                 0x49b7
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE3                                 0x9a57
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE3                                 0x9af7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE3                                 0x9b97
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE3                                 0x9c37
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE3                                 0x9cd7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE3                                 0x9d77
+#define mmTX_DISP_RFU5_LANE0                                                    0x48e8
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE0                                 0x48e8
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE0                                 0x4988
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE0                                 0x9a28
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE0                                 0x9ac8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE0                                 0x9b68
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE0                                 0x9c08
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE0                                 0x9ca8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE0                                 0x9d48
+#define mmTX_DISP_RFU5_LANE1                                                    0x48f8
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE1                                 0x48f8
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE1                                 0x4998
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE1                                 0x9a38
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE1                                 0x9ad8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE1                                 0x9b78
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE1                                 0x9c18
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE1                                 0x9cb8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE1                                 0x9d58
+#define mmTX_DISP_RFU5_LANE2                                                    0x4908
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE2                                 0x4908
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE2                                 0x49a8
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE2                                 0x9a48
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE2                                 0x9ae8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE2                                 0x9b88
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE2                                 0x9c28
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE2                                 0x9cc8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE2                                 0x9d68
+#define mmTX_DISP_RFU5_LANE3                                                    0x4918
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE3                                 0x4918
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE3                                 0x49b8
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE3                                 0x9a58
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE3                                 0x9af8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE3                                 0x9b98
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE3                                 0x9c38
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE3                                 0x9cd8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE3                                 0x9d78
+#define mmTX_DISP_RFU6_LANE0                                                    0x48e9
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE0                                 0x48e9
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE0                                 0x4989
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE0                                 0x9a29
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE0                                 0x9ac9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE0                                 0x9b69
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE0                                 0x9c09
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE0                                 0x9ca9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE0                                 0x9d49
+#define mmTX_DISP_RFU6_LANE1                                                    0x48f9
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE1                                 0x48f9
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE1                                 0x4999
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE1                                 0x9a39
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE1                                 0x9ad9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE1                                 0x9b79
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE1                                 0x9c19
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE1                                 0x9cb9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE1                                 0x9d59
+#define mmTX_DISP_RFU6_LANE2                                                    0x4909
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE2                                 0x4909
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE2                                 0x49a9
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE2                                 0x9a49
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE2                                 0x9ae9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE2                                 0x9b89
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE2                                 0x9c29
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE2                                 0x9cc9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE2                                 0x9d69
+#define mmTX_DISP_RFU6_LANE3                                                    0x4919
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE3                                 0x4919
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE3                                 0x49b9
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE3                                 0x9a59
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE3                                 0x9af9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE3                                 0x9b99
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE3                                 0x9c39
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE3                                 0x9cd9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE3                                 0x9d79
+#define mmTX_DISP_RFU7_LANE0                                                    0x48ea
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE0                                 0x48ea
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE0                                 0x498a
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE0                                 0x9a2a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE0                                 0x9aca
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE0                                 0x9b6a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE0                                 0x9c0a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE0                                 0x9caa
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE0                                 0x9d4a
+#define mmTX_DISP_RFU7_LANE1                                                    0x48fa
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE1                                 0x48fa
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE1                                 0x499a
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE1                                 0x9a3a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE1                                 0x9ada
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE1                                 0x9b7a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE1                                 0x9c1a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE1                                 0x9cba
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE1                                 0x9d5a
+#define mmTX_DISP_RFU7_LANE2                                                    0x490a
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE2                                 0x490a
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE2                                 0x49aa
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE2                                 0x9a4a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE2                                 0x9aea
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE2                                 0x9b8a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE2                                 0x9c2a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE2                                 0x9cca
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE2                                 0x9d6a
+#define mmTX_DISP_RFU7_LANE3                                                    0x491a
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE3                                 0x491a
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE3                                 0x49ba
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE3                                 0x9a5a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE3                                 0x9afa
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE3                                 0x9b9a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE3                                 0x9c3a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE3                                 0x9cda
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE3                                 0x9d7a
+#define mmTX_DISP_RFU8_LANE0                                                    0x48eb
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE0                                 0x48eb
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE0                                 0x498b
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE0                                 0x9a2b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE0                                 0x9acb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE0                                 0x9b6b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE0                                 0x9c0b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE0                                 0x9cab
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE0                                 0x9d4b
+#define mmTX_DISP_RFU8_LANE1                                                    0x48fb
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE1                                 0x48fb
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE1                                 0x499b
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE1                                 0x9a3b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE1                                 0x9adb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE1                                 0x9b7b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE1                                 0x9c1b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE1                                 0x9cbb
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE1                                 0x9d5b
+#define mmTX_DISP_RFU8_LANE2                                                    0x490b
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE2                                 0x490b
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE2                                 0x49ab
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE2                                 0x9a4b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE2                                 0x9aeb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE2                                 0x9b8b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE2                                 0x9c2b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE2                                 0x9ccb
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE2                                 0x9d6b
+#define mmTX_DISP_RFU8_LANE3                                                    0x491b
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE3                                 0x491b
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE3                                 0x49bb
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE3                                 0x9a5b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE3                                 0x9afb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE3                                 0x9b9b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE3                                 0x9c3b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE3                                 0x9cdb
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE3                                 0x9d7b
+#define mmTX_DISP_RFU9_LANE0                                                    0x48ec
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE0                                 0x48ec
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE0                                 0x498c
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE0                                 0x9a2c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE0                                 0x9acc
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE0                                 0x9b6c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE0                                 0x9c0c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE0                                 0x9cac
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE0                                 0x9d4c
+#define mmTX_DISP_RFU9_LANE1                                                    0x48fc
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE1                                 0x48fc
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE1                                 0x499c
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE1                                 0x9a3c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE1                                 0x9adc
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE1                                 0x9b7c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE1                                 0x9c1c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE1                                 0x9cbc
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE1                                 0x9d5c
+#define mmTX_DISP_RFU9_LANE2                                                    0x490c
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE2                                 0x490c
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE2                                 0x49ac
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE2                                 0x9a4c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE2                                 0x9aec
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE2                                 0x9b8c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE2                                 0x9c2c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE2                                 0x9ccc
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE2                                 0x9d6c
+#define mmTX_DISP_RFU9_LANE3                                                    0x491c
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE3                                 0x491c
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE3                                 0x49bc
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE3                                 0x9a5c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE3                                 0x9afc
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE3                                 0x9b9c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE3                                 0x9c3c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE3                                 0x9cdc
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE3                                 0x9d7c
+#define mmTX_DISP_RFU10_LANE0                                                   0x48ed
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE0                                0x48ed
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE0                                0x498d
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE0                                0x9a2d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE0                                0x9acd
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE0                                0x9b6d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE0                                0x9c0d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE0                                0x9cad
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE0                                0x9d4d
+#define mmTX_DISP_RFU10_LANE1                                                   0x48fd
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE1                                0x48fd
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE1                                0x499d
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE1                                0x9a3d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE1                                0x9add
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE1                                0x9b7d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE1                                0x9c1d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE1                                0x9cbd
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE1                                0x9d5d
+#define mmTX_DISP_RFU10_LANE2                                                   0x490d
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE2                                0x490d
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE2                                0x49ad
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE2                                0x9a4d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE2                                0x9aed
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE2                                0x9b8d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE2                                0x9c2d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE2                                0x9ccd
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE2                                0x9d6d
+#define mmTX_DISP_RFU10_LANE3                                                   0x491d
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE3                                0x491d
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE3                                0x49bd
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE3                                0x9a5d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE3                                0x9afd
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE3                                0x9b9d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE3                                0x9c3d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE3                                0x9cdd
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE3                                0x9d7d
+#define mmTX_DISP_RFU11_LANE0                                                   0x48ee
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE0                                0x48ee
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE0                                0x498e
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE0                                0x9a2e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE0                                0x9ace
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE0                                0x9b6e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE0                                0x9c0e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE0                                0x9cae
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE0                                0x9d4e
+#define mmTX_DISP_RFU11_LANE1                                                   0x48fe
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE1                                0x48fe
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE1                                0x499e
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE1                                0x9a3e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE1                                0x9ade
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE1                                0x9b7e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE1                                0x9c1e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE1                                0x9cbe
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE1                                0x9d5e
+#define mmTX_DISP_RFU11_LANE2                                                   0x490e
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE2                                0x490e
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE2                                0x49ae
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE2                                0x9a4e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE2                                0x9aee
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE2                                0x9b8e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE2                                0x9c2e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE2                                0x9cce
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE2                                0x9d6e
+#define mmTX_DISP_RFU11_LANE3                                                   0x491e
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE3                                0x491e
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE3                                0x49be
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE3                                0x9a5e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE3                                0x9afe
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE3                                0x9b9e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE3                                0x9c3e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE3                                0x9cde
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE3                                0x9d7e
+#define mmTX_DISP_RFU12_LANE0                                                   0x48ef
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE0                                0x48ef
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE0                                0x498f
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE0                                0x9a2f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE0                                0x9acf
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE0                                0x9b6f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE0                                0x9c0f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE0                                0x9caf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE0                                0x9d4f
+#define mmTX_DISP_RFU12_LANE1                                                   0x48ff
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE1                                0x48ff
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE1                                0x499f
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE1                                0x9a3f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE1                                0x9adf
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE1                                0x9b7f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE1                                0x9c1f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE1                                0x9cbf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE1                                0x9d5f
+#define mmTX_DISP_RFU12_LANE2                                                   0x490f
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE2                                0x490f
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE2                                0x49af
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE2                                0x9a4f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE2                                0x9aef
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE2                                0x9b8f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE2                                0x9c2f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE2                                0x9ccf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE2                                0x9d6f
+#define mmTX_DISP_RFU12_LANE3                                                   0x491f
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE3                                0x491f
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE3                                0x49bf
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE3                                0x9a5f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE3                                0x9aff
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE3                                0x9b9f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE3                                0x9c3f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE3                                0x9cdf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE3                                0x9d7f
+#define mmCOMMON_MAR_DEEMPH_NOM                                                 0x48c3
+#define mmDC_COMBOPHYCMREGS0_COMMON_MAR_DEEMPH_NOM                              0x48c3
+#define mmDC_COMBOPHYCMREGS1_COMMON_MAR_DEEMPH_NOM                              0x4963
+#define mmDC_COMBOPHYCMREGS2_COMMON_MAR_DEEMPH_NOM                              0x9a03
+#define mmDC_COMBOPHYCMREGS3_COMMON_MAR_DEEMPH_NOM                              0x9aa3
+#define mmDC_COMBOPHYCMREGS4_COMMON_MAR_DEEMPH_NOM                              0x9b43
+#define mmDC_COMBOPHYCMREGS5_COMMON_MAR_DEEMPH_NOM                              0x9be3
+#define mmDC_COMBOPHYCMREGS6_COMMON_MAR_DEEMPH_NOM                              0x9c83
+#define mmDC_COMBOPHYCMREGS7_COMMON_MAR_DEEMPH_NOM                              0x9d23
+#define mmCOMMON_LANE_PWRMGMT                                                   0x48c4
+#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_PWRMGMT                                0x48c4
+#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_PWRMGMT                                0x4964
+#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_PWRMGMT                                0x9a04
+#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_PWRMGMT                                0x9aa4
+#define mmDC_COMBOPHYCMREGS4_COMMON_LANE_PWRMGMT                                0x9b44
+#define mmDC_COMBOPHYCMREGS5_COMMON_LANE_PWRMGMT                                0x9be4
+#define mmDC_COMBOPHYCMREGS6_COMMON_LANE_PWRMGMT                                0x9c84
+#define mmDC_COMBOPHYCMREGS7_COMMON_LANE_PWRMGMT                                0x9d24
+#define mmCOMMON_TXCNTRL                                                        0x48c5
+#define mmDC_COMBOPHYCMREGS0_COMMON_TXCNTRL                                     0x48c5
+#define mmDC_COMBOPHYCMREGS1_COMMON_TXCNTRL                                     0x4965
+#define mmDC_COMBOPHYCMREGS2_COMMON_TXCNTRL                                     0x9a05
+#define mmDC_COMBOPHYCMREGS3_COMMON_TXCNTRL                                     0x9aa5
+#define mmDC_COMBOPHYCMREGS4_COMMON_TXCNTRL                                     0x9b45
+#define mmDC_COMBOPHYCMREGS5_COMMON_TXCNTRL                                     0x9be5
+#define mmDC_COMBOPHYCMREGS6_COMMON_TXCNTRL                                     0x9c85
+#define mmDC_COMBOPHYCMREGS7_COMMON_TXCNTRL                                     0x9d25
+#define mmCOMMON_TMDP                                                           0x48c6
+#define mmDC_COMBOPHYCMREGS0_COMMON_TMDP                                        0x48c6
+#define mmDC_COMBOPHYCMREGS1_COMMON_TMDP                                        0x4966
+#define mmDC_COMBOPHYCMREGS2_COMMON_TMDP                                        0x9a06
+#define mmDC_COMBOPHYCMREGS3_COMMON_TMDP                                        0x9aa6
+#define mmDC_COMBOPHYCMREGS4_COMMON_TMDP                                        0x9b46
+#define mmDC_COMBOPHYCMREGS5_COMMON_TMDP                                        0x9be6
+#define mmDC_COMBOPHYCMREGS6_COMMON_TMDP                                        0x9c86
+#define mmDC_COMBOPHYCMREGS7_COMMON_TMDP                                        0x9d26
+#define mmCOMMON_LANE_RESETS                                                    0x48c7
+#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_RESETS                                 0x48c7
+#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_RESETS                                 0x4967
+#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_RESETS                                 0x9a07
+#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_RESETS                                 0x9aa7
+#define mmDC_COMBOPHYCMREGS4_COMMON_LANE_RESETS                                 0x9b47
+#define mmDC_COMBOPHYCMREGS5_COMMON_LANE_RESETS                                 0x9be7
+#define mmDC_COMBOPHYCMREGS6_COMMON_LANE_RESETS                                 0x9c87
+#define mmDC_COMBOPHYCMREGS7_COMMON_LANE_RESETS                                 0x9d27
+#define mmCOMMON_ZCALCODE_CTRL                                                  0x48c8
+#define mmDC_COMBOPHYCMREGS0_COMMON_ZCALCODE_CTRL                               0x48c8
+#define mmDC_COMBOPHYCMREGS1_COMMON_ZCALCODE_CTRL                               0x4968
+#define mmDC_COMBOPHYCMREGS2_COMMON_ZCALCODE_CTRL                               0x9a08
+#define mmDC_COMBOPHYCMREGS3_COMMON_ZCALCODE_CTRL                               0x9aa8
+#define mmDC_COMBOPHYCMREGS4_COMMON_ZCALCODE_CTRL                               0x9b48
+#define mmDC_COMBOPHYCMREGS5_COMMON_ZCALCODE_CTRL                               0x9be8
+#define mmDC_COMBOPHYCMREGS6_COMMON_ZCALCODE_CTRL                               0x9c88
+#define mmDC_COMBOPHYCMREGS7_COMMON_ZCALCODE_CTRL                               0x9d28
+#define mmCOMMON_DISP_RFU1                                                      0x48c9
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU1                                   0x48c9
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU1                                   0x4969
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU1                                   0x9a09
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU1                                   0x9aa9
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU1                                   0x9b49
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU1                                   0x9be9
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU1                                   0x9c89
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU1                                   0x9d29
+#define mmCOMMON_DISP_RFU2                                                      0x48ca
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU2                                   0x48ca
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU2                                   0x496a
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU2                                   0x9a0a
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU2                                   0x9aaa
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU2                                   0x9b4a
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU2                                   0x9bea
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU2                                   0x9c8a
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU2                                   0x9d2a
+#define mmCOMMON_DISP_RFU3                                                      0x48cb
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU3                                   0x48cb
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU3                                   0x496b
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU3                                   0x9a0b
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU3                                   0x9aab
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU3                                   0x9b4b
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU3                                   0x9beb
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU3                                   0x9c8b
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU3                                   0x9d2b
+#define mmCOMMON_DISP_RFU4                                                      0x48cc
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU4                                   0x48cc
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU4                                   0x496c
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU4                                   0x9a0c
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU4                                   0x9aac
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU4                                   0x9b4c
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU4                                   0x9bec
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU4                                   0x9c8c
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU4                                   0x9d2c
+#define mmCOMMON_DISP_RFU5                                                      0x48cd
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU5                                   0x48cd
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU5                                   0x496d
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU5                                   0x9a0d
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU5                                   0x9aad
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU5                                   0x9b4d
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU5                                   0x9bed
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU5                                   0x9c8d
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU5                                   0x9d2d
+#define mmCOMMON_DISP_RFU6                                                      0x48ce
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU6                                   0x48ce
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU6                                   0x496e
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU6                                   0x9a0e
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU6                                   0x9aae
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU6                                   0x9b4e
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU6                                   0x9bee
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU6                                   0x9c8e
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU6                                   0x9d2e
+#define mmCOMMON_DISP_RFU7                                                      0x48cf
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU7                                   0x48cf
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU7                                   0x496f
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU7                                   0x9a0f
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU7                                   0x9aaf
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU7                                   0x9b4f
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU7                                   0x9bef
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU7                                   0x9c8f
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU7                                   0x9d2f
+#define mmFREQ_CTRL0                                                            0x4920
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL0                                        0x4920
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL0                                        0x49c0
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL0                                        0x9a60
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL0                                        0x9b00
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL0                                        0x9ba0
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL0                                        0x9c40
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL0                                        0x9ce0
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL0                                        0x9d80
+#define mmFREQ_CTRL1                                                            0x4921
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL1                                        0x4921
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL1                                        0x49c1
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL1                                        0x9a61
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL1                                        0x9b01
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL1                                        0x9ba1
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL1                                        0x9c41
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL1                                        0x9ce1
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL1                                        0x9d81
+#define mmFREQ_CTRL2                                                            0x4922
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL2                                        0x4922
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL2                                        0x49c2
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL2                                        0x9a62
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL2                                        0x9b02
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL2                                        0x9ba2
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL2                                        0x9c42
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL2                                        0x9ce2
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL2                                        0x9d82
+#define mmFREQ_CTRL3                                                            0x4923
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL3                                        0x4923
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL3                                        0x49c3
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL3                                        0x9a63
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL3                                        0x9b03
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL3                                        0x9ba3
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL3                                        0x9c43
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL3                                        0x9ce3
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL3                                        0x9d83
+#define mmBW_CTRL_COARSE                                                        0x4924
+#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_COARSE                                    0x4924
+#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_COARSE                                    0x49c4
+#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_COARSE                                    0x9a64
+#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_COARSE                                    0x9b04
+#define mmDC_COMBOPHYPLLREGS4_BW_CTRL_COARSE                                    0x9ba4
+#define mmDC_COMBOPHYPLLREGS5_BW_CTRL_COARSE                                    0x9c44
+#define mmDC_COMBOPHYPLLREGS6_BW_CTRL_COARSE                                    0x9ce4
+#define mmDC_COMBOPHYPLLREGS7_BW_CTRL_COARSE                                    0x9d84
+#define mmBW_CTRL_FINE                                                          0x4925
+#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_FINE                                      0x4925
+#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_FINE                                      0x49c5
+#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_FINE                                      0x9a65
+#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_FINE                                      0x9b05
+#define mmDC_COMBOPHYPLLREGS4_BW_CTRL_FINE                                      0x9ba5
+#define mmDC_COMBOPHYPLLREGS5_BW_CTRL_FINE                                      0x9c45
+#define mmDC_COMBOPHYPLLREGS6_BW_CTRL_FINE                                      0x9ce5
+#define mmDC_COMBOPHYPLLREGS7_BW_CTRL_FINE                                      0x9d85
+#define mmCAL_CTRL                                                              0x4926
+#define mmDC_COMBOPHYPLLREGS0_CAL_CTRL                                          0x4926
+#define mmDC_COMBOPHYPLLREGS1_CAL_CTRL                                          0x49c6
+#define mmDC_COMBOPHYPLLREGS2_CAL_CTRL                                          0x9a66
+#define mmDC_COMBOPHYPLLREGS3_CAL_CTRL                                          0x9b06
+#define mmDC_COMBOPHYPLLREGS4_CAL_CTRL                                          0x9ba6
+#define mmDC_COMBOPHYPLLREGS5_CAL_CTRL                                          0x9c46
+#define mmDC_COMBOPHYPLLREGS6_CAL_CTRL                                          0x9ce6
+#define mmDC_COMBOPHYPLLREGS7_CAL_CTRL                                          0x9d86
+#define mmLOOP_CTRL                                                             0x4927
+#define mmDC_COMBOPHYPLLREGS0_LOOP_CTRL                                         0x4927
+#define mmDC_COMBOPHYPLLREGS1_LOOP_CTRL                                         0x49c7
+#define mmDC_COMBOPHYPLLREGS2_LOOP_CTRL                                         0x9a67
+#define mmDC_COMBOPHYPLLREGS3_LOOP_CTRL                                         0x9b07
+#define mmDC_COMBOPHYPLLREGS4_LOOP_CTRL                                         0x9ba7
+#define mmDC_COMBOPHYPLLREGS5_LOOP_CTRL                                         0x9c47
+#define mmDC_COMBOPHYPLLREGS6_LOOP_CTRL                                         0x9ce7
+#define mmDC_COMBOPHYPLLREGS7_LOOP_CTRL                                         0x9d87
+#define mmDEBUG0                                                                0x4928
+#define mmDC_COMBOPHYPLLREGS0_DEBUG0                                            0x4928
+#define mmDC_COMBOPHYPLLREGS1_DEBUG0                                            0x49c8
+#define mmDC_COMBOPHYPLLREGS2_DEBUG0                                            0x9a68
+#define mmDC_COMBOPHYPLLREGS3_DEBUG0                                            0x9b08
+#define mmDC_COMBOPHYPLLREGS4_DEBUG0                                            0x9ba8
+#define mmDC_COMBOPHYPLLREGS5_DEBUG0                                            0x9c48
+#define mmDC_COMBOPHYPLLREGS6_DEBUG0                                            0x9ce8
+#define mmDC_COMBOPHYPLLREGS7_DEBUG0                                            0x9d88
+#define mmVREG_CFG                                                              0x4929
+#define mmDC_COMBOPHYPLLREGS0_VREG_CFG                                          0x4929
+#define mmDC_COMBOPHYPLLREGS1_VREG_CFG                                          0x49c9
+#define mmDC_COMBOPHYPLLREGS2_VREG_CFG                                          0x9a69
+#define mmDC_COMBOPHYPLLREGS3_VREG_CFG                                          0x9b09
+#define mmDC_COMBOPHYPLLREGS4_VREG_CFG                                          0x9ba9
+#define mmDC_COMBOPHYPLLREGS5_VREG_CFG                                          0x9c49
+#define mmDC_COMBOPHYPLLREGS6_VREG_CFG                                          0x9ce9
+#define mmDC_COMBOPHYPLLREGS7_VREG_CFG                                          0x9d89
+#define mmOBSERVE0                                                              0x492a
+#define mmDC_COMBOPHYPLLREGS0_OBSERVE0                                          0x492a
+#define mmDC_COMBOPHYPLLREGS1_OBSERVE0                                          0x49ca
+#define mmDC_COMBOPHYPLLREGS2_OBSERVE0                                          0x9a6a
+#define mmDC_COMBOPHYPLLREGS3_OBSERVE0                                          0x9b0a
+#define mmDC_COMBOPHYPLLREGS4_OBSERVE0                                          0x9baa
+#define mmDC_COMBOPHYPLLREGS5_OBSERVE0                                          0x9c4a
+#define mmDC_COMBOPHYPLLREGS6_OBSERVE0                                          0x9cea
+#define mmDC_COMBOPHYPLLREGS7_OBSERVE0                                          0x9d8a
+#define mmOBSERVE1                                                              0x492b
+#define mmDC_COMBOPHYPLLREGS0_OBSERVE1                                          0x492b
+#define mmDC_COMBOPHYPLLREGS1_OBSERVE1                                          0x49cb
+#define mmDC_COMBOPHYPLLREGS2_OBSERVE1                                          0x9a6b
+#define mmDC_COMBOPHYPLLREGS3_OBSERVE1                                          0x9b0b
+#define mmDC_COMBOPHYPLLREGS4_OBSERVE1                                          0x9bab
+#define mmDC_COMBOPHYPLLREGS5_OBSERVE1                                          0x9c4b
+#define mmDC_COMBOPHYPLLREGS6_OBSERVE1                                          0x9ceb
+#define mmDC_COMBOPHYPLLREGS7_OBSERVE1                                          0x9d8b
+#define mmDFT_OUT                                                               0x492c
+#define mmDC_COMBOPHYPLLREGS0_DFT_OUT                                           0x492c
+#define mmDC_COMBOPHYPLLREGS1_DFT_OUT                                           0x49cc
+#define mmDC_COMBOPHYPLLREGS2_DFT_OUT                                           0x9a6c
+#define mmDC_COMBOPHYPLLREGS3_DFT_OUT                                           0x9b0c
+#define mmDC_COMBOPHYPLLREGS4_DFT_OUT                                           0x9bac
+#define mmDC_COMBOPHYPLLREGS5_DFT_OUT                                           0x9c4c
+#define mmDC_COMBOPHYPLLREGS6_DFT_OUT                                           0x9cec
+#define mmDC_COMBOPHYPLLREGS7_DFT_OUT                                           0x9d8c
+#define mmPLL_WRAP_CNTRL1                                                       0x495e
+#define mmDC_COMBOPHYPLLREGS0_PLL_WRAP_CNTRL1                                   0x495e
+#define mmDC_COMBOPHYPLLREGS1_PLL_WRAP_CNTRL1                                   0x49fe
+#define mmDC_COMBOPHYPLLREGS2_PLL_WRAP_CNTRL1                                   0x9a9e
+#define mmDC_COMBOPHYPLLREGS3_PLL_WRAP_CNTRL1                                   0x9b3e
+#define mmDC_COMBOPHYPLLREGS4_PLL_WRAP_CNTRL1                                   0x9bde
+#define mmDC_COMBOPHYPLLREGS5_PLL_WRAP_CNTRL1                                   0x9c7e
+#define mmDC_COMBOPHYPLLREGS6_PLL_WRAP_CNTRL1                                   0x9d1e
+#define mmDC_COMBOPHYPLLREGS7_PLL_WRAP_CNTRL1                                   0x9dbe
+#define mmPLL_WRAP_CNTRL                                                        0x495f
+#define mmDC_COMBOPHYPLLREGS0_PLL_WRAP_CNTRL                                    0x495f
+#define mmDC_COMBOPHYPLLREGS1_PLL_WRAP_CNTRL                                    0x49ff
+#define mmDC_COMBOPHYPLLREGS2_PLL_WRAP_CNTRL                                    0x9a9f
+#define mmDC_COMBOPHYPLLREGS3_PLL_WRAP_CNTRL                                    0x9b3f
+#define mmDC_COMBOPHYPLLREGS4_PLL_WRAP_CNTRL                                    0x9bdf
+#define mmDC_COMBOPHYPLLREGS5_PLL_WRAP_CNTRL                                    0x9c7f
+#define mmDC_COMBOPHYPLLREGS6_PLL_WRAP_CNTRL                                    0x9d1f
+#define mmDC_COMBOPHYPLLREGS7_PLL_WRAP_CNTRL                                    0x9dbf
+#define mmPPLL_VREG_CFG                                                         0x1700
+#define mmDC_DISPLAYPLLREGS0_PPLL_VREG_CFG                                      0x1700
+#define mmDC_DISPLAYPLLREGS1_PPLL_VREG_CFG                                      0x172a
+#define mmDC_DISPLAYPLLREGS2_PPLL_VREG_CFG                                      0x1754
+#define mmPPLL_MODE_CNTL                                                        0x1701
+#define mmDC_DISPLAYPLLREGS0_PPLL_MODE_CNTL                                     0x1701
+#define mmDC_DISPLAYPLLREGS1_PPLL_MODE_CNTL                                     0x172b
+#define mmDC_DISPLAYPLLREGS2_PPLL_MODE_CNTL                                     0x1755
+#define mmPPLL_FREQ_CTRL0                                                       0x1702
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL0                                    0x1702
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL0                                    0x172c
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL0                                    0x1756
+#define mmPPLL_FREQ_CTRL1                                                       0x1703
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL1                                    0x1703
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL1                                    0x172d
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL1                                    0x1757
+#define mmPPLL_FREQ_CTRL2                                                       0x1704
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL2                                    0x1704
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL2                                    0x172e
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL2                                    0x1758
+#define mmPPLL_FREQ_CTRL3                                                       0x1705
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL3                                    0x1705
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL3                                    0x172f
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL3                                    0x1759
+#define mmPPLL_BW_CTRL_COARSE                                                   0x1706
+#define mmDC_DISPLAYPLLREGS0_PPLL_BW_CTRL_COARSE                                0x1706
+#define mmDC_DISPLAYPLLREGS1_PPLL_BW_CTRL_COARSE                                0x1730
+#define mmDC_DISPLAYPLLREGS2_PPLL_BW_CTRL_COARSE                                0x175a
+#define mmPPLL_BW_CTRL_FINE                                                     0x1708
+#define mmDC_DISPLAYPLLREGS0_PPLL_BW_CTRL_FINE                                  0x1708
+#define mmDC_DISPLAYPLLREGS1_PPLL_BW_CTRL_FINE                                  0x1732
+#define mmDC_DISPLAYPLLREGS2_PPLL_BW_CTRL_FINE                                  0x175c
+#define mmPPLL_CAL_CTRL                                                         0x1709
+#define mmDC_DISPLAYPLLREGS0_PPLL_CAL_CTRL                                      0x1709
+#define mmDC_DISPLAYPLLREGS1_PPLL_CAL_CTRL                                      0x1733
+#define mmDC_DISPLAYPLLREGS2_PPLL_CAL_CTRL                                      0x175d
+#define mmPPLL_LOOP_CTRL                                                        0x170a
+#define mmDC_DISPLAYPLLREGS0_PPLL_LOOP_CTRL                                     0x170a
+#define mmDC_DISPLAYPLLREGS1_PPLL_LOOP_CTRL                                     0x1734
+#define mmDC_DISPLAYPLLREGS2_PPLL_LOOP_CTRL                                     0x175e
+#define mmPPLL_REFCLK_CNTL                                                      0x1718
+#define mmDC_DISPLAYPLLREGS0_PPLL_REFCLK_CNTL                                   0x1718
+#define mmDC_DISPLAYPLLREGS1_PPLL_REFCLK_CNTL                                   0x1742
+#define mmDC_DISPLAYPLLREGS2_PPLL_REFCLK_CNTL                                   0x176c
+#define mmPPLL_CLKOUT_CNTL                                                      0x1719
+#define mmDC_DISPLAYPLLREGS0_PPLL_CLKOUT_CNTL                                   0x1719
+#define mmDC_DISPLAYPLLREGS1_PPLL_CLKOUT_CNTL                                   0x1743
+#define mmDC_DISPLAYPLLREGS2_PPLL_CLKOUT_CNTL                                   0x176d
+#define mmPPLL_DFT_CNTL                                                         0x171a
+#define mmDC_DISPLAYPLLREGS0_PPLL_DFT_CNTL                                      0x171a
+#define mmDC_DISPLAYPLLREGS1_PPLL_DFT_CNTL                                      0x1744
+#define mmDC_DISPLAYPLLREGS2_PPLL_DFT_CNTL                                      0x176e
+#define mmPPLL_ANALOG_CNTL                                                      0x171b
+#define mmDC_DISPLAYPLLREGS0_PPLL_ANALOG_CNTL                                   0x171b
+#define mmDC_DISPLAYPLLREGS1_PPLL_ANALOG_CNTL                                   0x1745
+#define mmDC_DISPLAYPLLREGS2_PPLL_ANALOG_CNTL                                   0x176f
+#define mmPPLL_POSTDIV                                                          0x171c
+#define mmDC_DISPLAYPLLREGS0_PPLL_POSTDIV                                       0x171c
+#define mmDC_DISPLAYPLLREGS1_PPLL_POSTDIV                                       0x1746
+#define mmDC_DISPLAYPLLREGS2_PPLL_POSTDIV                                       0x1770
+#define mmPPLL_DEBUG0                                                           0x1720
+#define mmDC_DISPLAYPLLREGS0_PPLL_DEBUG0                                        0x1720
+#define mmDC_DISPLAYPLLREGS1_PPLL_DEBUG0                                        0x174a
+#define mmDC_DISPLAYPLLREGS2_PPLL_DEBUG0                                        0x1774
+#define mmPPLL_OBSERVE0                                                         0x1721
+#define mmDC_DISPLAYPLLREGS0_PPLL_OBSERVE0                                      0x1721
+#define mmDC_DISPLAYPLLREGS1_PPLL_OBSERVE0                                      0x174b
+#define mmDC_DISPLAYPLLREGS2_PPLL_OBSERVE0                                      0x1775
+#define mmPPLL_OBSERVE1                                                         0x1722
+#define mmDC_DISPLAYPLLREGS0_PPLL_OBSERVE1                                      0x1722
+#define mmDC_DISPLAYPLLREGS1_PPLL_OBSERVE1                                      0x174c
+#define mmDC_DISPLAYPLLREGS2_PPLL_OBSERVE1                                      0x1776
+#define mmPPLL_UPDATE_CNTL                                                      0x1724
+#define mmDC_DISPLAYPLLREGS0_PPLL_UPDATE_CNTL                                   0x1724
+#define mmDC_DISPLAYPLLREGS1_PPLL_UPDATE_CNTL                                   0x174e
+#define mmDC_DISPLAYPLLREGS2_PPLL_UPDATE_CNTL                                   0x1778
+#define mmPPLL_OBSERVE0_OUT                                                     0x1725
+#define mmDC_DISPLAYPLLREGS0_PPLL_OBSERVE0_OUT                                  0x1725
+#define mmDC_DISPLAYPLLREGS1_PPLL_OBSERVE0_OUT                                  0x174f
+#define mmDC_DISPLAYPLLREGS2_PPLL_OBSERVE0_OUT                                  0x1779
+#define mmPPLL_STATUS_DEBUG1                                                    0x1726
+#define mmDC_DISPLAYPLLREGS0_PPLL_STATUS_DEBUG1                                 0x1726
+#define mmDC_DISPLAYPLLREGS1_PPLL_STATUS_DEBUG1                                 0x1750
+#define mmDC_DISPLAYPLLREGS2_PPLL_STATUS_DEBUG1                                 0x177a
+#define mmPPLL_DEBUG_MUX_CNTL                                                   0x1727
+#define mmDC_DISPLAYPLLREGS0_PPLL_DEBUG_MUX_CNTL                                0x1727
+#define mmDC_DISPLAYPLLREGS1_PPLL_DEBUG_MUX_CNTL                                0x1751
+#define mmDC_DISPLAYPLLREGS2_PPLL_DEBUG_MUX_CNTL                                0x177b
+#define mmPPLL_DIV_UPDATE_DEBUG                                                 0x1728
+#define mmDC_DISPLAYPLLREGS0_PPLL_DIV_UPDATE_DEBUG                              0x1728
+#define mmDC_DISPLAYPLLREGS1_PPLL_DIV_UPDATE_DEBUG                              0x1752
+#define mmDC_DISPLAYPLLREGS2_PPLL_DIV_UPDATE_DEBUG                              0x177c
+#define mmPPLL_STATUS_DEBUG0                                                    0x1729
+#define mmDC_DISPLAYPLLREGS0_PPLL_STATUS_DEBUG0                                 0x1729
+#define mmDC_DISPLAYPLLREGS1_PPLL_STATUS_DEBUG0                                 0x1753
+#define mmDC_DISPLAYPLLREGS2_PPLL_STATUS_DEBUG0                                 0x177d
+#define mmCOMP_EN_CTL                                                           0x9dc0
+#define mmDPCSTX_PHY_CNTL                                                       0x48d0
+#define mmDPCSTX0_DPCSTX_PHY_CNTL                                               0x48d0
+#define mmDPCSTX1_DPCSTX_PHY_CNTL                                               0x4970
+#define mmDPCSTX2_DPCSTX_PHY_CNTL                                               0x9a10
+#define mmDPCSTX3_DPCSTX_PHY_CNTL                                               0x9ab0
+#define mmDPCSTX4_DPCSTX_PHY_CNTL                                               0x9b50
+#define mmDPCSTX5_DPCSTX_PHY_CNTL                                               0x9bf0
+#define mmDPCSTX6_DPCSTX_PHY_CNTL                                               0x9c90
+#define mmDPCSTX7_DPCSTX_PHY_CNTL                                               0x9d30
+#define mmDPCSTX_TX_CLOCK_CNTL                                                  0x48d1
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL                                          0x48d1
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL                                          0x4971
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL                                          0x9a11
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL                                          0x9ab1
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL                                          0x9b51
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL                                          0x9bf1
+#define mmDPCSTX6_DPCSTX_TX_CLOCK_CNTL                                          0x9c91
+#define mmDPCSTX7_DPCSTX_TX_CLOCK_CNTL                                          0x9d31
+#define mmDPCSTX_TX_CNTL                                                        0x48d3
+#define mmDPCSTX0_DPCSTX_TX_CNTL                                                0x48d3
+#define mmDPCSTX1_DPCSTX_TX_CNTL                                                0x4973
+#define mmDPCSTX2_DPCSTX_TX_CNTL                                                0x9a13
+#define mmDPCSTX3_DPCSTX_TX_CNTL                                                0x9ab3
+#define mmDPCSTX4_DPCSTX_TX_CNTL                                                0x9b53
+#define mmDPCSTX5_DPCSTX_TX_CNTL                                                0x9bf3
+#define mmDPCSTX6_DPCSTX_TX_CNTL                                                0x9c93
+#define mmDPCSTX7_DPCSTX_TX_CNTL                                                0x9d33
+#define mmDPCSTX_CBUS_CNTL                                                      0x48d5
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL                                              0x48d5
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL                                              0x4975
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL                                              0x9a15
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL                                              0x9ab5
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL                                              0x9b55
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL                                              0x9bf5
+#define mmDPCSTX6_DPCSTX_CBUS_CNTL                                              0x9c95
+#define mmDPCSTX7_DPCSTX_CBUS_CNTL                                              0x9d35
+#define mmDPCSTX_REG_ERROR_STATUS                                               0x48d6
+#define mmDPCSTX0_DPCSTX_REG_ERROR_STATUS                                       0x48d6
+#define mmDPCSTX1_DPCSTX_REG_ERROR_STATUS                                       0x4976
+#define mmDPCSTX2_DPCSTX_REG_ERROR_STATUS                                       0x9a16
+#define mmDPCSTX3_DPCSTX_REG_ERROR_STATUS                                       0x9ab6
+#define mmDPCSTX4_DPCSTX_REG_ERROR_STATUS                                       0x9b56
+#define mmDPCSTX5_DPCSTX_REG_ERROR_STATUS                                       0x9bf6
+#define mmDPCSTX6_DPCSTX_REG_ERROR_STATUS                                       0x9c96
+#define mmDPCSTX7_DPCSTX_REG_ERROR_STATUS                                       0x9d36
+#define mmDPCSTX_TX_ERROR_STATUS                                                0x48d7
+#define mmDPCSTX0_DPCSTX_TX_ERROR_STATUS                                        0x48d7
+#define mmDPCSTX1_DPCSTX_TX_ERROR_STATUS                                        0x4977
+#define mmDPCSTX2_DPCSTX_TX_ERROR_STATUS                                        0x9a17
+#define mmDPCSTX3_DPCSTX_TX_ERROR_STATUS                                        0x9ab7
+#define mmDPCSTX4_DPCSTX_TX_ERROR_STATUS                                        0x9b57
+#define mmDPCSTX5_DPCSTX_TX_ERROR_STATUS                                        0x9bf7
+#define mmDPCSTX6_DPCSTX_TX_ERROR_STATUS                                        0x9c97
+#define mmDPCSTX7_DPCSTX_TX_ERROR_STATUS                                        0x9d37
+#define mmDPCSTX_PLL_UPDATE_ADDR                                                0x48d8
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR                                        0x48d8
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR                                        0x4978
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR                                        0x9a18
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR                                        0x9ab8
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR                                        0x9b58
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR                                        0x9bf8
+#define mmDPCSTX6_DPCSTX_PLL_UPDATE_ADDR                                        0x9c98
+#define mmDPCSTX7_DPCSTX_PLL_UPDATE_ADDR                                        0x9d38
+#define mmDPCSTX_PLL_UPDATE_DATA                                                0x48d9
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA                                        0x48d9
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA                                        0x4979
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA                                        0x9a19
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA                                        0x9ab9
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA                                        0x9b59
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA                                        0x9bf9
+#define mmDPCSTX6_DPCSTX_PLL_UPDATE_DATA                                        0x9c99
+#define mmDPCSTX7_DPCSTX_PLL_UPDATE_DATA                                        0x9d39
+#define mmDPCSTX_INDEX_MODE_ADDR                                                0x48da
+#define mmDPCSTX0_DPCSTX_INDEX_MODE_ADDR                                        0x48da
+#define mmDPCSTX1_DPCSTX_INDEX_MODE_ADDR                                        0x497a
+#define mmDPCSTX2_DPCSTX_INDEX_MODE_ADDR                                        0x9a1a
+#define mmDPCSTX3_DPCSTX_INDEX_MODE_ADDR                                        0x9aba
+#define mmDPCSTX4_DPCSTX_INDEX_MODE_ADDR                                        0x9b5a
+#define mmDPCSTX5_DPCSTX_INDEX_MODE_ADDR                                        0x9bfa
+#define mmDPCSTX6_DPCSTX_INDEX_MODE_ADDR                                        0x9c9a
+#define mmDPCSTX7_DPCSTX_INDEX_MODE_ADDR                                        0x9d3a
+#define mmDPCSTX_INDEX_MODE_DATA                                                0x48db
+#define mmDPCSTX0_DPCSTX_INDEX_MODE_DATA                                        0x48db
+#define mmDPCSTX1_DPCSTX_INDEX_MODE_DATA                                        0x497b
+#define mmDPCSTX2_DPCSTX_INDEX_MODE_DATA                                        0x9a1b
+#define mmDPCSTX3_DPCSTX_INDEX_MODE_DATA                                        0x9abb
+#define mmDPCSTX4_DPCSTX_INDEX_MODE_DATA                                        0x9b5b
+#define mmDPCSTX5_DPCSTX_INDEX_MODE_DATA                                        0x9bfb
+#define mmDPCSTX6_DPCSTX_INDEX_MODE_DATA                                        0x9c9b
+#define mmDPCSTX7_DPCSTX_INDEX_MODE_DATA                                        0x9d3b
+#define mmDPCSTX_DEBUG_CONFIG                                                   0x48dc
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG                                           0x48dc
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG                                           0x497c
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG                                           0x9a1c
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG                                           0x9abc
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG                                           0x9b5c
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG                                           0x9bfc
+#define mmDPCSTX6_DPCSTX_DEBUG_CONFIG                                           0x9c9c
+#define mmDPCSTX7_DPCSTX_DEBUG_CONFIG                                           0x9d3c
+#define mmDPCSTX_TEST_DEBUG_DATA                                                0x48dd
+#define mmDPCSTX0_DPCSTX_TEST_DEBUG_DATA                                        0x48dd
+#define mmDPCSTX1_DPCSTX_TEST_DEBUG_DATA                                        0x497d
+#define mmDPCSTX2_DPCSTX_TEST_DEBUG_DATA                                        0x9a1d
+#define mmDPCSTX3_DPCSTX_TEST_DEBUG_DATA                                        0x9abd
+#define mmDPCSTX4_DPCSTX_TEST_DEBUG_DATA                                        0x9b5d
+#define mmDPCSTX5_DPCSTX_TEST_DEBUG_DATA                                        0x9bfd
+#define mmDPCSTX6_DPCSTX_TEST_DEBUG_DATA                                        0x9c9d
+#define mmDPCSTX7_DPCSTX_TEST_DEBUG_DATA                                        0x9d3d
+
+#endif /* DCE_11_2_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h
new file mode 100644
index 0000000..b2ea420
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h
@@ -0,0 +1,6813 @@
+/*
+ * DCE_11_2 Register documentation
+ *
+ * Copyright (C) 2016  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_11_2_ENUM_H
+#define DCE_11_2_ENUM_H
+
+typedef enum CRTC_CONTROL_CRTC_START_POINT_CNTL {
+	CRTC_CONTROL_CRTC_START_POINT_CNTL_NORMAL        = 0x0,
+	CRTC_CONTROL_CRTC_START_POINT_CNTL_DP            = 0x1,
+} CRTC_CONTROL_CRTC_START_POINT_CNTL;
+typedef enum CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL {
+	CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL_NORMAL       = 0x0,
+	CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL_DP           = 0x1,
+} CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL;
+typedef enum CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL {
+	CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_DISABLE     = 0x0,
+	CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_DISABLE_CURRENT= 0x1,
+	CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_RESERVED    = 0x2,
+	CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_DISABLE_FIRST= 0x3,
+} CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL;
+typedef enum CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY {
+	CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY_FALSE    = 0x0,
+	CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY_TRUE     = 0x1,
+} CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY;
+typedef enum CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE {
+	CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE_FALSE= 0x0,
+	CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE_TRUE = 0x1,
+} CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE;
+typedef enum CRTC_CONTROL_CRTC_SOF_PULL_EN {
+	CRTC_CONTROL_CRTC_SOF_PULL_EN_FALSE              = 0x0,
+	CRTC_CONTROL_CRTC_SOF_PULL_EN_TRUE               = 0x1,
+} CRTC_CONTROL_CRTC_SOF_PULL_EN;
+typedef enum CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL {
+	CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL_FALSE       = 0x0,
+	CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL_TRUE        = 0x1,
+} CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL {
+	CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL_FALSE  = 0x0,
+	CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL_TRUE   = 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL {
+	CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL_FALSE  = 0x0,
+	CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL_TRUE   = 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN {
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN_FALSE= 0x0,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN_TRUE= 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC {
+	CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC_DISABLE= 0x0,
+	CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC_ENABLE= 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT {
+	CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT_DISABLE= 0x0,
+	CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT_ENABLE= 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK {
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_FRAME_START= 0x0,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_CRTC_TRIG_A= 0x1,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_CRTC_TRIG_B= 0x2,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_CURSOR_CHANGE= 0x3,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_OTHER_CLIENT= 0x4,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION0= 0x5,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION1= 0x6,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION2= 0x7,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION3= 0x8,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_GRAPHIC_UPDATE_PENDING= 0x9,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_RESERVED2= 0xa,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_INVALID= 0xb,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_DOUBLE_BUFFER= 0xc,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_D1CRTC_VERT_COUNT_NOM= 0xd,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_D1CRTC_VERT_COUNT= 0xe,
+	CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_RESERVED= 0xf,
+} CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK;
+typedef enum CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK {
+	CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_FALSE= 0x0,
+	CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_TRUE= 0x1,
+} CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK;
+typedef enum CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR {
+	CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR_FALSE= 0x0,
+	CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR_TRUE= 0x1,
+} CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR;
+typedef enum CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL {
+	CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL_FALSE       = 0x0,
+	CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL_TRUE        = 0x1,
+} CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL;
+typedef enum CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN {
+	CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN_FALSE     = 0x0,
+	CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN_TRUE      = 0x1,
+} CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT {
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_VSYNCA_OTHER= 0x1,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HSYNCA_OTHER= 0x2,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICF= 0x5,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICE= 0x6,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_VSYNCA  = 0x7,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HSYNCA  = 0x8,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_VSYNCB  = 0x9,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HSYNCB  = 0xa,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HPD1    = 0xb,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HPD2    = 0xc,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICD= 0xd,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICC= 0xe,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL0   = 0x10,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL1   = 0x11,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL2   = 0x12,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IBLON   = 0x13,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICA= 0x14,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICB= 0x15,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL_ALLOW= 0x16,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_MANUAL_FLOW= 0x17,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT {
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_INTERLACE= 0x1,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_GENERICA= 0x2,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_GENERICB= 0x3,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_HSYNCA= 0x4,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_HSYNCB= 0x5,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_VIDEO = 0x6,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_GENERICC= 0x7,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN {
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN_FALSE= 0x0,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN_TRUE = 0x1,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR {
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR_FALSE           = 0x0,
+	CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR_TRUE            = 0x1,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT {
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_VSYNCA_OTHER= 0x1,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HSYNCA_OTHER= 0x2,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICF= 0x5,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICE= 0x6,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_VSYNCA  = 0x7,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HSYNCA  = 0x8,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_VSYNCB  = 0x9,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HSYNCB  = 0xa,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HPD1    = 0xb,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HPD2    = 0xc,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICD= 0xd,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICC= 0xe,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL0   = 0x10,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL1   = 0x11,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL2   = 0x12,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IBLON   = 0x13,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICA= 0x14,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICB= 0x15,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL_ALLOW= 0x16,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_MANUAL_FLOW= 0x17,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT {
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_INTERLACE= 0x1,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_GENERICA= 0x2,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_GENERICB= 0x3,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_HSYNCA= 0x4,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_HSYNCB= 0x5,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_VIDEO = 0x6,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_GENERICC= 0x7,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN {
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN_FALSE= 0x0,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN_TRUE = 0x1,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR {
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR_FALSE           = 0x0,
+	CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR_TRUE            = 0x1,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE {
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_DISABLE= 0x0,
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_HCOUNT= 0x1,
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_HCOUNT_VCOUNT= 0x2,
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_RESERVED= 0x3,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK {
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK_FALSE= 0x0,
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK_TRUE= 0x1,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL {
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL_FALSE= 0x0,
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL_TRUE= 0x1,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR {
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR_FALSE= 0x0,
+	CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR_TRUE= 0x1,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR;
+typedef enum CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT {
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_LOGIC0= 0x0,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICF= 0x1,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICE= 0x2,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_HPD1= 0x3,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_HPD2= 0x4,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC1DATA= 0x5,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC1CLK= 0x6,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC2DATA= 0x7,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC2CLK= 0x8,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DVOCLK= 0x9,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_MANUAL= 0xa,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_LOGIC1= 0xb,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICB= 0xc,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICA= 0xd,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICD= 0xe,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICC= 0xf,
+} CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT;
+typedef enum CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY {
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY_FALSE= 0x0,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY_TRUE= 0x1,
+} CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY;
+typedef enum CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY {
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY_FALSE= 0x0,
+	CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY_TRUE= 0x1,
+} CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY;
+typedef enum CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE {
+	CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_NO= 0x0,
+	CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_RIGHT= 0x1,
+	CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_LEFT= 0x2,
+	CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_RESERVED= 0x3,
+} CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE;
+typedef enum CRTC_CONTROL_CRTC_MASTER_EN {
+	CRTC_CONTROL_CRTC_MASTER_EN_FALSE                = 0x0,
+	CRTC_CONTROL_CRTC_MASTER_EN_TRUE                 = 0x1,
+} CRTC_CONTROL_CRTC_MASTER_EN;
+typedef enum CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN {
+	CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN_FALSE      = 0x0,
+	CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN_TRUE       = 0x1,
+} CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN;
+typedef enum CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE {
+	CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE_FALSE      = 0x0,
+	CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE_TRUE       = 0x1,
+} CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE;
+typedef enum CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE {
+	CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE_FALSE= 0x0,
+	CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE_TRUE= 0x1,
+} CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE;
+typedef enum CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD {
+	CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_NOT= 0x0,
+	CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_ODD= 0x1,
+	CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_EVEN= 0x2,
+	CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_NOT2= 0x3,
+} CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD;
+typedef enum CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY {
+	CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY_FALSE= 0x0,
+	CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY_TRUE= 0x1,
+} CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY;
+typedef enum CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT {
+	CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT_FALSE= 0x0,
+	CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT_TRUE= 0x1,
+} CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT;
+typedef enum CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN {
+	CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN_FALSE  = 0x0,
+	CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN_TRUE   = 0x1,
+} CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN;
+typedef enum CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE {
+	CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_FALSE= 0x0,
+	CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_TRUE= 0x1,
+} CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE;
+typedef enum CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR {
+	CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_FALSE= 0x0,
+	CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_TRUE= 0x1,
+} CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR;
+typedef enum CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE {
+	CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_DISABLE= 0x0,
+	CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_TRIGGERA= 0x1,
+	CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_TRIGGERB= 0x2,
+	CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_RESERVED= 0x3,
+} CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY {
+	CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY_FALSE= 0x0,
+	CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY_TRUE= 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY {
+	CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY_FALSE= 0x0,
+	CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY_TRUE= 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY {
+	CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY_FALSE= 0x0,
+	CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY_TRUE= 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_EN {
+	CRTC_STEREO_CONTROL_CRTC_STEREO_EN_FALSE         = 0x0,
+	CRTC_STEREO_CONTROL_CRTC_STEREO_EN_TRUE          = 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_EN;
+typedef enum CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR {
+	CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR_FALSE   = 0x0,
+	CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR_TRUE    = 0x1,
+} CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR;
+typedef enum CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL {
+	CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_DISABLE= 0x0,
+	CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_TRIGGERA= 0x1,
+	CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_TRIGGERB= 0x2,
+	CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_RESERVED= 0x3,
+} CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY {
+	CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY_FALSE= 0x0,
+	CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY_TRUE= 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY {
+	CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY_FALSE= 0x0,
+	CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY_TRUE= 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN {
+	CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN_FALSE= 0x0,
+	CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN_TRUE= 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN {
+	CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN_FALSE   = 0x0,
+	CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN_TRUE    = 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK_FALSE  = 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK_TRUE   = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE_FALSE = 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE_TRUE  = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK_FALSE  = 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK_TRUE   = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE_FALSE = 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE_TRUE  = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK {
+	CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE {
+	CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE_FALSE= 0x0,
+	CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE;
+typedef enum CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK {
+	CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK_FALSE          = 0x0,
+	CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK_TRUE           = 0x1,
+} CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK;
+typedef enum CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY {
+	CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY_FALSE= 0x0,
+	CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY_TRUE= 0x1,
+} CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY;
+typedef enum CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN {
+	CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_FALSE= 0x0,
+	CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_TRUE= 0x1,
+} CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN;
+typedef enum CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE {
+	CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE_FALSE= 0x0,
+	CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE_TRUE= 0x1,
+} CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN {
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN_FALSE= 0x0,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN_TRUE= 0x1,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE {
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_RGB= 0x0,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_YCBCR601= 0x1,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_YCBCR709= 0x2,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_VBARS= 0x3,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_HBARS= 0x4,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_SRRGB= 0x5,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_DRRGB= 0x6,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_XRBIAS= 0x7,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE {
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE_FALSE= 0x0,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE_TRUE= 0x1,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT {
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_6BPC= 0x0,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_8BPC= 0x1,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_10BPC= 0x2,
+	CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_RESERVED= 0x3,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT;
+typedef enum MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK {
+	MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK_FALSE      = 0x0,
+	MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK_TRUE       = 0x1,
+} MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK;
+typedef enum MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK {
+	MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK_FALSE= 0x0,
+	MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK_TRUE= 0x1,
+} MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK;
+typedef enum MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK {
+	MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK_FALSE   = 0x0,
+	MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK_TRUE    = 0x1,
+} MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK;
+typedef enum MASTER_UPDATE_MODE_MASTER_UPDATE_MODE {
+	MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_BETWEEN    = 0x0,
+	MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_HSYNCA     = 0x1,
+	MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_VSYNCA     = 0x2,
+	MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_BEFORE     = 0x3,
+} MASTER_UPDATE_MODE_MASTER_UPDATE_MODE;
+typedef enum MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE {
+	MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_BOTH= 0x0,
+	MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_EVEN= 0x1,
+	MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_ODD= 0x2,
+	MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_RESERVED= 0x3,
+} MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE;
+typedef enum CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE {
+	CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE_DISABLE= 0x0,
+	CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE_DEBUG= 0x1,
+	CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE_NORMAL= 0x2,
+} CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE;
+typedef enum CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR {
+	CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR_FALSE        = 0x0,
+	CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR_TRUE         = 0x1,
+} CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR;
+typedef enum CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR {
+	CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_FALSE= 0x0,
+	CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_TRUE= 0x1,
+} CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR;
+typedef enum CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR {
+	CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR_FALSE= 0x0,
+	CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR_TRUE= 0x1,
+} CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY {
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE {
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR {
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE {
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE;
+typedef enum CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR {
+	CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR_CLEAR_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE {
+	CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE;
+typedef enum CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE {
+	CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE;
+typedef enum CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR {
+	CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR_CLEAR_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE {
+	CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE;
+typedef enum CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE {
+	CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE_FALSE= 0x0,
+	CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_EN {
+	CRTC_CRC_CNTL_CRTC_CRC_EN_FALSE                  = 0x0,
+	CRTC_CRC_CNTL_CRTC_CRC_EN_TRUE                   = 0x1,
+} CRTC_CRC_CNTL_CRTC_CRC_EN;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_CONT_EN {
+	CRTC_CRC_CNTL_CRTC_CRC_CONT_EN_FALSE             = 0x0,
+	CRTC_CRC_CNTL_CRTC_CRC_CONT_EN_TRUE              = 0x1,
+} CRTC_CRC_CNTL_CRTC_CRC_CONT_EN;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE {
+	CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_LEFT          = 0x0,
+	CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_RIGHT         = 0x1,
+	CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_BOTH_EYES     = 0x2,
+	CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_BOTH_FIELDS   = 0x3,
+} CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE {
+	CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_TOP        = 0x0,
+	CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_BOTTOM     = 0x1,
+	CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_BOTH_BOTTOM= 0x2,
+	CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_BOTH_FIELD = 0x3,
+} CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS {
+	CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_FALSE= 0x0,
+	CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_TRUE= 0x1,
+} CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS;
+typedef enum CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT {
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_UAB          = 0x0,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_UA_B         = 0x1,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_U_AB         = 0x2,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_U_A_B        = 0x3,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_IAB          = 0x4,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_IA_B         = 0x5,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_I_AB         = 0x6,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_I_A_B        = 0x7,
+} CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT;
+typedef enum CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT {
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_UAB          = 0x0,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_UA_B         = 0x1,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_U_AB         = 0x2,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_U_A_B        = 0x3,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_IAB          = 0x4,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_IA_B         = 0x5,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_I_AB         = 0x6,
+	CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_I_A_B        = 0x7,
+} CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_DISABLE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_ONESHOT= 0x1,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_CONTINUOUS= 0x2,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_RESERVED= 0x3,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_1pixel= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_2pixel= 0x1,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_3pixel= 0x2,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_4pixel= 0x3,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE {
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE {
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR {
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE {
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT {
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_1FRAME= 0x0,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_2FRAME= 0x1,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_4FRAME= 0x2,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_8FRAME= 0x3,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_16FRAME= 0x4,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_32FRAME= 0x5,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_64FRAME= 0x6,
+	CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_128FRAME= 0x7,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT;
+typedef enum CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE {
+	CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR {
+	CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR;
+typedef enum CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE {
+	CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE;
+typedef enum CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE {
+	CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR {
+	CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR;
+typedef enum CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE {
+	CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE_FALSE= 0x0,
+	CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE {
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE_FALSE= 0x0,
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR {
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR_FALSE= 0x0,
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE {
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE_FALSE= 0x0,
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE {
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_FALSE= 0x0,
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE {
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE_OFF= 0x0,
+	CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE_ON= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN {
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_FALSE= 0x0,
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_TRUE= 0x1,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB {
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB_FALSE= 0x0,
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB_TRUE= 0x1,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE {
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_BLOCK_BOTH= 0x0,
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_BLOCK_INTERLACE= 0x1,
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_BLOCK_PROGRASSIVE= 0x2,
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_RESERVED= 0x3,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR {
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR_FALSE= 0x0,
+	CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR_TRUE= 0x1,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR;
+typedef enum CRTC_V_SYNC_A_POL {
+	CRTC_V_SYNC_A_POL_HIGH                           = 0x0,
+	CRTC_V_SYNC_A_POL_LOW                            = 0x1,
+} CRTC_V_SYNC_A_POL;
+typedef enum CRTC_H_SYNC_A_POL {
+	CRTC_H_SYNC_A_POL_HIGH                           = 0x0,
+	CRTC_H_SYNC_A_POL_LOW                            = 0x1,
+} CRTC_H_SYNC_A_POL;
+typedef enum CRTC_HORZ_REPETITION_COUNT {
+	CRTC_HORZ_REPETITION_COUNT_0                     = 0x0,
+	CRTC_HORZ_REPETITION_COUNT_1                     = 0x1,
+	CRTC_HORZ_REPETITION_COUNT_2                     = 0x2,
+	CRTC_HORZ_REPETITION_COUNT_3                     = 0x3,
+	CRTC_HORZ_REPETITION_COUNT_4                     = 0x4,
+	CRTC_HORZ_REPETITION_COUNT_5                     = 0x5,
+	CRTC_HORZ_REPETITION_COUNT_6                     = 0x6,
+	CRTC_HORZ_REPETITION_COUNT_7                     = 0x7,
+	CRTC_HORZ_REPETITION_COUNT_8                     = 0x8,
+	CRTC_HORZ_REPETITION_COUNT_9                     = 0x9,
+	CRTC_HORZ_REPETITION_COUNT_10                    = 0xa,
+	CRTC_HORZ_REPETITION_COUNT_11                    = 0xb,
+	CRTC_HORZ_REPETITION_COUNT_12                    = 0xc,
+	CRTC_HORZ_REPETITION_COUNT_13                    = 0xd,
+	CRTC_HORZ_REPETITION_COUNT_14                    = 0xe,
+	CRTC_HORZ_REPETITION_COUNT_15                    = 0xf,
+} CRTC_HORZ_REPETITION_COUNT;
+typedef enum PERFCOUNTER_CVALUE_SEL {
+	PERFCOUNTER_CVALUE_SEL_47_0                      = 0x0,
+	PERFCOUNTER_CVALUE_SEL_15_0                      = 0x1,
+	PERFCOUNTER_CVALUE_SEL_31_16                     = 0x2,
+	PERFCOUNTER_CVALUE_SEL_47_32                     = 0x3,
+	PERFCOUNTER_CVALUE_SEL_11_0                      = 0x4,
+	PERFCOUNTER_CVALUE_SEL_23_12                     = 0x5,
+	PERFCOUNTER_CVALUE_SEL_35_24                     = 0x6,
+	PERFCOUNTER_CVALUE_SEL_47_36                     = 0x7,
+} PERFCOUNTER_CVALUE_SEL;
+typedef enum PERFCOUNTER_INC_MODE {
+	PERFCOUNTER_INC_MODE_MULTI_BIT                   = 0x0,
+	PERFCOUNTER_INC_MODE_BOTH_EDGE                   = 0x1,
+	PERFCOUNTER_INC_MODE_LSB                         = 0x2,
+	PERFCOUNTER_INC_MODE_POS_EDGE                    = 0x3,
+} PERFCOUNTER_INC_MODE;
+typedef enum PERFCOUNTER_HW_CNTL_SEL {
+	PERFCOUNTER_HW_CNTL_SEL_RUNEN                    = 0x0,
+	PERFCOUNTER_HW_CNTL_SEL_CNTOFF                   = 0x1,
+} PERFCOUNTER_HW_CNTL_SEL;
+typedef enum PERFCOUNTER_RUNEN_MODE {
+	PERFCOUNTER_RUNEN_MODE_LEVEL                     = 0x0,
+	PERFCOUNTER_RUNEN_MODE_EDGE                      = 0x1,
+} PERFCOUNTER_RUNEN_MODE;
+typedef enum PERFCOUNTER_CNTOFF_START_DIS {
+	PERFCOUNTER_CNTOFF_START_ENABLE                  = 0x0,
+	PERFCOUNTER_CNTOFF_START_DISABLE                 = 0x1,
+} PERFCOUNTER_CNTOFF_START_DIS;
+typedef enum PERFCOUNTER_RESTART_EN {
+	PERFCOUNTER_RESTART_DISABLE                      = 0x0,
+	PERFCOUNTER_RESTART_ENABLE                       = 0x1,
+} PERFCOUNTER_RESTART_EN;
+typedef enum PERFCOUNTER_INT_EN {
+	PERFCOUNTER_INT_DISABLE                          = 0x0,
+	PERFCOUNTER_INT_ENABLE                           = 0x1,
+} PERFCOUNTER_INT_EN;
+typedef enum PERFCOUNTER_OFF_MASK {
+	PERFCOUNTER_OFF_MASK_DISABLE                     = 0x0,
+	PERFCOUNTER_OFF_MASK_ENABLE                      = 0x1,
+} PERFCOUNTER_OFF_MASK;
+typedef enum PERFCOUNTER_ACTIVE {
+	PERFCOUNTER_IS_IDLE                              = 0x0,
+	PERFCOUNTER_IS_ACTIVE                            = 0x1,
+} PERFCOUNTER_ACTIVE;
+typedef enum PERFCOUNTER_INT_TYPE {
+	PERFCOUNTER_INT_TYPE_LEVEL                       = 0x0,
+	PERFCOUNTER_INT_TYPE_PULSE                       = 0x1,
+} PERFCOUNTER_INT_TYPE;
+typedef enum PERFCOUNTER_COUNTED_VALUE_TYPE {
+	PERFCOUNTER_COUNTED_VALUE_TYPE_ACC               = 0x0,
+	PERFCOUNTER_COUNTED_VALUE_TYPE_MAX               = 0x1,
+} PERFCOUNTER_COUNTED_VALUE_TYPE;
+typedef enum PERFCOUNTER_CNTL_SEL {
+	PERFCOUNTER_CNTL_SEL_0                           = 0x0,
+	PERFCOUNTER_CNTL_SEL_1                           = 0x1,
+	PERFCOUNTER_CNTL_SEL_2                           = 0x2,
+	PERFCOUNTER_CNTL_SEL_3                           = 0x3,
+	PERFCOUNTER_CNTL_SEL_4                           = 0x4,
+	PERFCOUNTER_CNTL_SEL_5                           = 0x5,
+	PERFCOUNTER_CNTL_SEL_6                           = 0x6,
+	PERFCOUNTER_CNTL_SEL_7                           = 0x7,
+} PERFCOUNTER_CNTL_SEL;
+typedef enum PERFCOUNTER_CNT0_STATE {
+	PERFCOUNTER_CNT0_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT0_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT0_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT0_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT0_STATE;
+typedef enum PERFCOUNTER_STATE_SEL0 {
+	PERFCOUNTER_STATE_SEL0_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL0_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL0;
+typedef enum PERFCOUNTER_CNT1_STATE {
+	PERFCOUNTER_CNT1_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT1_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT1_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT1_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT1_STATE;
+typedef enum PERFCOUNTER_STATE_SEL1 {
+	PERFCOUNTER_STATE_SEL1_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL1_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL1;
+typedef enum PERFCOUNTER_CNT2_STATE {
+	PERFCOUNTER_CNT2_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT2_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT2_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT2_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT2_STATE;
+typedef enum PERFCOUNTER_STATE_SEL2 {
+	PERFCOUNTER_STATE_SEL2_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL2_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL2;
+typedef enum PERFCOUNTER_CNT3_STATE {
+	PERFCOUNTER_CNT3_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT3_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT3_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT3_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT3_STATE;
+typedef enum PERFCOUNTER_STATE_SEL3 {
+	PERFCOUNTER_STATE_SEL3_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL3_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL3;
+typedef enum PERFCOUNTER_CNT4_STATE {
+	PERFCOUNTER_CNT4_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT4_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT4_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT4_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT4_STATE;
+typedef enum PERFCOUNTER_STATE_SEL4 {
+	PERFCOUNTER_STATE_SEL4_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL4_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL4;
+typedef enum PERFCOUNTER_CNT5_STATE {
+	PERFCOUNTER_CNT5_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT5_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT5_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT5_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT5_STATE;
+typedef enum PERFCOUNTER_STATE_SEL5 {
+	PERFCOUNTER_STATE_SEL5_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL5_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL5;
+typedef enum PERFCOUNTER_CNT6_STATE {
+	PERFCOUNTER_CNT6_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT6_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT6_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT6_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT6_STATE;
+typedef enum PERFCOUNTER_STATE_SEL6 {
+	PERFCOUNTER_STATE_SEL6_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL6_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL6;
+typedef enum PERFCOUNTER_CNT7_STATE {
+	PERFCOUNTER_CNT7_STATE_RESET                     = 0x0,
+	PERFCOUNTER_CNT7_STATE_START                     = 0x1,
+	PERFCOUNTER_CNT7_STATE_FREEZE                    = 0x2,
+	PERFCOUNTER_CNT7_STATE_HW                        = 0x3,
+} PERFCOUNTER_CNT7_STATE;
+typedef enum PERFCOUNTER_STATE_SEL7 {
+	PERFCOUNTER_STATE_SEL7_GLOBAL                    = 0x0,
+	PERFCOUNTER_STATE_SEL7_LOCAL                     = 0x1,
+} PERFCOUNTER_STATE_SEL7;
+typedef enum PERFMON_STATE {
+	PERFMON_STATE_RESET                              = 0x0,
+	PERFMON_STATE_START                              = 0x1,
+	PERFMON_STATE_FREEZE                             = 0x2,
+	PERFMON_STATE_HW                                 = 0x3,
+} PERFMON_STATE;
+typedef enum PERFMON_CNTOFF_AND_OR {
+	PERFMON_CNTOFF_OR                                = 0x0,
+	PERFMON_CNTOFF_AND                               = 0x1,
+} PERFMON_CNTOFF_AND_OR;
+typedef enum PERFMON_CNTOFF_INT_EN {
+	PERFMON_CNTOFF_INT_DISABLE                       = 0x0,
+	PERFMON_CNTOFF_INT_ENABLE                        = 0x1,
+} PERFMON_CNTOFF_INT_EN;
+typedef enum PERFMON_CNTOFF_INT_TYPE {
+	PERFMON_CNTOFF_INT_TYPE_LEVEL                    = 0x0,
+	PERFMON_CNTOFF_INT_TYPE_PULSE                    = 0x1,
+} PERFMON_CNTOFF_INT_TYPE;
+typedef enum ENABLE {
+	DISABLE_THE_FEATURE                              = 0x0,
+	ENABLE_THE_FEATURE                               = 0x1,
+} ENABLE;
+typedef enum ENABLE_CLOCK {
+	DISABLE_THE_CLOCK                                = 0x0,
+	ENABLE_THE_CLOCK                                 = 0x1,
+} ENABLE_CLOCK;
+typedef enum FORCE_VBI {
+	FORCE_VBI_LOW                                    = 0x0,
+	FORCE_VBI_HIGH                                   = 0x1,
+} FORCE_VBI;
+typedef enum OVERRIDE_CGTT_SCLK {
+	OVERRIDE_CGTT_SCLK_NOOP                          = 0x0,
+	SET_OVERRIDE_CGTT_SCLK                           = 0x1,
+} OVERRIDE_CGTT_SCLK;
+typedef enum CLEAR_SMU_INTR {
+	SMU_INTR_STATUS_NOOP                             = 0x0,
+	SMU_INTR_STATUS_CLEAR                            = 0x1,
+} CLEAR_SMU_INTR;
+typedef enum STATIC_SCREEN_SMU_INTR {
+	STATIC_SCREEN_SMU_INTR_NOOP                      = 0x0,
+	SET_STATIC_SCREEN_SMU_INTR                       = 0x1,
+} STATIC_SCREEN_SMU_INTR;
+typedef enum JITTER_REMOVE_DISABLE {
+	ENABLE_JITTER_REMOVAL                            = 0x0,
+	DISABLE_JITTER_REMOVAL                           = 0x1,
+} JITTER_REMOVE_DISABLE;
+typedef enum DISABLE_CLOCK_GATING {
+	CLOCK_GATING_ENABLED                             = 0x0,
+	CLOCK_GATING_DISABLED                            = 0x1,
+} DISABLE_CLOCK_GATING;
+typedef enum DISABLE_CLOCK_GATING_IN_DCO {
+	CLOCK_GATING_ENABLED_IN_DCO                      = 0x0,
+	CLOCK_GATING_DISABLED_IN_DCO                     = 0x1,
+} DISABLE_CLOCK_GATING_IN_DCO;
+typedef enum DCCG_DEEP_COLOR_CNTL {
+	DCCG_DEEP_COLOR_DTO_DISABLE                      = 0x0,
+	DCCG_DEEP_COLOR_DTO_5_4_RATIO                    = 0x1,
+	DCCG_DEEP_COLOR_DTO_3_2_RATIO                    = 0x2,
+	DCCG_DEEP_COLOR_DTO_2_1_RATIO                    = 0x3,
+} DCCG_DEEP_COLOR_CNTL;
+typedef enum REFCLK_CLOCK_EN {
+	REFCLK_CLOCK_EN_PCIE_REFCLK                      = 0x0,
+	REFCLK_CLOCK_EN_ALLOW_SRC                        = 0x1,
+} REFCLK_CLOCK_EN;
+typedef enum REFCLK_SRC_SEL {
+	REFCLK_SRC_SEL_XTALIN                            = 0x0,
+	REFCLK_SRC_SEL_DISPPLL                           = 0x1,
+} REFCLK_SRC_SEL;
+typedef enum DPREFCLK_SRC_SEL {
+	DPREFCLK_SRC_SEL_CK                              = 0x0,
+	DPREFCLK_SRC_SEL_P0PLL                           = 0x1,
+	DPREFCLK_SRC_SEL_P1PLL                           = 0x2,
+	DPREFCLK_SRC_SEL_P2PLL                           = 0x3,
+	DPREFCLK_SRC_SEL_P3PLL                           = 0x4,
+} DPREFCLK_SRC_SEL;
+typedef enum XTAL_REF_SEL {
+	XTAL_REF_SEL_1X                                  = 0x0,
+	XTAL_REF_SEL_2X                                  = 0x1,
+} XTAL_REF_SEL;
+typedef enum XTAL_REF_CLOCK_SOURCE_SEL {
+	XTAL_REF_CLOCK_SOURCE_SEL_XTALIN                 = 0x0,
+	XTAL_REF_CLOCK_SOURCE_SEL_PPLL                   = 0x1,
+} XTAL_REF_CLOCK_SOURCE_SEL;
+typedef enum MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL {
+	MICROSECOND_TIME_BASE_CLOCK_IS_XTALIN            = 0x0,
+	MICROSECOND_TIME_BASE_CLOCK_IS_PPLL_REFCLK       = 0x1,
+} MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL;
+typedef enum ALLOW_SR_ON_TRANS_REQ {
+	ALLOW_SR_ON_TRANS_REQ_ENABLE                     = 0x0,
+	ALLOW_SR_ON_TRANS_REQ_DISABLE                    = 0x1,
+} ALLOW_SR_ON_TRANS_REQ;
+typedef enum MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL {
+	MILLISECOND_TIME_BASE_CLOCK_IS_XTALIN            = 0x0,
+	MILLISECOND_TIME_BASE_CLOCK_IS_PPLL_REFCLK       = 0x1,
+} MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL;
+typedef enum PIPE_PIXEL_RATE_SOURCE {
+	PIPE_PIXEL_RATE_SOURCE_P0PLL                     = 0x0,
+	PIPE_PIXEL_RATE_SOURCE_P1PLL                     = 0x1,
+	PIPE_PIXEL_RATE_SOURCE_P2PLL                     = 0x2,
+} PIPE_PIXEL_RATE_SOURCE;
+typedef enum PIPE_PHYPLL_PIXEL_RATE_SOURCE {
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYA            = 0x0,
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYB            = 0x1,
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYC            = 0x2,
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYD            = 0x3,
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYE            = 0x4,
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYF            = 0x5,
+	PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYG            = 0x6,
+} PIPE_PHYPLL_PIXEL_RATE_SOURCE;
+typedef enum PIPE_PIXEL_RATE_PLL_SOURCE {
+	PIPE_PIXEL_RATE_PLL_SOURCE_PHYPLL                = 0x0,
+	PIPE_PIXEL_RATE_PLL_SOURCE_DISPPLL               = 0x1,
+} PIPE_PIXEL_RATE_PLL_SOURCE;
+typedef enum DP_DTO_DS_DISABLE {
+	DP_DTO_DESPREAD_DISABLE                          = 0x0,
+	DP_DTO_DESPREAD_ENABLE                           = 0x1,
+} DP_DTO_DS_DISABLE;
+typedef enum CRTC_ADD_PIXEL {
+	CRTC_ADD_PIXEL_NOOP                              = 0x0,
+	CRTC_ADD_PIXEL_FORCE                             = 0x1,
+} CRTC_ADD_PIXEL;
+typedef enum CRTC_DROP_PIXEL {
+	CRTC_DROP_PIXEL_NOOP                             = 0x0,
+	CRTC_DROP_PIXEL_FORCE                            = 0x1,
+} CRTC_DROP_PIXEL;
+typedef enum SYMCLK_FE_FORCE_EN {
+	SYMCLK_FE_FORCE_EN_DISABLE                       = 0x0,
+	SYMCLK_FE_FORCE_EN_ENABLE                        = 0x1,
+} SYMCLK_FE_FORCE_EN;
+typedef enum SYMCLK_FE_FORCE_SRC {
+	SYMCLK_FE_FORCE_SRC_UNIPHYA                      = 0x0,
+	SYMCLK_FE_FORCE_SRC_UNIPHYB                      = 0x1,
+	SYMCLK_FE_FORCE_SRC_UNIPHYC                      = 0x2,
+	SYMCLK_FE_FORCE_SRC_UNIPHYD                      = 0x3,
+	SYMCLK_FE_FORCE_SRC_UNIPHYE                      = 0x4,
+	SYMCLK_FE_FORCE_SRC_UNIPHYF                      = 0x5,
+	SYMCLK_FE_FORCE_SRC_UNIPHYG                      = 0x6,
+} SYMCLK_FE_FORCE_SRC;
+typedef enum DPDBG_CLK_FORCE_EN {
+	DPDBG_CLK_FORCE_EN_DISABLE                       = 0x0,
+	DPDBG_CLK_FORCE_EN_ENABLE                        = 0x1,
+} DPDBG_CLK_FORCE_EN;
+typedef enum DVOACLK_COARSE_SKEW_CNTL {
+	DVOACLK_COARSE_SKEW_CNTL_NO_ADJUSTMENT           = 0x0,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_1_STEP            = 0x1,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_2_STEPS           = 0x2,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_3_STEPS           = 0x3,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_4_STEPS           = 0x4,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_5_STEPS           = 0x5,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_6_STEPS           = 0x6,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_7_STEPS           = 0x7,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_8_STEPS           = 0x8,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_9_STEPS           = 0x9,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_10_STEPS          = 0xa,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_11_STEPS          = 0xb,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_12_STEPS          = 0xc,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_13_STEPS          = 0xd,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_14_STEPS          = 0xe,
+	DVOACLK_COARSE_SKEW_CNTL_DELAY_15_STEPS          = 0xf,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_1_STEP            = 0x10,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_2_STEPS           = 0x11,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_3_STEPS           = 0x12,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_4_STEPS           = 0x13,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_5_STEPS           = 0x14,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_6_STEPS           = 0x15,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_7_STEPS           = 0x16,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_8_STEPS           = 0x17,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_9_STEPS           = 0x18,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_10_STEPS          = 0x19,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_11_STEPS          = 0x1a,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_12_STEPS          = 0x1b,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_13_STEPS          = 0x1c,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_14_STEPS          = 0x1d,
+	DVOACLK_COARSE_SKEW_CNTL_EARLY_15_STEPS          = 0x1e,
+} DVOACLK_COARSE_SKEW_CNTL;
+typedef enum DVOACLK_FINE_SKEW_CNTL {
+	DVOACLK_FINE_SKEW_CNTL_NO_ADJUSTMENT             = 0x0,
+	DVOACLK_FINE_SKEW_CNTL_DELAY_1_STEP              = 0x1,
+	DVOACLK_FINE_SKEW_CNTL_DELAY_2_STEPS             = 0x2,
+	DVOACLK_FINE_SKEW_CNTL_DELAY_3_STEPS             = 0x3,
+	DVOACLK_FINE_SKEW_CNTL_EARLY_1_STEP              = 0x4,
+	DVOACLK_FINE_SKEW_CNTL_EARLY_2_STEPS             = 0x5,
+	DVOACLK_FINE_SKEW_CNTL_EARLY_3_STEPS             = 0x6,
+	DVOACLK_FINE_SKEW_CNTL_EARLY_4_STEPS             = 0x7,
+} DVOACLK_FINE_SKEW_CNTL;
+typedef enum DVOACLKD_IN_PHASE {
+	DVOACLKD_IN_OPPOSITE_PHASE_WITH_PCLK_DVO         = 0x0,
+	DVOACLKD_IN_PHASE_WITH_PCLK_DVO                  = 0x1,
+} DVOACLKD_IN_PHASE;
+typedef enum DVOACLKC_IN_PHASE {
+	DVOACLKC_IN_OPPOSITE_PHASE_WITH_PCLK_DVO         = 0x0,
+	DVOACLKC_IN_PHASE_WITH_PCLK_DVO                  = 0x1,
+} DVOACLKC_IN_PHASE;
+typedef enum DVOACLKC_MVP_IN_PHASE {
+	DVOACLKC_MVP_IN_OPPOSITE_PHASE_WITH_PCLK_DVO     = 0x0,
+	DVOACLKC_MVP_IN_PHASE_WITH_PCLK_DVO              = 0x1,
+} DVOACLKC_MVP_IN_PHASE;
+typedef enum DVOACLKC_MVP_SKEW_PHASE_OVERRIDE {
+	DVOACLKC_MVP_SKEW_PHASE_OVERRIDE_DISABLE         = 0x0,
+	DVOACLKC_MVP_SKEW_PHASE_OVERRIDE_ENABLE          = 0x1,
+} DVOACLKC_MVP_SKEW_PHASE_OVERRIDE;
+typedef enum MVP_CLK_SRC_SEL {
+	MVP_CLK_SRC_SEL_RSRV                             = 0x0,
+	MVP_CLK_SRC_SEL_IO_1                             = 0x1,
+	MVP_CLK_SRC_SEL_IO_2                             = 0x2,
+	MVP_CLK_SRC_SEL_REFCLK                           = 0x3,
+} MVP_CLK_SRC_SEL;
+typedef enum DCCG_AUDIO_DTO0_SOURCE_SEL {
+	DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC0                 = 0x0,
+	DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC1                 = 0x1,
+	DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC2                 = 0x2,
+	DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC3                 = 0x3,
+	DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC4                 = 0x4,
+	DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC5                 = 0x5,
+	DCCG_AUDIO_DTO0_SOURCE_SEL_RESERVED              = 0x6,
+} DCCG_AUDIO_DTO0_SOURCE_SEL;
+typedef enum DCCG_AUDIO_DTO_SEL {
+	DCCG_AUDIO_DTO_SEL_AUDIO_DTO0                    = 0x0,
+	DCCG_AUDIO_DTO_SEL_AUDIO_DTO1                    = 0x1,
+	DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO                  = 0x2,
+} DCCG_AUDIO_DTO_SEL;
+typedef enum DCCG_AUDIO_DTO2_SOURCE_SEL {
+	DCCG_AUDIO_DTO2_SOURCE_SEL_AMCLK0                = 0x0,
+	DCCG_AUDIO_DTO2_SOURCE_SEL_AMCLK1                = 0x1,
+} DCCG_AUDIO_DTO2_SOURCE_SEL;
+typedef enum DCCG_AUDIO_DTO_USE_512FBR_DTO {
+	DCCG_AUDIO_DTO_USE_128FBR_FOR_DP                 = 0x0,
+	DCCG_AUDIO_DTO_USE_512FBR_FOR_DP                 = 0x1,
+} DCCG_AUDIO_DTO_USE_512FBR_DTO;
+typedef enum DCCG_DBG_EN {
+	DCCG_DBG_EN_DISABLE                              = 0x0,
+	DCCG_DBG_EN_ENABLE                               = 0x1,
+} DCCG_DBG_EN;
+typedef enum DCCG_DBG_BLOCK_SEL {
+	DCCG_DBG_BLOCK_SEL_DCCG                          = 0x0,
+	DCCG_DBG_BLOCK_SEL_PMON                          = 0x1,
+	DCCG_DBG_BLOCK_SEL_PMON2                         = 0x2,
+} DCCG_DBG_BLOCK_SEL;
+typedef enum DCCG_DBG_CLOCK_SEL {
+	DCCG_DBG_CLOCK_SEL_DISPCLK                       = 0x0,
+	DCCG_DBG_CLOCK_SEL_SCLK                          = 0x1,
+	DCCG_DBG_CLOCK_SEL_MVPCLK                        = 0x2,
+	DCCG_DBG_CLOCK_SEL_DVOCLK                        = 0x3,
+	DCCG_DBG_CLOCK_SEL_DACCLK                        = 0x4,
+	DCCG_DBG_CLOCK_SEL_REFCLK                        = 0x5,
+	DCCG_DBG_CLOCK_SEL_SYMCLKA                       = 0x6,
+	DCCG_DBG_CLOCK_SEL_SYMCLKB                       = 0x7,
+	DCCG_DBG_CLOCK_SEL_SYMCLKC                       = 0x8,
+	DCCG_DBG_CLOCK_SEL_SYMCLKD                       = 0x9,
+	DCCG_DBG_CLOCK_SEL_SYMCLKE                       = 0xa,
+	DCCG_DBG_CLOCK_SEL_SYMCLKG                       = 0xb,
+	DCCG_DBG_CLOCK_SEL_SYMCLKF                       = 0xc,
+	DCCG_DBG_CLOCK_SEL_RSRV                          = 0xd,
+	DCCG_DBG_CLOCK_SEL_AOMCLK0                       = 0xe,
+	DCCG_DBG_CLOCK_SEL_AOMCLK1                       = 0xf,
+	DCCG_DBG_CLOCK_SEL_AOMCLK2                       = 0x10,
+	DCCG_DBG_CLOCK_SEL_DPREFCLK                      = 0x11,
+	DCCG_DBG_CLOCK_SEL_UNB_DB_CLK                    = 0x12,
+	DCCG_DBG_CLOCK_SEL_DSICLK                        = 0x13,
+	DCCG_DBG_CLOCK_SEL_BYTECLK                       = 0x14,
+	DCCG_DBG_CLOCK_SEL_ESCCLK                        = 0x15,
+	DCCG_DBG_CLOCK_SEL_SYMCLKLPA                     = 0x16,
+	DCCG_DBG_CLOCK_SEL_SYMCLKLPB                     = 0x17,
+} DCCG_DBG_CLOCK_SEL;
+typedef enum DCCG_DBG_OUT_BLOCK_SEL {
+	DCCG_DBG_OUT_BLOCK_SEL_DCCG                      = 0x0,
+	DCCG_DBG_OUT_BLOCK_SEL_DCO                       = 0x1,
+	DCCG_DBG_OUT_BLOCK_SEL_DCIO                      = 0x2,
+	DCCG_DBG_OUT_BLOCK_SEL_DSI                       = 0x3,
+} DCCG_DBG_OUT_BLOCK_SEL;
+typedef enum DISPCLK_FREQ_RAMP_DONE {
+	DISPCLK_FREQ_RAMP_IN_PROGRESS                    = 0x0,
+	DISPCLK_FREQ_RAMP_COMPLETED                      = 0x1,
+} DISPCLK_FREQ_RAMP_DONE;
+typedef enum DCCG_FIFO_ERRDET_RESET {
+	DCCG_FIFO_ERRDET_RESET_NOOP                      = 0x0,
+	DCCG_FIFO_ERRDET_RESET_FORCE                     = 0x1,
+} DCCG_FIFO_ERRDET_RESET;
+typedef enum DCCG_FIFO_ERRDET_STATE {
+	DCCG_FIFO_ERRDET_STATE_DETECTION                 = 0x0,
+	DCCG_FIFO_ERRDET_STATE_CALIBRATION               = 0x1,
+} DCCG_FIFO_ERRDET_STATE;
+typedef enum DCCG_FIFO_ERRDET_OVR_EN {
+	DCCG_FIFO_ERRDET_OVR_DISABLE                     = 0x0,
+	DCCG_FIFO_ERRDET_OVR_ENABLE                      = 0x1,
+} DCCG_FIFO_ERRDET_OVR_EN;
+typedef enum DISPCLK_CHG_FWD_CORR_DISABLE {
+	DISPCLK_CHG_FWD_CORR_ENABLE_AT_BEGINNING         = 0x0,
+	DISPCLK_CHG_FWD_CORR_DISABLE_AT_BEGINNING        = 0x1,
+} DISPCLK_CHG_FWD_CORR_DISABLE;
+typedef enum DC_MEM_GLOBAL_PWR_REQ_DIS {
+	DC_MEM_GLOBAL_PWR_REQ_ENABLE                     = 0x0,
+	DC_MEM_GLOBAL_PWR_REQ_DISABLE                    = 0x1,
+} DC_MEM_GLOBAL_PWR_REQ_DIS;
+typedef enum DCCG_PERF_RUN {
+	DCCG_PERF_RUN_NOOP                               = 0x0,
+	DCCG_PERF_RUN_START                              = 0x1,
+} DCCG_PERF_RUN;
+typedef enum DCCG_PERF_MODE_VSYNC {
+	DCCG_PERF_MODE_VSYNC_NOOP                        = 0x0,
+	DCCG_PERF_MODE_VSYNC_START                       = 0x1,
+} DCCG_PERF_MODE_VSYNC;
+typedef enum DCCG_PERF_MODE_HSYNC {
+	DCCG_PERF_MODE_HSYNC_NOOP                        = 0x0,
+	DCCG_PERF_MODE_HSYNC_START                       = 0x1,
+} DCCG_PERF_MODE_HSYNC;
+typedef enum DCCG_PERF_CRTC_SELECT {
+	DCCG_PERF_SEL_CRTC0                              = 0x0,
+	DCCG_PERF_SEL_CRTC1                              = 0x1,
+	DCCG_PERF_SEL_CRTC2                              = 0x2,
+	DCCG_PERF_SEL_CRTC3                              = 0x3,
+	DCCG_PERF_SEL_CRTC4                              = 0x4,
+	DCCG_PERF_SEL_CRTC5                              = 0x5,
+} DCCG_PERF_CRTC_SELECT;
+typedef enum CLOCK_BRANCH_SOFT_RESET {
+	CLOCK_BRANCH_SOFT_RESET_NOOP                     = 0x0,
+	CLOCK_BRANCH_SOFT_RESET_FORCE                    = 0x1,
+} CLOCK_BRANCH_SOFT_RESET;
+typedef enum PLL_CFG_IF_SOFT_RESET {
+	PLL_CFG_IF_SOFT_RESET_NOOP                       = 0x0,
+	PLL_CFG_IF_SOFT_RESET_FORCE                      = 0x1,
+} PLL_CFG_IF_SOFT_RESET;
+typedef enum DVO_ENABLE_RST {
+	DVO_ENABLE_RST_DISABLE                           = 0x0,
+	DVO_ENABLE_RST_ENABLE                            = 0x1,
+} DVO_ENABLE_RST;
+typedef enum LptNumBanks {
+	LPT_NUM_BANKS_2BANK                              = 0x0,
+	LPT_NUM_BANKS_4BANK                              = 0x1,
+	LPT_NUM_BANKS_8BANK                              = 0x2,
+	LPT_NUM_BANKS_16BANK                             = 0x3,
+	LPT_NUM_BANKS_32BANK                             = 0x4,
+} LptNumBanks;
+typedef enum DCIO_DC_GENERICA_SEL {
+	DCIO_GENERICA_SEL_DACA_STEREOSYNC                = 0x0,
+	DCIO_GENERICA_SEL_STEREOSYNC                     = 0x1,
+	DCIO_GENERICA_SEL_DACA_PIXCLK                    = 0x2,
+	DCIO_GENERICA_SEL_DACB_PIXCLK                    = 0x3,
+	DCIO_GENERICA_SEL_DVOA_CTL3                      = 0x4,
+	DCIO_GENERICA_SEL_P1_PLLCLK                      = 0x5,
+	DCIO_GENERICA_SEL_P2_PLLCLK                      = 0x6,
+	DCIO_GENERICA_SEL_DVOA_STEREOSYNC                = 0x7,
+	DCIO_GENERICA_SEL_DACA_FIELD_NUMBER              = 0x8,
+	DCIO_GENERICA_SEL_DACB_FIELD_NUMBER              = 0x9,
+	DCIO_GENERICA_SEL_GENERICA_DCCG                  = 0xa,
+	DCIO_GENERICA_SEL_SYNCEN                         = 0xb,
+	DCIO_GENERICA_SEL_GENERICA_SCG                   = 0xc,
+	DCIO_GENERICA_SEL_RESERVED_VALUE13               = 0xd,
+	DCIO_GENERICA_SEL_RESERVED_VALUE14               = 0xe,
+	DCIO_GENERICA_SEL_RESERVED_VALUE15               = 0xf,
+	DCIO_GENERICA_SEL_GENERICA_DPRX                  = 0x10,
+	DCIO_GENERICA_SEL_GENERICB_DPRX                  = 0x11,
+} DCIO_DC_GENERICA_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_REFDIV_CLK_SEL {
+	DCIO_UNIPHYA_TEST_REFDIV_CLK                     = 0x0,
+	DCIO_UNIPHYB_TEST_REFDIV_CLK                     = 0x1,
+	DCIO_UNIPHYC_TEST_REFDIV_CLK                     = 0x2,
+	DCIO_UNIPHYD_TEST_REFDIV_CLK                     = 0x3,
+	DCIO_UNIPHYE_TEST_REFDIV_CLK                     = 0x4,
+	DCIO_UNIPHYF_TEST_REFDIV_CLK                     = 0x5,
+	DCIO_UNIPHYG_TEST_REFDIV_CLK                     = 0x6,
+	DCIO_UNIPHYLPA_TEST_REFDIV_CLK                   = 0x7,
+	DCIO_UNIPHYLPB_TEST_REFDIV_CLK                   = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_REFDIV_CLK_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_SEL {
+	DCIO_UNIPHYA_FBDIV_CLK                           = 0x0,
+	DCIO_UNIPHYB_FBDIV_CLK                           = 0x1,
+	DCIO_UNIPHYC_FBDIV_CLK                           = 0x2,
+	DCIO_UNIPHYD_FBDIV_CLK                           = 0x3,
+	DCIO_UNIPHYE_FBDIV_CLK                           = 0x4,
+	DCIO_UNIPHYF_FBDIV_CLK                           = 0x5,
+	DCIO_UNIPHYG_FBDIV_CLK                           = 0x6,
+	DCIO_UNIPHYLPA_FBDIV_CLK                         = 0x7,
+	DCIO_UNIPHYLPB_FBDIV_CLK                         = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_FBDIV_SSC_CLK_SEL {
+	DCIO_UNIPHYA_FBDIV_SSC_CLK                       = 0x0,
+	DCIO_UNIPHYB_FBDIV_SSC_CLK                       = 0x1,
+	DCIO_UNIPHYC_FBDIV_SSC_CLK                       = 0x2,
+	DCIO_UNIPHYD_FBDIV_SSC_CLK                       = 0x3,
+	DCIO_UNIPHYE_FBDIV_SSC_CLK                       = 0x4,
+	DCIO_UNIPHYF_FBDIV_SSC_CLK                       = 0x5,
+	DCIO_UNIPHYG_FBDIV_SSC_CLK                       = 0x6,
+	DCIO_UNIPHYLPA_FBDIV_SSC_CLK                     = 0x7,
+	DCIO_UNIPHYLPB_FBDIV_SSC_CLK                     = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_FBDIV_SSC_CLK_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_DIV2_SEL {
+	DCIO_UNIPHYA_TEST_FBDIV_CLK_DIV2                 = 0x0,
+	DCIO_UNIPHYB_TEST_FBDIV_CLK_DIV2                 = 0x1,
+	DCIO_UNIPHYC_TEST_FBDIV_CLK_DIV2                 = 0x2,
+	DCIO_UNIPHYD_TEST_FBDIV_CLK_DIV2                 = 0x3,
+	DCIO_UNIPHYE_TEST_FBDIV_CLK_DIV2                 = 0x4,
+	DCIO_UNIPHYF_TEST_FBDIV_CLK_DIV2                 = 0x5,
+	DCIO_UNIPHYG_TEST_FBDIV_CLK_DIV2                 = 0x6,
+	DCIO_UNIPHYLPA_TEST_FBDIV_CLK_DIV2               = 0x7,
+	DCIO_UNIPHYLPB_TEST_FBDIV_CLK_DIV2               = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_DIV2_SEL;
+typedef enum DCIO_DC_GENERICB_SEL {
+	DCIO_GENERICB_SEL_DACA_STEREOSYNC                = 0x0,
+	DCIO_GENERICB_SEL_STEREOSYNC                     = 0x1,
+	DCIO_GENERICB_SEL_DACA_PIXCLK                    = 0x2,
+	DCIO_GENERICB_SEL_DACB_PIXCLK                    = 0x3,
+	DCIO_GENERICB_SEL_DVOA_CTL3                      = 0x4,
+	DCIO_GENERICB_SEL_P1_PLLCLK                      = 0x5,
+	DCIO_GENERICB_SEL_P2_PLLCLK                      = 0x6,
+	DCIO_GENERICB_SEL_DVOA_STEREOSYNC                = 0x7,
+	DCIO_GENERICB_SEL_DACA_FIELD_NUMBER              = 0x8,
+	DCIO_GENERICB_SEL_DACB_FIELD_NUMBER              = 0x9,
+	DCIO_GENERICB_SEL_GENERICB_DCCG                  = 0xa,
+	DCIO_GENERICB_SEL_SYNCEN                         = 0xb,
+	DCIO_GENERICB_SEL_GENERICA_SCG                   = 0xc,
+	DCIO_GENERICB_SEL_RESERVED_VALUE13               = 0xd,
+	DCIO_GENERICB_SEL_RESERVED_VALUE14               = 0xe,
+	DCIO_GENERICB_SEL_RESERVED_VALUE15               = 0xf,
+} DCIO_DC_GENERICB_SEL;
+typedef enum DCIO_DC_PAD_EXTERN_SIG_SEL {
+	DCIO_DC_PAD_EXTERN_SIG_SEL_MVP                   = 0x0,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_VSYNCA                = 0x1,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_GENLK_CLK             = 0x2,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_GENLK_VSYNC           = 0x3,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_GENERICA              = 0x4,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_GENERICB              = 0x5,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_GENERICC              = 0x6,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_HPD1                  = 0x7,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_HPD2                  = 0x8,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_DDC1CLK               = 0x9,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_DDC1DATA              = 0xa,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_DDC2CLK               = 0xb,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_DDC2DATA              = 0xc,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_VHAD1                 = 0xd,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_VHAD0                 = 0xe,
+	DCIO_DC_PAD_EXTERN_SIG_SEL_VPHCTL                = 0xf,
+} DCIO_DC_PAD_EXTERN_SIG_SEL;
+typedef enum DCIO_DC_PAD_EXTERN_SIG_MVP_PIXEL_SRC_STATUS {
+	DCIO_MVP_PIXEL_SRC_STATUS_HSYNCA                 = 0x0,
+	DCIO_MVP_PIXEL_SRC_STATUS_HSYNCA_DUPLICATE       = 0x1,
+	DCIO_MVP_PIXEL_SRC_STATUS_CRTC                   = 0x2,
+	DCIO_MVP_PIXEL_SRC_STATUS_LB                     = 0x3,
+} DCIO_DC_PAD_EXTERN_SIG_MVP_PIXEL_SRC_STATUS;
+typedef enum DCIO_DC_REF_CLK_CNTL_HSYNCA_OUTPUT_SEL {
+	DCIO_HSYNCA_OUTPUT_SEL_DISABLE                   = 0x0,
+	DCIO_HSYNCA_OUTPUT_SEL_PPLL1                     = 0x1,
+	DCIO_HSYNCA_OUTPUT_SEL_PPLL2                     = 0x2,
+	DCIO_HSYNCA_OUTPUT_SEL_RESERVED                  = 0x3,
+} DCIO_DC_REF_CLK_CNTL_HSYNCA_OUTPUT_SEL;
+typedef enum DCIO_DC_REF_CLK_CNTL_GENLK_CLK_OUTPUT_SEL {
+	DCIO_GENLK_CLK_OUTPUT_SEL_DISABLE                = 0x0,
+	DCIO_GENLK_CLK_OUTPUT_SEL_PPLL1                  = 0x1,
+	DCIO_GENLK_CLK_OUTPUT_SEL_PPLL2                  = 0x2,
+	DCIO_GENLK_CLK_OUTPUT_SEL_RESERVED_VALUE3        = 0x3,
+} DCIO_DC_REF_CLK_CNTL_GENLK_CLK_OUTPUT_SEL;
+typedef enum DCIO_DC_GPIO_VIP_DEBUG {
+	DCIO_DC_GPIO_VIP_DEBUG_NORMAL                    = 0x0,
+	DCIO_DC_GPIO_VIP_DEBUG_CG_BIG                    = 0x1,
+} DCIO_DC_GPIO_VIP_DEBUG;
+typedef enum DCIO_DC_GPIO_MACRO_DEBUG {
+	DCIO_DC_GPIO_MACRO_DEBUG_NORMAL                  = 0x0,
+	DCIO_DC_GPIO_MACRO_DEBUG_CHIP_BIF                = 0x1,
+	DCIO_DC_GPIO_MACRO_DEBUG_RESERVED_VALUE2         = 0x2,
+	DCIO_DC_GPIO_MACRO_DEBUG_RESERVED_VALUE3         = 0x3,
+} DCIO_DC_GPIO_MACRO_DEBUG;
+typedef enum DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL {
+	DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_NORMAL       = 0x0,
+	DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_SWAP         = 0x1,
+} DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL;
+typedef enum DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN {
+	DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN_BYPASS            = 0x0,
+	DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN_ENABLE            = 0x1,
+} DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN;
+typedef enum DCIO_DC_GPIO_DEBUG_DPRX_LOOPBACK_ENABLE {
+	DCIO_DPRX_LOOPBACK_ENABLE_NORMAL                 = 0x0,
+	DCIO_DPRX_LOOPBACK_ENABLE_LOOP                   = 0x1,
+} DCIO_DC_GPIO_DEBUG_DPRX_LOOPBACK_ENABLE;
+typedef enum DCIO_UNIPHY_LINK_CNTL_MINIMUM_PIXVLD_LOW_DURATION {
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_3_CLOCKS = 0x0,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_7_CLOCKS = 0x1,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_11_CLOCKS= 0x2,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_15_CLOCKS= 0x3,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_19_CLOCKS= 0x4,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_23_CLOCKS= 0x5,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_27_CLOCKS= 0x6,
+	DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_31_CLOCKS= 0x7,
+} DCIO_UNIPHY_LINK_CNTL_MINIMUM_PIXVLD_LOW_DURATION;
+typedef enum DCIO_UNIPHY_LINK_CNTL_CHANNEL_INVERT {
+	DCIO_UNIPHY_CHANNEL_NO_INVERSION                 = 0x0,
+	DCIO_UNIPHY_CHANNEL_INVERTED                     = 0x1,
+} DCIO_UNIPHY_LINK_CNTL_CHANNEL_INVERT;
+typedef enum DCIO_UNIPHY_LINK_CNTL_ENABLE_HPD_MASK {
+	DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_DISALLOW        = 0x0,
+	DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_ALLOW           = 0x1,
+	DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_ALLOW_DEBOUNCED = 0x2,
+	DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_ALLOW_TOGGLE_FILTERED= 0x3,
+} DCIO_UNIPHY_LINK_CNTL_ENABLE_HPD_MASK;
+typedef enum DCIO_UNIPHY_CHANNEL_XBAR_SOURCE {
+	DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH0              = 0x0,
+	DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH1              = 0x1,
+	DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH2              = 0x2,
+	DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH3              = 0x3,
+} DCIO_UNIPHY_CHANNEL_XBAR_SOURCE;
+typedef enum DCIO_DC_DVODATA_CONFIG_VIP_MUX_EN {
+	DCIO_VIP_MUX_EN_DVO                              = 0x0,
+	DCIO_VIP_MUX_EN_VIP                              = 0x1,
+} DCIO_DC_DVODATA_CONFIG_VIP_MUX_EN;
+typedef enum DCIO_DC_DVODATA_CONFIG_VIP_ALTER_MAPPING_EN {
+	DCIO_VIP_ALTER_MAPPING_EN_DEFAULT                = 0x0,
+	DCIO_VIP_ALTER_MAPPING_EN_ALTERNATIVE            = 0x1,
+} DCIO_DC_DVODATA_CONFIG_VIP_ALTER_MAPPING_EN;
+typedef enum DCIO_DC_DVODATA_CONFIG_DVO_ALTER_MAPPING_EN {
+	DCIO_DVO_ALTER_MAPPING_EN_DEFAULT                = 0x0,
+	DCIO_DVO_ALTER_MAPPING_EN_ALTERNATIVE            = 0x1,
+} DCIO_DC_DVODATA_CONFIG_DVO_ALTER_MAPPING_EN;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_DISABLE_SYNCEN_CONTROL_OF_TX_EN {
+	DCIO_LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_ENABLE= 0x0,
+	DCIO_LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_DISABLE= 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_DISABLE_SYNCEN_CONTROL_OF_TX_EN;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_TARGET_STATE {
+	DCIO_LVTMA_PWRSEQ_TARGET_STATE_LCD_OFF           = 0x0,
+	DCIO_LVTMA_PWRSEQ_TARGET_STATE_LCD_ON            = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_TARGET_STATE;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_SYNCEN_POL {
+	DCIO_LVTMA_SYNCEN_POL_NON_INVERT                 = 0x0,
+	DCIO_LVTMA_SYNCEN_POL_INVERT                     = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_SYNCEN_POL;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON {
+	DCIO_LVTMA_DIGON_OFF                             = 0x0,
+	DCIO_LVTMA_DIGON_ON                              = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON_POL {
+	DCIO_LVTMA_DIGON_POL_NON_INVERT                  = 0x0,
+	DCIO_LVTMA_DIGON_POL_INVERT                      = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON_POL;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON {
+	DCIO_LVTMA_BLON_OFF                              = 0x0,
+	DCIO_LVTMA_BLON_ON                               = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON_POL {
+	DCIO_LVTMA_BLON_POL_NON_INVERT                   = 0x0,
+	DCIO_LVTMA_BLON_POL_INVERT                       = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON_POL;
+typedef enum DCIO_LVTMA_PWRSEQ_DELAY2_LVTMA_VARY_BL_OVERRIDE_EN {
+	DCIO_LVTMA_VARY_BL_OVERRIDE_EN_BLON              = 0x0,
+	DCIO_LVTMA_VARY_BL_OVERRIDE_EN_SEPARATE          = 0x1,
+} DCIO_LVTMA_PWRSEQ_DELAY2_LVTMA_VARY_BL_OVERRIDE_EN;
+typedef enum DCIO_BL_PWM_CNTL_BL_PWM_FRACTIONAL_EN {
+	DCIO_BL_PWM_FRACTIONAL_DISABLE                   = 0x0,
+	DCIO_BL_PWM_FRACTIONAL_ENABLE                    = 0x1,
+} DCIO_BL_PWM_CNTL_BL_PWM_FRACTIONAL_EN;
+typedef enum DCIO_BL_PWM_CNTL_BL_PWM_EN {
+	DCIO_BL_PWM_DISABLE                              = 0x0,
+	DCIO_BL_PWM_ENABLE                               = 0x1,
+} DCIO_BL_PWM_CNTL_BL_PWM_EN;
+typedef enum DCIO_BL_PWM_CNTL2_DBG_BL_PWM_INPUT_REFCLK_SELECT {
+	DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_NORMAL       = 0x0,
+	DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_DEBUG1       = 0x1,
+	DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_DEBUG2       = 0x2,
+	DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_DEBUG3       = 0x3,
+} DCIO_BL_PWM_CNTL2_DBG_BL_PWM_INPUT_REFCLK_SELECT;
+typedef enum DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_BL_OUT_ENABLE {
+	DCIO_BL_PWM_OVERRIDE_BL_OUT_DISABLE              = 0x0,
+	DCIO_BL_PWM_OVERRIDE_BL_OUT_ENABLE               = 0x1,
+} DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_BL_OUT_ENABLE;
+typedef enum DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN {
+	DCIO_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_NORMAL      = 0x0,
+	DCIO_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_PWM         = 0x1,
+} DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN;
+typedef enum DCIO_BL_PWM_GRP1_REG_LOCK {
+	DCIO_BL_PWM_GRP1_REG_LOCK_DISABLE                = 0x0,
+	DCIO_BL_PWM_GRP1_REG_LOCK_ENABLE                 = 0x1,
+} DCIO_BL_PWM_GRP1_REG_LOCK;
+typedef enum DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START {
+	DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START_DISABLE   = 0x0,
+	DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START_ENABLE    = 0x1,
+} DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START;
+typedef enum DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL {
+	DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER1= 0x0,
+	DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER2= 0x1,
+	DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER3= 0x2,
+	DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER4= 0x3,
+	DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER5= 0x4,
+	DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER6= 0x5,
+} DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL;
+typedef enum DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN {
+	DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_BL_PWM = 0x0,
+	DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_BL1_PWM= 0x1,
+} DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN;
+typedef enum DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN {
+	DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_ENABLE       = 0x0,
+	DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_DISABLE      = 0x1,
+} DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN;
+typedef enum DCIO_GSL_SEL {
+	DCIO_GSL_SEL_GROUP_0                             = 0x0,
+	DCIO_GSL_SEL_GROUP_1                             = 0x1,
+	DCIO_GSL_SEL_GROUP_2                             = 0x2,
+} DCIO_GSL_SEL;
+typedef enum DCIO_GENLK_CLK_GSL_MASK {
+	DCIO_GENLK_CLK_GSL_MASK_NO                       = 0x0,
+	DCIO_GENLK_CLK_GSL_MASK_TIMING                   = 0x1,
+	DCIO_GENLK_CLK_GSL_MASK_STEREO                   = 0x2,
+} DCIO_GENLK_CLK_GSL_MASK;
+typedef enum DCIO_GENLK_VSYNC_GSL_MASK {
+	DCIO_GENLK_VSYNC_GSL_MASK_NO                     = 0x0,
+	DCIO_GENLK_VSYNC_GSL_MASK_TIMING                 = 0x1,
+	DCIO_GENLK_VSYNC_GSL_MASK_STEREO                 = 0x2,
+} DCIO_GENLK_VSYNC_GSL_MASK;
+typedef enum DCIO_SWAPLOCK_A_GSL_MASK {
+	DCIO_SWAPLOCK_A_GSL_MASK_NO                      = 0x0,
+	DCIO_SWAPLOCK_A_GSL_MASK_TIMING                  = 0x1,
+	DCIO_SWAPLOCK_A_GSL_MASK_STEREO                  = 0x2,
+} DCIO_SWAPLOCK_A_GSL_MASK;
+typedef enum DCIO_SWAPLOCK_B_GSL_MASK {
+	DCIO_SWAPLOCK_B_GSL_MASK_NO                      = 0x0,
+	DCIO_SWAPLOCK_B_GSL_MASK_TIMING                  = 0x1,
+	DCIO_SWAPLOCK_B_GSL_MASK_STEREO                  = 0x2,
+} DCIO_SWAPLOCK_B_GSL_MASK;
+typedef enum DCIO_GSL_VSYNC_SEL {
+	DCIO_GSL_VSYNC_SEL_PIPE0                         = 0x0,
+	DCIO_GSL_VSYNC_SEL_PIPE1                         = 0x1,
+	DCIO_GSL_VSYNC_SEL_PIPE2                         = 0x2,
+	DCIO_GSL_VSYNC_SEL_PIPE3                         = 0x3,
+	DCIO_GSL_VSYNC_SEL_PIPE4                         = 0x4,
+	DCIO_GSL_VSYNC_SEL_PIPE5                         = 0x5,
+} DCIO_GSL_VSYNC_SEL;
+typedef enum DCIO_GSL0_TIMING_SYNC_SEL {
+	DCIO_GSL0_TIMING_SYNC_SEL_PIPE                   = 0x0,
+	DCIO_GSL0_TIMING_SYNC_SEL_GENCLK_VSYNC           = 0x1,
+	DCIO_GSL0_TIMING_SYNC_SEL_GENCLK_CLK             = 0x2,
+	DCIO_GSL0_TIMING_SYNC_SEL_SWAPLOCK_A             = 0x3,
+	DCIO_GSL0_TIMING_SYNC_SEL_SWAPLOCK_B             = 0x4,
+} DCIO_GSL0_TIMING_SYNC_SEL;
+typedef enum DCIO_GSL0_GLOBAL_UNLOCK_SEL {
+	DCIO_GSL0_GLOBAL_UNLOCK_SEL_INVERSION            = 0x0,
+	DCIO_GSL0_GLOBAL_UNLOCK_SEL_GENCLK_VSYNC         = 0x1,
+	DCIO_GSL0_GLOBAL_UNLOCK_SEL_GENLK_CLK            = 0x2,
+	DCIO_GSL0_GLOBAL_UNLOCK_SEL_SWAPLOCK_A           = 0x3,
+	DCIO_GSL0_GLOBAL_UNLOCK_SEL_SWAPLOCK_B           = 0x4,
+} DCIO_GSL0_GLOBAL_UNLOCK_SEL;
+typedef enum DCIO_GSL1_TIMING_SYNC_SEL {
+	DCIO_GSL1_TIMING_SYNC_SEL_PIPE                   = 0x0,
+	DCIO_GSL1_TIMING_SYNC_SEL_GENCLK_VSYNC           = 0x1,
+	DCIO_GSL1_TIMING_SYNC_SEL_GENCLK_CLK             = 0x2,
+	DCIO_GSL1_TIMING_SYNC_SEL_SWAPLOCK_A             = 0x3,
+	DCIO_GSL1_TIMING_SYNC_SEL_SWAPLOCK_B             = 0x4,
+} DCIO_GSL1_TIMING_SYNC_SEL;
+typedef enum DCIO_GSL1_GLOBAL_UNLOCK_SEL {
+	DCIO_GSL1_GLOBAL_UNLOCK_SEL_INVERSION            = 0x0,
+	DCIO_GSL1_GLOBAL_UNLOCK_SEL_GENCLK_VSYNC         = 0x1,
+	DCIO_GSL1_GLOBAL_UNLOCK_SEL_GENLK_CLK            = 0x2,
+	DCIO_GSL1_GLOBAL_UNLOCK_SEL_SWAPLOCK_A           = 0x3,
+	DCIO_GSL1_GLOBAL_UNLOCK_SEL_SWAPLOCK_B           = 0x4,
+} DCIO_GSL1_GLOBAL_UNLOCK_SEL;
+typedef enum DCIO_GSL2_TIMING_SYNC_SEL {
+	DCIO_GSL2_TIMING_SYNC_SEL_PIPE                   = 0x0,
+	DCIO_GSL2_TIMING_SYNC_SEL_GENCLK_VSYNC           = 0x1,
+	DCIO_GSL2_TIMING_SYNC_SEL_GENCLK_CLK             = 0x2,
+	DCIO_GSL2_TIMING_SYNC_SEL_SWAPLOCK_A             = 0x3,
+	DCIO_GSL2_TIMING_SYNC_SEL_SWAPLOCK_B             = 0x4,
+} DCIO_GSL2_TIMING_SYNC_SEL;
+typedef enum DCIO_GSL2_GLOBAL_UNLOCK_SEL {
+	DCIO_GSL2_GLOBAL_UNLOCK_SEL_INVERSION            = 0x0,
+	DCIO_GSL2_GLOBAL_UNLOCK_SEL_GENCLK_VSYNC         = 0x1,
+	DCIO_GSL2_GLOBAL_UNLOCK_SEL_GENLK_CLK            = 0x2,
+	DCIO_GSL2_GLOBAL_UNLOCK_SEL_SWAPLOCK_A           = 0x3,
+	DCIO_GSL2_GLOBAL_UNLOCK_SEL_SWAPLOCK_B           = 0x4,
+} DCIO_GSL2_GLOBAL_UNLOCK_SEL;
+typedef enum DCIO_DC_GPU_TIMER_START_POSITION {
+	DCIO_GPU_TIMER_START_0_END_27                    = 0x0,
+	DCIO_GPU_TIMER_START_1_END_28                    = 0x1,
+	DCIO_GPU_TIMER_START_2_END_29                    = 0x2,
+	DCIO_GPU_TIMER_START_3_END_30                    = 0x3,
+	DCIO_GPU_TIMER_START_4_END_31                    = 0x4,
+	DCIO_GPU_TIMER_START_6_END_33                    = 0x5,
+	DCIO_GPU_TIMER_START_8_END_35                    = 0x6,
+	DCIO_GPU_TIMER_START_10_END_37                   = 0x7,
+} DCIO_DC_GPU_TIMER_START_POSITION;
+typedef enum DCIO_CLOCK_CNTL_DCIO_TEST_CLK_SEL {
+	DCIO_TEST_CLK_SEL_DISPCLK                        = 0x0,
+	DCIO_TEST_CLK_SEL_GATED_DISPCLK                  = 0x1,
+	DCIO_TEST_CLK_SEL_SCLK                           = 0x2,
+} DCIO_CLOCK_CNTL_DCIO_TEST_CLK_SEL;
+typedef enum DCIO_CLOCK_CNTL_DISPCLK_R_DCIO_GATE_DIS {
+	DCIO_DISPCLK_R_DCIO_GATE_DISABLE                 = 0x0,
+	DCIO_DISPCLK_R_DCIO_GATE_ENABLE                  = 0x1,
+} DCIO_CLOCK_CNTL_DISPCLK_R_DCIO_GATE_DIS;
+typedef enum DCIO_DCO_DCFE_EXT_VSYNC_MUX {
+	DCIO_EXT_VSYNC_MUX_SWAPLOCKB                     = 0x0,
+	DCIO_EXT_VSYNC_MUX_CRTC0                         = 0x1,
+	DCIO_EXT_VSYNC_MUX_CRTC1                         = 0x2,
+	DCIO_EXT_VSYNC_MUX_CRTC2                         = 0x3,
+	DCIO_EXT_VSYNC_MUX_CRTC3                         = 0x4,
+	DCIO_EXT_VSYNC_MUX_CRTC4                         = 0x5,
+	DCIO_EXT_VSYNC_MUX_CRTC5                         = 0x6,
+	DCIO_EXT_VSYNC_MUX_GENERICB                      = 0x7,
+} DCIO_DCO_DCFE_EXT_VSYNC_MUX;
+typedef enum DCIO_DCO_EXT_VSYNC_MASK {
+	DCIO_EXT_VSYNC_MASK_NONE                         = 0x0,
+	DCIO_EXT_VSYNC_MASK_PIPE0                        = 0x1,
+	DCIO_EXT_VSYNC_MASK_PIPE1                        = 0x2,
+	DCIO_EXT_VSYNC_MASK_PIPE2                        = 0x3,
+	DCIO_EXT_VSYNC_MASK_PIPE3                        = 0x4,
+	DCIO_EXT_VSYNC_MASK_PIPE4                        = 0x5,
+	DCIO_EXT_VSYNC_MASK_PIPE5                        = 0x6,
+	DCIO_EXT_VSYNC_MASK_NONE_DUPLICATE               = 0x7,
+} DCIO_DCO_EXT_VSYNC_MASK;
+typedef enum DCIO_DBG_OUT_PIN_SEL {
+	DCIO_DBG_OUT_PIN_SEL_LOW_12BIT                   = 0x0,
+	DCIO_DBG_OUT_PIN_SEL_HIGH_12BIT                  = 0x1,
+} DCIO_DBG_OUT_PIN_SEL;
+typedef enum DCIO_DBG_OUT_12BIT_SEL {
+	DCIO_DBG_OUT_12BIT_SEL_LOW_12BIT                 = 0x0,
+	DCIO_DBG_OUT_12BIT_SEL_MID_12BIT                 = 0x1,
+	DCIO_DBG_OUT_12BIT_SEL_HIGH_12BIT                = 0x2,
+	DCIO_DBG_OUT_12BIT_SEL_OVERRIDE                  = 0x3,
+} DCIO_DBG_OUT_12BIT_SEL;
+typedef enum DCIO_DSYNC_SOFT_RESET {
+	DCIO_DSYNC_SOFT_RESET_DEASSERT                   = 0x0,
+	DCIO_DSYNC_SOFT_RESET_ASSERT                     = 0x1,
+} DCIO_DSYNC_SOFT_RESET;
+typedef enum DCIO_DACA_SOFT_RESET {
+	DCIO_DACA_SOFT_RESET_DEASSERT                    = 0x0,
+	DCIO_DACA_SOFT_RESET_ASSERT                      = 0x1,
+} DCIO_DACA_SOFT_RESET;
+typedef enum DCIO_DCRXPHY_SOFT_RESET {
+	DCIO_DCRXPHY_SOFT_RESET_DEASSERT                 = 0x0,
+	DCIO_DCRXPHY_SOFT_RESET_ASSERT                   = 0x1,
+} DCIO_DCRXPHY_SOFT_RESET;
+typedef enum DCIO_DPHY_LANE_SEL {
+	DCIO_DPHY_LANE_SEL_LANE0                         = 0x0,
+	DCIO_DPHY_LANE_SEL_LANE1                         = 0x1,
+	DCIO_DPHY_LANE_SEL_LANE2                         = 0x2,
+	DCIO_DPHY_LANE_SEL_LANE3                         = 0x3,
+} DCIO_DPHY_LANE_SEL;
+typedef enum DCIO_DPCS_INTERRUPT_TYPE {
+	DCIO_DPCS_INTERRUPT_TYPE_LEVEL_BASED             = 0x0,
+	DCIO_DPCS_INTERRUPT_TYPE_PULSE_BASED             = 0x1,
+} DCIO_DPCS_INTERRUPT_TYPE;
+typedef enum DCIO_DPCS_INTERRUPT_MASK {
+	DCIO_DPCS_INTERRUPT_DISABLE                      = 0x0,
+	DCIO_DPCS_INTERRUPT_ENABLE                       = 0x1,
+} DCIO_DPCS_INTERRUPT_MASK;
+typedef enum DCIO_DC_GPU_TIMER_READ_SELECT {
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D1_V_UPDATE     = 0x0,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D1_V_UPDATE     = 0x1,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D2_V_UPDATE     = 0x2,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D2_V_UPDATE     = 0x3,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D3_V_UPDATE     = 0x4,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D3_V_UPDATE     = 0x5,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D4_V_UPDATE     = 0x6,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D4_V_UPDATE     = 0x7,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D5_V_UPDATE     = 0x8,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D5_V_UPDATE     = 0x9,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D6_V_UPDATE     = 0xa,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D6_V_UPDATE     = 0xb,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D1_P_FLIP       = 0xc,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D1_P_FLIP       = 0xd,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D2_P_FLIP       = 0xe,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D2_P_FLIP       = 0xf,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D3_P_FLIP       = 0x10,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D3_P_FLIP       = 0x11,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D4_P_FLIP       = 0x12,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D4_P_FLIP       = 0x13,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D5_P_FLIP       = 0x14,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D5_P_FLIP       = 0x15,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D6_P_FLIP       = 0x16,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D6_P_FLIP       = 0x17,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D1_VSYNC_NOM    = 0x18,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D1_VSYNC_NOM    = 0x19,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D2_VSYNC_NOM    = 0x1a,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D2_VSYNC_NOM    = 0x1b,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D3_VSYNC_NOM    = 0x1c,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D3_VSYNC_NOM    = 0x1d,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D4_VSYNC_NOM    = 0x1e,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D4_VSYNC_NOM    = 0x1f,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D5_VSYNC_NOM    = 0x20,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D5_VSYNC_NOM    = 0x21,
+	DCIO_GPU_TIMER_READ_SELECT_LOWER_D6_VSYNC_NOM    = 0x22,
+	DCIO_GPU_TIMER_READ_SELECT_UPPER_D6_VSYNC_NOM    = 0x23,
+} DCIO_DC_GPU_TIMER_READ_SELECT;
+typedef enum DCIO_IMPCAL_STEP_DELAY {
+	DCIO_IMPCAL_STEP_DELAY_1us                       = 0x0,
+	DCIO_IMPCAL_STEP_DELAY_2us                       = 0x1,
+	DCIO_IMPCAL_STEP_DELAY_3us                       = 0x2,
+	DCIO_IMPCAL_STEP_DELAY_4us                       = 0x3,
+	DCIO_IMPCAL_STEP_DELAY_5us                       = 0x4,
+	DCIO_IMPCAL_STEP_DELAY_6us                       = 0x5,
+	DCIO_IMPCAL_STEP_DELAY_7us                       = 0x6,
+	DCIO_IMPCAL_STEP_DELAY_8us                       = 0x7,
+	DCIO_IMPCAL_STEP_DELAY_9us                       = 0x8,
+	DCIO_IMPCAL_STEP_DELAY_10us                      = 0x9,
+	DCIO_IMPCAL_STEP_DELAY_11us                      = 0xa,
+	DCIO_IMPCAL_STEP_DELAY_12us                      = 0xb,
+	DCIO_IMPCAL_STEP_DELAY_13us                      = 0xc,
+	DCIO_IMPCAL_STEP_DELAY_14us                      = 0xd,
+	DCIO_IMPCAL_STEP_DELAY_15us                      = 0xe,
+	DCIO_IMPCAL_STEP_DELAY_16us                      = 0xf,
+} DCIO_IMPCAL_STEP_DELAY;
+typedef enum DCIO_UNIPHY_IMPCAL_SEL {
+	DCIO_UNIPHY_IMPCAL_SEL_TEMPERATURE               = 0x0,
+	DCIO_UNIPHY_IMPCAL_SEL_BINARY                    = 0x1,
+} DCIO_UNIPHY_IMPCAL_SEL;
+typedef enum DCIO_DBG_CLOCK_SEL {
+	DCIO_DBG_CLOCK_SEL_DISPCLK                       = 0x0,
+	DCIO_DBG_CLOCK_SEL_SYMCLKA                       = 0x1,
+	DCIO_DBG_CLOCK_SEL_SYMCLKB                       = 0x2,
+	DCIO_DBG_CLOCK_SEL_SYMCLKC                       = 0x3,
+	DCIO_DBG_CLOCK_SEL_SYMCLKD                       = 0x4,
+	DCIO_DBG_CLOCK_SEL_SYMCLKE                       = 0x5,
+	DCIO_DBG_CLOCK_SEL_SYMCLKF                       = 0x6,
+	DCIO_DBG_CLOCK_SEL_REFCLK                        = 0xb,
+} DCIO_DBG_CLOCK_SEL;
+typedef enum DCIOCHIP_HPD_SEL {
+	DCIOCHIP_HPD_SEL_ASYNC                           = 0x0,
+	DCIOCHIP_HPD_SEL_CLOCKED                         = 0x1,
+} DCIOCHIP_HPD_SEL;
+typedef enum DCIOCHIP_PAD_MODE {
+	DCIOCHIP_PAD_MODE_DDC                            = 0x0,
+	DCIOCHIP_PAD_MODE_DP                             = 0x1,
+} DCIOCHIP_PAD_MODE;
+typedef enum DCIOCHIP_AUXSLAVE_PAD_MODE {
+	DCIOCHIP_AUXSLAVE_PAD_MODE_I2C                   = 0x0,
+	DCIOCHIP_AUXSLAVE_PAD_MODE_AUX                   = 0x1,
+} DCIOCHIP_AUXSLAVE_PAD_MODE;
+typedef enum DCIOCHIP_INVERT {
+	DCIOCHIP_POL_NON_INVERT                          = 0x0,
+	DCIOCHIP_POL_INVERT                              = 0x1,
+} DCIOCHIP_INVERT;
+typedef enum DCIOCHIP_PD_EN {
+	DCIOCHIP_PD_EN_NOTALLOW                          = 0x0,
+	DCIOCHIP_PD_EN_ALLOW                             = 0x1,
+} DCIOCHIP_PD_EN;
+typedef enum DCIOCHIP_GPIO_MASK_EN {
+	DCIOCHIP_GPIO_MASK_EN_HARDWARE                   = 0x0,
+	DCIOCHIP_GPIO_MASK_EN_SOFTWARE                   = 0x1,
+} DCIOCHIP_GPIO_MASK_EN;
+typedef enum DCIOCHIP_MASK {
+	DCIOCHIP_MASK_DISABLE                            = 0x0,
+	DCIOCHIP_MASK_ENABLE                             = 0x1,
+} DCIOCHIP_MASK;
+typedef enum DCIOCHIP_GPIO_I2C_MASK {
+	DCIOCHIP_GPIO_I2C_MASK_DISABLE                   = 0x0,
+	DCIOCHIP_GPIO_I2C_MASK_ENABLE                    = 0x1,
+} DCIOCHIP_GPIO_I2C_MASK;
+typedef enum DCIOCHIP_GPIO_I2C_DRIVE {
+	DCIOCHIP_GPIO_I2C_DRIVE_LOW                      = 0x0,
+	DCIOCHIP_GPIO_I2C_DRIVE_HIGH                     = 0x1,
+} DCIOCHIP_GPIO_I2C_DRIVE;
+typedef enum DCIOCHIP_GPIO_I2C_EN {
+	DCIOCHIP_GPIO_I2C_DISABLE                        = 0x0,
+	DCIOCHIP_GPIO_I2C_ENABLE                         = 0x1,
+} DCIOCHIP_GPIO_I2C_EN;
+typedef enum DCIOCHIP_MASK_4BIT {
+	DCIOCHIP_MASK_4BIT_DISABLE                       = 0x0,
+	DCIOCHIP_MASK_4BIT_ENABLE                        = 0xf,
+} DCIOCHIP_MASK_4BIT;
+typedef enum DCIOCHIP_ENABLE_4BIT {
+	DCIOCHIP_4BIT_DISABLE                            = 0x0,
+	DCIOCHIP_4BIT_ENABLE                             = 0xf,
+} DCIOCHIP_ENABLE_4BIT;
+typedef enum DCIOCHIP_MASK_5BIT {
+	DCIOCHIP_MASIK_5BIT_DISABLE                      = 0x0,
+	DCIOCHIP_MASIK_5BIT_ENABLE                       = 0x1f,
+} DCIOCHIP_MASK_5BIT;
+typedef enum DCIOCHIP_ENABLE_5BIT {
+	DCIOCHIP_5BIT_DISABLE                            = 0x0,
+	DCIOCHIP_5BIT_ENABLE                             = 0x1f,
+} DCIOCHIP_ENABLE_5BIT;
+typedef enum DCIOCHIP_MASK_2BIT {
+	DCIOCHIP_MASK_2BIT_DISABLE                       = 0x0,
+	DCIOCHIP_MASK_2BIT_ENABLE                        = 0x3,
+} DCIOCHIP_MASK_2BIT;
+typedef enum DCIOCHIP_ENABLE_2BIT {
+	DCIOCHIP_2BIT_DISABLE                            = 0x0,
+	DCIOCHIP_2BIT_ENABLE                             = 0x3,
+} DCIOCHIP_ENABLE_2BIT;
+typedef enum DCIOCHIP_REF_27_SRC_SEL {
+	DCIOCHIP_REF_27_SRC_SEL_XTAL_DIVIDER             = 0x0,
+	DCIOCHIP_REF_27_SRC_SEL_DISP_CLKIN2_DIVIDER      = 0x1,
+	DCIOCHIP_REF_27_SRC_SEL_XTAL_BYPASS              = 0x2,
+	DCIOCHIP_REF_27_SRC_SEL_DISP_CLKIN2_BYPASS       = 0x3,
+} DCIOCHIP_REF_27_SRC_SEL;
+typedef enum DCIOCHIP_DVO_VREFPON {
+	DCIOCHIP_DVO_VREFPON_DISABLE                     = 0x0,
+	DCIOCHIP_DVO_VREFPON_ENABLE                      = 0x1,
+} DCIOCHIP_DVO_VREFPON;
+typedef enum DCIOCHIP_DVO_VREFSEL {
+	DCIOCHIP_DVO_VREFSEL_ONCHIP                      = 0x0,
+	DCIOCHIP_DVO_VREFSEL_EXTERNAL                    = 0x1,
+} DCIOCHIP_DVO_VREFSEL;
+typedef enum DCIOCHIP_SPDIF1_IMODE {
+	DCIOCHIP_SPDIF1_IMODE_OE_A                       = 0x0,
+	DCIOCHIP_SPDIF1_IMODE_TSTE_TSTO                  = 0x1,
+} DCIOCHIP_SPDIF1_IMODE;
+typedef enum DCIOCHIP_AUX_FALLSLEWSEL {
+	DCIOCHIP_AUX_FALLSLEWSEL_LOW                     = 0x0,
+	DCIOCHIP_AUX_FALLSLEWSEL_HIGH0                   = 0x1,
+	DCIOCHIP_AUX_FALLSLEWSEL_HIGH1                   = 0x2,
+	DCIOCHIP_AUX_FALLSLEWSEL_ULTRAHIGH               = 0x3,
+} DCIOCHIP_AUX_FALLSLEWSEL;
+typedef enum DCIOCHIP_AUX_SPIKESEL {
+	DCIOCHIP_AUX_SPIKESEL_50NS                       = 0x0,
+	DCIOCHIP_AUX_SPIKESEL_10NS                       = 0x1,
+} DCIOCHIP_AUX_SPIKESEL;
+typedef enum DCIOCHIP_AUX_CSEL0P9 {
+	DCIOCHIP_AUX_CSEL_DEC1P0                         = 0x0,
+	DCIOCHIP_AUX_CSEL_DEC0P9                         = 0x1,
+} DCIOCHIP_AUX_CSEL0P9;
+typedef enum DCIOCHIP_AUX_CSEL1P1 {
+	DCIOCHIP_AUX_CSEL_INC1P0                         = 0x0,
+	DCIOCHIP_AUX_CSEL_INC1P1                         = 0x1,
+} DCIOCHIP_AUX_CSEL1P1;
+typedef enum DCIOCHIP_AUX_RSEL0P9 {
+	DCIOCHIP_AUX_RSEL_DEC1P0                         = 0x0,
+	DCIOCHIP_AUX_RSEL_DEC0P9                         = 0x1,
+} DCIOCHIP_AUX_RSEL0P9;
+typedef enum DCIOCHIP_AUX_RSEL1P1 {
+	DCIOCHIP_AUX_RSEL_INC1P0                         = 0x0,
+	DCIOCHIP_AUX_RSEL_INC1P1                         = 0x1,
+} DCIOCHIP_AUX_RSEL1P1;
+typedef enum DCP_GRPH_ENABLE {
+	DCP_GRPH_ENABLE_FALSE                            = 0x0,
+	DCP_GRPH_ENABLE_TRUE                             = 0x1,
+} DCP_GRPH_ENABLE;
+typedef enum DCP_GRPH_KEYER_ALPHA_SEL {
+	DCP_GRPH_KEYER_ALPHA_SEL_FALSE                   = 0x0,
+	DCP_GRPH_KEYER_ALPHA_SEL_TRUE                    = 0x1,
+} DCP_GRPH_KEYER_ALPHA_SEL;
+typedef enum DCP_GRPH_DEPTH {
+	DCP_GRPH_DEPTH_8BPP                              = 0x0,
+	DCP_GRPH_DEPTH_16BPP                             = 0x1,
+	DCP_GRPH_DEPTH_32BPP                             = 0x2,
+	DCP_GRPH_DEPTH_64BPP                             = 0x3,
+} DCP_GRPH_DEPTH;
+typedef enum DCP_GRPH_NUM_BANKS {
+	DCP_GRPH_NUM_BANKS_2BANK                         = 0x0,
+	DCP_GRPH_NUM_BANKS_4BANK                         = 0x1,
+	DCP_GRPH_NUM_BANKS_8BANK                         = 0x2,
+	DCP_GRPH_NUM_BANKS_16BANK                        = 0x3,
+} DCP_GRPH_NUM_BANKS;
+typedef enum DCP_GRPH_BANK_WIDTH {
+	DCP_GRPH_BANK_WIDTH_1                            = 0x0,
+	DCP_GRPH_BANK_WIDTH_2                            = 0x1,
+	DCP_GRPH_BANK_WIDTH_4                            = 0x2,
+	DCP_GRPH_BANK_WIDTH_8                            = 0x3,
+} DCP_GRPH_BANK_WIDTH;
+typedef enum DCP_GRPH_FORMAT {
+	DCP_GRPH_FORMAT_8BPP                             = 0x0,
+	DCP_GRPH_FORMAT_16BPP                            = 0x1,
+	DCP_GRPH_FORMAT_32BPP                            = 0x2,
+	DCP_GRPH_FORMAT_64BPP                            = 0x3,
+} DCP_GRPH_FORMAT;
+typedef enum DCP_GRPH_BANK_HEIGHT {
+	DCP_GRPH_BANK_HEIGHT_1                           = 0x0,
+	DCP_GRPH_BANK_HEIGHT_2                           = 0x1,
+	DCP_GRPH_BANK_HEIGHT_4                           = 0x2,
+	DCP_GRPH_BANK_HEIGHT_8                           = 0x3,
+} DCP_GRPH_BANK_HEIGHT;
+typedef enum DCP_GRPH_TILE_SPLIT {
+	DCP_GRPH_TILE_SPLIT_64B                          = 0x0,
+	DCP_GRPH_TILE_SPLIT_128B                         = 0x1,
+	DCP_GRPH_TILE_SPLIT_256B                         = 0x2,
+	DCP_GRPH_TILE_SPLIT_512B                         = 0x3,
+	DCP_GRPH_TILE_SPLIT_1B                           = 0x4,
+	DCP_GRPH_TILE_SPLIT_2B                           = 0x5,
+	DCP_GRPH_TILE_SPLIT_4B                           = 0x6,
+} DCP_GRPH_TILE_SPLIT;
+typedef enum DCP_GRPH_ADDRESS_TRANSLATION_ENABLE {
+	DCP_GRPH_ADDRESS_TRANSLATION_ENABLE_FALSE        = 0x0,
+	DCP_GRPH_ADDRESS_TRANSLATION_ENABLE_TRUE         = 0x1,
+} DCP_GRPH_ADDRESS_TRANSLATION_ENABLE;
+typedef enum DCP_GRPH_PRIVILEGED_ACCESS_ENABLE {
+	DCP_GRPH_PRIVILEGED_ACCESS_ENABLE_FALSE          = 0x0,
+	DCP_GRPH_PRIVILEGED_ACCESS_ENABLE_TRUE           = 0x1,
+} DCP_GRPH_PRIVILEGED_ACCESS_ENABLE;
+typedef enum DCP_GRPH_MACRO_TILE_ASPECT {
+	DCP_GRPH_MACRO_TILE_ASPECT_1                     = 0x0,
+	DCP_GRPH_MACRO_TILE_ASPECT_2                     = 0x1,
+	DCP_GRPH_MACRO_TILE_ASPECT_4                     = 0x2,
+	DCP_GRPH_MACRO_TILE_ASPECT_8                     = 0x3,
+} DCP_GRPH_MACRO_TILE_ASPECT;
+typedef enum DCP_GRPH_ARRAY_MODE {
+	DCP_GRPH_ARRAY_MODE_0                            = 0x0,
+	DCP_GRPH_ARRAY_MODE_1                            = 0x1,
+	DCP_GRPH_ARRAY_MODE_2                            = 0x2,
+	DCP_GRPH_ARRAY_MODE_3                            = 0x3,
+	DCP_GRPH_ARRAY_MODE_4                            = 0x4,
+	DCP_GRPH_ARRAY_MODE_7                            = 0x7,
+	DCP_GRPH_ARRAY_MODE_12                           = 0xc,
+	DCP_GRPH_ARRAY_MODE_13                           = 0xd,
+} DCP_GRPH_ARRAY_MODE;
+typedef enum DCP_GRPH_MICRO_TILE_MODE {
+	DCP_GRPH_MICRO_TILE_MODE_0                       = 0x0,
+	DCP_GRPH_MICRO_TILE_MODE_1                       = 0x1,
+	DCP_GRPH_MICRO_TILE_MODE_2                       = 0x2,
+	DCP_GRPH_MICRO_TILE_MODE_3                       = 0x3,
+} DCP_GRPH_MICRO_TILE_MODE;
+typedef enum DCP_GRPH_COLOR_EXPANSION_MODE {
+	DCP_GRPH_COLOR_EXPANSION_MODE_DEXP               = 0x0,
+	DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP               = 0x1,
+} DCP_GRPH_COLOR_EXPANSION_MODE;
+typedef enum DCP_GRPH_LUT_10BIT_BYPASS_EN {
+	DCP_GRPH_LUT_10BIT_BYPASS_EN_FALSE               = 0x0,
+	DCP_GRPH_LUT_10BIT_BYPASS_EN_TRUE                = 0x1,
+} DCP_GRPH_LUT_10BIT_BYPASS_EN;
+typedef enum DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN {
+	DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_FALSE       = 0x0,
+	DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_TRUE        = 0x1,
+} DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN;
+typedef enum DCP_GRPH_ENDIAN_SWAP {
+	DCP_GRPH_ENDIAN_SWAP_NONE                        = 0x0,
+	DCP_GRPH_ENDIAN_SWAP_8IN16                       = 0x1,
+	DCP_GRPH_ENDIAN_SWAP_8IN32                       = 0x2,
+	DCP_GRPH_ENDIAN_SWAP_8IN64                       = 0x3,
+} DCP_GRPH_ENDIAN_SWAP;
+typedef enum DCP_GRPH_RED_CROSSBAR {
+	DCP_GRPH_RED_CROSSBAR_FROM_R                     = 0x0,
+	DCP_GRPH_RED_CROSSBAR_FROM_G                     = 0x1,
+	DCP_GRPH_RED_CROSSBAR_FROM_B                     = 0x2,
+	DCP_GRPH_RED_CROSSBAR_FROM_A                     = 0x3,
+} DCP_GRPH_RED_CROSSBAR;
+typedef enum DCP_GRPH_GREEN_CROSSBAR {
+	DCP_GRPH_GREEN_CROSSBAR_FROM_G                   = 0x0,
+	DCP_GRPH_GREEN_CROSSBAR_FROM_B                   = 0x1,
+	DCP_GRPH_GREEN_CROSSBAR_FROM_A                   = 0x2,
+	DCP_GRPH_GREEN_CROSSBAR_FROM_R                   = 0x3,
+} DCP_GRPH_GREEN_CROSSBAR;
+typedef enum DCP_GRPH_BLUE_CROSSBAR {
+	DCP_GRPH_BLUE_CROSSBAR_FROM_B                    = 0x0,
+	DCP_GRPH_BLUE_CROSSBAR_FROM_A                    = 0x1,
+	DCP_GRPH_BLUE_CROSSBAR_FROM_R                    = 0x2,
+	DCP_GRPH_BLUE_CROSSBAR_FROM_G                    = 0x3,
+} DCP_GRPH_BLUE_CROSSBAR;
+typedef enum DCP_GRPH_ALPHA_CROSSBAR {
+	DCP_GRPH_ALPHA_CROSSBAR_FROM_A                   = 0x0,
+	DCP_GRPH_ALPHA_CROSSBAR_FROM_R                   = 0x1,
+	DCP_GRPH_ALPHA_CROSSBAR_FROM_G                   = 0x2,
+	DCP_GRPH_ALPHA_CROSSBAR_FROM_B                   = 0x3,
+} DCP_GRPH_ALPHA_CROSSBAR;
+typedef enum DCP_GRPH_PRIMARY_DFQ_ENABLE {
+	DCP_GRPH_PRIMARY_DFQ_ENABLE_FALSE                = 0x0,
+	DCP_GRPH_PRIMARY_DFQ_ENABLE_TRUE                 = 0x1,
+} DCP_GRPH_PRIMARY_DFQ_ENABLE;
+typedef enum DCP_GRPH_SECONDARY_DFQ_ENABLE {
+	DCP_GRPH_SECONDARY_DFQ_ENABLE_FALSE              = 0x0,
+	DCP_GRPH_SECONDARY_DFQ_ENABLE_TRUE               = 0x1,
+} DCP_GRPH_SECONDARY_DFQ_ENABLE;
+typedef enum DCP_GRPH_INPUT_GAMMA_MODE {
+	DCP_GRPH_INPUT_GAMMA_MODE_LUT                    = 0x0,
+	DCP_GRPH_INPUT_GAMMA_MODE_BYPASS                 = 0x1,
+} DCP_GRPH_INPUT_GAMMA_MODE;
+typedef enum DCP_GRPH_MODE_UPDATE_PENDING {
+	DCP_GRPH_MODE_UPDATE_PENDING_FALSE               = 0x0,
+	DCP_GRPH_MODE_UPDATE_PENDING_TRUE                = 0x1,
+} DCP_GRPH_MODE_UPDATE_PENDING;
+typedef enum DCP_GRPH_MODE_UPDATE_TAKEN {
+	DCP_GRPH_MODE_UPDATE_TAKEN_FALSE                 = 0x0,
+	DCP_GRPH_MODE_UPDATE_TAKEN_TRUE                  = 0x1,
+} DCP_GRPH_MODE_UPDATE_TAKEN;
+typedef enum DCP_GRPH_SURFACE_UPDATE_PENDING {
+	DCP_GRPH_SURFACE_UPDATE_PENDING_FALSE            = 0x0,
+	DCP_GRPH_SURFACE_UPDATE_PENDING_TRUE             = 0x1,
+} DCP_GRPH_SURFACE_UPDATE_PENDING;
+typedef enum DCP_GRPH_SURFACE_UPDATE_TAKEN {
+	DCP_GRPH_SURFACE_UPDATE_TAKEN_FALSE              = 0x0,
+	DCP_GRPH_SURFACE_UPDATE_TAKEN_TRUE               = 0x1,
+} DCP_GRPH_SURFACE_UPDATE_TAKEN;
+typedef enum DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE {
+	DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE_FALSE       = 0x0,
+	DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE_TRUE        = 0x1,
+} DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE;
+typedef enum DCP_GRPH_UPDATE_LOCK {
+	DCP_GRPH_UPDATE_LOCK_FALSE                       = 0x0,
+	DCP_GRPH_UPDATE_LOCK_TRUE                        = 0x1,
+} DCP_GRPH_UPDATE_LOCK;
+typedef enum DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK {
+	DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_FALSE        = 0x0,
+	DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_TRUE         = 0x1,
+} DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK;
+typedef enum DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE {
+	DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_FALSE      = 0x0,
+	DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_TRUE       = 0x1,
+} DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE {
+	DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_FALSE   = 0x0,
+	DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_TRUE    = 0x1,
+} DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN {
+	DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN_FALSE       = 0x0,
+	DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN_TRUE        = 0x1,
+} DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+typedef enum DCP_GRPH_XDMA_SUPER_AA_EN {
+	DCP_GRPH_XDMA_SUPER_AA_EN_FALSE                  = 0x0,
+	DCP_GRPH_XDMA_SUPER_AA_EN_TRUE                   = 0x1,
+} DCP_GRPH_XDMA_SUPER_AA_EN;
+typedef enum DCP_GRPH_DFQ_RESET {
+	DCP_GRPH_DFQ_RESET_FALSE                         = 0x0,
+	DCP_GRPH_DFQ_RESET_TRUE                          = 0x1,
+} DCP_GRPH_DFQ_RESET;
+typedef enum DCP_GRPH_DFQ_SIZE {
+	DCP_GRPH_DFQ_SIZE_DEEP1                          = 0x0,
+	DCP_GRPH_DFQ_SIZE_DEEP2                          = 0x1,
+	DCP_GRPH_DFQ_SIZE_DEEP3                          = 0x2,
+	DCP_GRPH_DFQ_SIZE_DEEP4                          = 0x3,
+	DCP_GRPH_DFQ_SIZE_DEEP5                          = 0x4,
+	DCP_GRPH_DFQ_SIZE_DEEP6                          = 0x5,
+	DCP_GRPH_DFQ_SIZE_DEEP7                          = 0x6,
+	DCP_GRPH_DFQ_SIZE_DEEP8                          = 0x7,
+} DCP_GRPH_DFQ_SIZE;
+typedef enum DCP_GRPH_DFQ_MIN_FREE_ENTRIES {
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_1                  = 0x0,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_2                  = 0x1,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_3                  = 0x2,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_4                  = 0x3,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_5                  = 0x4,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_6                  = 0x5,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_7                  = 0x6,
+	DCP_GRPH_DFQ_MIN_FREE_ENTRIES_8                  = 0x7,
+} DCP_GRPH_DFQ_MIN_FREE_ENTRIES;
+typedef enum DCP_GRPH_DFQ_RESET_ACK {
+	DCP_GRPH_DFQ_RESET_ACK_FALSE                     = 0x0,
+	DCP_GRPH_DFQ_RESET_ACK_TRUE                      = 0x1,
+} DCP_GRPH_DFQ_RESET_ACK;
+typedef enum DCP_GRPH_PFLIP_INT_CLEAR {
+	DCP_GRPH_PFLIP_INT_CLEAR_FALSE                   = 0x0,
+	DCP_GRPH_PFLIP_INT_CLEAR_TRUE                    = 0x1,
+} DCP_GRPH_PFLIP_INT_CLEAR;
+typedef enum DCP_GRPH_PFLIP_INT_MASK {
+	DCP_GRPH_PFLIP_INT_MASK_FALSE                    = 0x0,
+	DCP_GRPH_PFLIP_INT_MASK_TRUE                     = 0x1,
+} DCP_GRPH_PFLIP_INT_MASK;
+typedef enum DCP_GRPH_PFLIP_INT_TYPE {
+	DCP_GRPH_PFLIP_INT_TYPE_LEGACY_LEVEL             = 0x0,
+	DCP_GRPH_PFLIP_INT_TYPE_PULSE                    = 0x1,
+} DCP_GRPH_PFLIP_INT_TYPE;
+typedef enum DCP_GRPH_PRESCALE_SELECT {
+	DCP_GRPH_PRESCALE_SELECT_FIXED                   = 0x0,
+	DCP_GRPH_PRESCALE_SELECT_FLOATING                = 0x1,
+} DCP_GRPH_PRESCALE_SELECT;
+typedef enum DCP_GRPH_PRESCALE_R_SIGN {
+	DCP_GRPH_PRESCALE_R_SIGN_UNSIGNED                = 0x0,
+	DCP_GRPH_PRESCALE_R_SIGN_SIGNED                  = 0x1,
+} DCP_GRPH_PRESCALE_R_SIGN;
+typedef enum DCP_GRPH_PRESCALE_G_SIGN {
+	DCP_GRPH_PRESCALE_G_SIGN_UNSIGNED                = 0x0,
+	DCP_GRPH_PRESCALE_G_SIGN_SIGNED                  = 0x1,
+} DCP_GRPH_PRESCALE_G_SIGN;
+typedef enum DCP_GRPH_PRESCALE_B_SIGN {
+	DCP_GRPH_PRESCALE_B_SIGN_UNSIGNED                = 0x0,
+	DCP_GRPH_PRESCALE_B_SIGN_SIGNED                  = 0x1,
+} DCP_GRPH_PRESCALE_B_SIGN;
+typedef enum DCP_GRPH_PRESCALE_BYPASS {
+	DCP_GRPH_PRESCALE_BYPASS_FALSE                   = 0x0,
+	DCP_GRPH_PRESCALE_BYPASS_TRUE                    = 0x1,
+} DCP_GRPH_PRESCALE_BYPASS;
+typedef enum DCP_INPUT_CSC_GRPH_MODE {
+	DCP_INPUT_CSC_GRPH_MODE_BYPASS                   = 0x0,
+	DCP_INPUT_CSC_GRPH_MODE_INPUT_CSC_COEF           = 0x1,
+	DCP_INPUT_CSC_GRPH_MODE_SHARED_COEF              = 0x2,
+	DCP_INPUT_CSC_GRPH_MODE_RESERVED                 = 0x3,
+} DCP_INPUT_CSC_GRPH_MODE;
+typedef enum DCP_OUTPUT_CSC_GRPH_MODE {
+	DCP_OUTPUT_CSC_GRPH_MODE_BYPASS                  = 0x0,
+	DCP_OUTPUT_CSC_GRPH_MODE_RGB                     = 0x1,
+	DCP_OUTPUT_CSC_GRPH_MODE_YCBCR601                = 0x2,
+	DCP_OUTPUT_CSC_GRPH_MODE_YCBCR709                = 0x3,
+	DCP_OUTPUT_CSC_GRPH_MODE_OUTPUT_CSC_COEF         = 0x4,
+	DCP_OUTPUT_CSC_GRPH_MODE_SHARED_COEF             = 0x5,
+	DCP_OUTPUT_CSC_GRPH_MODE_RESERVED0               = 0x6,
+	DCP_OUTPUT_CSC_GRPH_MODE_RESERVED1               = 0x7,
+} DCP_OUTPUT_CSC_GRPH_MODE;
+typedef enum DCP_DENORM_MODE {
+	DCP_DENORM_MODE_UNITY                            = 0x0,
+	DCP_DENORM_MODE_6BIT                             = 0x1,
+	DCP_DENORM_MODE_8BIT                             = 0x2,
+	DCP_DENORM_MODE_10BIT                            = 0x3,
+	DCP_DENORM_MODE_11BIT                            = 0x4,
+	DCP_DENORM_MODE_12BIT                            = 0x5,
+	DCP_DENORM_MODE_RESERVED0                        = 0x6,
+	DCP_DENORM_MODE_RESERVED1                        = 0x7,
+} DCP_DENORM_MODE;
+typedef enum DCP_DENORM_14BIT_OUT {
+	DCP_DENORM_14BIT_OUT_FALSE                       = 0x0,
+	DCP_DENORM_14BIT_OUT_TRUE                        = 0x1,
+} DCP_DENORM_14BIT_OUT;
+typedef enum DCP_OUT_ROUND_TRUNC_MODE {
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_12             = 0x0,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_11             = 0x1,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_10             = 0x2,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_9              = 0x3,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_8              = 0x4,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_RESERVED       = 0x5,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_14             = 0x6,
+	DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_13             = 0x7,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_12                = 0x8,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_11                = 0x9,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_10                = 0xa,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_9                 = 0xb,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_8                 = 0xc,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_RESERVED          = 0xd,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_14                = 0xe,
+	DCP_OUT_ROUND_TRUNC_MODE_ROUND_13                = 0xf,
+} DCP_OUT_ROUND_TRUNC_MODE;
+typedef enum DCP_KEY_MODE {
+	DCP_KEY_MODE_ALPHA0                              = 0x0,
+	DCP_KEY_MODE_ALPHA1                              = 0x1,
+	DCP_KEY_MODE_IN_RANGE_ALPHA1                     = 0x2,
+	DCP_KEY_MODE_IN_RANGE_ALPHA0                     = 0x3,
+} DCP_KEY_MODE;
+typedef enum DCP_GRPH_DEGAMMA_MODE {
+	DCP_GRPH_DEGAMMA_MODE_BYPASS                     = 0x0,
+	DCP_GRPH_DEGAMMA_MODE_ROMA                       = 0x1,
+	DCP_GRPH_DEGAMMA_MODE_ROMB                       = 0x2,
+	DCP_GRPH_DEGAMMA_MODE_RESERVED                   = 0x3,
+} DCP_GRPH_DEGAMMA_MODE;
+typedef enum DCP_CURSOR2_DEGAMMA_MODE {
+	DCP_CURSOR2_DEGAMMA_MODE_BYPASS                  = 0x0,
+	DCP_CURSOR2_DEGAMMA_MODE_ROMA                    = 0x1,
+	DCP_CURSOR2_DEGAMMA_MODE_ROMB                    = 0x2,
+	DCP_CURSOR2_DEGAMMA_MODE_RESERVED                = 0x3,
+} DCP_CURSOR2_DEGAMMA_MODE;
+typedef enum DCP_CURSOR_DEGAMMA_MODE {
+	DCP_CURSOR_DEGAMMA_MODE_BYPASS                   = 0x0,
+	DCP_CURSOR_DEGAMMA_MODE_ROMA                     = 0x1,
+	DCP_CURSOR_DEGAMMA_MODE_ROMB                     = 0x2,
+	DCP_CURSOR_DEGAMMA_MODE_RESERVED                 = 0x3,
+} DCP_CURSOR_DEGAMMA_MODE;
+typedef enum DCP_GRPH_GAMUT_REMAP_MODE {
+	DCP_GRPH_GAMUT_REMAP_MODE_BYPASS                 = 0x0,
+	DCP_GRPH_GAMUT_REMAP_MODE_ROMA                   = 0x1,
+	DCP_GRPH_GAMUT_REMAP_MODE_ROMB                   = 0x2,
+	DCP_GRPH_GAMUT_REMAP_MODE_RESERVED               = 0x3,
+} DCP_GRPH_GAMUT_REMAP_MODE;
+typedef enum DCP_SPATIAL_DITHER_EN {
+	DCP_SPATIAL_DITHER_EN_FALSE                      = 0x0,
+	DCP_SPATIAL_DITHER_EN_TRUE                       = 0x1,
+} DCP_SPATIAL_DITHER_EN;
+typedef enum DCP_SPATIAL_DITHER_MODE {
+	DCP_SPATIAL_DITHER_MODE_BYPASS                   = 0x0,
+	DCP_SPATIAL_DITHER_MODE_ROMA                     = 0x1,
+	DCP_SPATIAL_DITHER_MODE_ROMB                     = 0x2,
+	DCP_SPATIAL_DITHER_MODE_RESERVED                 = 0x3,
+} DCP_SPATIAL_DITHER_MODE;
+typedef enum DCP_SPATIAL_DITHER_DEPTH {
+	DCP_SPATIAL_DITHER_DEPTH_30BPP                   = 0x0,
+	DCP_SPATIAL_DITHER_DEPTH_24BPP                   = 0x1,
+	DCP_SPATIAL_DITHER_DEPTH_36BPP                   = 0x2,
+	DCP_SPATIAL_DITHER_DEPTH_UNDEFINED               = 0x3,
+} DCP_SPATIAL_DITHER_DEPTH;
+typedef enum DCP_FRAME_RANDOM_ENABLE {
+	DCP_FRAME_RANDOM_ENABLE_FALSE                    = 0x0,
+	DCP_FRAME_RANDOM_ENABLE_TRUE                     = 0x1,
+} DCP_FRAME_RANDOM_ENABLE;
+typedef enum DCP_RGB_RANDOM_ENABLE {
+	DCP_RGB_RANDOM_ENABLE_FALSE                      = 0x0,
+	DCP_RGB_RANDOM_ENABLE_TRUE                       = 0x1,
+} DCP_RGB_RANDOM_ENABLE;
+typedef enum DCP_HIGHPASS_RANDOM_ENABLE {
+	DCP_HIGHPASS_RANDOM_ENABLE_FALSE                 = 0x0,
+	DCP_HIGHPASS_RANDOM_ENABLE_TRUE                  = 0x1,
+} DCP_HIGHPASS_RANDOM_ENABLE;
+typedef enum DCP_CURSOR_EN {
+	DCP_CURSOR_EN_FALSE                              = 0x0,
+	DCP_CURSOR_EN_TRUE                               = 0x1,
+} DCP_CURSOR_EN;
+typedef enum DCP_CUR_INV_TRANS_CLAMP {
+	DCP_CUR_INV_TRANS_CLAMP_FALSE                    = 0x0,
+	DCP_CUR_INV_TRANS_CLAMP_TRUE                     = 0x1,
+} DCP_CUR_INV_TRANS_CLAMP;
+typedef enum DCP_CURSOR_MODE {
+	DCP_CURSOR_MODE_MONO_2BPP                        = 0x0,
+	DCP_CURSOR_MODE_24BPP_1BIT                       = 0x1,
+	DCP_CURSOR_MODE_24BPP_8BIT_PREMULTI              = 0x2,
+	DCP_CURSOR_MODE_24BPP_8BIT_UNPREMULTI            = 0x3,
+} DCP_CURSOR_MODE;
+typedef enum DCP_CURSOR_2X_MAGNIFY {
+	DCP_CURSOR_2X_MAGNIFY_FALSE                      = 0x0,
+	DCP_CURSOR_2X_MAGNIFY_TRUE                       = 0x1,
+} DCP_CURSOR_2X_MAGNIFY;
+typedef enum DCP_CURSOR_FORCE_MC_ON {
+	DCP_CURSOR_FORCE_MC_ON_FALSE                     = 0x0,
+	DCP_CURSOR_FORCE_MC_ON_TRUE                      = 0x1,
+} DCP_CURSOR_FORCE_MC_ON;
+typedef enum DCP_CURSOR_URGENT_CONTROL {
+	DCP_CURSOR_URGENT_CONTROL_MODE_0                 = 0x0,
+	DCP_CURSOR_URGENT_CONTROL_MODE_1                 = 0x1,
+	DCP_CURSOR_URGENT_CONTROL_MODE_2                 = 0x2,
+	DCP_CURSOR_URGENT_CONTROL_MODE_3                 = 0x3,
+	DCP_CURSOR_URGENT_CONTROL_MODE_4                 = 0x4,
+} DCP_CURSOR_URGENT_CONTROL;
+typedef enum DCP_CURSOR_UPDATE_PENDING {
+	DCP_CURSOR_UPDATE_PENDING_FALSE                  = 0x0,
+	DCP_CURSOR_UPDATE_PENDING_TRUE                   = 0x1,
+} DCP_CURSOR_UPDATE_PENDING;
+typedef enum DCP_CURSOR_UPDATE_TAKEN {
+	DCP_CURSOR_UPDATE_TAKEN_FALSE                    = 0x0,
+	DCP_CURSOR_UPDATE_TAKEN_TRUE                     = 0x1,
+} DCP_CURSOR_UPDATE_TAKEN;
+typedef enum DCP_CURSOR_UPDATE_LOCK {
+	DCP_CURSOR_UPDATE_LOCK_FALSE                     = 0x0,
+	DCP_CURSOR_UPDATE_LOCK_TRUE                      = 0x1,
+} DCP_CURSOR_UPDATE_LOCK;
+typedef enum DCP_CURSOR_DISABLE_MULTIPLE_UPDATE {
+	DCP_CURSOR_DISABLE_MULTIPLE_UPDATE_FALSE         = 0x0,
+	DCP_CURSOR_DISABLE_MULTIPLE_UPDATE_TRUE          = 0x1,
+} DCP_CURSOR_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_CURSOR_UPDATE_STEREO_MODE {
+	DCP_CURSOR_UPDATE_STEREO_MODE_BOTH               = 0x0,
+	DCP_CURSOR_UPDATE_STEREO_MODE_SECONDARY_ONLY     = 0x1,
+	DCP_CURSOR_UPDATE_STEREO_MODE_UNDEFINED          = 0x2,
+	DCP_CURSOR_UPDATE_STEREO_MODE_PRIMARY_ONLY       = 0x3,
+} DCP_CURSOR_UPDATE_STEREO_MODE;
+typedef enum DCP_CURSOR2_EN {
+	DCP_CURSOR2_EN_FALSE                             = 0x0,
+	DCP_CURSOR2_EN_TRUE                              = 0x1,
+} DCP_CURSOR2_EN;
+typedef enum DCP_CUR2_INV_TRANS_CLAMP {
+	DCP_CUR2_INV_TRANS_CLAMP_FALSE                   = 0x0,
+	DCP_CUR2_INV_TRANS_CLAMP_TRUE                    = 0x1,
+} DCP_CUR2_INV_TRANS_CLAMP;
+typedef enum DCP_CURSOR2_MODE {
+	DCP_CURSOR2_MODE_MONO_2BPP                       = 0x0,
+	DCP_CURSOR2_MODE_24BPP_1BIT                      = 0x1,
+	DCP_CURSOR2_MODE_24BPP_8BIT_PREMULTI             = 0x2,
+	DCP_CURSOR2_MODE_24BPP_8BIT_UNPREMULTI           = 0x3,
+} DCP_CURSOR2_MODE;
+typedef enum DCP_CURSOR2_2X_MAGNIFY {
+	DCP_CURSOR2_2X_MAGNIFY_FALSE                     = 0x0,
+	DCP_CURSOR2_2X_MAGNIFY_TRUE                      = 0x1,
+} DCP_CURSOR2_2X_MAGNIFY;
+typedef enum DCP_CURSOR2_FORCE_MC_ON {
+	DCP_CURSOR2_FORCE_MC_ON_FALSE                    = 0x0,
+	DCP_CURSOR2_FORCE_MC_ON_TRUE                     = 0x1,
+} DCP_CURSOR2_FORCE_MC_ON;
+typedef enum DCP_CURSOR2_URGENT_CONTROL {
+	DCP_CURSOR2_URGENT_CONTROL_MODE_0                = 0x0,
+	DCP_CURSOR2_URGENT_CONTROL_MODE_1                = 0x1,
+	DCP_CURSOR2_URGENT_CONTROL_MODE_2                = 0x2,
+	DCP_CURSOR2_URGENT_CONTROL_MODE_3                = 0x3,
+	DCP_CURSOR2_URGENT_CONTROL_MODE_4                = 0x4,
+} DCP_CURSOR2_URGENT_CONTROL;
+typedef enum DCP_CURSOR2_UPDATE_PENDING {
+	DCP_CURSOR2_UPDATE_PENDING_FALSE                 = 0x0,
+	DCP_CURSOR2_UPDATE_PENDING_TRUE                  = 0x1,
+} DCP_CURSOR2_UPDATE_PENDING;
+typedef enum DCP_CURSOR2_UPDATE_TAKEN {
+	DCP_CURSOR2_UPDATE_TAKEN_FALSE                   = 0x0,
+	DCP_CURSOR2_UPDATE_TAKEN_TRUE                    = 0x1,
+} DCP_CURSOR2_UPDATE_TAKEN;
+typedef enum DCP_CURSOR2_UPDATE_LOCK {
+	DCP_CURSOR2_UPDATE_LOCK_FALSE                    = 0x0,
+	DCP_CURSOR2_UPDATE_LOCK_TRUE                     = 0x1,
+} DCP_CURSOR2_UPDATE_LOCK;
+typedef enum DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE {
+	DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE_FALSE        = 0x0,
+	DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE_TRUE         = 0x1,
+} DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_CURSOR2_UPDATE_STEREO_MODE {
+	DCP_CURSOR2_UPDATE_STEREO_MODE_BOTH              = 0x0,
+	DCP_CURSOR2_UPDATE_STEREO_MODE_SECONDARY_ONLY    = 0x1,
+	DCP_CURSOR2_UPDATE_STEREO_MODE_UNDEFINED         = 0x2,
+	DCP_CURSOR2_UPDATE_STEREO_MODE_PRIMARY_ONLY      = 0x3,
+} DCP_CURSOR2_UPDATE_STEREO_MODE;
+typedef enum DCP_CUR_REQUEST_FILTER_DIS {
+	DCP_CUR_REQUEST_FILTER_DIS_FALSE                 = 0x0,
+	DCP_CUR_REQUEST_FILTER_DIS_TRUE                  = 0x1,
+} DCP_CUR_REQUEST_FILTER_DIS;
+typedef enum DCP_CURSOR_STEREO_EN {
+	DCP_CURSOR_STEREO_EN_FALSE                       = 0x0,
+	DCP_CURSOR_STEREO_EN_TRUE                        = 0x1,
+} DCP_CURSOR_STEREO_EN;
+typedef enum DCP_CURSOR_STEREO_OFFSET_YNX {
+	DCP_CURSOR_STEREO_OFFSET_YNX_X_POSITION          = 0x0,
+	DCP_CURSOR_STEREO_OFFSET_YNX_Y_POSITION          = 0x1,
+} DCP_CURSOR_STEREO_OFFSET_YNX;
+typedef enum DCP_CURSOR2_STEREO_EN {
+	DCP_CURSOR2_STEREO_EN_FALSE                      = 0x0,
+	DCP_CURSOR2_STEREO_EN_TRUE                       = 0x1,
+} DCP_CURSOR2_STEREO_EN;
+typedef enum DCP_CURSOR2_STEREO_OFFSET_YNX {
+	DCP_CURSOR2_STEREO_OFFSET_YNX_X_POSITION         = 0x0,
+	DCP_CURSOR2_STEREO_OFFSET_YNX_Y_POSITION         = 0x1,
+} DCP_CURSOR2_STEREO_OFFSET_YNX;
+typedef enum DCP_DC_LUT_RW_MODE {
+	DCP_DC_LUT_RW_MODE_256_ENTRY                     = 0x0,
+	DCP_DC_LUT_RW_MODE_PWL                           = 0x1,
+} DCP_DC_LUT_RW_MODE;
+typedef enum DCP_DC_LUT_VGA_ACCESS_ENABLE {
+	DCP_DC_LUT_VGA_ACCESS_ENABLE_FALSE               = 0x0,
+	DCP_DC_LUT_VGA_ACCESS_ENABLE_TRUE                = 0x1,
+} DCP_DC_LUT_VGA_ACCESS_ENABLE;
+typedef enum DCP_DC_LUT_AUTOFILL {
+	DCP_DC_LUT_AUTOFILL_FALSE                        = 0x0,
+	DCP_DC_LUT_AUTOFILL_TRUE                         = 0x1,
+} DCP_DC_LUT_AUTOFILL;
+typedef enum DCP_DC_LUT_AUTOFILL_DONE {
+	DCP_DC_LUT_AUTOFILL_DONE_FALSE                   = 0x0,
+	DCP_DC_LUT_AUTOFILL_DONE_TRUE                    = 0x1,
+} DCP_DC_LUT_AUTOFILL_DONE;
+typedef enum DCP_DC_LUT_INC_B {
+	DCP_DC_LUT_INC_B_NA                              = 0x0,
+	DCP_DC_LUT_INC_B_2                               = 0x1,
+	DCP_DC_LUT_INC_B_4                               = 0x2,
+	DCP_DC_LUT_INC_B_8                               = 0x3,
+	DCP_DC_LUT_INC_B_16                              = 0x4,
+	DCP_DC_LUT_INC_B_32                              = 0x5,
+	DCP_DC_LUT_INC_B_64                              = 0x6,
+	DCP_DC_LUT_INC_B_128                             = 0x7,
+	DCP_DC_LUT_INC_B_256                             = 0x8,
+	DCP_DC_LUT_INC_B_512                             = 0x9,
+} DCP_DC_LUT_INC_B;
+typedef enum DCP_DC_LUT_DATA_B_SIGNED_EN {
+	DCP_DC_LUT_DATA_B_SIGNED_EN_FALSE                = 0x0,
+	DCP_DC_LUT_DATA_B_SIGNED_EN_TRUE                 = 0x1,
+} DCP_DC_LUT_DATA_B_SIGNED_EN;
+typedef enum DCP_DC_LUT_DATA_B_FLOAT_POINT_EN {
+	DCP_DC_LUT_DATA_B_FLOAT_POINT_EN_FALSE           = 0x0,
+	DCP_DC_LUT_DATA_B_FLOAT_POINT_EN_TRUE            = 0x1,
+} DCP_DC_LUT_DATA_B_FLOAT_POINT_EN;
+typedef enum DCP_DC_LUT_DATA_B_FORMAT {
+	DCP_DC_LUT_DATA_B_FORMAT_U0P10                   = 0x0,
+	DCP_DC_LUT_DATA_B_FORMAT_S1P10                   = 0x1,
+	DCP_DC_LUT_DATA_B_FORMAT_U1P11                   = 0x2,
+	DCP_DC_LUT_DATA_B_FORMAT_U0P12                   = 0x3,
+} DCP_DC_LUT_DATA_B_FORMAT;
+typedef enum DCP_DC_LUT_INC_G {
+	DCP_DC_LUT_INC_G_NA                              = 0x0,
+	DCP_DC_LUT_INC_G_2                               = 0x1,
+	DCP_DC_LUT_INC_G_4                               = 0x2,
+	DCP_DC_LUT_INC_G_8                               = 0x3,
+	DCP_DC_LUT_INC_G_16                              = 0x4,
+	DCP_DC_LUT_INC_G_32                              = 0x5,
+	DCP_DC_LUT_INC_G_64                              = 0x6,
+	DCP_DC_LUT_INC_G_128                             = 0x7,
+	DCP_DC_LUT_INC_G_256                             = 0x8,
+	DCP_DC_LUT_INC_G_512                             = 0x9,
+} DCP_DC_LUT_INC_G;
+typedef enum DCP_DC_LUT_DATA_G_SIGNED_EN {
+	DCP_DC_LUT_DATA_G_SIGNED_EN_FALSE                = 0x0,
+	DCP_DC_LUT_DATA_G_SIGNED_EN_TRUE                 = 0x1,
+} DCP_DC_LUT_DATA_G_SIGNED_EN;
+typedef enum DCP_DC_LUT_DATA_G_FLOAT_POINT_EN {
+	DCP_DC_LUT_DATA_G_FLOAT_POINT_EN_FALSE           = 0x0,
+	DCP_DC_LUT_DATA_G_FLOAT_POINT_EN_TRUE            = 0x1,
+} DCP_DC_LUT_DATA_G_FLOAT_POINT_EN;
+typedef enum DCP_DC_LUT_DATA_G_FORMAT {
+	DCP_DC_LUT_DATA_G_FORMAT_U0P10                   = 0x0,
+	DCP_DC_LUT_DATA_G_FORMAT_S1P10                   = 0x1,
+	DCP_DC_LUT_DATA_G_FORMAT_U1P11                   = 0x2,
+	DCP_DC_LUT_DATA_G_FORMAT_U0P12                   = 0x3,
+} DCP_DC_LUT_DATA_G_FORMAT;
+typedef enum DCP_DC_LUT_INC_R {
+	DCP_DC_LUT_INC_R_NA                              = 0x0,
+	DCP_DC_LUT_INC_R_2                               = 0x1,
+	DCP_DC_LUT_INC_R_4                               = 0x2,
+	DCP_DC_LUT_INC_R_8                               = 0x3,
+	DCP_DC_LUT_INC_R_16                              = 0x4,
+	DCP_DC_LUT_INC_R_32                              = 0x5,
+	DCP_DC_LUT_INC_R_64                              = 0x6,
+	DCP_DC_LUT_INC_R_128                             = 0x7,
+	DCP_DC_LUT_INC_R_256                             = 0x8,
+	DCP_DC_LUT_INC_R_512                             = 0x9,
+} DCP_DC_LUT_INC_R;
+typedef enum DCP_DC_LUT_DATA_R_SIGNED_EN {
+	DCP_DC_LUT_DATA_R_SIGNED_EN_FALSE                = 0x0,
+	DCP_DC_LUT_DATA_R_SIGNED_EN_TRUE                 = 0x1,
+} DCP_DC_LUT_DATA_R_SIGNED_EN;
+typedef enum DCP_DC_LUT_DATA_R_FLOAT_POINT_EN {
+	DCP_DC_LUT_DATA_R_FLOAT_POINT_EN_FALSE           = 0x0,
+	DCP_DC_LUT_DATA_R_FLOAT_POINT_EN_TRUE            = 0x1,
+} DCP_DC_LUT_DATA_R_FLOAT_POINT_EN;
+typedef enum DCP_DC_LUT_DATA_R_FORMAT {
+	DCP_DC_LUT_DATA_R_FORMAT_U0P10                   = 0x0,
+	DCP_DC_LUT_DATA_R_FORMAT_S1P10                   = 0x1,
+	DCP_DC_LUT_DATA_R_FORMAT_U1P11                   = 0x2,
+	DCP_DC_LUT_DATA_R_FORMAT_U0P12                   = 0x3,
+} DCP_DC_LUT_DATA_R_FORMAT;
+typedef enum DCP_CRC_ENABLE {
+	DCP_CRC_ENABLE_FALSE                             = 0x0,
+	DCP_CRC_ENABLE_TRUE                              = 0x1,
+} DCP_CRC_ENABLE;
+typedef enum DCP_CRC_SOURCE_SEL {
+	DCP_CRC_SOURCE_SEL_OUTPUT_PIX                    = 0x0,
+	DCP_CRC_SOURCE_SEL_INPUT_L32                     = 0x1,
+	DCP_CRC_SOURCE_SEL_INPUT_H32                     = 0x2,
+	DCP_CRC_SOURCE_SEL_OUTPUT_CNTL                   = 0x4,
+} DCP_CRC_SOURCE_SEL;
+typedef enum DCP_CRC_LINE_SEL {
+	DCP_CRC_LINE_SEL_RESERVED                        = 0x0,
+	DCP_CRC_LINE_SEL_EVEN                            = 0x1,
+	DCP_CRC_LINE_SEL_ODD                             = 0x2,
+	DCP_CRC_LINE_SEL_BOTH                            = 0x3,
+} DCP_CRC_LINE_SEL;
+typedef enum DCP_GRPH_FLIP_RATE {
+	DCP_GRPH_FLIP_RATE_1FRAME                        = 0x0,
+	DCP_GRPH_FLIP_RATE_2FRAME                        = 0x1,
+	DCP_GRPH_FLIP_RATE_3FRAME                        = 0x2,
+	DCP_GRPH_FLIP_RATE_4FRAME                        = 0x3,
+	DCP_GRPH_FLIP_RATE_5FRAME                        = 0x4,
+	DCP_GRPH_FLIP_RATE_6FRAME                        = 0x5,
+	DCP_GRPH_FLIP_RATE_7FRAME                        = 0x6,
+	DCP_GRPH_FLIP_RATE_8FRAME                        = 0x7,
+} DCP_GRPH_FLIP_RATE;
+typedef enum DCP_GRPH_FLIP_RATE_ENABLE {
+	DCP_GRPH_FLIP_RATE_ENABLE_FALSE                  = 0x0,
+	DCP_GRPH_FLIP_RATE_ENABLE_TRUE                   = 0x1,
+} DCP_GRPH_FLIP_RATE_ENABLE;
+typedef enum DCP_GSL0_EN {
+	DCP_GSL0_EN_FALSE                                = 0x0,
+	DCP_GSL0_EN_TRUE                                 = 0x1,
+} DCP_GSL0_EN;
+typedef enum DCP_GSL1_EN {
+	DCP_GSL1_EN_FALSE                                = 0x0,
+	DCP_GSL1_EN_TRUE                                 = 0x1,
+} DCP_GSL1_EN;
+typedef enum DCP_GSL2_EN {
+	DCP_GSL2_EN_FALSE                                = 0x0,
+	DCP_GSL2_EN_TRUE                                 = 0x1,
+} DCP_GSL2_EN;
+typedef enum DCP_GSL_MASTER_EN {
+	DCP_GSL_MASTER_EN_FALSE                          = 0x0,
+	DCP_GSL_MASTER_EN_TRUE                           = 0x1,
+} DCP_GSL_MASTER_EN;
+typedef enum DCP_GSL_XDMA_GROUP {
+	DCP_GSL_XDMA_GROUP_VSYNC                         = 0x0,
+	DCP_GSL_XDMA_GROUP_HSYNC0                        = 0x1,
+	DCP_GSL_XDMA_GROUP_HSYNC1                        = 0x2,
+	DCP_GSL_XDMA_GROUP_HSYNC2                        = 0x3,
+} DCP_GSL_XDMA_GROUP;
+typedef enum DCP_GSL_XDMA_GROUP_UNDERFLOW_EN {
+	DCP_GSL_XDMA_GROUP_UNDERFLOW_EN_FALSE            = 0x0,
+	DCP_GSL_XDMA_GROUP_UNDERFLOW_EN_TRUE             = 0x1,
+} DCP_GSL_XDMA_GROUP_UNDERFLOW_EN;
+typedef enum DCP_GSL_SYNC_SOURCE {
+	DCP_GSL_SYNC_SOURCE_FLIP                         = 0x0,
+	DCP_GSL_SYNC_SOURCE_PHASE0                       = 0x1,
+	DCP_GSL_SYNC_SOURCE_RESET                        = 0x2,
+	DCP_GSL_SYNC_SOURCE_PHASE1                       = 0x3,
+} DCP_GSL_SYNC_SOURCE;
+typedef enum DCP_GSL_DELAY_SURFACE_UPDATE_PENDING {
+	DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_FALSE       = 0x0,
+	DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_TRUE        = 0x1,
+} DCP_GSL_DELAY_SURFACE_UPDATE_PENDING;
+typedef enum DCP_TEST_DEBUG_WRITE_EN {
+	DCP_TEST_DEBUG_WRITE_EN_FALSE                    = 0x0,
+	DCP_TEST_DEBUG_WRITE_EN_TRUE                     = 0x1,
+} DCP_TEST_DEBUG_WRITE_EN;
+typedef enum DCP_GRPH_STEREOSYNC_FLIP_EN {
+	DCP_GRPH_STEREOSYNC_FLIP_EN_FALSE                = 0x0,
+	DCP_GRPH_STEREOSYNC_FLIP_EN_TRUE                 = 0x1,
+} DCP_GRPH_STEREOSYNC_FLIP_EN;
+typedef enum DCP_GRPH_STEREOSYNC_FLIP_MODE {
+	DCP_GRPH_STEREOSYNC_FLIP_MODE_FLIP               = 0x0,
+	DCP_GRPH_STEREOSYNC_FLIP_MODE_PHASE0             = 0x1,
+	DCP_GRPH_STEREOSYNC_FLIP_MODE_RESET              = 0x2,
+	DCP_GRPH_STEREOSYNC_FLIP_MODE_PHASE1             = 0x3,
+} DCP_GRPH_STEREOSYNC_FLIP_MODE;
+typedef enum DCP_GRPH_STEREOSYNC_SELECT_DISABLE {
+	DCP_GRPH_STEREOSYNC_SELECT_DISABLE_FALSE         = 0x0,
+	DCP_GRPH_STEREOSYNC_SELECT_DISABLE_TRUE          = 0x1,
+} DCP_GRPH_STEREOSYNC_SELECT_DISABLE;
+typedef enum DCP_GRPH_ROTATION_ANGLE {
+	DCP_GRPH_ROTATION_ANGLE_0                        = 0x0,
+	DCP_GRPH_ROTATION_ANGLE_90                       = 0x1,
+	DCP_GRPH_ROTATION_ANGLE_180                      = 0x2,
+	DCP_GRPH_ROTATION_ANGLE_270                      = 0x3,
+} DCP_GRPH_ROTATION_ANGLE;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN {
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN_FALSE       = 0x0,
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN_TRUE        = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE {
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE_RELY_NUM  = 0x0,
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE_RELY_ENABLE= 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE;
+typedef enum DCP_GRPH_REGAMMA_MODE {
+	DCP_GRPH_REGAMMA_MODE_BYPASS                     = 0x0,
+	DCP_GRPH_REGAMMA_MODE_SRGB                       = 0x1,
+	DCP_GRPH_REGAMMA_MODE_XVYCC                      = 0x2,
+	DCP_GRPH_REGAMMA_MODE_PROGA                      = 0x3,
+	DCP_GRPH_REGAMMA_MODE_PROGB                      = 0x4,
+} DCP_GRPH_REGAMMA_MODE;
+typedef enum DCP_ALPHA_ROUND_TRUNC_MODE {
+	DCP_ALPHA_ROUND_TRUNC_MODE_ROUND                 = 0x0,
+	DCP_ALPHA_ROUND_TRUNC_MODE_TRUNC                 = 0x1,
+} DCP_ALPHA_ROUND_TRUNC_MODE;
+typedef enum DCP_CURSOR_ALPHA_BLND_ENA {
+	DCP_CURSOR_ALPHA_BLND_ENA_FALSE                  = 0x0,
+	DCP_CURSOR_ALPHA_BLND_ENA_TRUE                   = 0x1,
+} DCP_CURSOR_ALPHA_BLND_ENA;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK {
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK_FALSE   = 0x0,
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK_TRUE    = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK {
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK_FALSE    = 0x0,
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK_TRUE     = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK {
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK_FALSE     = 0x0,
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK_TRUE      = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK {
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK_FALSE      = 0x0,
+	DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK_TRUE       = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK;
+typedef enum DCP_GRPH_SURFACE_COUNTER_EN {
+	DCP_GRPH_SURFACE_COUNTER_EN_DISABLE              = 0x0,
+	DCP_GRPH_SURFACE_COUNTER_EN_ENABLE               = 0x1,
+} DCP_GRPH_SURFACE_COUNTER_EN;
+typedef enum DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT {
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_0          = 0x0,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_1          = 0x1,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_2          = 0x2,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_3          = 0x3,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_4          = 0x4,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_5          = 0x5,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_6          = 0x6,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_7          = 0x7,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_8          = 0x8,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_9          = 0x9,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_10         = 0xa,
+	DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_11         = 0xb,
+} DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT;
+typedef enum DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED {
+	DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED_NO     = 0x0,
+	DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED_YES    = 0x1,
+} DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED;
+typedef enum HDMI_KEEPOUT_MODE {
+	HDMI_KEEPOUT_0_650PIX_AFTER_VSYNC                = 0x0,
+	HDMI_KEEPOUT_509_650PIX_AFTER_VSYNC              = 0x1,
+} HDMI_KEEPOUT_MODE;
+typedef enum HDMI_CLOCK_CHANNEL_RATE {
+	HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE       = 0x0,
+	HDMI_CLOCK_CHANNEL_FREQ_QUARTER_TO_CHAR_RATE     = 0x1,
+} HDMI_CLOCK_CHANNEL_RATE;
+typedef enum HDMI_NO_EXTRA_NULL_PACKET_FILLED {
+	HDMI_EXTRA_NULL_PACKET_FILLED_ENABLE             = 0x0,
+	HDMI_EXTRA_NULL_PACKET_FILLED_DISABLE            = 0x1,
+} HDMI_NO_EXTRA_NULL_PACKET_FILLED;
+typedef enum HDMI_PACKET_GEN_VERSION {
+	HDMI_PACKET_GEN_VERSION_OLD                      = 0x0,
+	HDMI_PACKET_GEN_VERSION_NEW                      = 0x1,
+} HDMI_PACKET_GEN_VERSION;
+typedef enum HDMI_ERROR_ACK {
+	HDMI_ERROR_ACK_INT                               = 0x0,
+	HDMI_ERROR_NOT_ACK                               = 0x1,
+} HDMI_ERROR_ACK;
+typedef enum HDMI_ERROR_MASK {
+	HDMI_ERROR_MASK_INT                              = 0x0,
+	HDMI_ERROR_NOT_MASK                              = 0x1,
+} HDMI_ERROR_MASK;
+typedef enum HDMI_DEEP_COLOR_DEPTH {
+	HDMI_DEEP_COLOR_DEPTH_24BPP                      = 0x0,
+	HDMI_DEEP_COLOR_DEPTH_30BPP                      = 0x1,
+	HDMI_DEEP_COLOR_DEPTH_36BPP                      = 0x2,
+	HDMI_DEEP_COLOR_DEPTH_RESERVED                   = 0x3,
+} HDMI_DEEP_COLOR_DEPTH;
+typedef enum HDMI_AUDIO_DELAY_EN {
+	HDMI_AUDIO_DELAY_DISABLE                         = 0x0,
+	HDMI_AUDIO_DELAY_58CLK                           = 0x1,
+	HDMI_AUDIO_DELAY_56CLK                           = 0x2,
+	HDMI_AUDIO_DELAY_RESERVED                        = 0x3,
+} HDMI_AUDIO_DELAY_EN;
+typedef enum HDMI_AUDIO_SEND_MAX_PACKETS {
+	HDMI_NOT_SEND_MAX_AUDIO_PACKETS                  = 0x0,
+	HDMI_SEND_MAX_AUDIO_PACKETS                      = 0x1,
+} HDMI_AUDIO_SEND_MAX_PACKETS;
+typedef enum HDMI_ACR_SEND {
+	HDMI_ACR_NOT_SEND                                = 0x0,
+	HDMI_ACR_PKT_SEND                                = 0x1,
+} HDMI_ACR_SEND;
+typedef enum HDMI_ACR_CONT {
+	HDMI_ACR_CONT_DISABLE                            = 0x0,
+	HDMI_ACR_CONT_ENABLE                             = 0x1,
+} HDMI_ACR_CONT;
+typedef enum HDMI_ACR_SELECT {
+	HDMI_ACR_SELECT_HW                               = 0x0,
+	HDMI_ACR_SELECT_32K                              = 0x1,
+	HDMI_ACR_SELECT_44K                              = 0x2,
+	HDMI_ACR_SELECT_48K                              = 0x3,
+} HDMI_ACR_SELECT;
+typedef enum HDMI_ACR_SOURCE {
+	HDMI_ACR_SOURCE_HW                               = 0x0,
+	HDMI_ACR_SOURCE_SW                               = 0x1,
+} HDMI_ACR_SOURCE;
+typedef enum HDMI_ACR_N_MULTIPLE {
+	HDMI_ACR_0_MULTIPLE_RESERVED                     = 0x0,
+	HDMI_ACR_1_MULTIPLE                              = 0x1,
+	HDMI_ACR_2_MULTIPLE                              = 0x2,
+	HDMI_ACR_3_MULTIPLE_RESERVED                     = 0x3,
+	HDMI_ACR_4_MULTIPLE                              = 0x4,
+	HDMI_ACR_5_MULTIPLE_RESERVED                     = 0x5,
+	HDMI_ACR_6_MULTIPLE_RESERVED                     = 0x6,
+	HDMI_ACR_7_MULTIPLE_RESERVED                     = 0x7,
+} HDMI_ACR_N_MULTIPLE;
+typedef enum HDMI_ACR_AUDIO_PRIORITY {
+	HDMI_ACR_PKT_HIGH_PRIORITY_THAN_AUDIO_SAMPLE     = 0x0,
+	HDMI_AUDIO_SAMPLE_HIGH_PRIORITY_THAN_ACR_PKT     = 0x1,
+} HDMI_ACR_AUDIO_PRIORITY;
+typedef enum HDMI_NULL_SEND {
+	HDMI_NULL_NOT_SEND                               = 0x0,
+	HDMI_NULL_PKT_SEND                               = 0x1,
+} HDMI_NULL_SEND;
+typedef enum HDMI_GC_SEND {
+	HDMI_GC_NOT_SEND                                 = 0x0,
+	HDMI_GC_PKT_SEND                                 = 0x1,
+} HDMI_GC_SEND;
+typedef enum HDMI_GC_CONT {
+	HDMI_GC_CONT_DISABLE                             = 0x0,
+	HDMI_GC_CONT_ENABLE                              = 0x1,
+} HDMI_GC_CONT;
+typedef enum HDMI_ISRC_SEND {
+	HDMI_ISRC_NOT_SEND                               = 0x0,
+	HDMI_ISRC_PKT_SEND                               = 0x1,
+} HDMI_ISRC_SEND;
+typedef enum HDMI_ISRC_CONT {
+	HDMI_ISRC_CONT_DISABLE                           = 0x0,
+	HDMI_ISRC_CONT_ENABLE                            = 0x1,
+} HDMI_ISRC_CONT;
+typedef enum HDMI_AVI_INFO_SEND {
+	HDMI_AVI_INFO_NOT_SEND                           = 0x0,
+	HDMI_AVI_INFO_PKT_SEND                           = 0x1,
+} HDMI_AVI_INFO_SEND;
+typedef enum HDMI_AVI_INFO_CONT {
+	HDMI_AVI_INFO_CONT_DISABLE                       = 0x0,
+	HDMI_AVI_INFO_CONT_ENABLE                        = 0x1,
+} HDMI_AVI_INFO_CONT;
+typedef enum HDMI_AUDIO_INFO_SEND {
+	HDMI_AUDIO_INFO_NOT_SEND                         = 0x0,
+	HDMI_AUDIO_INFO_PKT_SEND                         = 0x1,
+} HDMI_AUDIO_INFO_SEND;
+typedef enum HDMI_AUDIO_INFO_CONT {
+	HDMI_AUDIO_INFO_CONT_DISABLE                     = 0x0,
+	HDMI_AUDIO_INFO_CONT_ENABLE                      = 0x1,
+} HDMI_AUDIO_INFO_CONT;
+typedef enum HDMI_MPEG_INFO_SEND {
+	HDMI_MPEG_INFO_NOT_SEND                          = 0x0,
+	HDMI_MPEG_INFO_PKT_SEND                          = 0x1,
+} HDMI_MPEG_INFO_SEND;
+typedef enum HDMI_MPEG_INFO_CONT {
+	HDMI_MPEG_INFO_CONT_DISABLE                      = 0x0,
+	HDMI_MPEG_INFO_CONT_ENABLE                       = 0x1,
+} HDMI_MPEG_INFO_CONT;
+typedef enum HDMI_GENERIC0_SEND {
+	HDMI_GENERIC0_NOT_SEND                           = 0x0,
+	HDMI_GENERIC0_PKT_SEND                           = 0x1,
+} HDMI_GENERIC0_SEND;
+typedef enum HDMI_GENERIC0_CONT {
+	HDMI_GENERIC0_CONT_DISABLE                       = 0x0,
+	HDMI_GENERIC0_CONT_ENABLE                        = 0x1,
+} HDMI_GENERIC0_CONT;
+typedef enum HDMI_GENERIC1_SEND {
+	HDMI_GENERIC1_NOT_SEND                           = 0x0,
+	HDMI_GENERIC1_PKT_SEND                           = 0x1,
+} HDMI_GENERIC1_SEND;
+typedef enum HDMI_GENERIC1_CONT {
+	HDMI_GENERIC1_CONT_DISABLE                       = 0x0,
+	HDMI_GENERIC1_CONT_ENABLE                        = 0x1,
+} HDMI_GENERIC1_CONT;
+typedef enum HDMI_GC_AVMUTE_CONT {
+	HDMI_GC_AVMUTE_CONT_DISABLE                      = 0x0,
+	HDMI_GC_AVMUTE_CONT_ENABLE                       = 0x1,
+} HDMI_GC_AVMUTE_CONT;
+typedef enum HDMI_PACKING_PHASE_OVERRIDE {
+	HDMI_PACKING_PHASE_SET_BY_HW                     = 0x0,
+	HDMI_PACKING_PHASE_SET_BY_SW                     = 0x1,
+} HDMI_PACKING_PHASE_OVERRIDE;
+typedef enum HDMI_GENERIC2_SEND {
+	HDMI_GENERIC2_NOT_SEND                           = 0x0,
+	HDMI_GENERIC2_PKT_SEND                           = 0x1,
+} HDMI_GENERIC2_SEND;
+typedef enum HDMI_GENERIC2_CONT {
+	HDMI_GENERIC2_CONT_DISABLE                       = 0x0,
+	HDMI_GENERIC2_CONT_ENABLE                        = 0x1,
+} HDMI_GENERIC2_CONT;
+typedef enum HDMI_GENERIC3_SEND {
+	HDMI_GENERIC3_NOT_SEND                           = 0x0,
+	HDMI_GENERIC3_PKT_SEND                           = 0x1,
+} HDMI_GENERIC3_SEND;
+typedef enum HDMI_GENERIC3_CONT {
+	HDMI_GENERIC3_CONT_DISABLE                       = 0x0,
+	HDMI_GENERIC3_CONT_ENABLE                        = 0x1,
+} HDMI_GENERIC3_CONT;
+typedef enum TMDS_PIXEL_ENCODING {
+	TMDS_PIXEL_ENCODING_444_OR_420                   = 0x0,
+	TMDS_PIXEL_ENCODING_422                          = 0x1,
+} TMDS_PIXEL_ENCODING;
+typedef enum TMDS_COLOR_FORMAT {
+	TMDS_COLOR_FORMAT__24BPP__TWIN30BPP_MSB__DUAL48BPP= 0x0,
+	TMDS_COLOR_FORMAT_TWIN30BPP_LSB                  = 0x1,
+	TMDS_COLOR_FORMAT_DUAL30BPP                      = 0x2,
+	TMDS_COLOR_FORMAT_RESERVED                       = 0x3,
+} TMDS_COLOR_FORMAT;
+typedef enum TMDS_STEREOSYNC_CTL_SEL_REG {
+	TMDS_STEREOSYNC_CTL0                             = 0x0,
+	TMDS_STEREOSYNC_CTL1                             = 0x1,
+	TMDS_STEREOSYNC_CTL2                             = 0x2,
+	TMDS_STEREOSYNC_CTL3                             = 0x3,
+} TMDS_STEREOSYNC_CTL_SEL_REG;
+typedef enum TMDS_CTL0_DATA_SEL {
+	TMDS_CTL0_DATA_SEL0_RESERVED                     = 0x0,
+	TMDS_CTL0_DATA_SEL1_DISPLAY_ENABLE               = 0x1,
+	TMDS_CTL0_DATA_SEL2_VSYNC                        = 0x2,
+	TMDS_CTL0_DATA_SEL3_RESERVED                     = 0x3,
+	TMDS_CTL0_DATA_SEL4_HSYNC                        = 0x4,
+	TMDS_CTL0_DATA_SEL5_SEL7_RESERVED                = 0x5,
+	TMDS_CTL0_DATA_SEL8_RANDOM_DATA                  = 0x6,
+	TMDS_CTL0_DATA_SEL9_SEL15_RANDOM_DATA            = 0x7,
+} TMDS_CTL0_DATA_SEL;
+typedef enum TMDS_CTL0_DATA_INVERT {
+	TMDS_CTL0_DATA_NORMAL                            = 0x0,
+	TMDS_CTL0_DATA_INVERT_EN                         = 0x1,
+} TMDS_CTL0_DATA_INVERT;
+typedef enum TMDS_CTL0_DATA_MODULATION {
+	TMDS_CTL0_DATA_MODULATION_DISABLE                = 0x0,
+	TMDS_CTL0_DATA_MODULATION_BIT0                   = 0x1,
+	TMDS_CTL0_DATA_MODULATION_BIT1                   = 0x2,
+	TMDS_CTL0_DATA_MODULATION_BIT2                   = 0x3,
+} TMDS_CTL0_DATA_MODULATION;
+typedef enum TMDS_CTL0_PATTERN_OUT_EN {
+	TMDS_CTL0_PATTERN_OUT_DISABLE                    = 0x0,
+	TMDS_CTL0_PATTERN_OUT_ENABLE                     = 0x1,
+} TMDS_CTL0_PATTERN_OUT_EN;
+typedef enum TMDS_CTL1_DATA_SEL {
+	TMDS_CTL1_DATA_SEL0_RESERVED                     = 0x0,
+	TMDS_CTL1_DATA_SEL1_DISPLAY_ENABLE               = 0x1,
+	TMDS_CTL1_DATA_SEL2_VSYNC                        = 0x2,
+	TMDS_CTL1_DATA_SEL3_RESERVED                     = 0x3,
+	TMDS_CTL1_DATA_SEL4_HSYNC                        = 0x4,
+	TMDS_CTL1_DATA_SEL5_SEL7_RESERVED                = 0x5,
+	TMDS_CTL1_DATA_SEL8_BLANK_TIME                   = 0x6,
+	TMDS_CTL1_DATA_SEL9_SEL15_RESERVED               = 0x7,
+} TMDS_CTL1_DATA_SEL;
+typedef enum TMDS_CTL1_DATA_INVERT {
+	TMDS_CTL1_DATA_NORMAL                            = 0x0,
+	TMDS_CTL1_DATA_INVERT_EN                         = 0x1,
+} TMDS_CTL1_DATA_INVERT;
+typedef enum TMDS_CTL1_DATA_MODULATION {
+	TMDS_CTL1_DATA_MODULATION_DISABLE                = 0x0,
+	TMDS_CTL1_DATA_MODULATION_BIT0                   = 0x1,
+	TMDS_CTL1_DATA_MODULATION_BIT1                   = 0x2,
+	TMDS_CTL1_DATA_MODULATION_BIT2                   = 0x3,
+} TMDS_CTL1_DATA_MODULATION;
+typedef enum TMDS_CTL1_PATTERN_OUT_EN {
+	TMDS_CTL1_PATTERN_OUT_DISABLE                    = 0x0,
+	TMDS_CTL1_PATTERN_OUT_ENABLE                     = 0x1,
+} TMDS_CTL1_PATTERN_OUT_EN;
+typedef enum TMDS_CTL2_DATA_SEL {
+	TMDS_CTL2_DATA_SEL0_RESERVED                     = 0x0,
+	TMDS_CTL2_DATA_SEL1_DISPLAY_ENABLE               = 0x1,
+	TMDS_CTL2_DATA_SEL2_VSYNC                        = 0x2,
+	TMDS_CTL2_DATA_SEL3_RESERVED                     = 0x3,
+	TMDS_CTL2_DATA_SEL4_HSYNC                        = 0x4,
+	TMDS_CTL2_DATA_SEL5_SEL7_RESERVED                = 0x5,
+	TMDS_CTL2_DATA_SEL8_BLANK_TIME                   = 0x6,
+	TMDS_CTL2_DATA_SEL9_SEL15_RESERVED               = 0x7,
+} TMDS_CTL2_DATA_SEL;
+typedef enum TMDS_CTL2_DATA_INVERT {
+	TMDS_CTL2_DATA_NORMAL                            = 0x0,
+	TMDS_CTL2_DATA_INVERT_EN                         = 0x1,
+} TMDS_CTL2_DATA_INVERT;
+typedef enum TMDS_CTL2_DATA_MODULATION {
+	TMDS_CTL2_DATA_MODULATION_DISABLE                = 0x0,
+	TMDS_CTL2_DATA_MODULATION_BIT0                   = 0x1,
+	TMDS_CTL2_DATA_MODULATION_BIT1                   = 0x2,
+	TMDS_CTL2_DATA_MODULATION_BIT2                   = 0x3,
+} TMDS_CTL2_DATA_MODULATION;
+typedef enum TMDS_CTL2_PATTERN_OUT_EN {
+	TMDS_CTL2_PATTERN_OUT_DISABLE                    = 0x0,
+	TMDS_CTL2_PATTERN_OUT_ENABLE                     = 0x1,
+} TMDS_CTL2_PATTERN_OUT_EN;
+typedef enum TMDS_CTL3_DATA_INVERT {
+	TMDS_CTL3_DATA_NORMAL                            = 0x0,
+	TMDS_CTL3_DATA_INVERT_EN                         = 0x1,
+} TMDS_CTL3_DATA_INVERT;
+typedef enum TMDS_CTL3_DATA_MODULATION {
+	TMDS_CTL3_DATA_MODULATION_DISABLE                = 0x0,
+	TMDS_CTL3_DATA_MODULATION_BIT0                   = 0x1,
+	TMDS_CTL3_DATA_MODULATION_BIT1                   = 0x2,
+	TMDS_CTL3_DATA_MODULATION_BIT2                   = 0x3,
+} TMDS_CTL3_DATA_MODULATION;
+typedef enum TMDS_CTL3_PATTERN_OUT_EN {
+	TMDS_CTL3_PATTERN_OUT_DISABLE                    = 0x0,
+	TMDS_CTL3_PATTERN_OUT_ENABLE                     = 0x1,
+} TMDS_CTL3_PATTERN_OUT_EN;
+typedef enum TMDS_CTL3_DATA_SEL {
+	TMDS_CTL3_DATA_SEL0_RESERVED                     = 0x0,
+	TMDS_CTL3_DATA_SEL1_DISPLAY_ENABLE               = 0x1,
+	TMDS_CTL3_DATA_SEL2_VSYNC                        = 0x2,
+	TMDS_CTL3_DATA_SEL3_RESERVED                     = 0x3,
+	TMDS_CTL3_DATA_SEL4_HSYNC                        = 0x4,
+	TMDS_CTL3_DATA_SEL5_SEL7_RESERVED                = 0x5,
+	TMDS_CTL3_DATA_SEL8_BLANK_TIME                   = 0x6,
+	TMDS_CTL3_DATA_SEL9_SEL15_RESERVED               = 0x7,
+} TMDS_CTL3_DATA_SEL;
+typedef enum DIG_FE_CNTL_SOURCE_SELECT {
+	DIG_FE_SOURCE_FROM_FMT0                          = 0x0,
+	DIG_FE_SOURCE_FROM_FMT1                          = 0x1,
+	DIG_FE_SOURCE_FROM_FMT2                          = 0x2,
+	DIG_FE_SOURCE_FROM_FMT3                          = 0x3,
+	DIG_FE_SOURCE_FROM_FMT4                          = 0x4,
+	DIG_FE_SOURCE_FROM_FMT5                          = 0x5,
+} DIG_FE_CNTL_SOURCE_SELECT;
+typedef enum DIG_FE_CNTL_STEREOSYNC_SELECT {
+	DIG_FE_STEREOSYNC_FROM_FMT0                      = 0x0,
+	DIG_FE_STEREOSYNC_FROM_FMT1                      = 0x1,
+	DIG_FE_STEREOSYNC_FROM_FMT2                      = 0x2,
+	DIG_FE_STEREOSYNC_FROM_FMT3                      = 0x3,
+	DIG_FE_STEREOSYNC_FROM_FMT4                      = 0x4,
+	DIG_FE_STEREOSYNC_FROM_FMT5                      = 0x5,
+} DIG_FE_CNTL_STEREOSYNC_SELECT;
+typedef enum DIG_FIFO_READ_CLOCK_SRC {
+	DIG_FIFO_READ_CLOCK_SRC_FROM_DCCG                = 0x0,
+	DIG_FIFO_READ_CLOCK_SRC_FROM_DISPLAY_PIPE        = 0x1,
+} DIG_FIFO_READ_CLOCK_SRC;
+typedef enum DIG_OUTPUT_CRC_CNTL_LINK_SEL {
+	DIG_OUTPUT_CRC_ON_LINK0                          = 0x0,
+	DIG_OUTPUT_CRC_ON_LINK1                          = 0x1,
+} DIG_OUTPUT_CRC_CNTL_LINK_SEL;
+typedef enum DIG_OUTPUT_CRC_DATA_SEL {
+	DIG_OUTPUT_CRC_FOR_FULLFRAME                     = 0x0,
+	DIG_OUTPUT_CRC_FOR_ACTIVEONLY                    = 0x1,
+	DIG_OUTPUT_CRC_FOR_VBI                           = 0x2,
+	DIG_OUTPUT_CRC_FOR_AUDIO                         = 0x3,
+} DIG_OUTPUT_CRC_DATA_SEL;
+typedef enum DIG_TEST_PATTERN_TEST_PATTERN_OUT_EN {
+	DIG_IN_NORMAL_OPERATION                          = 0x0,
+	DIG_IN_DEBUG_MODE                                = 0x1,
+} DIG_TEST_PATTERN_TEST_PATTERN_OUT_EN;
+typedef enum DIG_TEST_PATTERN_HALF_CLOCK_PATTERN_SEL {
+	DIG_10BIT_TEST_PATTERN                           = 0x0,
+	DIG_ALTERNATING_TEST_PATTERN                     = 0x1,
+} DIG_TEST_PATTERN_HALF_CLOCK_PATTERN_SEL;
+typedef enum DIG_TEST_PATTERN_RANDOM_PATTERN_OUT_EN {
+	DIG_TEST_PATTERN_NORMAL                          = 0x0,
+	DIG_TEST_PATTERN_RANDOM                          = 0x1,
+} DIG_TEST_PATTERN_RANDOM_PATTERN_OUT_EN;
+typedef enum DIG_TEST_PATTERN_RANDOM_PATTERN_RESET {
+	DIG_RANDOM_PATTERN_ENABLED                       = 0x0,
+	DIG_RANDOM_PATTERN_RESETED                       = 0x1,
+} DIG_TEST_PATTERN_RANDOM_PATTERN_RESET;
+typedef enum DIG_TEST_PATTERN_EXTERNAL_RESET_EN {
+	DIG_TEST_PATTERN_EXTERNAL_RESET_ENABLE           = 0x0,
+	DIG_TEST_PATTERN_EXTERNAL_RESET_BY_EXT_SIG       = 0x1,
+} DIG_TEST_PATTERN_EXTERNAL_RESET_EN;
+typedef enum DIG_RANDOM_PATTERN_SEED_RAN_PAT {
+	DIG_RANDOM_PATTERN_SEED_RAN_PAT_ALL_PIXELS       = 0x0,
+	DIG_RANDOM_PATTERN_SEED_RAN_PAT_DE_HIGH          = 0x1,
+} DIG_RANDOM_PATTERN_SEED_RAN_PAT;
+typedef enum DIG_FIFO_STATUS_USE_OVERWRITE_LEVEL {
+	DIG_FIFO_USE_OVERWRITE_LEVEL                     = 0x0,
+	DIG_FIFO_USE_CAL_AVERAGE_LEVEL                   = 0x1,
+} DIG_FIFO_STATUS_USE_OVERWRITE_LEVEL;
+typedef enum DIG_FIFO_ERROR_ACK {
+	DIG_FIFO_ERROR_ACK_INT                           = 0x0,
+	DIG_FIFO_ERROR_NOT_ACK                           = 0x1,
+} DIG_FIFO_ERROR_ACK;
+typedef enum DIG_FIFO_STATUS_FORCE_RECAL_AVERAGE {
+	DIG_FIFO_NOT_FORCE_RECAL_AVERAGE                 = 0x0,
+	DIG_FIFO_FORCE_RECAL_AVERAGE_LEVEL               = 0x1,
+} DIG_FIFO_STATUS_FORCE_RECAL_AVERAGE;
+typedef enum DIG_FIFO_STATUS_FORCE_RECOMP_MINMAX {
+	DIG_FIFO_NOT_FORCE_RECOMP_MINMAX                 = 0x0,
+	DIG_FIFO_FORCE_RECOMP_MINMAX                     = 0x1,
+} DIG_FIFO_STATUS_FORCE_RECOMP_MINMAX;
+typedef enum DIG_DISPCLK_SWITCH_CNTL_SWITCH_POINT {
+	DIG_DISPCLK_SWITCH_AT_EARLY_VBLANK               = 0x0,
+	DIG_DISPCLK_SWITCH_AT_FIRST_HSYNC                = 0x1,
+} DIG_DISPCLK_SWITCH_CNTL_SWITCH_POINT;
+typedef enum DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK {
+	DIG_DISPCLK_SWITCH_ALLOWED_ACK_INT               = 0x0,
+	DIG_DISPCLK_SWITCH_ALLOWED_INT_NOT_ACK           = 0x1,
+} DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK;
+typedef enum DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK {
+	DIG_DISPCLK_SWITCH_ALLOWED_MASK_INT              = 0x0,
+	DIG_DISPCLK_SWITCH_ALLOWED_INT_UNMASK            = 0x1,
+} DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK;
+typedef enum AFMT_INTERRUPT_STATUS_CHG_MASK {
+	AFMT_INTERRUPT_DISABLE                           = 0x0,
+	AFMT_INTERRUPT_ENABLE                            = 0x1,
+} AFMT_INTERRUPT_STATUS_CHG_MASK;
+typedef enum HDMI_GC_AVMUTE {
+	HDMI_GC_AVMUTE_SET                               = 0x0,
+	HDMI_GC_AVMUTE_UNSET                             = 0x1,
+} HDMI_GC_AVMUTE;
+typedef enum HDMI_DEFAULT_PAHSE {
+	HDMI_DEFAULT_PHASE_IS_0                          = 0x0,
+	HDMI_DEFAULT_PHASE_IS_1                          = 0x1,
+} HDMI_DEFAULT_PAHSE;
+typedef enum AFMT_AUDIO_PACKET_CONTROL2_AUDIO_LAYOUT_OVRD {
+	AFMT_AUDIO_LAYOUT_DETERMINED_BY_AZ_AUDIO_CHANNEL_STATUS= 0x0,
+	AFMT_AUDIO_LAYOUT_OVRD_BY_REGISTER               = 0x1,
+} AFMT_AUDIO_PACKET_CONTROL2_AUDIO_LAYOUT_OVRD;
+typedef enum AUDIO_LAYOUT_SELECT {
+	AUDIO_LAYOUT_0                                   = 0x0,
+	AUDIO_LAYOUT_1                                   = 0x1,
+} AUDIO_LAYOUT_SELECT;
+typedef enum AFMT_AUDIO_CRC_CONTROL_CONT {
+	AFMT_AUDIO_CRC_ONESHOT                           = 0x0,
+	AFMT_AUDIO_CRC_AUTO_RESTART                      = 0x1,
+} AFMT_AUDIO_CRC_CONTROL_CONT;
+typedef enum AFMT_AUDIO_CRC_CONTROL_SOURCE {
+	AFMT_AUDIO_CRC_SOURCE_FROM_FIFO_INPUT            = 0x0,
+	AFMT_AUDIO_CRC_SOURCE_FROM_FIFO_OUTPUT           = 0x1,
+} AFMT_AUDIO_CRC_CONTROL_SOURCE;
+typedef enum AFMT_AUDIO_CRC_CONTROL_CH_SEL {
+	AFMT_AUDIO_CRC_CH0_SIG                           = 0x0,
+	AFMT_AUDIO_CRC_CH1_SIG                           = 0x1,
+	AFMT_AUDIO_CRC_CH2_SIG                           = 0x2,
+	AFMT_AUDIO_CRC_CH3_SIG                           = 0x3,
+	AFMT_AUDIO_CRC_CH4_SIG                           = 0x4,
+	AFMT_AUDIO_CRC_CH5_SIG                           = 0x5,
+	AFMT_AUDIO_CRC_CH6_SIG                           = 0x6,
+	AFMT_AUDIO_CRC_CH7_SIG                           = 0x7,
+	AFMT_AUDIO_CRC_RESERVED                          = 0x8,
+	AFMT_AUDIO_CRC_AUDIO_SAMPLE_COUNT                = 0x9,
+} AFMT_AUDIO_CRC_CONTROL_CH_SEL;
+typedef enum AFMT_RAMP_CONTROL0_SIGN {
+	AFMT_RAMP_SIGNED                                 = 0x0,
+	AFMT_RAMP_UNSIGNED                               = 0x1,
+} AFMT_RAMP_CONTROL0_SIGN;
+typedef enum AFMT_AUDIO_PACKET_CONTROL_AUDIO_SAMPLE_SEND {
+	AFMT_AUDIO_PACKET_SENT_DISABLED                  = 0x0,
+	AFMT_AUDIO_PACKET_SENT_ENABLED                   = 0x1,
+} AFMT_AUDIO_PACKET_CONTROL_AUDIO_SAMPLE_SEND;
+typedef enum AFMT_AUDIO_PACKET_CONTROL_RESET_FIFO_WHEN_AUDIO_DIS {
+	AFMT_NOT_RESET_AUDIO_FIFO_WHEN_AUDIO_DISABLED_RESERVED= 0x0,
+	AFMT_RESET_AUDIO_FIFO_WHEN_AUDIO_DISABLED        = 0x1,
+} AFMT_AUDIO_PACKET_CONTROL_RESET_FIFO_WHEN_AUDIO_DIS;
+typedef enum AFMT_INFOFRAME_CONTROL0_AUDIO_INFO_SOURCE {
+	AFMT_INFOFRAME_SOURCE_FROM_AZALIA_BLOCK          = 0x0,
+	AFMT_INFOFRAME_SOURCE_FROM_AFMT_REGISTERS        = 0x1,
+} AFMT_INFOFRAME_CONTROL0_AUDIO_INFO_SOURCE;
+typedef enum AFMT_AUDIO_SRC_CONTROL_SELECT {
+	AFMT_AUDIO_SRC_FROM_AZ_STREAM0                   = 0x0,
+	AFMT_AUDIO_SRC_FROM_AZ_STREAM1                   = 0x1,
+	AFMT_AUDIO_SRC_FROM_AZ_STREAM2                   = 0x2,
+	AFMT_AUDIO_SRC_FROM_AZ_STREAM3                   = 0x3,
+	AFMT_AUDIO_SRC_FROM_AZ_STREAM4                   = 0x4,
+	AFMT_AUDIO_SRC_FROM_AZ_STREAM5                   = 0x5,
+	AFMT_AUDIO_SRC_RESERVED                          = 0x6,
+} AFMT_AUDIO_SRC_CONTROL_SELECT;
+typedef enum DIG_BE_CNTL_MODE {
+	DIG_BE_DP_SST_MODE                               = 0x0,
+	DIG_BE_RESERVED1                                 = 0x1,
+	DIG_BE_TMDS_DVI_MODE                             = 0x2,
+	DIG_BE_TMDS_HDMI_MODE                            = 0x3,
+	DIG_BE_SDVO_RESERVED                             = 0x4,
+	DIG_BE_DP_MST_MODE                               = 0x5,
+	DIG_BE_RESERVED2                                 = 0x6,
+	DIG_BE_RESERVED3                                 = 0x7,
+} DIG_BE_CNTL_MODE;
+typedef enum DIG_BE_CNTL_HPD_SELECT {
+	DIG_BE_CNTL_HPD1                                 = 0x0,
+	DIG_BE_CNTL_HPD2                                 = 0x1,
+	DIG_BE_CNTL_HPD3                                 = 0x2,
+	DIG_BE_CNTL_HPD4                                 = 0x3,
+	DIG_BE_CNTL_HPD5                                 = 0x4,
+	DIG_BE_CNTL_HPD6                                 = 0x5,
+} DIG_BE_CNTL_HPD_SELECT;
+typedef enum LVTMA_RANDOM_PATTERN_SEED_RAN_PAT {
+	LVTMA_RANDOM_PATTERN_SEED_ALL_PIXELS             = 0x0,
+	LVTMA_RANDOM_PATTERN_SEED_ONLY_DE_HIGH           = 0x1,
+} LVTMA_RANDOM_PATTERN_SEED_RAN_PAT;
+typedef enum TMDS_SYNC_PHASE {
+	TMDS_NOT_SYNC_PHASE_ON_FRAME_START               = 0x0,
+	TMDS_SYNC_PHASE_ON_FRAME_START                   = 0x1,
+} TMDS_SYNC_PHASE;
+typedef enum TMDS_DATA_SYNCHRONIZATION_DSINTSEL {
+	TMDS_DATA_SYNCHRONIZATION_DSINTSEL_PCLK_TMDS     = 0x0,
+	TMDS_DATA_SYNCHRONIZATION_DSINTSEL_TMDS_PLL      = 0x1,
+} TMDS_DATA_SYNCHRONIZATION_DSINTSEL;
+typedef enum TMDS_TRANSMITTER_ENABLE_HPD_MASK {
+	TMDS_TRANSMITTER_HPD_MASK_NOT_OVERRIDE           = 0x0,
+	TMDS_TRANSMITTER_HPD_MASK_OVERRIDE               = 0x1,
+} TMDS_TRANSMITTER_ENABLE_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK {
+	TMDS_TRANSMITTER_LNKCEN_HPD_MASK_NOT_OVERRIDE    = 0x0,
+	TMDS_TRANSMITTER_LNKCEN_HPD_MASK_OVERRIDE        = 0x1,
+} TMDS_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK {
+	TMDS_TRANSMITTER_LNKDEN_HPD_MASK_NOT_OVERRIDE    = 0x0,
+	TMDS_TRANSMITTER_LNKDEN_HPD_MASK_OVERRIDE        = 0x1,
+} TMDS_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLL_ENABLE_HPD_MASK {
+	TMDS_TRANSMITTER_HPD_NOT_OVERRIDE_PLL_ENABLE     = 0x0,
+	TMDS_TRANSMITTER_HPD_OVERRIDE_PLL_ENABLE_ON_DISCON= 0x1,
+	TMDS_TRANSMITTER_HPD_OVERRIDE_PLL_ENABLE_ON_CON  = 0x2,
+	TMDS_TRANSMITTER_HPD_OVERRIDE_PLL_ENABLE         = 0x3,
+} TMDS_TRANSMITTER_CONTROL_PLL_ENABLE_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_CONTROL_IDSCKSELA {
+	TMDS_TRANSMITTER_IDSCKSELA_USE_IPIXCLK           = 0x0,
+	TMDS_TRANSMITTER_IDSCKSELA_USE_IDCLK             = 0x1,
+} TMDS_TRANSMITTER_CONTROL_IDSCKSELA;
+typedef enum TMDS_TRANSMITTER_CONTROL_IDSCKSELB {
+	TMDS_TRANSMITTER_IDSCKSELB_USE_IPIXCLK           = 0x0,
+	TMDS_TRANSMITTER_IDSCKSELB_USE_IDCLK             = 0x1,
+} TMDS_TRANSMITTER_CONTROL_IDSCKSELB;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN {
+	TMDS_TRANSMITTER_PLL_PWRUP_SEQ_DISABLE           = 0x0,
+	TMDS_TRANSMITTER_PLL_PWRUP_SEQ_ENABLE            = 0x1,
+} TMDS_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLL_RESET_HPD_MASK {
+	TMDS_TRANSMITTER_PLL_NOT_RST_ON_HPD              = 0x0,
+	TMDS_TRANSMITTER_PLL_RST_ON_HPD                  = 0x1,
+} TMDS_TRANSMITTER_CONTROL_PLL_RESET_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_CONTROL_TMCLK_FROM_PADS {
+	TMDS_TRANSMITTER_TMCLK_FROM_TMDS_TMCLK           = 0x0,
+	TMDS_TRANSMITTER_TMCLK_FROM_PADS                 = 0x1,
+} TMDS_TRANSMITTER_CONTROL_TMCLK_FROM_PADS;
+typedef enum TMDS_TRANSMITTER_CONTROL_TDCLK_FROM_PADS {
+	TMDS_TRANSMITTER_TDCLK_FROM_TMDS_TDCLK           = 0x0,
+	TMDS_TRANSMITTER_TDCLK_FROM_PADS                 = 0x1,
+} TMDS_TRANSMITTER_CONTROL_TDCLK_FROM_PADS;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLLSEL_OVERWRITE_EN {
+	TMDS_TRANSMITTER_PLLSEL_BY_HW                    = 0x0,
+	TMDS_TRANSMITTER_PLLSEL_OVERWRITE_BY_SW          = 0x1,
+} TMDS_TRANSMITTER_CONTROL_PLLSEL_OVERWRITE_EN;
+typedef enum TMDS_TRANSMITTER_CONTROL_BYPASS_PLLA {
+	TMDS_TRANSMITTER_BYPASS_PLLA_COHERENT            = 0x0,
+	TMDS_TRANSMITTER_BYPASS_PLLA_INCOHERENT          = 0x1,
+} TMDS_TRANSMITTER_CONTROL_BYPASS_PLLA;
+typedef enum TMDS_TRANSMITTER_CONTROL_BYPASS_PLLB {
+	TMDS_TRANSMITTER_BYPASS_PLLB_COHERENT            = 0x0,
+	TMDS_TRANSMITTER_BYPASS_PLLB_INCOHERENT          = 0x1,
+} TMDS_TRANSMITTER_CONTROL_BYPASS_PLLB;
+typedef enum TMDS_REG_TEST_OUTPUTA_CNTLA {
+	TMDS_REG_TEST_OUTPUTA_CNTLA_OTDATA0              = 0x0,
+	TMDS_REG_TEST_OUTPUTA_CNTLA_OTDATA1              = 0x1,
+	TMDS_REG_TEST_OUTPUTA_CNTLA_OTDATA2              = 0x2,
+	TMDS_REG_TEST_OUTPUTA_CNTLA_NA                   = 0x3,
+} TMDS_REG_TEST_OUTPUTA_CNTLA;
+typedef enum TMDS_REG_TEST_OUTPUTB_CNTLB {
+	TMDS_REG_TEST_OUTPUTB_CNTLB_OTDATB0              = 0x0,
+	TMDS_REG_TEST_OUTPUTB_CNTLB_OTDATB1              = 0x1,
+	TMDS_REG_TEST_OUTPUTB_CNTLB_OTDATB2              = 0x2,
+	TMDS_REG_TEST_OUTPUTB_CNTLB_NA                   = 0x3,
+} TMDS_REG_TEST_OUTPUTB_CNTLB;
+typedef enum DP_LINK_TRAINING_COMPLETE {
+	DP_LINK_TRAINING_NOT_COMPLETE                    = 0x0,
+	DP_LINK_TRAINING_ALREADY_COMPLETE                = 0x1,
+} DP_LINK_TRAINING_COMPLETE;
+typedef enum DP_EMBEDDED_PANEL_MODE {
+	DP_EXTERNAL_PANEL                                = 0x0,
+	DP_EMBEDDED_PANEL                                = 0x1,
+} DP_EMBEDDED_PANEL_MODE;
+typedef enum DP_PIXEL_ENCODING {
+	DP_PIXEL_ENCODING_RGB444                         = 0x0,
+	DP_PIXEL_ENCODING_YCBCR422                       = 0x1,
+	DP_PIXEL_ENCODING_YCBCR444                       = 0x2,
+	DP_PIXEL_ENCODING_RGB_WIDE_GAMUT                 = 0x3,
+	DP_PIXEL_ENCODING_Y_ONLY                         = 0x4,
+	DP_PIXEL_ENCODING_YCBCR420                       = 0x5,
+	DP_PIXEL_ENCODING_RESERVED                       = 0x6,
+} DP_PIXEL_ENCODING;
+typedef enum DP_DYN_RANGE {
+	DP_DYN_VESA_RANGE                                = 0x0,
+	DP_DYN_CEA_RANGE                                 = 0x1,
+} DP_DYN_RANGE;
+typedef enum DP_YCBCR_RANGE {
+	DP_YCBCR_RANGE_BT601_5                           = 0x0,
+	DP_YCBCR_RANGE_BT709_5                           = 0x1,
+} DP_YCBCR_RANGE;
+typedef enum DP_COMPONENT_DEPTH {
+	DP_COMPONENT_DEPTH_6BPC                          = 0x0,
+	DP_COMPONENT_DEPTH_8BPC                          = 0x1,
+	DP_COMPONENT_DEPTH_10BPC                         = 0x2,
+	DP_COMPONENT_DEPTH_12BPC                         = 0x3,
+	DP_COMPONENT_DEPTH_16BPC                         = 0x4,
+	DP_COMPONENT_DEPTH_RESERVED                      = 0x5,
+} DP_COMPONENT_DEPTH;
+typedef enum DP_MSA_MISC0_OVERRIDE_ENABLE {
+	MSA_MISC0_OVERRIDE_DISABLE                       = 0x0,
+	MSA_MISC0_OVERRIDE_ENABLE                        = 0x1,
+} DP_MSA_MISC0_OVERRIDE_ENABLE;
+typedef enum DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE {
+	MSA_MISC1_BIT7_OVERRIDE_DISABLE                  = 0x0,
+	MSA_MISC1_BIT7_OVERRIDE_ENABLE                   = 0x1,
+} DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE;
+typedef enum DP_UDI_LANES {
+	DP_UDI_1_LANE                                    = 0x0,
+	DP_UDI_2_LANES                                   = 0x1,
+	DP_UDI_LANES_RESERVED                            = 0x2,
+	DP_UDI_4_LANES                                   = 0x3,
+} DP_UDI_LANES;
+typedef enum DP_VID_STREAM_DIS_DEFER {
+	DP_VID_STREAM_DIS_NO_DEFER                       = 0x0,
+	DP_VID_STREAM_DIS_DEFER_TO_HBLANK                = 0x1,
+	DP_VID_STREAM_DIS_DEFER_TO_VBLANK                = 0x2,
+} DP_VID_STREAM_DIS_DEFER;
+typedef enum DP_STEER_OVERFLOW_ACK {
+	DP_STEER_OVERFLOW_ACK_NO_EFFECT                  = 0x0,
+	DP_STEER_OVERFLOW_ACK_CLR_INTERRUPT              = 0x1,
+} DP_STEER_OVERFLOW_ACK;
+typedef enum DP_STEER_OVERFLOW_MASK {
+	DP_STEER_OVERFLOW_MASKED                         = 0x0,
+	DP_STEER_OVERFLOW_UNMASK                         = 0x1,
+} DP_STEER_OVERFLOW_MASK;
+typedef enum DP_TU_OVERFLOW_ACK {
+	DP_TU_OVERFLOW_ACK_NO_EFFECT                     = 0x0,
+	DP_TU_OVERFLOW_ACK_CLR_INTERRUPT                 = 0x1,
+} DP_TU_OVERFLOW_ACK;
+typedef enum DP_VID_TIMING_MODE {
+	DP_VID_TIMING_MODE_ASYNC                         = 0x0,
+	DP_VID_TIMING_MODE_SYNC                          = 0x1,
+} DP_VID_TIMING_MODE;
+typedef enum DP_VID_M_N_DOUBLE_BUFFER_MODE {
+	DP_VID_M_N_DOUBLE_BUFFER_AFTER_VID_M_UPDATE      = 0x0,
+	DP_VID_M_N_DOUBLE_BUFFER_AT_FRAME_START          = 0x1,
+} DP_VID_M_N_DOUBLE_BUFFER_MODE;
+typedef enum DP_VID_M_N_GEN_EN {
+	DP_VID_M_N_PROGRAMMED_VIA_REG                    = 0x0,
+	DP_VID_M_N_CALC_AUTO                             = 0x1,
+} DP_VID_M_N_GEN_EN;
+typedef enum DP_VID_M_DOUBLE_VALUE_EN {
+	DP_VID_M_INPUT_PIXEL_RATE                        = 0x0,
+	DP_VID_M_DOUBLE_INPUT_PIXEL_RATE                 = 0x1,
+} DP_VID_M_DOUBLE_VALUE_EN;
+typedef enum DP_VID_ENHANCED_FRAME_MODE {
+	VID_NORMAL_FRAME_MODE                            = 0x0,
+	VID_ENHANCED_MODE                                = 0x1,
+} DP_VID_ENHANCED_FRAME_MODE;
+typedef enum DP_VID_MSA_TOP_FIELD_MODE {
+	DP_TOP_FIELD_ONLY                                = 0x0,
+	DP_TOP_PLUS_BOTTOM_FIELD                         = 0x1,
+} DP_VID_MSA_TOP_FIELD_MODE;
+typedef enum DP_VID_VBID_FIELD_POL {
+	DP_VID_VBID_FIELD_POL_NORMAL                     = 0x0,
+	DP_VID_VBID_FIELD_POL_INV                        = 0x1,
+} DP_VID_VBID_FIELD_POL;
+typedef enum DP_VID_STREAM_DISABLE_ACK {
+	ID_STREAM_DISABLE_NO_ACK                         = 0x0,
+	ID_STREAM_DISABLE_ACKED                          = 0x1,
+} DP_VID_STREAM_DISABLE_ACK;
+typedef enum DP_VID_STREAM_DISABLE_MASK {
+	VID_STREAM_DISABLE_MASKED                        = 0x0,
+	VID_STREAM_DISABLE_UNMASK                        = 0x1,
+} DP_VID_STREAM_DISABLE_MASK;
+typedef enum DPHY_ATEST_SEL_LANE0 {
+	DPHY_ATEST_LANE0_PRBS_PATTERN                    = 0x0,
+	DPHY_ATEST_LANE0_REG_PATTERN                     = 0x1,
+} DPHY_ATEST_SEL_LANE0;
+typedef enum DPHY_ATEST_SEL_LANE1 {
+	DPHY_ATEST_LANE1_PRBS_PATTERN                    = 0x0,
+	DPHY_ATEST_LANE1_REG_PATTERN                     = 0x1,
+} DPHY_ATEST_SEL_LANE1;
+typedef enum DPHY_ATEST_SEL_LANE2 {
+	DPHY_ATEST_LANE2_PRBS_PATTERN                    = 0x0,
+	DPHY_ATEST_LANE2_REG_PATTERN                     = 0x1,
+} DPHY_ATEST_SEL_LANE2;
+typedef enum DPHY_ATEST_SEL_LANE3 {
+	DPHY_ATEST_LANE3_PRBS_PATTERN                    = 0x0,
+	DPHY_ATEST_LANE3_REG_PATTERN                     = 0x1,
+} DPHY_ATEST_SEL_LANE3;
+typedef enum DPHY_BYPASS {
+	DPHY_8B10B_OUTPUT                                = 0x0,
+	DPHY_DBG_OUTPUT                                  = 0x1,
+} DPHY_BYPASS;
+typedef enum DPHY_SKEW_BYPASS {
+	DPHY_WITH_SKEW                                   = 0x0,
+	DPHY_NO_SKEW                                     = 0x1,
+} DPHY_SKEW_BYPASS;
+typedef enum DPHY_TRAINING_PATTERN_SEL {
+	DPHY_TRAINING_PATTERN_1                          = 0x0,
+	DPHY_TRAINING_PATTERN_2                          = 0x1,
+	DPHY_TRAINING_PATTERN_3                          = 0x2,
+	DPHY_TRAINING_PATTERN_4                          = 0x3,
+} DPHY_TRAINING_PATTERN_SEL;
+typedef enum DPHY_8B10B_RESET {
+	DPHY_8B10B_NOT_RESET                             = 0x0,
+	DPHY_8B10B_RESETET                               = 0x1,
+} DPHY_8B10B_RESET;
+typedef enum DP_DPHY_8B10B_EXT_DISP {
+	DP_DPHY_8B10B_EXT_DISP_ZERO                      = 0x0,
+	DP_DPHY_8B10B_EXT_DISP_ONE                       = 0x1,
+} DP_DPHY_8B10B_EXT_DISP;
+typedef enum DPHY_8B10B_CUR_DISP {
+	DPHY_8B10B_CUR_DISP_ZERO                         = 0x0,
+	DPHY_8B10B_CUR_DISP_ONE                          = 0x1,
+} DPHY_8B10B_CUR_DISP;
+typedef enum DPHY_PRBS_EN {
+	DPHY_PRBS_DISABLE                                = 0x0,
+	DPHY_PRBS_ENABLE                                 = 0x1,
+} DPHY_PRBS_EN;
+typedef enum DPHY_PRBS_SEL {
+	DPHY_PRBS7_SELECTED                              = 0x0,
+	DPHY_PRBS23_SELECTED                             = 0x1,
+	DPHY_PRBS11_SELECTED                             = 0x2,
+} DPHY_PRBS_SEL;
+typedef enum DPHY_LOAD_BS_COUNT_START {
+	DPHY_LOAD_BS_COUNT_STARTED                       = 0x0,
+	DPHY_LOAD_BS_COUNT_NOT_STARTED                   = 0x1,
+} DPHY_LOAD_BS_COUNT_START;
+typedef enum DPHY_CRC_EN {
+	DPHY_CRC_DISABLED                                = 0x0,
+	DPHY_CRC_ENABLED                                 = 0x1,
+} DPHY_CRC_EN;
+typedef enum DPHY_CRC_CONT_EN {
+	DPHY_CRC_ONE_SHOT                                = 0x0,
+	DPHY_CRC_CONTINUOUS                              = 0x1,
+} DPHY_CRC_CONT_EN;
+typedef enum DPHY_CRC_FIELD {
+	DPHY_CRC_START_FROM_TOP_FIELD                    = 0x0,
+	DPHY_CRC_START_FROM_BOTTOM_FIELD                 = 0x1,
+} DPHY_CRC_FIELD;
+typedef enum DPHY_CRC_SEL {
+	DPHY_CRC_LANE0_SELECTED                          = 0x0,
+	DPHY_CRC_LANE1_SELECTED                          = 0x1,
+	DPHY_CRC_LANE2_SELECTED                          = 0x2,
+	DPHY_CRC_LANE3_SELECTED                          = 0x3,
+} DPHY_CRC_SEL;
+typedef enum DPHY_RX_FAST_TRAINING_CAPABLE {
+	DPHY_FAST_TRAINING_NOT_CAPABLE_0                 = 0x0,
+	DPHY_FAST_TRAINING_CAPABLE                       = 0x1,
+} DPHY_RX_FAST_TRAINING_CAPABLE;
+typedef enum DP_SEC_COLLISION_ACK {
+	DP_SEC_COLLISION_ACK_NO_EFFECT                   = 0x0,
+	DP_SEC_COLLISION_ACK_CLR_FLAG                    = 0x1,
+} DP_SEC_COLLISION_ACK;
+typedef enum DP_SEC_AUDIO_MUTE {
+	DP_SEC_AUDIO_MUTE_HW_CTRL                        = 0x0,
+	DP_SEC_AUDIO_MUTE_SW_CTRL                        = 0x1,
+} DP_SEC_AUDIO_MUTE;
+typedef enum DP_SEC_TIMESTAMP_MODE {
+	DP_SEC_TIMESTAMP_PROGRAMMABLE_MODE               = 0x0,
+	DP_SEC_TIMESTAMP_AUTO_CALC_MODE                  = 0x1,
+} DP_SEC_TIMESTAMP_MODE;
+typedef enum DP_SEC_ASP_PRIORITY {
+	DP_SEC_ASP_LOW_PRIORITY                          = 0x0,
+	DP_SEC_ASP_HIGH_PRIORITY                         = 0x1,
+} DP_SEC_ASP_PRIORITY;
+typedef enum DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE {
+	DP_SEC_ASP_CHANNEL_COUNT_FROM_AZ                 = 0x0,
+	DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_ENABLED        = 0x1,
+} DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE;
+typedef enum DP_MSE_SAT_UPDATE_ACT {
+	DP_MSE_SAT_UPDATE_NO_ACTION                      = 0x0,
+	DP_MSE_SAT_UPDATE_WITH_TRIGGER                   = 0x1,
+	DP_MSE_SAT_UPDATE_WITHOUT_TRIGGER                = 0x2,
+} DP_MSE_SAT_UPDATE_ACT;
+typedef enum DP_MSE_LINK_LINE {
+	DP_MSE_LINK_LINE_32_MTP_LONG                     = 0x0,
+	DP_MSE_LINK_LINE_64_MTP_LONG                     = 0x1,
+	DP_MSE_LINK_LINE_128_MTP_LONG                    = 0x2,
+	DP_MSE_LINK_LINE_256_MTP_LONG                    = 0x3,
+} DP_MSE_LINK_LINE;
+typedef enum DP_MSE_BLANK_CODE {
+	DP_MSE_BLANK_CODE_SF_FILLED                      = 0x0,
+	DP_MSE_BLANK_CODE_ZERO_FILLED                    = 0x1,
+} DP_MSE_BLANK_CODE;
+typedef enum DP_MSE_TIMESTAMP_MODE {
+	DP_MSE_TIMESTAMP_CALC_BASED_ON_LINK_RATE         = 0x0,
+	DP_MSE_TIMESTAMP_CALC_BASED_ON_VC_RATE           = 0x1,
+} DP_MSE_TIMESTAMP_MODE;
+typedef enum DP_MSE_ZERO_ENCODER {
+	DP_MSE_NOT_ZERO_FE_ENCODER                       = 0x0,
+	DP_MSE_ZERO_FE_ENCODER                           = 0x1,
+} DP_MSE_ZERO_ENCODER;
+typedef enum DP_MSE_OUTPUT_DPDBG_DATA {
+	DP_MSE_OUTPUT_DPDBG_DATA_DIS                     = 0x0,
+	DP_MSE_OUTPUT_DPDBG_DATA_EN                      = 0x1,
+} DP_MSE_OUTPUT_DPDBG_DATA;
+typedef enum DP_DPHY_HBR2_PATTERN_CONTROL_MODE {
+	DP_DPHY_HBR2_PASS_THROUGH                        = 0x0,
+	DP_DPHY_HBR2_PATTERN_1                           = 0x1,
+	DP_DPHY_HBR2_PATTERN_2_NEG                       = 0x2,
+	DP_DPHY_HBR2_PATTERN_3                           = 0x3,
+	DP_DPHY_HBR2_PATTERN_2_POS                       = 0x6,
+} DP_DPHY_HBR2_PATTERN_CONTROL_MODE;
+typedef enum DPHY_CRC_MST_PHASE_ERROR_ACK {
+	DPHY_CRC_MST_PHASE_ERROR_NO_ACK                  = 0x0,
+	DPHY_CRC_MST_PHASE_ERROR_ACKED                   = 0x1,
+} DPHY_CRC_MST_PHASE_ERROR_ACK;
+typedef enum DPHY_SW_FAST_TRAINING_START {
+	DPHY_SW_FAST_TRAINING_NOT_STARTED                = 0x0,
+	DPHY_SW_FAST_TRAINING_STARTED                    = 0x1,
+} DPHY_SW_FAST_TRAINING_START;
+typedef enum DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN {
+	DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_DISABLED= 0x0,
+	DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_ENABLED = 0x1,
+} DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN;
+typedef enum DP_DPHY_FAST_TRAINING_COMPLETE_MASK {
+	DP_DPHY_FAST_TRAINING_COMPLETE_MASKED            = 0x0,
+	DP_DPHY_FAST_TRAINING_COMPLETE_NOT_MASKED        = 0x1,
+} DP_DPHY_FAST_TRAINING_COMPLETE_MASK;
+typedef enum DP_DPHY_FAST_TRAINING_COMPLETE_ACK {
+	DP_DPHY_FAST_TRAINING_COMPLETE_NOT_ACKED         = 0x0,
+	DP_DPHY_FAST_TRAINING_COMPLETE_ACKED             = 0x1,
+} DP_DPHY_FAST_TRAINING_COMPLETE_ACK;
+typedef enum DP_MSA_V_TIMING_OVERRIDE_EN {
+	MSA_V_TIMING_OVERRIDE_DISABLED                   = 0x0,
+	MSA_V_TIMING_OVERRIDE_ENABLED                    = 0x1,
+} DP_MSA_V_TIMING_OVERRIDE_EN;
+typedef enum DP_SEC_GSP0_PRIORITY {
+	SEC_GSP0_PRIORITY_LOW                            = 0x0,
+	SEC_GSP0_PRIORITY_HIGH                           = 0x1,
+} DP_SEC_GSP0_PRIORITY;
+typedef enum DP_SEC_GSP0_SEND {
+	NOT_SENT                                         = 0x0,
+	FORCE_SENT                                       = 0x1,
+} DP_SEC_GSP0_SEND;
+typedef enum DP_AUX_CONTROL_HPD_SEL {
+	DP_AUX_CONTROL_HPD1_SELECTED                     = 0x0,
+	DP_AUX_CONTROL_HPD2_SELECTED                     = 0x1,
+	DP_AUX_CONTROL_HPD3_SELECTED                     = 0x2,
+	DP_AUX_CONTROL_HPD4_SELECTED                     = 0x3,
+	DP_AUX_CONTROL_HPD5_SELECTED                     = 0x4,
+	DP_AUX_CONTROL_HPD6_SELECTED                     = 0x5,
+} DP_AUX_CONTROL_HPD_SEL;
+typedef enum DP_AUX_CONTROL_TEST_MODE {
+	DP_AUX_CONTROL_TEST_MODE_DISABLE                 = 0x0,
+	DP_AUX_CONTROL_TEST_MODE_ENABLE                  = 0x1,
+} DP_AUX_CONTROL_TEST_MODE;
+typedef enum DP_AUX_SW_CONTROL_SW_GO {
+	DP_AUX_SW_CONTROL_SW__NOT_GO                     = 0x0,
+	DP_AUX_SW_CONTROL_SW__GO                         = 0x1,
+} DP_AUX_SW_CONTROL_SW_GO;
+typedef enum DP_AUX_SW_CONTROL_LS_READ_TRIG {
+	DP_AUX_SW_CONTROL_LS_READ__NOT_TRIG              = 0x0,
+	DP_AUX_SW_CONTROL_LS_READ__TRIG                  = 0x1,
+} DP_AUX_SW_CONTROL_LS_READ_TRIG;
+typedef enum DP_AUX_ARB_CONTROL_ARB_PRIORITY {
+	DP_AUX_ARB_CONTROL_ARB_PRIORITY__GTC_LS_SW       = 0x0,
+	DP_AUX_ARB_CONTROL_ARB_PRIORITY__LS_GTC_SW       = 0x1,
+	DP_AUX_ARB_CONTROL_ARB_PRIORITY__SW_LS_GTC       = 0x2,
+	DP_AUX_ARB_CONTROL_ARB_PRIORITY__SW_GTC_LS       = 0x3,
+} DP_AUX_ARB_CONTROL_ARB_PRIORITY;
+typedef enum DP_AUX_ARB_CONTROL_USE_AUX_REG_REQ {
+	DP_AUX_ARB_CONTROL__NOT_USE_AUX_REG_REQ          = 0x0,
+	DP_AUX_ARB_CONTROL__USE_AUX_REG_REQ              = 0x1,
+} DP_AUX_ARB_CONTROL_USE_AUX_REG_REQ;
+typedef enum DP_AUX_ARB_CONTROL_DONE_USING_AUX_REG {
+	DP_AUX_ARB_CONTROL__DONE_NOT_USING_AUX_REG       = 0x0,
+	DP_AUX_ARB_CONTROL__DONE_USING_AUX_REG           = 0x1,
+} DP_AUX_ARB_CONTROL_DONE_USING_AUX_REG;
+typedef enum DP_AUX_INT_ACK {
+	DP_AUX_INT__NOT_ACK                              = 0x0,
+	DP_AUX_INT__ACK                                  = 0x1,
+} DP_AUX_INT_ACK;
+typedef enum DP_AUX_LS_UPDATE_ACK {
+	DP_AUX_INT_LS_UPDATE_NOT_ACK                     = 0x0,
+	DP_AUX_INT_LS_UPDATE_ACK                         = 0x1,
+} DP_AUX_LS_UPDATE_ACK;
+typedef enum DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL {
+	DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL__DIVIDED_SYM_CLK= 0x0,
+	DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL__FROM_DCCG_MICROSECOND_REF= 0x1,
+} DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL;
+typedef enum DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE {
+	DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__1MHZ         = 0x0,
+	DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__2MHZ         = 0x1,
+	DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__4MHZ         = 0x2,
+	DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__8MHZ         = 0x3,
+} DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE;
+typedef enum DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN {
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__0US        = 0x0,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__8US        = 0x1,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__16US       = 0x2,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__24US       = 0x3,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__32US       = 0x4,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__40US       = 0x5,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__48US       = 0x6,
+	DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__56US       = 0x7,
+} DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN;
+typedef enum DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY {
+	DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__0   = 0x0,
+	DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__16US= 0x1,
+	DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__32US= 0x2,
+	DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__64US= 0x3,
+	DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__128US= 0x4,
+	DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__256US= 0x5,
+} DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY;
+typedef enum DP_AUX_DPHY_RX_CONTROL_START_WINDOW {
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO2_PERIOD = 0x0,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO4_PERIOD = 0x1,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO8_PERIOD = 0x2,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO16_PERIOD= 0x3,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO32_PERIOD= 0x4,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO64_PERIOD= 0x5,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO128_PERIOD= 0x6,
+	DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO256_PERIOD= 0x7,
+} DP_AUX_DPHY_RX_CONTROL_START_WINDOW;
+typedef enum DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW {
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO2_PERIOD= 0x0,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO4_PERIOD= 0x1,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO8_PERIOD= 0x2,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO16_PERIOD= 0x3,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO32_PERIOD= 0x4,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO64_PERIOD= 0x5,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO128_PERIOD= 0x6,
+	DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO256_PERIOD= 0x7,
+} DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW;
+typedef enum DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN {
+	DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__6_EDGES= 0x0,
+	DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__10_EDGES= 0x1,
+	DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__18_EDGES= 0x2,
+	DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__RESERVED= 0x3,
+} DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN;
+typedef enum DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_PHASE_DETECT {
+	DP_AUX_DPHY_RX_CONTROL__NOT_ALLOW_BELOW_THRESHOLD_PHASE_DETECT= 0x0,
+	DP_AUX_DPHY_RX_CONTROL__ALLOW_BELOW_THRESHOLD_PHASE_DETECT= 0x1,
+} DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_PHASE_DETECT;
+typedef enum DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_START {
+	DP_AUX_DPHY_RX_CONTROL__NOT_ALLOW_BELOW_THRESHOLD_START= 0x0,
+	DP_AUX_DPHY_RX_CONTROL__ALLOW_BELOW_THRESHOLD_START= 0x1,
+} DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_START;
+typedef enum DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_STOP {
+	DP_AUX_DPHY_RX_CONTROL__NOT_ALLOW_BELOW_THRESHOLD_STOP= 0x0,
+	DP_AUX_DPHY_RX_CONTROL__ALLOW_BELOW_THRESHOLD_STOP= 0x1,
+} DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_STOP;
+typedef enum DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN {
+	DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__2_HALF_SYMBOLS= 0x0,
+	DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__4_HALF_SYMBOLS= 0x1,
+	DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__6_HALF_SYMBOLS= 0x2,
+	DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__8_HALF_SYMBOLS= 0x3,
+} DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN;
+typedef enum DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN {
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_450US         = 0x0,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_500US         = 0x1,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_550US         = 0x2,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_600US         = 0x3,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_650US         = 0x4,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_700US         = 0x5,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_750US         = 0x6,
+	DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_800US         = 0x7,
+} DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN;
+typedef enum DP_AUX_DPHY_RX_DETECTION_THRESHOLD {
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2         = 0x0,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4         = 0x1,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8         = 0x2,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16       = 0x3,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32       = 0x4,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64       = 0x5,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128     = 0x6,
+	DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256     = 0x7,
+} DP_AUX_DPHY_RX_DETECTION_THRESHOLD;
+typedef enum DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_BLOCK_REQ {
+	DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_ALLOW_REQ_FROM_OTHER_AUX= 0x0,
+	DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_BLOCK_REQ_FROM_OTHER_AUX= 0x1,
+} DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_BLOCK_REQ;
+typedef enum DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW {
+	DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__300US= 0x0,
+	DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__400US= 0x1,
+	DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__500US= 0x2,
+	DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__600US= 0x3,
+} DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW;
+typedef enum DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT {
+	DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__4_ATTAMPS= 0x0,
+	DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__8_ATTAMPS= 0x1,
+	DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__16_ATTAMPS= 0x2,
+	DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__RESERVED= 0x3,
+} DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT;
+typedef enum DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN {
+	DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__0= 0x0,
+	DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__64= 0x1,
+	DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__128= 0x2,
+	DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__256= 0x3,
+} DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN;
+typedef enum DP_AUX_ERR_OCCURRED_ACK {
+	DP_AUX_ERR_OCCURRED__NOT_ACK                     = 0x0,
+	DP_AUX_ERR_OCCURRED__ACK                         = 0x1,
+} DP_AUX_ERR_OCCURRED_ACK;
+typedef enum DP_AUX_POTENTIAL_ERR_REACHED_ACK {
+	DP_AUX_POTENTIAL_ERR_REACHED__NOT_ACK            = 0x0,
+	DP_AUX_POTENTIAL_ERR_REACHED__ACK                = 0x1,
+} DP_AUX_POTENTIAL_ERR_REACHED_ACK;
+typedef enum DP_AUX_DEFINITE_ERR_REACHED_ACK {
+	ALPHA_DP_AUX_DEFINITE_ERR_REACHED_NOT_ACK        = 0x0,
+	ALPHA_DP_AUX_DEFINITE_ERR_REACHED_ACK            = 0x1,
+} DP_AUX_DEFINITE_ERR_REACHED_ACK;
+typedef enum DP_AUX_RESET {
+	DP_AUX_RESET_DEASSERTED                          = 0x0,
+	DP_AUX_RESET_ASSERTED                            = 0x1,
+} DP_AUX_RESET;
+typedef enum DP_AUX_RESET_DONE {
+	DP_AUX_RESET_SEQUENCE_NOT_DONE                   = 0x0,
+	DP_AUX_RESET_SEQUENCE_DONE                       = 0x1,
+} DP_AUX_RESET_DONE;
+typedef enum FBC_IDLE_MASK_MASK_BITS {
+	FBC_IDLE_MASK_DISP_REG_UPDATE                    = 0x0,
+	FBC_IDLE_MASK_RESERVED1                          = 0x1,
+	FBC_IDLE_MASK_FBC_GRPH_COMP_EN                   = 0x2,
+	FBC_IDLE_MASK_FBC_MIN_COMPRESSION                = 0x3,
+	FBC_IDLE_MASK_FBC_ALPHA_COMP_EN                  = 0x4,
+	FBC_IDLE_MASK_FBC_ZERO_ALPHA_CHUNK_SKIP_EN       = 0x5,
+	FBC_IDLE_MASK_FBC_FORCE_COPY_TO_COMP_BUF         = 0x6,
+	FBC_IDLE_MASK_RESERVED7                          = 0x7,
+	FBC_IDLE_MASK_RESERVED8                          = 0x8,
+	FBC_IDLE_MASK_RESERVED9                          = 0x9,
+	FBC_IDLE_MASK_RESERVED10                         = 0xa,
+	FBC_IDLE_MASK_RESERVED11                         = 0xb,
+	FBC_IDLE_MASK_RESERVED12                         = 0xc,
+	FBC_IDLE_MASK_RESERVED13                         = 0xd,
+	FBC_IDLE_MASK_RESERVED14                         = 0xe,
+	FBC_IDLE_MASK_RESERVED15                         = 0xf,
+	FBC_IDLE_MASK_RESERVED16                         = 0x10,
+	FBC_IDLE_MASK_RESERVED17                         = 0x11,
+	FBC_IDLE_MASK_RESERVED18                         = 0x12,
+	FBC_IDLE_MASK_RESERVED19                         = 0x13,
+	FBC_IDLE_MASK_RESERVED20                         = 0x14,
+	FBC_IDLE_MASK_RESERVED21                         = 0x15,
+	FBC_IDLE_MASK_RESERVED22                         = 0x16,
+	FBC_IDLE_MASK_RESERVED23                         = 0x17,
+	FBC_IDLE_MASK_MC_HIT_REGION_0                    = 0x18,
+	FBC_IDLE_MASK_MC_HIT_REGION_1                    = 0x19,
+	FBC_IDLE_MASK_MC_HIT_REGION_2                    = 0x1a,
+	FBC_IDLE_MASK_MC_HIT_REGION_3                    = 0x1b,
+	FBC_IDLE_MASK_MC_WRITE                           = 0x1c,
+	FBC_IDLE_MASK_CG_STATIC_SCREEN                   = 0x1d,
+	FBC_IDLE_MASK_RESERVED30                         = 0x1e,
+	FBC_IDLE_MASK_RESERVED31                         = 0x1f,
+} FBC_IDLE_MASK_MASK_BITS;
+typedef enum FMT_CONTROL_PIXEL_ENCODING {
+	FMT_CONTROL_PIXEL_ENCODING_RGB444_OR_YCBCR444    = 0x0,
+	FMT_CONTROL_PIXEL_ENCODING_YCBCR422              = 0x1,
+	FMT_CONTROL_PIXEL_ENCODING_YCBCR420              = 0x2,
+	FMT_CONTROL_PIXEL_ENCODING_RESERVED              = 0x3,
+} FMT_CONTROL_PIXEL_ENCODING;
+typedef enum FMT_CONTROL_SUBSAMPLING_MODE {
+	FMT_CONTROL_SUBSAMPLING_MODE_DROP                = 0x0,
+	FMT_CONTROL_SUBSAMPLING_MODE_AVERAGE             = 0x1,
+	FMT_CONTROL_SUBSAMPLING_MODE_3_TAP               = 0x2,
+	FMT_CONTROL_SUBSAMPLING_MODE_RESERVED            = 0x3,
+} FMT_CONTROL_SUBSAMPLING_MODE;
+typedef enum FMT_CONTROL_SUBSAMPLING_ORDER {
+	FMT_CONTROL_SUBSAMPLING_ORDER_CB_BEFORE_CR       = 0x0,
+	FMT_CONTROL_SUBSAMPLING_ORDER_CR_BEFORE_CB       = 0x1,
+} FMT_CONTROL_SUBSAMPLING_ORDER;
+typedef enum FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS {
+	FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS_DISABLE    = 0x0,
+	FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS_ENABLE     = 0x1,
+} FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS;
+typedef enum FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE {
+	FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE_TRUNCATION   = 0x0,
+	FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE_ROUNDING     = 0x1,
+} FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE;
+typedef enum FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH {
+	FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH_18BPP       = 0x0,
+	FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH_24BPP       = 0x1,
+	FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH_30BPP       = 0x2,
+} FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH;
+typedef enum FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH {
+	FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH_18BPP = 0x0,
+	FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH_24BPP = 0x1,
+	FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH_30BPP = 0x2,
+} FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH;
+typedef enum FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH {
+	FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH_18BPP= 0x0,
+	FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH_24BPP= 0x1,
+	FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH_30BPP= 0x2,
+} FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH;
+typedef enum FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL {
+	FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL_GREY_LEVEL2 = 0x0,
+	FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL_GREY_LEVEL4 = 0x1,
+} FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL;
+typedef enum FMT_BIT_DEPTH_CONTROL_25FRC_SEL {
+	FMT_BIT_DEPTH_CONTROL_25FRC_SEL_Ei               = 0x0,
+	FMT_BIT_DEPTH_CONTROL_25FRC_SEL_Fi               = 0x1,
+	FMT_BIT_DEPTH_CONTROL_25FRC_SEL_Gi               = 0x2,
+	FMT_BIT_DEPTH_CONTROL_25FRC_SEL_RESERVED         = 0x3,
+} FMT_BIT_DEPTH_CONTROL_25FRC_SEL;
+typedef enum FMT_BIT_DEPTH_CONTROL_50FRC_SEL {
+	FMT_BIT_DEPTH_CONTROL_50FRC_SEL_A                = 0x0,
+	FMT_BIT_DEPTH_CONTROL_50FRC_SEL_B                = 0x1,
+	FMT_BIT_DEPTH_CONTROL_50FRC_SEL_C                = 0x2,
+	FMT_BIT_DEPTH_CONTROL_50FRC_SEL_D                = 0x3,
+} FMT_BIT_DEPTH_CONTROL_50FRC_SEL;
+typedef enum FMT_BIT_DEPTH_CONTROL_75FRC_SEL {
+	FMT_BIT_DEPTH_CONTROL_75FRC_SEL_E                = 0x0,
+	FMT_BIT_DEPTH_CONTROL_75FRC_SEL_F                = 0x1,
+	FMT_BIT_DEPTH_CONTROL_75FRC_SEL_G                = 0x2,
+	FMT_BIT_DEPTH_CONTROL_75FRC_SEL_RESERVED         = 0x3,
+} FMT_BIT_DEPTH_CONTROL_75FRC_SEL;
+typedef enum FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT {
+	FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT_LEGACY_HARDCODED_PATTERN= 0x0,
+	FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT_PROGRAMMABLE_PATTERN= 0x1,
+} FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT;
+typedef enum FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0 {
+	FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0_BGR= 0x0,
+	FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0_RGB= 0x1,
+} FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0;
+typedef enum FMT_CLAMP_CNTL_COLOR_FORMAT {
+	FMT_CLAMP_CNTL_COLOR_FORMAT_6BPC                 = 0x0,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_8BPC                 = 0x1,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_10BPC                = 0x2,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_12BPC                = 0x3,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_RESERVED1            = 0x4,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_RESERVED2            = 0x5,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_RESERVED3            = 0x6,
+	FMT_CLAMP_CNTL_COLOR_FORMAT_PROGRAMMABLE         = 0x7,
+} FMT_CLAMP_CNTL_COLOR_FORMAT;
+typedef enum FMT_CRC_CNTL_CONT_EN {
+	FMT_CRC_CNTL_CONT_EN_ONE_SHOT                    = 0x0,
+	FMT_CRC_CNTL_CONT_EN_CONT                        = 0x1,
+} FMT_CRC_CNTL_CONT_EN;
+typedef enum FMT_CRC_CNTL_INCLUDE_OVERSCAN {
+	FMT_CRC_CNTL_INCLUDE_OVERSCAN_NOT_INCLUDE        = 0x0,
+	FMT_CRC_CNTL_INCLUDE_OVERSCAN_INCLUDE            = 0x1,
+} FMT_CRC_CNTL_INCLUDE_OVERSCAN;
+typedef enum FMT_CRC_CNTL_ONLY_BLANKB {
+	FMT_CRC_CNTL_ONLY_BLANKB_ENTIRE_FIELD            = 0x0,
+	FMT_CRC_CNTL_ONLY_BLANKB_NON_BLANK               = 0x1,
+} FMT_CRC_CNTL_ONLY_BLANKB;
+typedef enum FMT_CRC_CNTL_PSR_MODE_ENABLE {
+	FMT_CRC_CNTL_PSR_MODE_ENABLE_NORMAL              = 0x0,
+	FMT_CRC_CNTL_PSR_MODE_ENABLE_EDP_PSR_CRC         = 0x1,
+} FMT_CRC_CNTL_PSR_MODE_ENABLE;
+typedef enum FMT_CRC_CNTL_INTERLACE_MODE {
+	FMT_CRC_CNTL_INTERLACE_MODE_TOP                  = 0x0,
+	FMT_CRC_CNTL_INTERLACE_MODE_BOTTOM               = 0x1,
+	FMT_CRC_CNTL_INTERLACE_MODE_BOTH_BOTTOM          = 0x2,
+	FMT_CRC_CNTL_INTERLACE_MODE_BOTH_EACH            = 0x3,
+} FMT_CRC_CNTL_INTERLACE_MODE;
+typedef enum FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE {
+	FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE_ALL             = 0x0,
+	FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE_ODD_EVEN        = 0x1,
+} FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE;
+typedef enum FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT {
+	FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT_EVEN            = 0x0,
+	FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT_ODD             = 0x1,
+} FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT;
+typedef enum FMT_DEBUG_CNTL_COLOR_SELECT {
+	FMT_DEBUG_CNTL_COLOR_SELECT_BLUE                 = 0x0,
+	FMT_DEBUG_CNTL_COLOR_SELECT_GREEN                = 0x1,
+	FMT_DEBUG_CNTL_COLOR_SELECT_RED1                 = 0x2,
+	FMT_DEBUG_CNTL_COLOR_SELECT_RED2                 = 0x3,
+} FMT_DEBUG_CNTL_COLOR_SELECT;
+typedef enum FMT_SPATIAL_DITHER_MODE {
+	FMT_SPATIAL_DITHER_MODE_0                        = 0x0,
+	FMT_SPATIAL_DITHER_MODE_1                        = 0x1,
+	FMT_SPATIAL_DITHER_MODE_2                        = 0x2,
+	FMT_SPATIAL_DITHER_MODE_3                        = 0x3,
+} FMT_SPATIAL_DITHER_MODE;
+typedef enum FMT_STEREOSYNC_OVR_POL {
+	FMT_STEREOSYNC_OVR_POL_INVERTED                  = 0x0,
+	FMT_STEREOSYNC_OVR_POL_NOT_INVERTED              = 0x1,
+} FMT_STEREOSYNC_OVR_POL;
+typedef enum FMT_DYNAMIC_EXP_MODE {
+	FMT_DYNAMIC_EXP_MODE_10to12                      = 0x0,
+	FMT_DYNAMIC_EXP_MODE_8to12                       = 0x1,
+} FMT_DYNAMIC_EXP_MODE;
+typedef enum LB_DATA_FORMAT_PIXEL_DEPTH {
+	LB_DATA_FORMAT_PIXEL_DEPTH_30BPP                 = 0x0,
+	LB_DATA_FORMAT_PIXEL_DEPTH_24BPP                 = 0x1,
+	LB_DATA_FORMAT_PIXEL_DEPTH_18BPP                 = 0x2,
+	LB_DATA_FORMAT_PIXEL_DEPTH_36BPP                 = 0x3,
+} LB_DATA_FORMAT_PIXEL_DEPTH;
+typedef enum LB_DATA_FORMAT_PIXEL_EXPAN_MODE {
+	LB_DATA_FORMAT_PIXEL_EXPAN_MODE_ZERO_PIXEL_EXPANSION= 0x0,
+	LB_DATA_FORMAT_PIXEL_EXPAN_MODE_DYNAMIC_PIXEL_EXPANSION= 0x1,
+} LB_DATA_FORMAT_PIXEL_EXPAN_MODE;
+typedef enum LB_DATA_FORMAT_PIXEL_REDUCE_MODE {
+	LB_DATA_FORMAT_PIXEL_REDUCE_MODE_TRUNCATION      = 0x0,
+	LB_DATA_FORMAT_PIXEL_REDUCE_MODE_ROUNDING        = 0x1,
+} LB_DATA_FORMAT_PIXEL_REDUCE_MODE;
+typedef enum LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH {
+	LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH_36BPP         = 0x0,
+	LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH_30BPP         = 0x1,
+} LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH;
+typedef enum LB_DATA_FORMAT_INTERLEAVE_EN {
+	LB_DATA_FORMAT_INTERLEAVE_DISABLE                = 0x0,
+	LB_DATA_FORMAT_INTERLEAVE_ENABLE                 = 0x1,
+} LB_DATA_FORMAT_INTERLEAVE_EN;
+typedef enum LB_DATA_FORMAT_PREFILL_EN {
+	LB_DATA_FORMAT_PREFILL_DISABLE                   = 0x0,
+	LB_DATA_FORMAT_PREFILL_ENABLE                    = 0x1,
+} LB_DATA_FORMAT_PREFILL_EN;
+typedef enum LB_DATA_FORMAT_REQUEST_MODE {
+	LB_DATA_FORMAT_REQUEST_MODE_NORMAL               = 0x0,
+	LB_DATA_FORMAT_REQUEST_MODE_START_OF_LINE        = 0x1,
+} LB_DATA_FORMAT_REQUEST_MODE;
+typedef enum LB_DATA_FORMAT_ALPHA_EN {
+	LB_DATA_FORMAT_ALPHA_DISABLE                     = 0x0,
+	LB_DATA_FORMAT_ALPHA_ENABLE                      = 0x1,
+} LB_DATA_FORMAT_ALPHA_EN;
+typedef enum LB_VLINE_START_END_VLINE_INV {
+	LB_VLINE_START_END_VLINE_NORMAL                  = 0x0,
+	LB_VLINE_START_END_VLINE_INVERSE                 = 0x1,
+} LB_VLINE_START_END_VLINE_INV;
+typedef enum LB_VLINE2_START_END_VLINE2_INV {
+	LB_VLINE2_START_END_VLINE2_NORMAL                = 0x0,
+	LB_VLINE2_START_END_VLINE2_INVERSE               = 0x1,
+} LB_VLINE2_START_END_VLINE2_INV;
+typedef enum LB_INTERRUPT_MASK_VBLANK_INTERRUPT_MASK {
+	LB_INTERRUPT_MASK_VBLANK_INTERRUPT_DISABLE       = 0x0,
+	LB_INTERRUPT_MASK_VBLANK_INTERRUPT_ENABLE        = 0x1,
+} LB_INTERRUPT_MASK_VBLANK_INTERRUPT_MASK;
+typedef enum LB_INTERRUPT_MASK_VLINE_INTERRUPT_MASK {
+	LB_INTERRUPT_MASK_VLINE_INTERRUPT_DISABLE        = 0x0,
+	LB_INTERRUPT_MASK_VLINE_INTERRUPT_ENABLE         = 0x1,
+} LB_INTERRUPT_MASK_VLINE_INTERRUPT_MASK;
+typedef enum LB_INTERRUPT_MASK_VLINE2_INTERRUPT_MASK {
+	LB_INTERRUPT_MASK_VLINE2_INTERRUPT_DISABLE       = 0x0,
+	LB_INTERRUPT_MASK_VLINE2_INTERRUPT_ENABLE        = 0x1,
+} LB_INTERRUPT_MASK_VLINE2_INTERRUPT_MASK;
+typedef enum LB_VLINE_STATUS_VLINE_ACK {
+	LB_VLINE_STATUS_VLINE_NORMAL                     = 0x0,
+	LB_VLINE_STATUS_VLINE_CLEAR                      = 0x1,
+} LB_VLINE_STATUS_VLINE_ACK;
+typedef enum LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE {
+	LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE_LEVEL_BASED = 0x0,
+	LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE_PULSE_BASED = 0x1,
+} LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE;
+typedef enum LB_VLINE2_STATUS_VLINE2_ACK {
+	LB_VLINE2_STATUS_VLINE2_NORMAL                   = 0x0,
+	LB_VLINE2_STATUS_VLINE2_CLEAR                    = 0x1,
+} LB_VLINE2_STATUS_VLINE2_ACK;
+typedef enum LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE {
+	LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE_LEVEL_BASED= 0x0,
+	LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE_PULSE_BASED= 0x1,
+} LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE;
+typedef enum LB_VBLANK_STATUS_VBLANK_ACK {
+	LB_VBLANK_STATUS_VBLANK_NORMAL                   = 0x0,
+	LB_VBLANK_STATUS_VBLANK_CLEAR                    = 0x1,
+} LB_VBLANK_STATUS_VBLANK_ACK;
+typedef enum LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE {
+	LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE_LEVEL_BASED= 0x0,
+	LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE_PULSE_BASED= 0x1,
+} LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE;
+typedef enum LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL {
+	LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_DISABLE      = 0x0,
+	LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_FROM_VSYNC_VBLANK= 0x1,
+	LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_FROM_POWERDOWN_RESET= 0x2,
+	LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_FROM_VSYNC_VBLANK_POWERDOWN_RESET= 0x3,
+} LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL;
+typedef enum LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2 {
+	LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2_USE_VBLANK  = 0x0,
+	LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2_USE_VSYNC   = 0x1,
+} LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2;
+typedef enum LB_SYNC_RESET_SEL_LB_SYNC_DURATION {
+	LB_SYNC_RESET_SEL_LB_SYNC_DURATION_16_CLOCKS     = 0x0,
+	LB_SYNC_RESET_SEL_LB_SYNC_DURATION_32_CLOCKS     = 0x1,
+	LB_SYNC_RESET_SEL_LB_SYNC_DURATION_64_CLOCKS     = 0x2,
+	LB_SYNC_RESET_SEL_LB_SYNC_DURATION_128_CLOCKS    = 0x3,
+} LB_SYNC_RESET_SEL_LB_SYNC_DURATION;
+typedef enum LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_EN {
+	LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_DISABLE       = 0x0,
+	LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_ENABLE        = 0x1,
+} LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_EN;
+typedef enum LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REP_EN {
+	LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REPLACEMENT_DISABLE= 0x0,
+	LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REPLACEMENT_ENABLE= 0x1,
+} LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REP_EN;
+typedef enum LB_BUFFER_STATUS_LB_BUFFER_EMPTY_ACK {
+	LB_BUFFER_STATUS_LB_BUFFER_EMPTY_NORMAL          = 0x0,
+	LB_BUFFER_STATUS_LB_BUFFER_EMPTY_RESET           = 0x1,
+} LB_BUFFER_STATUS_LB_BUFFER_EMPTY_ACK;
+typedef enum LB_BUFFER_STATUS_LB_BUFFER_FULL_ACK {
+	LB_BUFFER_STATUS_LB_BUFFER_FULL_NORMAL           = 0x0,
+	LB_BUFFER_STATUS_LB_BUFFER_FULL_RESET            = 0x1,
+} LB_BUFFER_STATUS_LB_BUFFER_FULL_ACK;
+typedef enum LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE {
+	LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE_REAL_FLIP = 0x2,
+	LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE_DUMMY_FLIP= 0x3,
+} LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE;
+typedef enum LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET {
+	LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_NORMAL= 0x0,
+	LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACTIVE= 0x1,
+} LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET;
+typedef enum LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK {
+	LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK_NOT_USED0= 0x0,
+	LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK_NOT_USED1= 0x1,
+} LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK;
+typedef enum LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE {
+	LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE_NO_INSERT= 0x0,
+	LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE_DEBUG= 0x1,
+	LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE_HSYNC_MODE= 0x2,
+} LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE;
+typedef enum LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_ENABLE {
+	LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_DISABLE= 0x0,
+	LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_EN     = 0x1,
+} LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_ENABLE;
+typedef enum LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE {
+	ALPHA_LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE_MASTER= 0x1,
+	ALPHA_LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE_SLAVE= 0x2,
+} LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE;
+typedef enum LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL {
+	LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL_NOT_USED0= 0x0,
+	LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL_NOT_USED1= 0x1,
+} LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL;
+typedef enum LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ONE {
+	LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_NO_FORCE_ONE= 0x0,
+	LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_TO_ONE= 0x1,
+} LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ONE;
+typedef enum LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO {
+	LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_NO_FORCE_ZERO= 0x0,
+	LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_TO_ZERO= 0x1,
+} LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO;
+typedef enum LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN {
+	LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN_NOT_USED0= 0x0,
+	LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN_NOT_USED1= 0x1,
+} LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN;
+typedef enum LBV_PIXEL_DEPTH {
+	PIXEL_DEPTH_30BPP                                = 0x0,
+	PIXEL_DEPTH_24BPP                                = 0x1,
+	PIXEL_DEPTH_18BPP                                = 0x2,
+	PIXEL_DEPTH_38BPP                                = 0x3,
+} LBV_PIXEL_DEPTH;
+typedef enum LBV_PIXEL_EXPAN_MODE {
+	PIXEL_EXPAN_MODE_ZERO_EXP                        = 0x0,
+	PIXEL_EXPAN_MODE_DYN_EXP                         = 0x1,
+} LBV_PIXEL_EXPAN_MODE;
+typedef enum LBV_INTERLEAVE_EN {
+	INTERLEAVE_DIS                                   = 0x0,
+	INTERLEAVE_EN                                    = 0x1,
+} LBV_INTERLEAVE_EN;
+typedef enum LBV_PIXEL_REDUCE_MODE {
+	PIXEL_REDUCE_MODE_TRUNCATION                     = 0x0,
+	PIXEL_REDUCE_MODE_ROUNDING                       = 0x1,
+} LBV_PIXEL_REDUCE_MODE;
+typedef enum LBV_DYNAMIC_PIXEL_DEPTH {
+	DYNAMIC_PIXEL_DEPTH_36BPP                        = 0x0,
+	DYNAMIC_PIXEL_DEPTH_30BPP                        = 0x1,
+} LBV_DYNAMIC_PIXEL_DEPTH;
+typedef enum LBV_DITHER_EN {
+	DITHER_DIS                                       = 0x0,
+	DITHER_EN                                        = 0x1,
+} LBV_DITHER_EN;
+typedef enum LBV_DOWNSCALE_PREFETCH_EN {
+	DOWNSCALE_PREFETCH_DIS                           = 0x0,
+	DOWNSCALE_PREFETCH_EN                            = 0x1,
+} LBV_DOWNSCALE_PREFETCH_EN;
+typedef enum LBV_MEMORY_CONFIG {
+	MEMORY_CONFIG_0                                  = 0x0,
+	MEMORY_CONFIG_1                                  = 0x1,
+	MEMORY_CONFIG_2                                  = 0x2,
+	MEMORY_CONFIG_3                                  = 0x3,
+} LBV_MEMORY_CONFIG;
+typedef enum LBV_SYNC_RESET_SEL2 {
+	SYNC_RESET_SEL2_VBLANK                           = 0x0,
+	SYNC_RESET_SEL2_VSYNC                            = 0x1,
+} LBV_SYNC_RESET_SEL2;
+typedef enum LBV_SYNC_DURATION {
+	SYNC_DURATION_16                                 = 0x0,
+	SYNC_DURATION_32                                 = 0x1,
+	SYNC_DURATION_64                                 = 0x2,
+	SYNC_DURATION_128                                = 0x3,
+} LBV_SYNC_DURATION;
+typedef enum SCL_C_RAM_TAP_PAIR_IDX {
+	SCL_C_RAM_TAP_PAIR_ID0                           = 0x0,
+	SCL_C_RAM_TAP_PAIR_ID1                           = 0x1,
+	SCL_C_RAM_TAP_PAIR_ID2                           = 0x2,
+	SCL_C_RAM_TAP_PAIR_ID3                           = 0x3,
+	SCL_C_RAM_TAP_PAIR_ID4                           = 0x4,
+} SCL_C_RAM_TAP_PAIR_IDX;
+typedef enum SCL_C_RAM_PHASE {
+	SCL_C_RAM_PHASE_0                                = 0x0,
+	SCL_C_RAM_PHASE_1                                = 0x1,
+	SCL_C_RAM_PHASE_2                                = 0x2,
+	SCL_C_RAM_PHASE_3                                = 0x3,
+	SCL_C_RAM_PHASE_4                                = 0x4,
+	SCL_C_RAM_PHASE_5                                = 0x5,
+	SCL_C_RAM_PHASE_6                                = 0x6,
+	SCL_C_RAM_PHASE_7                                = 0x7,
+	SCL_C_RAM_PHASE_8                                = 0x8,
+} SCL_C_RAM_PHASE;
+typedef enum SCL_C_RAM_FILTER_TYPE {
+	SCL_C_RAM_FILTER_TYPE_VERT_LUMA_RGB_LUT          = 0x0,
+	SCL_C_RAM_FILTER_TYPE_VERT_CHROMA_LUT            = 0x1,
+	SCL_C_RAM_FILTER_TYPE_HORI_LUMA_RGB_LUT          = 0x2,
+	SCL_C_RAM_FILTER_TYPE_HORI_CHROMA_LUT            = 0x3,
+} SCL_C_RAM_FILTER_TYPE;
+typedef enum SCL_MODE_SEL {
+	SCL_MODE_RGB_BYPASS                              = 0x0,
+	SCL_MODE_RGB_SCALING                             = 0x1,
+	SCL_MODE_YCBCR_SCALING                           = 0x2,
+	SCL_MODE_YCBCR_BYPASS                            = 0x3,
+} SCL_MODE_SEL;
+typedef enum SCL_PSCL_EN {
+	SCL_PSCL_DISABLE                                 = 0x0,
+	SCL_PSCL_ENANBLE                                 = 0x1,
+} SCL_PSCL_EN;
+typedef enum SCL_V_NUM_OF_TAPS {
+	SCL_V_NUM_OF_TAPS_1                              = 0x0,
+	SCL_V_NUM_OF_TAPS_2                              = 0x1,
+	SCL_V_NUM_OF_TAPS_3                              = 0x2,
+	SCL_V_NUM_OF_TAPS_4                              = 0x3,
+	SCL_V_NUM_OF_TAPS_5                              = 0x4,
+	SCL_V_NUM_OF_TAPS_6                              = 0x5,
+} SCL_V_NUM_OF_TAPS;
+typedef enum SCL_H_NUM_OF_TAPS {
+	SCL_H_NUM_OF_TAPS_1                              = 0x0,
+	SCL_H_NUM_OF_TAPS_2                              = 0x1,
+	SCL_H_NUM_OF_TAPS_4                              = 0x3,
+	SCL_H_NUM_OF_TAPS_6                              = 0x5,
+	SCL_H_NUM_OF_TAPS_8                              = 0x7,
+	SCL_H_NUM_OF_TAPS_10                             = 0x9,
+} SCL_H_NUM_OF_TAPS;
+typedef enum SCL_BOUNDARY_MODE {
+	SCL_BOUNDARY_MODE_BLACK                          = 0x0,
+	SCL_BOUNDARY_MODE_EDGE                           = 0x1,
+} SCL_BOUNDARY_MODE;
+typedef enum SCL_EARLY_EOL_MOD {
+	SCL_EARLY_EOL_MODE_CRTC                          = 0x0,
+	SCL_EARLY_EOL_MODE_INTERNAL                      = 0x1,
+} SCL_EARLY_EOL_MOD;
+typedef enum SCL_BYPASS_MODE {
+	SCL_BYPASS_MODE_MC_MR                            = 0x0,
+	SCL_BYPASS_MODE_AC_NR                            = 0x1,
+	SCL_BYPASS_MODE_AC_AR                            = 0x2,
+	SCL_BYPASS_MODE_RESERVED                         = 0x3,
+} SCL_BYPASS_MODE;
+typedef enum SCL_V_MANUAL_REPLICATE_FACTOR {
+	SCL_V_MANUAL_REPLICATE_FACTOR_1                  = 0x0,
+	SCL_V_MANUAL_REPLICATE_FACTOR_2                  = 0x1,
+	SCL_V_MANUAL_REPLICATE_FACTOR_3                  = 0x2,
+	SCL_V_MANUAL_REPLICATE_FACTOR_4                  = 0x3,
+	SCL_V_MANUAL_REPLICATE_FACTOR_5                  = 0x4,
+	SCL_V_MANUAL_REPLICATE_FACTOR_6                  = 0x5,
+	SCL_V_MANUAL_REPLICATE_FACTOR_7                  = 0x6,
+	SCL_V_MANUAL_REPLICATE_FACTOR_8                  = 0x7,
+	SCL_V_MANUAL_REPLICATE_FACTOR_9                  = 0x8,
+	SCL_V_MANUAL_REPLICATE_FACTOR_10                 = 0x9,
+	SCL_V_MANUAL_REPLICATE_FACTOR_11                 = 0xa,
+	SCL_V_MANUAL_REPLICATE_FACTOR_12                 = 0xb,
+	SCL_V_MANUAL_REPLICATE_FACTOR_13                 = 0xc,
+	SCL_V_MANUAL_REPLICATE_FACTOR_14                 = 0xd,
+	SCL_V_MANUAL_REPLICATE_FACTOR_15                 = 0xe,
+	SCL_V_MANUAL_REPLICATE_FACTOR_16                 = 0xf,
+} SCL_V_MANUAL_REPLICATE_FACTOR;
+typedef enum SCL_H_MANUAL_REPLICATE_FACTOR {
+	SCL_H_MANUAL_REPLICATE_FACTOR_1                  = 0x0,
+	SCL_H_MANUAL_REPLICATE_FACTOR_2                  = 0x1,
+	SCL_H_MANUAL_REPLICATE_FACTOR_3                  = 0x2,
+	SCL_H_MANUAL_REPLICATE_FACTOR_4                  = 0x3,
+	SCL_H_MANUAL_REPLICATE_FACTOR_5                  = 0x4,
+	SCL_H_MANUAL_REPLICATE_FACTOR_6                  = 0x5,
+	SCL_H_MANUAL_REPLICATE_FACTOR_7                  = 0x6,
+	SCL_H_MANUAL_REPLICATE_FACTOR_8                  = 0x7,
+	SCL_H_MANUAL_REPLICATE_FACTOR_9                  = 0x8,
+	SCL_H_MANUAL_REPLICATE_FACTOR_10                 = 0x9,
+	SCL_H_MANUAL_REPLICATE_FACTOR_11                 = 0xa,
+	SCL_H_MANUAL_REPLICATE_FACTOR_12                 = 0xb,
+	SCL_H_MANUAL_REPLICATE_FACTOR_13                 = 0xc,
+	SCL_H_MANUAL_REPLICATE_FACTOR_14                 = 0xd,
+	SCL_H_MANUAL_REPLICATE_FACTOR_15                 = 0xe,
+	SCL_H_MANUAL_REPLICATE_FACTOR_16                 = 0xf,
+} SCL_H_MANUAL_REPLICATE_FACTOR;
+typedef enum SCL_V_CALC_AUTO_RATIO_EN {
+	SCL_V_CALC_AUTO_RATIO_DISABLE                    = 0x0,
+	SCL_V_CALC_AUTO_RATIO_ENABLE                     = 0x1,
+} SCL_V_CALC_AUTO_RATIO_EN;
+typedef enum SCL_H_CALC_AUTO_RATIO_EN {
+	SCL_H_CALC_AUTO_RATIO_DISABLE                    = 0x0,
+	SCL_H_CALC_AUTO_RATIO_ENABLE                     = 0x1,
+} SCL_H_CALC_AUTO_RATIO_EN;
+typedef enum SCL_H_FILTER_PICK_NEAREST {
+	SCL_H_FILTER_PICK_NEAREST_DISABLE                = 0x0,
+	SCL_H_FILTER_PICK_NEAREST_ENABLE                 = 0x1,
+} SCL_H_FILTER_PICK_NEAREST;
+typedef enum SCL_H_2TAP_HARDCODE_COEF_EN {
+	SCL_H_2TAP_HARDCODE_COEF_DISABLE                 = 0x0,
+	SCL_H_2TAP_HARDCODE_COEF_ENABLE                  = 0x1,
+} SCL_H_2TAP_HARDCODE_COEF_EN;
+typedef enum SCL_V_FILTER_PICK_NEAREST {
+	SCL_V_FILTER_PICK_NEAREST_DISABLE                = 0x0,
+	SCL_V_FILTER_PICK_NEAREST_ENABLE                 = 0x1,
+} SCL_V_FILTER_PICK_NEAREST;
+typedef enum SCL_V_2TAP_HARDCODE_COEF_EN {
+	SCL_V_2TAP_HARDCODE_COEF_DISABLE                 = 0x0,
+	SCL_V_2TAP_HARDCODE_COEF_ENABLE                  = 0x1,
+} SCL_V_2TAP_HARDCODE_COEF_EN;
+typedef enum SCL_UPDATE_TAKEN {
+	SCL_UPDATE_TAKEN_NO                              = 0x0,
+	SCL_UPDATE_TAKEN_YES                             = 0x1,
+} SCL_UPDATE_TAKEN;
+typedef enum SCL_UPDATE_LOCK {
+	SCL_UPDATE_UNLOCKED                              = 0x0,
+	SCL_UPDATE_LOCKED                                = 0x1,
+} SCL_UPDATE_LOCK;
+typedef enum SCL_COEF_UPDATE_COMPLETE {
+	SCL_COEF_UPDATE_NOT_COMPLETED                    = 0x0,
+	SCL_COEF_UPDATE_COMPLETED                        = 0x1,
+} SCL_COEF_UPDATE_COMPLETE;
+typedef enum SCL_HF_SHARP_SCALE_FACTOR {
+	SCL_HF_SHARP_SCALE_FACTOR_0                      = 0x0,
+	SCL_HF_SHARP_SCALE_FACTOR_1                      = 0x1,
+	SCL_HF_SHARP_SCALE_FACTOR_2                      = 0x2,
+	SCL_HF_SHARP_SCALE_FACTOR_3                      = 0x3,
+	SCL_HF_SHARP_SCALE_FACTOR_4                      = 0x4,
+	SCL_HF_SHARP_SCALE_FACTOR_5                      = 0x5,
+	SCL_HF_SHARP_SCALE_FACTOR_6                      = 0x6,
+	SCL_HF_SHARP_SCALE_FACTOR_7                      = 0x7,
+} SCL_HF_SHARP_SCALE_FACTOR;
+typedef enum SCL_HF_SHARP_EN {
+	SCL_HF_SHARP_DISABLE                             = 0x0,
+	SCL_HF_SHARP_ENABLE                              = 0x1,
+} SCL_HF_SHARP_EN;
+typedef enum SCL_VF_SHARP_SCALE_FACTOR {
+	SCL_VF_SHARP_SCALE_FACTOR_0                      = 0x0,
+	SCL_VF_SHARP_SCALE_FACTOR_1                      = 0x1,
+	SCL_VF_SHARP_SCALE_FACTOR_2                      = 0x2,
+	SCL_VF_SHARP_SCALE_FACTOR_3                      = 0x3,
+	SCL_VF_SHARP_SCALE_FACTOR_4                      = 0x4,
+	SCL_VF_SHARP_SCALE_FACTOR_5                      = 0x5,
+	SCL_VF_SHARP_SCALE_FACTOR_6                      = 0x6,
+	SCL_VF_SHARP_SCALE_FACTOR_7                      = 0x7,
+} SCL_VF_SHARP_SCALE_FACTOR;
+typedef enum SCL_VF_SHARP_EN {
+	SCL_VF_SHARP_DISABLE                             = 0x0,
+	SCL_VF_SHARP_ENABLE                              = 0x1,
+} SCL_VF_SHARP_EN;
+typedef enum SCL_ALU_DISABLE {
+	SCL_ALU_ENABLED                                  = 0x0,
+	SCL_ALU_DISABLED                                 = 0x1,
+} SCL_ALU_DISABLE;
+typedef enum SCL_HOST_CONFLICT_MASK {
+	SCL_HOST_CONFLICT_DISABLE_INTERRUPT              = 0x0,
+	SCL_HOST_CONFLICT_ENABLE_INTERRUPT               = 0x1,
+} SCL_HOST_CONFLICT_MASK;
+typedef enum SCL_SCL_MODE_CHANGE_MASK {
+	SCL_MODE_CHANGE_DISABLE_INTERRUPT                = 0x0,
+	SCL_MODE_CHANGE_ENABLE_INTERRUPT                 = 0x1,
+} SCL_SCL_MODE_CHANGE_MASK;
+typedef enum SCLV_MODE_SEL {
+	SCLV_MODE_RGB_BYPASS                             = 0x0,
+	SCLV_MODE_RGB_SCALING                            = 0x1,
+	SCLV_MODE_YCBCR_SCALING                          = 0x2,
+	SCLV_MODE_YCBCR_BYPASS                           = 0x3,
+} SCLV_MODE_SEL;
+typedef enum SCLV_INTERLACE_SOURCE {
+	INTERLACE_SOURCE_PROGRESSIVE                     = 0x0,
+	INTERLACE_SOURCE_INTERLEAVE                      = 0x1,
+	INTERLACE_SOURCE_STACK                           = 0x2,
+} SCLV_INTERLACE_SOURCE;
+typedef enum SCLV_UPDATE_LOCK {
+	UPDATE_UNLOCKED                                  = 0x0,
+	UPDATE_LOCKED                                    = 0x1,
+} SCLV_UPDATE_LOCK;
+typedef enum SCLV_COEF_UPDATE_COMPLETE {
+	COEF_UPDATE_NOT_COMPLETE                         = 0x0,
+	COEF_UPDATE_COMPLETE                             = 0x1,
+} SCLV_COEF_UPDATE_COMPLETE;
+typedef enum COL_MAN_UPDATE_LOCK {
+	COL_MAN_UPDATE_UNLOCKED                          = 0x0,
+	COL_MAN_UPDATE_LOCKED                            = 0x1,
+} COL_MAN_UPDATE_LOCK;
+typedef enum COL_MAN_DISABLE_MULTIPLE_UPDATE {
+	COL_MAN_MULTIPLE_UPDATE                          = 0x0,
+	COL_MAN_MULTIPLE_UPDAT_EDISABLE                  = 0x1,
+} COL_MAN_DISABLE_MULTIPLE_UPDATE;
+typedef enum COL_MAN_INPUTCSC_MODE {
+	INPUTCSC_MODE_BYPASS                             = 0x0,
+	INPUTCSC_MODE_A                                  = 0x1,
+	INPUTCSC_MODE_B                                  = 0x2,
+	INPUTCSC_MODE_UNITY                              = 0x3,
+} COL_MAN_INPUTCSC_MODE;
+typedef enum COL_MAN_INPUTCSC_TYPE {
+	INPUTCSC_TYPE_12_0                               = 0x0,
+	INPUTCSC_TYPE_10_2                               = 0x1,
+	INPUTCSC_TYPE_8_4                                = 0x2,
+} COL_MAN_INPUTCSC_TYPE;
+typedef enum COL_MAN_INPUTCSC_CONVERT {
+	INPUTCSC_ROUND                                   = 0x0,
+	INPUTCSC_TRUNCATE                                = 0x1,
+} COL_MAN_INPUTCSC_CONVERT;
+typedef enum COL_MAN_PRESCALE_MODE {
+	PRESCALE_MODE_BYPASS                             = 0x0,
+	PRESCALE_MODE_PROGRAM                            = 0x1,
+	PRESCALE_MODE_UNITY                              = 0x2,
+} COL_MAN_PRESCALE_MODE;
+typedef enum COL_MAN_INPUT_GAMMA_MODE {
+	INGAMMA_MODE_BYPASS                              = 0x0,
+	INGAMMA_MODE_FIX                                 = 0x1,
+	INGAMMA_MODE_FLOAT                               = 0x2,
+} COL_MAN_INPUT_GAMMA_MODE;
+typedef enum COL_MAN_OUTPUT_CSC_MODE {
+	COL_MAN_OUTPUT_CSC_BYPASS                        = 0x0,
+	COL_MAN_OUTPUT_CSC_RGB                           = 0x1,
+	COL_MAN_OUTPUT_CSC_YCrCb601                      = 0x2,
+	COL_MAN_OUTPUT_CSC_YCrCb709                      = 0x3,
+	COL_MAN_OUTPUT_CSC_A                             = 0x4,
+	COL_MAN_OUTPUT_CSC_B                             = 0x5,
+	COL_MAN_OUTPUT_CSC_UNITY                         = 0x6,
+} COL_MAN_OUTPUT_CSC_MODE;
+typedef enum COL_MAN_DENORM_CLAMP_CONTROL {
+	DENORM_CLAMP_MODE_UNITY                          = 0x0,
+	DENORM_CLAMP_MODE_8                              = 0x1,
+	DENORM_CLAMP_MODE_10                             = 0x2,
+	DENORM_CLAMP_MODE_12                             = 0x3,
+} COL_MAN_DENORM_CLAMP_CONTROL;
+typedef enum COL_MAN_GAMMA_CORR_CONTROL {
+	GAMMA_CORR_MODE_BYPASS                           = 0x0,
+	GAMMA_CORR_MODE_A                                = 0x1,
+	GAMMA_CORR_MODE_B                                = 0x2,
+} COL_MAN_GAMMA_CORR_CONTROL;
+typedef enum COL_MAN_GLOBAL_PASSTHROUGH_ENABLE {
+	CM_GLOBAL_PASSTHROUGH_DISBALE                    = 0x0,
+	CM_GLOBAL_PASSTHROUGH_ENABLE                     = 0x1,
+} COL_MAN_GLOBAL_PASSTHROUGH_ENABLE;
+typedef enum UNP_GRPH_EN {
+	UNP_GRPH_DISABLED                                = 0x0,
+	UNP_GRPH_ENABLED                                 = 0x1,
+} UNP_GRPH_EN;
+typedef enum UNP_GRPH_DEPTH {
+	UNP_GRPH_8BPP                                    = 0x0,
+	UNP_GRPH_16BPP                                   = 0x1,
+	UNP_GRPH_32BPP                                   = 0x2,
+} UNP_GRPH_DEPTH;
+typedef enum UNP_GRPH_NUM_BANKS {
+	UNP_GRPH_ADDR_SURF_2_BANK                        = 0x0,
+	UNP_GRPH_ADDR_SURF_4_BANK                        = 0x1,
+	UNP_GRPH_ADDR_SURF_8_BANK                        = 0x2,
+	UNP_GRPH_ADDR_SURF_16_BANK                       = 0x3,
+} UNP_GRPH_NUM_BANKS;
+typedef enum UNP_GRPH_BANK_WIDTH {
+	UNP_GRPH_ADDR_SURF_BANK_WIDTH_1                  = 0x0,
+	UNP_GRPH_ADDR_SURF_BANK_WIDTH_2                  = 0x1,
+	UNP_GRPH_ADDR_SURF_BANK_WIDTH_4                  = 0x2,
+	UNP_GRPH_ADDR_SURF_BANK_WIDTH_8                  = 0x3,
+} UNP_GRPH_BANK_WIDTH;
+typedef enum UNP_GRPH_BANK_HEIGHT {
+	UNP_GRPH_ADDR_SURF_BANK_HEIGHT_1                 = 0x0,
+	UNP_GRPH_ADDR_SURF_BANK_HEIGHT_2                 = 0x1,
+	UNP_GRPH_ADDR_SURF_BANK_HEIGHT_4                 = 0x2,
+	UNP_GRPH_ADDR_SURF_BANK_HEIGHT_8                 = 0x3,
+} UNP_GRPH_BANK_HEIGHT;
+typedef enum UNP_GRPH_TILE_SPLIT {
+	UNP_ADDR_SURF_TILE_SPLIT_64B                     = 0x0,
+	UNP_ADDR_SURF_TILE_SPLIT_128B                    = 0x1,
+	UNP_ADDR_SURF_TILE_SPLIT_256B                    = 0x2,
+	UNP_ADDR_SURF_TILE_SPLIT_512B                    = 0x3,
+	UNP_ADDR_SURF_TILE_SPLIT_1KB                     = 0x4,
+	UNP_ADDR_SURF_TILE_SPLIT_2KB                     = 0x5,
+	UNP_ADDR_SURF_TILE_SPLIT_4KB                     = 0x6,
+} UNP_GRPH_TILE_SPLIT;
+typedef enum UNP_GRPH_ADDRESS_TRANSLATION_ENABLE {
+	UNP_GRPH_ADDRESS_TRANSLATION_ENABLE0             = 0x0,
+	UNP_GRPH_ADDRESS_TRANSLATION_ENABLE1             = 0x1,
+} UNP_GRPH_ADDRESS_TRANSLATION_ENABLE;
+typedef enum UNP_GRPH_PRIVILEGED_ACCESS_ENABLE {
+	UNP_GRPH_PRIVILEGED_ACCESS_DIS                   = 0x0,
+	UNP_GRPH_PRIVILEGED_ACCESS_EN                    = 0x1,
+} UNP_GRPH_PRIVILEGED_ACCESS_ENABLE;
+typedef enum UNP_GRPH_MACRO_TILE_ASPECT {
+	UNP_ADDR_SURF_MACRO_ASPECT_1                     = 0x0,
+	UNP_ADDR_SURF_MACRO_ASPECT_2                     = 0x1,
+	UNP_ADDR_SURF_MACRO_ASPECT_4                     = 0x2,
+	UNP_ADDR_SURF_MACRO_ASPECT_8                     = 0x3,
+} UNP_GRPH_MACRO_TILE_ASPECT;
+typedef enum UNP_GRPH_COLOR_EXPANSION_MODE {
+	UNP_GRPH_DYNAMIC_EXPANSION                       = 0x0,
+	UNP_GRPH_ZERO_EXPANSION                          = 0x1,
+} UNP_GRPH_COLOR_EXPANSION_MODE;
+typedef enum UNP_VIDEO_FORMAT {
+	UNP_VIDEO_FORMAT0                                = 0x0,
+	UNP_VIDEO_FORMAT1                                = 0x1,
+	UNP_VIDEO_FORMAT_YUV420_YCbCr                    = 0x2,
+	UNP_VIDEO_FORMAT_YUV420_YCrCb                    = 0x3,
+	UNP_VIDEO_FORMAT_YUV422_YCb                      = 0x4,
+	UNP_VIDEO_FORMAT_YUV422_YCr                      = 0x5,
+	UNP_VIDEO_FORMAT_YUV422_CbY                      = 0x6,
+	UNP_VIDEO_FORMAT_YUV422_CrY                      = 0x7,
+} UNP_VIDEO_FORMAT;
+typedef enum UNP_GRPH_ENDIAN_SWAP {
+	UNP_GRPH_ENDIAN_SWAP_NONE                        = 0x0,
+	UNP_GRPH_ENDIAN_SWAP_8IN16                       = 0x1,
+	UNP_GRPH_ENDIAN_SWAP_8IN32                       = 0x2,
+	UNP_GRPH_ENDIAN_SWAP_8IN43                       = 0x3,
+} UNP_GRPH_ENDIAN_SWAP;
+typedef enum UNP_GRPH_RED_CROSSBAR {
+	UNP_GRPH_RED_CROSSBAR_R_Cr                       = 0x0,
+	UNP_GRPH_RED_CROSSBAR_G_Y                        = 0x1,
+	UNP_GRPH_RED_CROSSBAR_B_Cb                       = 0x2,
+	UNP_GRPH_RED_CROSSBAR_A                          = 0x3,
+} UNP_GRPH_RED_CROSSBAR;
+typedef enum UNP_GRPH_GREEN_CROSSBAR {
+	UNP_UNP_GRPH_GREEN_CROSSBAR_GY_AND_Y             = 0x0,
+	UNP_UNP_GRPH_GREEN_CROSSBAR_B_Cb_AND_C           = 0x1,
+	UNP_UNP_GRPH_GREEN_CROSSBAR_A                    = 0x2,
+	UNP_UNP_GRPH_GREEN_CROSSBAR_R_Cr                 = 0x3,
+} UNP_GRPH_GREEN_CROSSBAR;
+typedef enum UNP_GRPH_BLUE_CROSSBAR {
+	UNP_GRPH_BLUE_CROSSBAR_B_Cb_AND_C                = 0x0,
+	UNP_GRPH_BLUE_CROSSBAR_A                         = 0x1,
+	UNP_GRPH_BLUE_CROSSBAR_R_Cr                      = 0x2,
+	UNP_GRPH_BLUE_CROSSBAR_GY_AND_Y                  = 0x3,
+} UNP_GRPH_BLUE_CROSSBAR;
+typedef enum UNP_GRPH_MODE_UPDATE_LOCKG {
+	UNP_GRPH_UPDATE_LOCK_0                           = 0x0,
+	UNP_GRPH_UPDATE_LOCK_1                           = 0x1,
+} UNP_GRPH_MODE_UPDATE_LOCKG;
+typedef enum UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK {
+	UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_0            = 0x0,
+	UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_1            = 0x1,
+} UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK;
+typedef enum UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE {
+	UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_0          = 0x0,
+	UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_1          = 0x1,
+} UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE;
+typedef enum UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE {
+	UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_0       = 0x0,
+	UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_1       = 0x1,
+} UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE;
+typedef enum UNP_GRPH_STEREOSYNC_FLIP_EN {
+	UNP_GRPH_STEREOSYNC_FLIP_DISABLE                 = 0x0,
+	UNP_GRPH_STEREOSYNC_FLIP_ENABLE                  = 0x1,
+} UNP_GRPH_STEREOSYNC_FLIP_EN;
+typedef enum UNP_GRPH_STEREOSYNC_FLIP_MODE {
+	UNP_GRPH_STEREOSYNC_FLIP_MODE_0                  = 0x0,
+	UNP_GRPH_STEREOSYNC_FLIP_MODE_1                  = 0x1,
+	UNP_GRPH_STEREOSYNC_FLIP_MODE_2                  = 0x2,
+	UNP_GRPH_STEREOSYNC_FLIP_MODE_3                  = 0x3,
+} UNP_GRPH_STEREOSYNC_FLIP_MODE;
+typedef enum UNP_GRPH_STACK_INTERLACE_FLIP_EN {
+	UNP_GRPH_STACK_INTERLACE_FLIP_DISABLE            = 0x0,
+	UNP_GRPH_STACK_INTERLACE_FLIP_ENABLE             = 0x1,
+} UNP_GRPH_STACK_INTERLACE_FLIP_EN;
+typedef enum UNP_GRPH_STACK_INTERLACE_FLIP_MODE {
+	UNP_GRPH_STACK_INTERLACE_FLIP_MODE_0             = 0x0,
+	UNP_GRPH_STACK_INTERLACE_FLIP_MODE_1             = 0x1,
+	UNP_GRPH_STACK_INTERLACE_FLIP_MODE_2             = 0x2,
+	UNP_GRPH_STACK_INTERLACE_FLIP_MODE_3             = 0x3,
+} UNP_GRPH_STACK_INTERLACE_FLIP_MODE;
+typedef enum UNP_GRPH_STEREOSYNC_SELECT_DISABLE {
+	UNP_GRPH_STEREOSYNC_SELECT_EN                    = 0x0,
+	UNP_GRPH_STEREOSYNC_SELECT_DIS                   = 0x1,
+} UNP_GRPH_STEREOSYNC_SELECT_DISABLE;
+typedef enum UNP_CRC_SOURCE_SEL {
+	UNP_CRC_SOURCE_SEL_NP_TO_LBV                     = 0x0,
+	UNP_CRC_SOURCE_SEL_LOWER32                       = 0x1,
+	UNP_CRC_SOURCE_SEL_RESERVED                      = 0x2,
+	UNP_CRC_SOURCE_SEL_LOWER16                       = 0x3,
+	UNP_CRC_SOURCE_SEL_UNP_TO_LBV                    = 0x4,
+} UNP_CRC_SOURCE_SEL;
+typedef enum UNP_CRC_LINE_SEL {
+	UNP_CRC_LINE_SEL_RESERVED                        = 0x0,
+	UNP_CRC_LINE_SEL_EVEN_ONLY                       = 0x1,
+	UNP_CRC_LINE_SEL_ODD_ONLY                        = 0x2,
+	UNP_CRC_LINE_SEL_ODD_EVEN                        = 0x3,
+} UNP_CRC_LINE_SEL;
+typedef enum UNP_ROTATION_ANGLE {
+	UNP_ROTATION_ANGLE_0                             = 0x0,
+	UNP_ROTATION_ANGLE_90                            = 0x1,
+	UNP_ROTATION_ANGLE_180                           = 0x2,
+	UNP_ROTATION_ANGLE_270                           = 0x3,
+	UNP_ROTATION_ANGLE_0m                            = 0x4,
+	UNP_ROTATION_ANGLE_90m                           = 0x5,
+	UNP_ROTATION_ANGLE_180m                          = 0x6,
+	UNP_ROTATION_ANGLE_270m                          = 0x7,
+} UNP_ROTATION_ANGLE;
+typedef enum UNP_PIXEL_DROP {
+	UNP_PIXEL_NO_DROP                                = 0x0,
+	UNP_PIXEL_DROPPING                               = 0x1,
+} UNP_PIXEL_DROP;
+typedef enum UNP_BUFFER_MODE {
+	UNP_BUFFER_MODE_LUMA                             = 0x0,
+	UNP_BUFFER_MODE_LUMA_CHROMA                      = 0x1,
+} UNP_BUFFER_MODE;
+typedef enum WATERMARK_MASK_CONTROL {
+	WM_MASK_CONTROL_SET_A                            = 0x0,
+	WM_MASK_CONTROL_SET_B                            = 0x1,
+	WM_MASK_CONTROL_SET_C                            = 0x2,
+	WM_MASK_CONTROL_SET_D                            = 0x3,
+	WM_MASK_CONTROL_RESERVED1                        = 0x4,
+	WM_MASK_CONTROL_RESERVED2                        = 0x5,
+	WM_MASK_CONTROL_RESERVED3                        = 0x6,
+	WM_MASK_CONTROL_ACTIVE_SET                       = 0x7,
+} WATERMARK_MASK_CONTROL;
+typedef enum AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_RESET {
+	AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_NOT_RESET= 0x0,
+	AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_DO_RESET= 0x1,
+} AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_RESET;
+typedef enum CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY {
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_ALL= 0x0,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_6= 0x1,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_5= 0x2,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_4= 0x3,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_3= 0x4,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_2= 0x5,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_1= 0x6,
+	CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_0= 0x7,
+} CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY;
+typedef enum CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY {
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_ALL= 0x0,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_6= 0x1,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_5= 0x2,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_4= 0x3,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_3= 0x4,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_2= 0x5,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_1= 0x6,
+	CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_0= 0x7,
+} CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL {
+	GENERIC_AZ_CONTROLLER_REGISTER_DISABLE           = 0x0,
+	GENERIC_AZ_CONTROLLER_REGISTER_ENABLE            = 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL_RESERVED {
+	GENERIC_AZ_CONTROLLER_REGISTER_DISABLE_RESERVED  = 0x0,
+	GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_RESERVED   = 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL_RESERVED;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_STATUS {
+	GENERIC_AZ_CONTROLLER_REGISTER_STATUS_NOT_SET    = 0x0,
+	GENERIC_AZ_CONTROLLER_REGISTER_STATUS_SET        = 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_STATUS;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_STATUS_RESERVED {
+	GENERIC_AZ_CONTROLLER_REGISTER_STATUS_NOT_SET_RESERVED= 0x0,
+	GENERIC_AZ_CONTROLLER_REGISTER_STATUS_SET_RESERVED= 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_STATUS_RESERVED;
+typedef enum AZ_GLOBAL_CAPABILITIES {
+	AZ_GLOBAL_CAPABILITIES_SIXTY_FOUR_BIT_ADDRESS_NOT_SUPPORTED= 0x0,
+	AZ_GLOBAL_CAPABILITIES_SIXTY_FOUR_BIT_ADDRESS_SUPPORTED= 0x1,
+} AZ_GLOBAL_CAPABILITIES;
+typedef enum GLOBAL_CONTROL_ACCEPT_UNSOLICITED_RESPONSE {
+	ACCEPT_UNSOLICITED_RESPONSE_NOT_ENABLE           = 0x0,
+	ACCEPT_UNSOLICITED_RESPONSE_ENABLE               = 0x1,
+} GLOBAL_CONTROL_ACCEPT_UNSOLICITED_RESPONSE;
+typedef enum GLOBAL_CONTROL_FLUSH_CONTROL {
+	FLUSH_CONTROL_FLUSH_NOT_STARTED                  = 0x0,
+	FLUSH_CONTROL_FLUSH_STARTED                      = 0x1,
+} GLOBAL_CONTROL_FLUSH_CONTROL;
+typedef enum GLOBAL_CONTROL_CONTROLLER_RESET {
+	CONTROLLER_RESET_AZ_CONTROLLER_IN_RESET          = 0x0,
+	CONTROLLER_RESET_AZ_CONTROLLER_NOT_IN_RESET      = 0x1,
+} GLOBAL_CONTROL_CONTROLLER_RESET;
+typedef enum AZ_STATE_CHANGE_STATUS {
+	AZ_STATE_CHANGE_STATUS_CODEC_NOT_PRESENT         = 0x0,
+	AZ_STATE_CHANGE_STATUS_CODEC_PRESENT             = 0x1,
+} AZ_STATE_CHANGE_STATUS;
+typedef enum GLOBAL_STATUS_FLUSH_STATUS {
+	GLOBAL_STATUS_FLUSH_STATUS_FLUSH_NOT_ENDED       = 0x0,
+	GLOBAL_STATUS_FLUSH_STATUS_FLUSH_ENDED           = 0x1,
+} GLOBAL_STATUS_FLUSH_STATUS;
+typedef enum STREAM_0_SYNCHRONIZATION {
+	STREAM_0_SYNCHRONIZATION_STEAM_NOT_STOPPED       = 0x0,
+	STREAM_0_SYNCHRONIZATION_STEAM_STOPPED           = 0x1,
+} STREAM_0_SYNCHRONIZATION;
+typedef enum STREAM_1_SYNCHRONIZATION {
+	STREAM_1_SYNCHRONIZATION_STEAM_NOT_STOPPED       = 0x0,
+	STREAM_1_SYNCHRONIZATION_STEAM_STOPPED           = 0x1,
+} STREAM_1_SYNCHRONIZATION;
+typedef enum STREAM_2_SYNCHRONIZATION {
+	STREAM_2_SYNCHRONIZATION_STEAM_NOT_STOPPED       = 0x0,
+	STREAM_2_SYNCHRONIZATION_STEAM_STOPPED           = 0x1,
+} STREAM_2_SYNCHRONIZATION;
+typedef enum STREAM_3_SYNCHRONIZATION {
+	STREAM_3_SYNCHRONIZATION_STEAM_NOT_STOPPED       = 0x0,
+	STREAM_3_SYNCHRONIZATION_STEAM_STOPPED           = 0x1,
+} STREAM_3_SYNCHRONIZATION;
+typedef enum STREAM_4_SYNCHRONIZATION {
+	STREAM_4_SYNCHRONIZATION_STEAM_NOT_STOPPED       = 0x0,
+	STREAM_4_SYNCHRONIZATION_STEAM_STOPPED           = 0x1,
+} STREAM_4_SYNCHRONIZATION;
+typedef enum STREAM_5_SYNCHRONIZATION {
+	STREAM_5_SYNCHRONIZATION_STEAM_NOT_STOPPED       = 0x0,
+	STREAM_5_SYNCHRONIZATION_STEAM_STOPPED           = 0x1,
+} STREAM_5_SYNCHRONIZATION;
+typedef enum STREAM_6_SYNCHRONIZATION {
+	STREAM_6_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_6_SYNCHRONIZATION_STEAM_STOPPED_RESERVED  = 0x1,
+} STREAM_6_SYNCHRONIZATION;
+typedef enum STREAM_7_SYNCHRONIZATION {
+	STREAM_7_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_7_SYNCHRONIZATION_STEAM_STOPPED_RESERVED  = 0x1,
+} STREAM_7_SYNCHRONIZATION;
+typedef enum STREAM_8_SYNCHRONIZATION {
+	STREAM_8_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_8_SYNCHRONIZATION_STEAM_STOPPED_RESERVED  = 0x1,
+} STREAM_8_SYNCHRONIZATION;
+typedef enum STREAM_9_SYNCHRONIZATION {
+	STREAM_9_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_9_SYNCHRONIZATION_STEAM_STOPPED_RESERVED  = 0x1,
+} STREAM_9_SYNCHRONIZATION;
+typedef enum STREAM_10_SYNCHRONIZATION {
+	STREAM_10_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_10_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_10_SYNCHRONIZATION;
+typedef enum STREAM_11_SYNCHRONIZATION {
+	STREAM_11_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_11_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_11_SYNCHRONIZATION;
+typedef enum STREAM_12_SYNCHRONIZATION {
+	STREAM_12_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_12_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_12_SYNCHRONIZATION;
+typedef enum STREAM_13_SYNCHRONIZATION {
+	STREAM_13_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_13_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_13_SYNCHRONIZATION;
+typedef enum STREAM_14_SYNCHRONIZATION {
+	STREAM_14_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_14_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_14_SYNCHRONIZATION;
+typedef enum STREAM_15_SYNCHRONIZATION {
+	STREAM_15_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+	STREAM_15_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_15_SYNCHRONIZATION;
+typedef enum CORB_READ_POINTER_RESET {
+	CORB_READ_POINTER_RESET_CORB_DMA_IS_NOT_RESET    = 0x0,
+	CORB_READ_POINTER_RESET_CORB_DMA_IS_RESET        = 0x1,
+} CORB_READ_POINTER_RESET;
+typedef enum AZ_CORB_SIZE {
+	AZ_CORB_SIZE_2ENTRIES_RESERVED                   = 0x0,
+	AZ_CORB_SIZE_16ENTRIES_RESERVED                  = 0x1,
+	AZ_CORB_SIZE_256ENTRIES                          = 0x2,
+	AZ_CORB_SIZE_RESERVED                            = 0x3,
+} AZ_CORB_SIZE;
+typedef enum AZ_RIRB_WRITE_POINTER_RESET {
+	AZ_RIRB_WRITE_POINTER_NOT_RESET                  = 0x0,
+	AZ_RIRB_WRITE_POINTER_DO_RESET                   = 0x1,
+} AZ_RIRB_WRITE_POINTER_RESET;
+typedef enum RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL {
+	RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL_INTERRUPT_DISABLED= 0x0,
+	RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL_INTERRUPT_ENABLED= 0x1,
+} RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL;
+typedef enum RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL {
+	RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL_INTERRUPT_DISABLED= 0x0,
+	RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL_INTERRUPT_ENABLED= 0x1,
+} RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL;
+typedef enum AZ_RIRB_SIZE {
+	AZ_RIRB_SIZE_2ENTRIES_RESERVED                   = 0x0,
+	AZ_RIRB_SIZE_16ENTRIES_RESERVED                  = 0x1,
+	AZ_RIRB_SIZE_256ENTRIES                          = 0x2,
+	AZ_RIRB_SIZE_UNDEFINED                           = 0x3,
+} AZ_RIRB_SIZE;
+typedef enum IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID {
+	IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID_NO_IMMEDIATE_RESPONSE_VALID= 0x0,
+	IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID_IMMEDIATE_RESPONSE_VALID= 0x1,
+} IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID;
+typedef enum IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_BUSY {
+	IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_NOT_BUSY= 0x0,
+	IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_IS_BUSY= 0x1,
+} IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_BUSY;
+typedef enum DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE {
+	DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE_DMA_DISABLE= 0x0,
+	DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE_DMA_ENABLE= 0x1,
+} DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_STATUS_NOT_SET= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_STATUS_SET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_STATUS_NOT_SET= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_STATUS_SET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS_NOT_SET= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS_SET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_TRAFFIC_PRIORITY {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_NO_TRAFFIC_PRIORITY= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_YES_TRAFFIC_PRIORITY= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_TRAFFIC_PRIORITY;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_ENABLE {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_DISABLED= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_ENABLED= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_ENABLE {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_DISABLED= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_ENABLED= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE_INTERRUPT_DISABLED= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE_INTERRUPT_ENABLED= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RUN {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_NOT_RUN= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_DO_RUN= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RUN;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RESET {
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_NOT_RESET= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_IS_RESET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RESET;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE {
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE_48KHZ= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE_44P1KHZ= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE {
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY1= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY2= 0x1,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY3_RESERVED= 0x2,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY4= 0x3,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_RESERVED= 0x4,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR {
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY1= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY2_RESERVED= 0x1,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY3= 0x2,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY4_RESERVED= 0x3,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY5_RESERVED= 0x4,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY6_RESERVED= 0x5,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY7_RESERVED= 0x6,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY8_RESERVED= 0x7,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE {
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_8_RESERVED= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_16= 0x1,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_20= 0x2,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_24= 0x3,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_32_RESERVED= 0x4,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_RESERVED= 0x5,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS {
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_1= 0x0,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_2= 0x1,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_3= 0x2,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_4= 0x3,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_5= 0x4,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_6= 0x5,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_7= 0x6,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_8= 0x7,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_9_RESERVED= 0x8,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_10_RESERVED= 0x9,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_11_RESERVED= 0xa,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_12_RESERVED= 0xb,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_13_RESERVED= 0xc,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_14_RESERVED= 0xd,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_15_RESERVED= 0xe,
+	OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_16_RESERVED= 0xf,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_PCM= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_NOT_PCM= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_48KHZ= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_44P1KHZ= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY1= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY2= 0x1,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY3_RESERVED= 0x2,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY4= 0x3,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_RESERVED= 0x4,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY1= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY2_RESERVED= 0x1,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY3= 0x2,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY4_RESERVED= 0x3,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY5_RESERVED= 0x4,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY6_RESERVED= 0x5,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY7_RESERVED= 0x6,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY8_RESERVED= 0x7,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_8_RESERVED= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_16= 0x1,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_20= 0x2,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_24= 0x3,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_32_RESERVED= 0x4,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_RESERVED= 0x5,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_1= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_2= 0x1,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_3= 0x2,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_4= 0x3,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_5= 0x4,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_6= 0x5,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_7= 0x6,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_8= 0x7,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_RESERVED= 0x8,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L_BIT7_NOT_SET= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L_BIT7_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO_BIT_A_NOT_SET= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO_BIT_A_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO_BIT_B_NOT_SET= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO_BIT_B_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY_BIT_C_IS_SET= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY_BIT_C_NOT_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE_LSB_OF_D_NOT_SET= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE_LSB_OF_D_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VCFG {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VALIDITY_CFG_NOT_ON= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VALIDITY_CFG_ON= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VCFG;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V_BIT28_IS_ZERO= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V_BIT28_IS_ONE= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_DISABLED= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_ENABLED= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE {
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE_SILENT_STREAM_NOT_ENABLE= 0x0,
+	AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE_SILENT_STREAM_ENABLE= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE {
+	AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE_PIN_SHUT_OFF= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE_PIN_DRIVEN= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE {
+	AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DISABLED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO_DOWN_MIX_INHIBIT {
+	AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_NO_INFO_OR_PERMITTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_FORBIDDEN   = 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO_DOWN_MIX_INHIBIT;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE {
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_PAIR_MODE= 0x0,
+	AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_SINGLE_MODE= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE;
+typedef enum AZ_LATENCY_COUNTER_CONTROL {
+	AZ_LATENCY_COUNTER_NO_RESET                      = 0x0,
+	AZ_LATENCY_COUNTER_RESET_DONE                    = 0x1,
+} AZ_LATENCY_COUNTER_CONTROL;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED_RESERVED= 0x8,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_ANALOG= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_NO_PROCESSING_CAPABILITIES= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_SUPPORT_STRIPING= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_FORMAT_OVERRIDE= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_FORMAT_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES {
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_MONOPHONIC= 0x0,
+	AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_STEREO= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED_RESERVED= 0x8,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_ANALOG= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_NO_PROCESSING_CAPABILITIES= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_SUPPORT_STRIPING= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER_PRESENT= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_EAPD_PIN= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_EAPD_PIN= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_I_O_PINS_ARE_NOT_BALANCED= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_I_O_PINS_ARE_BALANCED= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_INPUT_PIN= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_INPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_OUTPUT_PIN= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_OUTPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_HEADPHONE_DRIVE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_HEADPHONE_DRIVE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_JACK_DETECTION_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_JACK_DETECTION_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE {
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_IMPEDANCE_SENSE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_IMPEDANCE_SENSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE {
+	AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_PAIR_MODE= 0x0,
+	AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_SINGLE_MODE= 0x1,
+} AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE;
+typedef enum AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE {
+	AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_NO_HBR_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_HAVE_HBR_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED= 0x8,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CODEC_CONVERTER0_IS_ANALOG= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CODEC_CONVERTER0_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_CODEC_CONVERTER0_HAVE_NO_PROCESSING_CAPABILITIES= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_CODEC_CONVERTER0_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NOT_SUPPORT_STRIPING= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_FORMAT_OVERRIDE= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_FORMAT_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES {
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_MONOPHONIC= 0x0,
+	AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_STEREO= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED= 0x8,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_ANALOG= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_NO_PROCESSING_CAPABILITIES= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_SUPPORT_STRIPING= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP_NOT_ENABLED= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP_ENABLED= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE_NO_EAPD_PIN= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE_HAVE_EAPD_PIN= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI_NOT_ENABLED= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI_ENABLED= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_I_O_PINS_NOT_BALANCED= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_I_O_PINS_ARE_BALANCED= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_INPUT_PIN= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_INPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_OUTPUT_PIN= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_OUTPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_HEADPHONE_DRIVE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_HEADPHONE_DRIVE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_JACK_PRESENCE_DETECTION_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_JACK_PRESENCE_DETECTION_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE {
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_IMPEDANCE_SENSE_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_IMPEDANCE_SENSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE {
+	AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_NO_HBR_CAPABILITY= 0x0,
+	AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_HAVE_HBR_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_PCM= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_NOT_PCM= 0x1,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_48KHZ= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_44P1KHZ= 0x1,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY1= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY2= 0x1,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY3_RESERVED= 0x2,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY4= 0x3,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_RESERVED= 0x4,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY1= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY2_RESERVED= 0x1,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY3= 0x2,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY4_RESERVED= 0x3,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY5_RESERVED= 0x4,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY6_RESERVED= 0x5,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY7_RESERVED= 0x6,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY8_RESERVED= 0x7,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_8_RESERVED= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_16= 0x1,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_20= 0x2,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_24= 0x3,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_32_RESERVED= 0x4,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_RESERVED= 0x5,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_1= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_2= 0x1,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_3= 0x2,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_4= 0x3,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_5= 0x4,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_6= 0x5,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_7= 0x6,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_8= 0x7,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_RESERVED= 0x8,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN {
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_DISABLED= 0x0,
+	AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_ENABLED= 0x1,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE_PIN_SHUT_OFF= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE_PIN_DRIVEN= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DISABLED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE {
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_NOT_MUTED= 0x0,
+	AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE;
+typedef enum BLND_CONTROL_BLND_MODE {
+	BLND_CONTROL_BLND_MODE_CURRENT_PIPE_ONLY         = 0x0,
+	BLND_CONTROL_BLND_MODE_OTHER_PIPE_ONLY           = 0x1,
+	BLND_CONTROL_BLND_MODE_ALPHA_BLENDING_MODE       = 0x2,
+	BLND_CONTROL_BLND_MODE_OTHER_STEREO_TYPE         = 0x3,
+} BLND_CONTROL_BLND_MODE;
+typedef enum BLND_CONTROL_BLND_STEREO_TYPE {
+	BLND_CONTROL_BLND_STEREO_TYPE_NON_SINGLE_PIPE_STEREO= 0x0,
+	BLND_CONTROL_BLND_STEREO_TYPE_SIDE_BY_SIDE_SINGLE_PIPE_STEREO= 0x1,
+	BLND_CONTROL_BLND_STEREO_TYPE_TOP_BOTTOM_SINGLE_PIPE_STEREO= 0x2,
+	BLND_CONTROL_BLND_STEREO_TYPE_UNUSED             = 0x3,
+} BLND_CONTROL_BLND_STEREO_TYPE;
+typedef enum BLND_CONTROL_BLND_STEREO_POLARITY {
+	BLND_CONTROL_BLND_STEREO_POLARITY_LOW            = 0x0,
+	BLND_CONTROL_BLND_STEREO_POLARITY_HIGH           = 0x1,
+} BLND_CONTROL_BLND_STEREO_POLARITY;
+typedef enum BLND_CONTROL_BLND_FEEDTHROUGH_EN {
+	BLND_CONTROL_BLND_FEEDTHROUGH_EN_FALSE           = 0x0,
+	BLND_CONTROL_BLND_FEEDTHROUGH_EN_TRUE            = 0x1,
+} BLND_CONTROL_BLND_FEEDTHROUGH_EN;
+typedef enum BLND_CONTROL_BLND_ALPHA_MODE {
+	BLND_CONTROL_BLND_ALPHA_MODE_CURRENT_PIXEL_ALPHA = 0x0,
+	BLND_CONTROL_BLND_ALPHA_MODE_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN= 0x1,
+	BLND_CONTROL_BLND_ALPHA_MODE_GLOBAL_ALPHA_ONLY   = 0x2,
+	BLND_CONTROL_BLND_ALPHA_MODE_UNUSED              = 0x3,
+} BLND_CONTROL_BLND_ALPHA_MODE;
+typedef enum BLND_CONTROL_BLND_ACTIVE_OVERLAP_ONLY {
+	BLND_CONTROL_BLND_ACTIVE_OVERLAY_ONLY_FALSE      = 0x0,
+	BLND_CONTROL_BLND_ACTIVE_OVERLAY_ONLY_TRUE       = 0x1,
+} BLND_CONTROL_BLND_ACTIVE_OVERLAP_ONLY;
+typedef enum BLND_CONTROL_BLND_MULTIPLIED_MODE {
+	BLND_CONTROL_BLND_MULTIPLIED_MODE_FALSE          = 0x0,
+	BLND_CONTROL_BLND_MULTIPLIED_MODE_TRUE           = 0x1,
+} BLND_CONTROL_BLND_MULTIPLIED_MODE;
+typedef enum BLND_SM_CONTROL2_SM_MODE {
+	BLND_SM_CONTROL2_SM_MODE_SINGLE_PLANE            = 0x0,
+	BLND_SM_CONTROL2_SM_MODE_ROW_SUBSAMPLING         = 0x2,
+	BLND_SM_CONTROL2_SM_MODE_COLUMN_SUBSAMPLING      = 0x4,
+	BLND_SM_CONTROL2_SM_MODE_CHECKERBOARD_SUBSAMPLING= 0x6,
+} BLND_SM_CONTROL2_SM_MODE;
+typedef enum BLND_SM_CONTROL2_SM_FRAME_ALTERNATE {
+	BLND_SM_CONTROL2_SM_FRAME_ALTERNATE_FALSE        = 0x0,
+	BLND_SM_CONTROL2_SM_FRAME_ALTERNATE_TRUE         = 0x1,
+} BLND_SM_CONTROL2_SM_FRAME_ALTERNATE;
+typedef enum BLND_SM_CONTROL2_SM_FIELD_ALTERNATE {
+	BLND_SM_CONTROL2_SM_FIELD_ALTERNATE_FALSE        = 0x0,
+	BLND_SM_CONTROL2_SM_FIELD_ALTERNATE_TRUE         = 0x1,
+} BLND_SM_CONTROL2_SM_FIELD_ALTERNATE;
+typedef enum BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL {
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_NO_FORCE= 0x0,
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_RESERVED= 0x1,
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_LOW= 0x2,
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_HIGH= 0x3,
+} BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL;
+typedef enum BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL {
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_NO_FORCE  = 0x0,
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_RESERVED  = 0x1,
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_LOW = 0x2,
+	BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_HIGH= 0x3,
+} BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL;
+typedef enum BLND_CONTROL2_PTI_ENABLE {
+	BLND_CONTROL2_PTI_ENABLE_FALSE                   = 0x0,
+	BLND_CONTROL2_PTI_ENABLE_TRUE                    = 0x1,
+} BLND_CONTROL2_PTI_ENABLE;
+typedef enum BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN {
+	BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_FALSE      = 0x0,
+	BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_TRUE       = 0x1,
+} BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN;
+typedef enum BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN {
+	BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN_FALSE      = 0x0,
+	BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN_TRUE       = 0x1,
+} BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN;
+typedef enum BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK {
+	BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_FALSE= 0x0,
+	BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_TRUE= 0x1,
+} BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK;
+typedef enum BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK {
+	BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_FALSE= 0x0,
+	BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_TRUE= 0x1,
+} BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK {
+	BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_FALSE= 0x0,
+	BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK {
+	BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_FALSE= 0x0,
+	BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK {
+	BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_FALSE= 0x0,
+	BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK {
+	BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_FALSE= 0x0,
+	BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK {
+	BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_FALSE  = 0x0,
+	BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_TRUE   = 0x1,
+} BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK {
+	BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_FALSE = 0x0,
+	BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_TRUE  = 0x1,
+} BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE {
+	BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_FALSE = 0x0,
+	BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_TRUE  = 0x1,
+} BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE;
+typedef enum BLND_DEBUG_BLND_CNV_MUX_SELECT {
+	BLND_DEBUG_BLND_CNV_MUX_SELECT_LOW               = 0x0,
+	BLND_DEBUG_BLND_CNV_MUX_SELECT_HIGH              = 0x1,
+} BLND_DEBUG_BLND_CNV_MUX_SELECT;
+typedef enum BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN {
+	BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_FALSE= 0x0,
+	BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_TRUE= 0x1,
+} BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN;
+typedef enum SurfaceEndian {
+	ENDIAN_NONE                                      = 0x0,
+	ENDIAN_8IN16                                     = 0x1,
+	ENDIAN_8IN32                                     = 0x2,
+	ENDIAN_8IN64                                     = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+	ARRAY_LINEAR_GENERAL                             = 0x0,
+	ARRAY_LINEAR_ALIGNED                             = 0x1,
+	ARRAY_1D_TILED_THIN1                             = 0x2,
+	ARRAY_1D_TILED_THICK                             = 0x3,
+	ARRAY_2D_TILED_THIN1                             = 0x4,
+	ARRAY_PRT_TILED_THIN1                            = 0x5,
+	ARRAY_PRT_2D_TILED_THIN1                         = 0x6,
+	ARRAY_2D_TILED_THICK                             = 0x7,
+	ARRAY_2D_TILED_XTHICK                            = 0x8,
+	ARRAY_PRT_TILED_THICK                            = 0x9,
+	ARRAY_PRT_2D_TILED_THICK                         = 0xa,
+	ARRAY_PRT_3D_TILED_THIN1                         = 0xb,
+	ARRAY_3D_TILED_THIN1                             = 0xc,
+	ARRAY_3D_TILED_THICK                             = 0xd,
+	ARRAY_3D_TILED_XTHICK                            = 0xe,
+	ARRAY_PRT_3D_TILED_THICK                         = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+	CONFIG_1_PIPE                                    = 0x0,
+	CONFIG_2_PIPE                                    = 0x1,
+	CONFIG_4_PIPE                                    = 0x2,
+	CONFIG_8_PIPE                                    = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+	CONFIG_4_BANK                                    = 0x0,
+	CONFIG_8_BANK                                    = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+	CONFIG_256B_GROUP                                = 0x0,
+	CONFIG_512B_GROUP                                = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+	CONFIG_1KB_ROW                                   = 0x0,
+	CONFIG_2KB_ROW                                   = 0x1,
+	CONFIG_4KB_ROW                                   = 0x2,
+	CONFIG_8KB_ROW                                   = 0x3,
+	CONFIG_1KB_ROW_OPT                               = 0x4,
+	CONFIG_2KB_ROW_OPT                               = 0x5,
+	CONFIG_4KB_ROW_OPT                               = 0x6,
+	CONFIG_8KB_ROW_OPT                               = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+	CONFIG_128B_SWAPS                                = 0x0,
+	CONFIG_256B_SWAPS                                = 0x1,
+	CONFIG_512B_SWAPS                                = 0x2,
+	CONFIG_1KB_SWAPS                                 = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+	CONFIG_1KB_SPLIT                                 = 0x0,
+	CONFIG_2KB_SPLIT                                 = 0x1,
+	CONFIG_4KB_SPLIT                                 = 0x2,
+	CONFIG_8KB_SPLIT                                 = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+	ADDR_CONFIG_1_PIPE                               = 0x0,
+	ADDR_CONFIG_2_PIPE                               = 0x1,
+	ADDR_CONFIG_4_PIPE                               = 0x2,
+	ADDR_CONFIG_8_PIPE                               = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+	ADDR_CONFIG_PIPE_INTERLEAVE_256B                 = 0x0,
+	ADDR_CONFIG_PIPE_INTERLEAVE_512B                 = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+	ADDR_CONFIG_BANK_INTERLEAVE_1                    = 0x0,
+	ADDR_CONFIG_BANK_INTERLEAVE_2                    = 0x1,
+	ADDR_CONFIG_BANK_INTERLEAVE_4                    = 0x2,
+	ADDR_CONFIG_BANK_INTERLEAVE_8                    = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+	ADDR_CONFIG_1_SHADER_ENGINE                      = 0x0,
+	ADDR_CONFIG_2_SHADER_ENGINE                      = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+	ADDR_CONFIG_SE_TILE_16                           = 0x0,
+	ADDR_CONFIG_SE_TILE_32                           = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+	ADDR_CONFIG_1_GPU                                = 0x0,
+	ADDR_CONFIG_2_GPU                                = 0x1,
+	ADDR_CONFIG_4_GPU                                = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+	ADDR_CONFIG_GPU_TILE_16                          = 0x0,
+	ADDR_CONFIG_GPU_TILE_32                          = 0x1,
+	ADDR_CONFIG_GPU_TILE_64                          = 0x2,
+	ADDR_CONFIG_GPU_TILE_128                         = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+	ADDR_CONFIG_1KB_ROW                              = 0x0,
+	ADDR_CONFIG_2KB_ROW                              = 0x1,
+	ADDR_CONFIG_4KB_ROW                              = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+	ADDR_CONFIG_1_LOWER_PIPES                        = 0x0,
+	ADDR_CONFIG_2_LOWER_PIPES                        = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+	DBG_CLIENT_BLKID_RESERVED                        = 0x0,
+	DBG_CLIENT_BLKID_dbg                             = 0x1,
+	DBG_CLIENT_BLKID_scf2                            = 0x2,
+	DBG_CLIENT_BLKID_mcd5                            = 0x3,
+	DBG_CLIENT_BLKID_vmc                             = 0x4,
+	DBG_CLIENT_BLKID_sx30                            = 0x5,
+	DBG_CLIENT_BLKID_mcd2                            = 0x6,
+	DBG_CLIENT_BLKID_bci1                            = 0x7,
+	DBG_CLIENT_BLKID_xdma_dbg_client_wrapper         = 0x8,
+	DBG_CLIENT_BLKID_mcc0                            = 0x9,
+	DBG_CLIENT_BLKID_uvdf_2                          = 0xa,
+	DBG_CLIENT_BLKID_uvdf_3                          = 0xb,
+	DBG_CLIENT_BLKID_uvdt_0                          = 0xc,
+	DBG_CLIENT_BLKID_uvdi_0                          = 0xd,
+	DBG_CLIENT_BLKID_bci0                            = 0xe,
+	DBG_CLIENT_BLKID_vceb0_1                         = 0xf,
+	DBG_CLIENT_BLKID_cb100                           = 0x10,
+	DBG_CLIENT_BLKID_cb001                           = 0x11,
+	DBG_CLIENT_BLKID_mcd4                            = 0x12,
+	DBG_CLIENT_BLKID_tmonw00                         = 0x13,
+	DBG_CLIENT_BLKID_cb101                           = 0x14,
+	DBG_CLIENT_BLKID_sx10                            = 0x15,
+	DBG_CLIENT_BLKID_cb301                           = 0x16,
+	DBG_CLIENT_BLKID_tmonw01                         = 0x17,
+	DBG_CLIENT_BLKID_vcea0_0                         = 0x18,
+	DBG_CLIENT_BLKID_vcea0_1                         = 0x19,
+	DBG_CLIENT_BLKID_vcea0_2                         = 0x1a,
+	DBG_CLIENT_BLKID_vcea0_3                         = 0x1b,
+	DBG_CLIENT_BLKID_scf1                            = 0x1c,
+	DBG_CLIENT_BLKID_sx20                            = 0x1d,
+	DBG_CLIENT_BLKID_spim1                           = 0x1e,
+	DBG_CLIENT_BLKID_pa10                            = 0x1f,
+	DBG_CLIENT_BLKID_pa00                            = 0x20,
+	DBG_CLIENT_BLKID_gmcon                           = 0x21,
+	DBG_CLIENT_BLKID_mcb                             = 0x22,
+	DBG_CLIENT_BLKID_vgt0                            = 0x23,
+	DBG_CLIENT_BLKID_pc0                             = 0x24,
+	DBG_CLIENT_BLKID_bci2                            = 0x25,
+	DBG_CLIENT_BLKID_uvdb_0                          = 0x26,
+	DBG_CLIENT_BLKID_spim3                           = 0x27,
+	DBG_CLIENT_BLKID_cpc_0                           = 0x28,
+	DBG_CLIENT_BLKID_cpc_1                           = 0x29,
+	DBG_CLIENT_BLKID_uvdm_0                          = 0x2a,
+	DBG_CLIENT_BLKID_uvdm_1                          = 0x2b,
+	DBG_CLIENT_BLKID_uvdm_2                          = 0x2c,
+	DBG_CLIENT_BLKID_uvdm_3                          = 0x2d,
+	DBG_CLIENT_BLKID_cb000                           = 0x2e,
+	DBG_CLIENT_BLKID_spim0                           = 0x2f,
+	DBG_CLIENT_BLKID_mcc2                            = 0x30,
+	DBG_CLIENT_BLKID_ds0                             = 0x31,
+	DBG_CLIENT_BLKID_srbm                            = 0x32,
+	DBG_CLIENT_BLKID_ih                              = 0x33,
+	DBG_CLIENT_BLKID_sem                             = 0x34,
+	DBG_CLIENT_BLKID_sdma_0                          = 0x35,
+	DBG_CLIENT_BLKID_sdma_1                          = 0x36,
+	DBG_CLIENT_BLKID_hdp                             = 0x37,
+	DBG_CLIENT_BLKID_cb200                           = 0x38,
+	DBG_CLIENT_BLKID_scf3                            = 0x39,
+	DBG_CLIENT_BLKID_vceb1_0                         = 0x3a,
+	DBG_CLIENT_BLKID_vcea1_0                         = 0x3b,
+	DBG_CLIENT_BLKID_vcea1_1                         = 0x3c,
+	DBG_CLIENT_BLKID_vcea1_2                         = 0x3d,
+	DBG_CLIENT_BLKID_vcea1_3                         = 0x3e,
+	DBG_CLIENT_BLKID_bci3                            = 0x3f,
+	DBG_CLIENT_BLKID_mcd0                            = 0x40,
+	DBG_CLIENT_BLKID_pa11                            = 0x41,
+	DBG_CLIENT_BLKID_pa01                            = 0x42,
+	DBG_CLIENT_BLKID_cb201                           = 0x43,
+	DBG_CLIENT_BLKID_spim2                           = 0x44,
+	DBG_CLIENT_BLKID_vgt2                            = 0x45,
+	DBG_CLIENT_BLKID_pc2                             = 0x46,
+	DBG_CLIENT_BLKID_smu_0                           = 0x47,
+	DBG_CLIENT_BLKID_smu_1                           = 0x48,
+	DBG_CLIENT_BLKID_smu_2                           = 0x49,
+	DBG_CLIENT_BLKID_cb1                             = 0x4a,
+	DBG_CLIENT_BLKID_ia0                             = 0x4b,
+	DBG_CLIENT_BLKID_wd                              = 0x4c,
+	DBG_CLIENT_BLKID_ia1                             = 0x4d,
+	DBG_CLIENT_BLKID_vcec1_0                         = 0x4e,
+	DBG_CLIENT_BLKID_scf0                            = 0x4f,
+	DBG_CLIENT_BLKID_vgt1                            = 0x50,
+	DBG_CLIENT_BLKID_pc1                             = 0x51,
+	DBG_CLIENT_BLKID_cb0                             = 0x52,
+	DBG_CLIENT_BLKID_gdc_one_0                       = 0x53,
+	DBG_CLIENT_BLKID_gdc_one_1                       = 0x54,
+	DBG_CLIENT_BLKID_gdc_one_2                       = 0x55,
+	DBG_CLIENT_BLKID_gdc_one_3                       = 0x56,
+	DBG_CLIENT_BLKID_gdc_one_4                       = 0x57,
+	DBG_CLIENT_BLKID_gdc_one_5                       = 0x58,
+	DBG_CLIENT_BLKID_gdc_one_6                       = 0x59,
+	DBG_CLIENT_BLKID_gdc_one_7                       = 0x5a,
+	DBG_CLIENT_BLKID_gdc_one_8                       = 0x5b,
+	DBG_CLIENT_BLKID_gdc_one_9                       = 0x5c,
+	DBG_CLIENT_BLKID_gdc_one_10                      = 0x5d,
+	DBG_CLIENT_BLKID_gdc_one_11                      = 0x5e,
+	DBG_CLIENT_BLKID_gdc_one_12                      = 0x5f,
+	DBG_CLIENT_BLKID_gdc_one_13                      = 0x60,
+	DBG_CLIENT_BLKID_gdc_one_14                      = 0x61,
+	DBG_CLIENT_BLKID_gdc_one_15                      = 0x62,
+	DBG_CLIENT_BLKID_gdc_one_16                      = 0x63,
+	DBG_CLIENT_BLKID_gdc_one_17                      = 0x64,
+	DBG_CLIENT_BLKID_gdc_one_18                      = 0x65,
+	DBG_CLIENT_BLKID_gdc_one_19                      = 0x66,
+	DBG_CLIENT_BLKID_gdc_one_20                      = 0x67,
+	DBG_CLIENT_BLKID_gdc_one_21                      = 0x68,
+	DBG_CLIENT_BLKID_gdc_one_22                      = 0x69,
+	DBG_CLIENT_BLKID_gdc_one_23                      = 0x6a,
+	DBG_CLIENT_BLKID_gdc_one_24                      = 0x6b,
+	DBG_CLIENT_BLKID_gdc_one_25                      = 0x6c,
+	DBG_CLIENT_BLKID_gdc_one_26                      = 0x6d,
+	DBG_CLIENT_BLKID_gdc_one_27                      = 0x6e,
+	DBG_CLIENT_BLKID_gdc_one_28                      = 0x6f,
+	DBG_CLIENT_BLKID_gdc_one_29                      = 0x70,
+	DBG_CLIENT_BLKID_gdc_one_30                      = 0x71,
+	DBG_CLIENT_BLKID_gdc_one_31                      = 0x72,
+	DBG_CLIENT_BLKID_gdc_one_32                      = 0x73,
+	DBG_CLIENT_BLKID_gdc_one_33                      = 0x74,
+	DBG_CLIENT_BLKID_gdc_one_34                      = 0x75,
+	DBG_CLIENT_BLKID_gdc_one_35                      = 0x76,
+	DBG_CLIENT_BLKID_vceb0_0                         = 0x77,
+	DBG_CLIENT_BLKID_vgt3                            = 0x78,
+	DBG_CLIENT_BLKID_pc3                             = 0x79,
+	DBG_CLIENT_BLKID_mcd3                            = 0x7a,
+	DBG_CLIENT_BLKID_uvdu_0                          = 0x7b,
+	DBG_CLIENT_BLKID_uvdu_1                          = 0x7c,
+	DBG_CLIENT_BLKID_uvdu_2                          = 0x7d,
+	DBG_CLIENT_BLKID_uvdu_3                          = 0x7e,
+	DBG_CLIENT_BLKID_uvdu_4                          = 0x7f,
+	DBG_CLIENT_BLKID_uvdu_5                          = 0x80,
+	DBG_CLIENT_BLKID_uvdu_6                          = 0x81,
+	DBG_CLIENT_BLKID_cb300                           = 0x82,
+	DBG_CLIENT_BLKID_mcd1                            = 0x83,
+	DBG_CLIENT_BLKID_sx00                            = 0x84,
+	DBG_CLIENT_BLKID_uvdf_0                          = 0x85,
+	DBG_CLIENT_BLKID_uvdf_1                          = 0x86,
+	DBG_CLIENT_BLKID_mcc3                            = 0x87,
+	DBG_CLIENT_BLKID_cpg_0                           = 0x88,
+	DBG_CLIENT_BLKID_cpg_1                           = 0x89,
+	DBG_CLIENT_BLKID_gck                             = 0x8a,
+	DBG_CLIENT_BLKID_mcc1                            = 0x8b,
+	DBG_CLIENT_BLKID_cpf_0                           = 0x8c,
+	DBG_CLIENT_BLKID_cpf_1                           = 0x8d,
+	DBG_CLIENT_BLKID_rlc                             = 0x8e,
+	DBG_CLIENT_BLKID_grbm                            = 0x8f,
+	DBG_CLIENT_BLKID_sammsp                          = 0x90,
+	DBG_CLIENT_BLKID_dci_pg                          = 0x91,
+	DBG_CLIENT_BLKID_dci_0                           = 0x92,
+	DBG_CLIENT_BLKID_dccg0_0                         = 0x93,
+	DBG_CLIENT_BLKID_dccg0_1                         = 0x94,
+	DBG_CLIENT_BLKID_dccg0_2                         = 0x95,
+	DBG_CLIENT_BLKID_dccg0_3                         = 0x96,
+	DBG_CLIENT_BLKID_dccg0_4                         = 0x97,
+	DBG_CLIENT_BLKID_dccg0_5                         = 0x98,
+	DBG_CLIENT_BLKID_dccg0_6                         = 0x99,
+	DBG_CLIENT_BLKID_dccg0_7                         = 0x9a,
+	DBG_CLIENT_BLKID_dccg0_8                         = 0x9b,
+	DBG_CLIENT_BLKID_dcfe01_0                        = 0x9c,
+	DBG_CLIENT_BLKID_dcfe02_0                        = 0x9d,
+	DBG_CLIENT_BLKID_dcfe03_0                        = 0x9e,
+	DBG_CLIENT_BLKID_dcfe04_0                        = 0x9f,
+	DBG_CLIENT_BLKID_dcfe05_0                        = 0xa0,
+	DBG_CLIENT_BLKID_dcfe06_0                        = 0xa1,
+	DBG_CLIENT_BLKID_uvde_0                          = 0xa2,
+	DBG_CLIENT_BLKID_RESERVED_LAST                   = 0xa3,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+	DBG_BLOCK_ID_RESERVED                            = 0x0,
+	DBG_BLOCK_ID_DBG                                 = 0x1,
+	DBG_BLOCK_ID_VMC                                 = 0x2,
+	DBG_BLOCK_ID_PDMA                                = 0x3,
+	DBG_BLOCK_ID_CG                                  = 0x4,
+	DBG_BLOCK_ID_SRBM                                = 0x5,
+	DBG_BLOCK_ID_GRBM                                = 0x6,
+	DBG_BLOCK_ID_RLC                                 = 0x7,
+	DBG_BLOCK_ID_CSC                                 = 0x8,
+	DBG_BLOCK_ID_SEM                                 = 0x9,
+	DBG_BLOCK_ID_IH                                  = 0xa,
+	DBG_BLOCK_ID_SC                                  = 0xb,
+	DBG_BLOCK_ID_SQ                                  = 0xc,
+	DBG_BLOCK_ID_AVP                                 = 0xd,
+	DBG_BLOCK_ID_GMCON                               = 0xe,
+	DBG_BLOCK_ID_SMU                                 = 0xf,
+	DBG_BLOCK_ID_DMA0                                = 0x10,
+	DBG_BLOCK_ID_DMA1                                = 0x11,
+	DBG_BLOCK_ID_SPIM                                = 0x12,
+	DBG_BLOCK_ID_GDS                                 = 0x13,
+	DBG_BLOCK_ID_SPIS                                = 0x14,
+	DBG_BLOCK_ID_UNUSED0                             = 0x15,
+	DBG_BLOCK_ID_PA0                                 = 0x16,
+	DBG_BLOCK_ID_PA1                                 = 0x17,
+	DBG_BLOCK_ID_CP0                                 = 0x18,
+	DBG_BLOCK_ID_CP1                                 = 0x19,
+	DBG_BLOCK_ID_CP2                                 = 0x1a,
+	DBG_BLOCK_ID_UNUSED1                             = 0x1b,
+	DBG_BLOCK_ID_UVDU                                = 0x1c,
+	DBG_BLOCK_ID_UVDM                                = 0x1d,
+	DBG_BLOCK_ID_VCE                                 = 0x1e,
+	DBG_BLOCK_ID_UNUSED2                             = 0x1f,
+	DBG_BLOCK_ID_VGT0                                = 0x20,
+	DBG_BLOCK_ID_VGT1                                = 0x21,
+	DBG_BLOCK_ID_IA                                  = 0x22,
+	DBG_BLOCK_ID_UNUSED3                             = 0x23,
+	DBG_BLOCK_ID_SCT0                                = 0x24,
+	DBG_BLOCK_ID_SCT1                                = 0x25,
+	DBG_BLOCK_ID_SPM0                                = 0x26,
+	DBG_BLOCK_ID_SPM1                                = 0x27,
+	DBG_BLOCK_ID_TCAA                                = 0x28,
+	DBG_BLOCK_ID_TCAB                                = 0x29,
+	DBG_BLOCK_ID_TCCA                                = 0x2a,
+	DBG_BLOCK_ID_TCCB                                = 0x2b,
+	DBG_BLOCK_ID_MCC0                                = 0x2c,
+	DBG_BLOCK_ID_MCC1                                = 0x2d,
+	DBG_BLOCK_ID_MCC2                                = 0x2e,
+	DBG_BLOCK_ID_MCC3                                = 0x2f,
+	DBG_BLOCK_ID_SX0                                 = 0x30,
+	DBG_BLOCK_ID_SX1                                 = 0x31,
+	DBG_BLOCK_ID_SX2                                 = 0x32,
+	DBG_BLOCK_ID_SX3                                 = 0x33,
+	DBG_BLOCK_ID_UNUSED4                             = 0x34,
+	DBG_BLOCK_ID_UNUSED5                             = 0x35,
+	DBG_BLOCK_ID_UNUSED6                             = 0x36,
+	DBG_BLOCK_ID_UNUSED7                             = 0x37,
+	DBG_BLOCK_ID_PC0                                 = 0x38,
+	DBG_BLOCK_ID_PC1                                 = 0x39,
+	DBG_BLOCK_ID_UNUSED8                             = 0x3a,
+	DBG_BLOCK_ID_UNUSED9                             = 0x3b,
+	DBG_BLOCK_ID_UNUSED10                            = 0x3c,
+	DBG_BLOCK_ID_UNUSED11                            = 0x3d,
+	DBG_BLOCK_ID_MCB                                 = 0x3e,
+	DBG_BLOCK_ID_UNUSED12                            = 0x3f,
+	DBG_BLOCK_ID_SCB0                                = 0x40,
+	DBG_BLOCK_ID_SCB1                                = 0x41,
+	DBG_BLOCK_ID_UNUSED13                            = 0x42,
+	DBG_BLOCK_ID_UNUSED14                            = 0x43,
+	DBG_BLOCK_ID_SCF0                                = 0x44,
+	DBG_BLOCK_ID_SCF1                                = 0x45,
+	DBG_BLOCK_ID_UNUSED15                            = 0x46,
+	DBG_BLOCK_ID_UNUSED16                            = 0x47,
+	DBG_BLOCK_ID_BCI0                                = 0x48,
+	DBG_BLOCK_ID_BCI1                                = 0x49,
+	DBG_BLOCK_ID_BCI2                                = 0x4a,
+	DBG_BLOCK_ID_BCI3                                = 0x4b,
+	DBG_BLOCK_ID_UNUSED17                            = 0x4c,
+	DBG_BLOCK_ID_UNUSED18                            = 0x4d,
+	DBG_BLOCK_ID_UNUSED19                            = 0x4e,
+	DBG_BLOCK_ID_UNUSED20                            = 0x4f,
+	DBG_BLOCK_ID_CB00                                = 0x50,
+	DBG_BLOCK_ID_CB01                                = 0x51,
+	DBG_BLOCK_ID_CB02                                = 0x52,
+	DBG_BLOCK_ID_CB03                                = 0x53,
+	DBG_BLOCK_ID_CB04                                = 0x54,
+	DBG_BLOCK_ID_UNUSED21                            = 0x55,
+	DBG_BLOCK_ID_UNUSED22                            = 0x56,
+	DBG_BLOCK_ID_UNUSED23                            = 0x57,
+	DBG_BLOCK_ID_CB10                                = 0x58,
+	DBG_BLOCK_ID_CB11                                = 0x59,
+	DBG_BLOCK_ID_CB12                                = 0x5a,
+	DBG_BLOCK_ID_CB13                                = 0x5b,
+	DBG_BLOCK_ID_CB14                                = 0x5c,
+	DBG_BLOCK_ID_UNUSED24                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED25                            = 0x5e,
+	DBG_BLOCK_ID_UNUSED26                            = 0x5f,
+	DBG_BLOCK_ID_TCP0                                = 0x60,
+	DBG_BLOCK_ID_TCP1                                = 0x61,
+	DBG_BLOCK_ID_TCP2                                = 0x62,
+	DBG_BLOCK_ID_TCP3                                = 0x63,
+	DBG_BLOCK_ID_TCP4                                = 0x64,
+	DBG_BLOCK_ID_TCP5                                = 0x65,
+	DBG_BLOCK_ID_TCP6                                = 0x66,
+	DBG_BLOCK_ID_TCP7                                = 0x67,
+	DBG_BLOCK_ID_TCP8                                = 0x68,
+	DBG_BLOCK_ID_TCP9                                = 0x69,
+	DBG_BLOCK_ID_TCP10                               = 0x6a,
+	DBG_BLOCK_ID_TCP11                               = 0x6b,
+	DBG_BLOCK_ID_TCP12                               = 0x6c,
+	DBG_BLOCK_ID_TCP13                               = 0x6d,
+	DBG_BLOCK_ID_TCP14                               = 0x6e,
+	DBG_BLOCK_ID_TCP15                               = 0x6f,
+	DBG_BLOCK_ID_TCP16                               = 0x70,
+	DBG_BLOCK_ID_TCP17                               = 0x71,
+	DBG_BLOCK_ID_TCP18                               = 0x72,
+	DBG_BLOCK_ID_TCP19                               = 0x73,
+	DBG_BLOCK_ID_TCP20                               = 0x74,
+	DBG_BLOCK_ID_TCP21                               = 0x75,
+	DBG_BLOCK_ID_TCP22                               = 0x76,
+	DBG_BLOCK_ID_TCP23                               = 0x77,
+	DBG_BLOCK_ID_TCP_RESERVED0                       = 0x78,
+	DBG_BLOCK_ID_TCP_RESERVED1                       = 0x79,
+	DBG_BLOCK_ID_TCP_RESERVED2                       = 0x7a,
+	DBG_BLOCK_ID_TCP_RESERVED3                       = 0x7b,
+	DBG_BLOCK_ID_TCP_RESERVED4                       = 0x7c,
+	DBG_BLOCK_ID_TCP_RESERVED5                       = 0x7d,
+	DBG_BLOCK_ID_TCP_RESERVED6                       = 0x7e,
+	DBG_BLOCK_ID_TCP_RESERVED7                       = 0x7f,
+	DBG_BLOCK_ID_DB00                                = 0x80,
+	DBG_BLOCK_ID_DB01                                = 0x81,
+	DBG_BLOCK_ID_DB02                                = 0x82,
+	DBG_BLOCK_ID_DB03                                = 0x83,
+	DBG_BLOCK_ID_DB04                                = 0x84,
+	DBG_BLOCK_ID_UNUSED27                            = 0x85,
+	DBG_BLOCK_ID_UNUSED28                            = 0x86,
+	DBG_BLOCK_ID_UNUSED29                            = 0x87,
+	DBG_BLOCK_ID_DB10                                = 0x88,
+	DBG_BLOCK_ID_DB11                                = 0x89,
+	DBG_BLOCK_ID_DB12                                = 0x8a,
+	DBG_BLOCK_ID_DB13                                = 0x8b,
+	DBG_BLOCK_ID_DB14                                = 0x8c,
+	DBG_BLOCK_ID_UNUSED30                            = 0x8d,
+	DBG_BLOCK_ID_UNUSED31                            = 0x8e,
+	DBG_BLOCK_ID_UNUSED32                            = 0x8f,
+	DBG_BLOCK_ID_TCC0                                = 0x90,
+	DBG_BLOCK_ID_TCC1                                = 0x91,
+	DBG_BLOCK_ID_TCC2                                = 0x92,
+	DBG_BLOCK_ID_TCC3                                = 0x93,
+	DBG_BLOCK_ID_TCC4                                = 0x94,
+	DBG_BLOCK_ID_TCC5                                = 0x95,
+	DBG_BLOCK_ID_TCC6                                = 0x96,
+	DBG_BLOCK_ID_TCC7                                = 0x97,
+	DBG_BLOCK_ID_SPS00                               = 0x98,
+	DBG_BLOCK_ID_SPS01                               = 0x99,
+	DBG_BLOCK_ID_SPS02                               = 0x9a,
+	DBG_BLOCK_ID_SPS10                               = 0x9b,
+	DBG_BLOCK_ID_SPS11                               = 0x9c,
+	DBG_BLOCK_ID_SPS12                               = 0x9d,
+	DBG_BLOCK_ID_UNUSED33                            = 0x9e,
+	DBG_BLOCK_ID_UNUSED34                            = 0x9f,
+	DBG_BLOCK_ID_TA00                                = 0xa0,
+	DBG_BLOCK_ID_TA01                                = 0xa1,
+	DBG_BLOCK_ID_TA02                                = 0xa2,
+	DBG_BLOCK_ID_TA03                                = 0xa3,
+	DBG_BLOCK_ID_TA04                                = 0xa4,
+	DBG_BLOCK_ID_TA05                                = 0xa5,
+	DBG_BLOCK_ID_TA06                                = 0xa6,
+	DBG_BLOCK_ID_TA07                                = 0xa7,
+	DBG_BLOCK_ID_TA08                                = 0xa8,
+	DBG_BLOCK_ID_TA09                                = 0xa9,
+	DBG_BLOCK_ID_TA0A                                = 0xaa,
+	DBG_BLOCK_ID_TA0B                                = 0xab,
+	DBG_BLOCK_ID_UNUSED35                            = 0xac,
+	DBG_BLOCK_ID_UNUSED36                            = 0xad,
+	DBG_BLOCK_ID_UNUSED37                            = 0xae,
+	DBG_BLOCK_ID_UNUSED38                            = 0xaf,
+	DBG_BLOCK_ID_TA10                                = 0xb0,
+	DBG_BLOCK_ID_TA11                                = 0xb1,
+	DBG_BLOCK_ID_TA12                                = 0xb2,
+	DBG_BLOCK_ID_TA13                                = 0xb3,
+	DBG_BLOCK_ID_TA14                                = 0xb4,
+	DBG_BLOCK_ID_TA15                                = 0xb5,
+	DBG_BLOCK_ID_TA16                                = 0xb6,
+	DBG_BLOCK_ID_TA17                                = 0xb7,
+	DBG_BLOCK_ID_TA18                                = 0xb8,
+	DBG_BLOCK_ID_TA19                                = 0xb9,
+	DBG_BLOCK_ID_TA1A                                = 0xba,
+	DBG_BLOCK_ID_TA1B                                = 0xbb,
+	DBG_BLOCK_ID_UNUSED39                            = 0xbc,
+	DBG_BLOCK_ID_UNUSED40                            = 0xbd,
+	DBG_BLOCK_ID_UNUSED41                            = 0xbe,
+	DBG_BLOCK_ID_UNUSED42                            = 0xbf,
+	DBG_BLOCK_ID_TD00                                = 0xc0,
+	DBG_BLOCK_ID_TD01                                = 0xc1,
+	DBG_BLOCK_ID_TD02                                = 0xc2,
+	DBG_BLOCK_ID_TD03                                = 0xc3,
+	DBG_BLOCK_ID_TD04                                = 0xc4,
+	DBG_BLOCK_ID_TD05                                = 0xc5,
+	DBG_BLOCK_ID_TD06                                = 0xc6,
+	DBG_BLOCK_ID_TD07                                = 0xc7,
+	DBG_BLOCK_ID_TD08                                = 0xc8,
+	DBG_BLOCK_ID_TD09                                = 0xc9,
+	DBG_BLOCK_ID_TD0A                                = 0xca,
+	DBG_BLOCK_ID_TD0B                                = 0xcb,
+	DBG_BLOCK_ID_UNUSED43                            = 0xcc,
+	DBG_BLOCK_ID_UNUSED44                            = 0xcd,
+	DBG_BLOCK_ID_UNUSED45                            = 0xce,
+	DBG_BLOCK_ID_UNUSED46                            = 0xcf,
+	DBG_BLOCK_ID_TD10                                = 0xd0,
+	DBG_BLOCK_ID_TD11                                = 0xd1,
+	DBG_BLOCK_ID_TD12                                = 0xd2,
+	DBG_BLOCK_ID_TD13                                = 0xd3,
+	DBG_BLOCK_ID_TD14                                = 0xd4,
+	DBG_BLOCK_ID_TD15                                = 0xd5,
+	DBG_BLOCK_ID_TD16                                = 0xd6,
+	DBG_BLOCK_ID_TD17                                = 0xd7,
+	DBG_BLOCK_ID_TD18                                = 0xd8,
+	DBG_BLOCK_ID_TD19                                = 0xd9,
+	DBG_BLOCK_ID_TD1A                                = 0xda,
+	DBG_BLOCK_ID_TD1B                                = 0xdb,
+	DBG_BLOCK_ID_UNUSED47                            = 0xdc,
+	DBG_BLOCK_ID_UNUSED48                            = 0xdd,
+	DBG_BLOCK_ID_UNUSED49                            = 0xde,
+	DBG_BLOCK_ID_UNUSED50                            = 0xdf,
+	DBG_BLOCK_ID_MCD0                                = 0xe0,
+	DBG_BLOCK_ID_MCD1                                = 0xe1,
+	DBG_BLOCK_ID_MCD2                                = 0xe2,
+	DBG_BLOCK_ID_MCD3                                = 0xe3,
+	DBG_BLOCK_ID_MCD4                                = 0xe4,
+	DBG_BLOCK_ID_MCD5                                = 0xe5,
+	DBG_BLOCK_ID_UNUSED51                            = 0xe6,
+	DBG_BLOCK_ID_UNUSED52                            = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+	DBG_BLOCK_ID_RESERVED_BY2                        = 0x0,
+	DBG_BLOCK_ID_VMC_BY2                             = 0x1,
+	DBG_BLOCK_ID_CG_BY2                              = 0x2,
+	DBG_BLOCK_ID_GRBM_BY2                            = 0x3,
+	DBG_BLOCK_ID_CSC_BY2                             = 0x4,
+	DBG_BLOCK_ID_IH_BY2                              = 0x5,
+	DBG_BLOCK_ID_SQ_BY2                              = 0x6,
+	DBG_BLOCK_ID_GMCON_BY2                           = 0x7,
+	DBG_BLOCK_ID_DMA0_BY2                            = 0x8,
+	DBG_BLOCK_ID_SPIM_BY2                            = 0x9,
+	DBG_BLOCK_ID_SPIS_BY2                            = 0xa,
+	DBG_BLOCK_ID_PA0_BY2                             = 0xb,
+	DBG_BLOCK_ID_CP0_BY2                             = 0xc,
+	DBG_BLOCK_ID_CP2_BY2                             = 0xd,
+	DBG_BLOCK_ID_UVDU_BY2                            = 0xe,
+	DBG_BLOCK_ID_VCE_BY2                             = 0xf,
+	DBG_BLOCK_ID_VGT0_BY2                            = 0x10,
+	DBG_BLOCK_ID_IA_BY2                              = 0x11,
+	DBG_BLOCK_ID_SCT0_BY2                            = 0x12,
+	DBG_BLOCK_ID_SPM0_BY2                            = 0x13,
+	DBG_BLOCK_ID_TCAA_BY2                            = 0x14,
+	DBG_BLOCK_ID_TCCA_BY2                            = 0x15,
+	DBG_BLOCK_ID_MCC0_BY2                            = 0x16,
+	DBG_BLOCK_ID_MCC2_BY2                            = 0x17,
+	DBG_BLOCK_ID_SX0_BY2                             = 0x18,
+	DBG_BLOCK_ID_SX2_BY2                             = 0x19,
+	DBG_BLOCK_ID_UNUSED4_BY2                         = 0x1a,
+	DBG_BLOCK_ID_UNUSED6_BY2                         = 0x1b,
+	DBG_BLOCK_ID_PC0_BY2                             = 0x1c,
+	DBG_BLOCK_ID_UNUSED8_BY2                         = 0x1d,
+	DBG_BLOCK_ID_UNUSED10_BY2                        = 0x1e,
+	DBG_BLOCK_ID_MCB_BY2                             = 0x1f,
+	DBG_BLOCK_ID_SCB0_BY2                            = 0x20,
+	DBG_BLOCK_ID_UNUSED13_BY2                        = 0x21,
+	DBG_BLOCK_ID_SCF0_BY2                            = 0x22,
+	DBG_BLOCK_ID_UNUSED15_BY2                        = 0x23,
+	DBG_BLOCK_ID_BCI0_BY2                            = 0x24,
+	DBG_BLOCK_ID_BCI2_BY2                            = 0x25,
+	DBG_BLOCK_ID_UNUSED17_BY2                        = 0x26,
+	DBG_BLOCK_ID_UNUSED19_BY2                        = 0x27,
+	DBG_BLOCK_ID_CB00_BY2                            = 0x28,
+	DBG_BLOCK_ID_CB02_BY2                            = 0x29,
+	DBG_BLOCK_ID_CB04_BY2                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED22_BY2                        = 0x2b,
+	DBG_BLOCK_ID_CB10_BY2                            = 0x2c,
+	DBG_BLOCK_ID_CB12_BY2                            = 0x2d,
+	DBG_BLOCK_ID_CB14_BY2                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED25_BY2                        = 0x2f,
+	DBG_BLOCK_ID_TCP0_BY2                            = 0x30,
+	DBG_BLOCK_ID_TCP2_BY2                            = 0x31,
+	DBG_BLOCK_ID_TCP4_BY2                            = 0x32,
+	DBG_BLOCK_ID_TCP6_BY2                            = 0x33,
+	DBG_BLOCK_ID_TCP8_BY2                            = 0x34,
+	DBG_BLOCK_ID_TCP10_BY2                           = 0x35,
+	DBG_BLOCK_ID_TCP12_BY2                           = 0x36,
+	DBG_BLOCK_ID_TCP14_BY2                           = 0x37,
+	DBG_BLOCK_ID_TCP16_BY2                           = 0x38,
+	DBG_BLOCK_ID_TCP18_BY2                           = 0x39,
+	DBG_BLOCK_ID_TCP20_BY2                           = 0x3a,
+	DBG_BLOCK_ID_TCP22_BY2                           = 0x3b,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY2                   = 0x3c,
+	DBG_BLOCK_ID_TCP_RESERVED2_BY2                   = 0x3d,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY2                   = 0x3e,
+	DBG_BLOCK_ID_TCP_RESERVED6_BY2                   = 0x3f,
+	DBG_BLOCK_ID_DB00_BY2                            = 0x40,
+	DBG_BLOCK_ID_DB02_BY2                            = 0x41,
+	DBG_BLOCK_ID_DB04_BY2                            = 0x42,
+	DBG_BLOCK_ID_UNUSED28_BY2                        = 0x43,
+	DBG_BLOCK_ID_DB10_BY2                            = 0x44,
+	DBG_BLOCK_ID_DB12_BY2                            = 0x45,
+	DBG_BLOCK_ID_DB14_BY2                            = 0x46,
+	DBG_BLOCK_ID_UNUSED31_BY2                        = 0x47,
+	DBG_BLOCK_ID_TCC0_BY2                            = 0x48,
+	DBG_BLOCK_ID_TCC2_BY2                            = 0x49,
+	DBG_BLOCK_ID_TCC4_BY2                            = 0x4a,
+	DBG_BLOCK_ID_TCC6_BY2                            = 0x4b,
+	DBG_BLOCK_ID_SPS00_BY2                           = 0x4c,
+	DBG_BLOCK_ID_SPS02_BY2                           = 0x4d,
+	DBG_BLOCK_ID_SPS11_BY2                           = 0x4e,
+	DBG_BLOCK_ID_UNUSED33_BY2                        = 0x4f,
+	DBG_BLOCK_ID_TA00_BY2                            = 0x50,
+	DBG_BLOCK_ID_TA02_BY2                            = 0x51,
+	DBG_BLOCK_ID_TA04_BY2                            = 0x52,
+	DBG_BLOCK_ID_TA06_BY2                            = 0x53,
+	DBG_BLOCK_ID_TA08_BY2                            = 0x54,
+	DBG_BLOCK_ID_TA0A_BY2                            = 0x55,
+	DBG_BLOCK_ID_UNUSED35_BY2                        = 0x56,
+	DBG_BLOCK_ID_UNUSED37_BY2                        = 0x57,
+	DBG_BLOCK_ID_TA10_BY2                            = 0x58,
+	DBG_BLOCK_ID_TA12_BY2                            = 0x59,
+	DBG_BLOCK_ID_TA14_BY2                            = 0x5a,
+	DBG_BLOCK_ID_TA16_BY2                            = 0x5b,
+	DBG_BLOCK_ID_TA18_BY2                            = 0x5c,
+	DBG_BLOCK_ID_TA1A_BY2                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED39_BY2                        = 0x5e,
+	DBG_BLOCK_ID_UNUSED41_BY2                        = 0x5f,
+	DBG_BLOCK_ID_TD00_BY2                            = 0x60,
+	DBG_BLOCK_ID_TD02_BY2                            = 0x61,
+	DBG_BLOCK_ID_TD04_BY2                            = 0x62,
+	DBG_BLOCK_ID_TD06_BY2                            = 0x63,
+	DBG_BLOCK_ID_TD08_BY2                            = 0x64,
+	DBG_BLOCK_ID_TD0A_BY2                            = 0x65,
+	DBG_BLOCK_ID_UNUSED43_BY2                        = 0x66,
+	DBG_BLOCK_ID_UNUSED45_BY2                        = 0x67,
+	DBG_BLOCK_ID_TD10_BY2                            = 0x68,
+	DBG_BLOCK_ID_TD12_BY2                            = 0x69,
+	DBG_BLOCK_ID_TD14_BY2                            = 0x6a,
+	DBG_BLOCK_ID_TD16_BY2                            = 0x6b,
+	DBG_BLOCK_ID_TD18_BY2                            = 0x6c,
+	DBG_BLOCK_ID_TD1A_BY2                            = 0x6d,
+	DBG_BLOCK_ID_UNUSED47_BY2                        = 0x6e,
+	DBG_BLOCK_ID_UNUSED49_BY2                        = 0x6f,
+	DBG_BLOCK_ID_MCD0_BY2                            = 0x70,
+	DBG_BLOCK_ID_MCD2_BY2                            = 0x71,
+	DBG_BLOCK_ID_MCD4_BY2                            = 0x72,
+	DBG_BLOCK_ID_UNUSED51_BY2                        = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+	DBG_BLOCK_ID_RESERVED_BY4                        = 0x0,
+	DBG_BLOCK_ID_CG_BY4                              = 0x1,
+	DBG_BLOCK_ID_CSC_BY4                             = 0x2,
+	DBG_BLOCK_ID_SQ_BY4                              = 0x3,
+	DBG_BLOCK_ID_DMA0_BY4                            = 0x4,
+	DBG_BLOCK_ID_SPIS_BY4                            = 0x5,
+	DBG_BLOCK_ID_CP0_BY4                             = 0x6,
+	DBG_BLOCK_ID_UVDU_BY4                            = 0x7,
+	DBG_BLOCK_ID_VGT0_BY4                            = 0x8,
+	DBG_BLOCK_ID_SCT0_BY4                            = 0x9,
+	DBG_BLOCK_ID_TCAA_BY4                            = 0xa,
+	DBG_BLOCK_ID_MCC0_BY4                            = 0xb,
+	DBG_BLOCK_ID_SX0_BY4                             = 0xc,
+	DBG_BLOCK_ID_UNUSED4_BY4                         = 0xd,
+	DBG_BLOCK_ID_PC0_BY4                             = 0xe,
+	DBG_BLOCK_ID_UNUSED10_BY4                        = 0xf,
+	DBG_BLOCK_ID_SCB0_BY4                            = 0x10,
+	DBG_BLOCK_ID_SCF0_BY4                            = 0x11,
+	DBG_BLOCK_ID_BCI0_BY4                            = 0x12,
+	DBG_BLOCK_ID_UNUSED17_BY4                        = 0x13,
+	DBG_BLOCK_ID_CB00_BY4                            = 0x14,
+	DBG_BLOCK_ID_CB04_BY4                            = 0x15,
+	DBG_BLOCK_ID_CB10_BY4                            = 0x16,
+	DBG_BLOCK_ID_CB14_BY4                            = 0x17,
+	DBG_BLOCK_ID_TCP0_BY4                            = 0x18,
+	DBG_BLOCK_ID_TCP4_BY4                            = 0x19,
+	DBG_BLOCK_ID_TCP8_BY4                            = 0x1a,
+	DBG_BLOCK_ID_TCP12_BY4                           = 0x1b,
+	DBG_BLOCK_ID_TCP16_BY4                           = 0x1c,
+	DBG_BLOCK_ID_TCP20_BY4                           = 0x1d,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY4                   = 0x1e,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY4                   = 0x1f,
+	DBG_BLOCK_ID_DB_BY4                              = 0x20,
+	DBG_BLOCK_ID_DB04_BY4                            = 0x21,
+	DBG_BLOCK_ID_DB10_BY4                            = 0x22,
+	DBG_BLOCK_ID_DB14_BY4                            = 0x23,
+	DBG_BLOCK_ID_TCC0_BY4                            = 0x24,
+	DBG_BLOCK_ID_TCC4_BY4                            = 0x25,
+	DBG_BLOCK_ID_SPS00_BY4                           = 0x26,
+	DBG_BLOCK_ID_SPS11_BY4                           = 0x27,
+	DBG_BLOCK_ID_TA00_BY4                            = 0x28,
+	DBG_BLOCK_ID_TA04_BY4                            = 0x29,
+	DBG_BLOCK_ID_TA08_BY4                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED35_BY4                        = 0x2b,
+	DBG_BLOCK_ID_TA10_BY4                            = 0x2c,
+	DBG_BLOCK_ID_TA14_BY4                            = 0x2d,
+	DBG_BLOCK_ID_TA18_BY4                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED39_BY4                        = 0x2f,
+	DBG_BLOCK_ID_TD00_BY4                            = 0x30,
+	DBG_BLOCK_ID_TD04_BY4                            = 0x31,
+	DBG_BLOCK_ID_TD08_BY4                            = 0x32,
+	DBG_BLOCK_ID_UNUSED43_BY4                        = 0x33,
+	DBG_BLOCK_ID_TD10_BY4                            = 0x34,
+	DBG_BLOCK_ID_TD14_BY4                            = 0x35,
+	DBG_BLOCK_ID_TD18_BY4                            = 0x36,
+	DBG_BLOCK_ID_UNUSED47_BY4                        = 0x37,
+	DBG_BLOCK_ID_MCD0_BY4                            = 0x38,
+	DBG_BLOCK_ID_MCD4_BY4                            = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+	DBG_BLOCK_ID_RESERVED_BY8                        = 0x0,
+	DBG_BLOCK_ID_CSC_BY8                             = 0x1,
+	DBG_BLOCK_ID_DMA0_BY8                            = 0x2,
+	DBG_BLOCK_ID_CP0_BY8                             = 0x3,
+	DBG_BLOCK_ID_VGT0_BY8                            = 0x4,
+	DBG_BLOCK_ID_TCAA_BY8                            = 0x5,
+	DBG_BLOCK_ID_SX0_BY8                             = 0x6,
+	DBG_BLOCK_ID_PC0_BY8                             = 0x7,
+	DBG_BLOCK_ID_SCB0_BY8                            = 0x8,
+	DBG_BLOCK_ID_BCI0_BY8                            = 0x9,
+	DBG_BLOCK_ID_CB00_BY8                            = 0xa,
+	DBG_BLOCK_ID_CB10_BY8                            = 0xb,
+	DBG_BLOCK_ID_TCP0_BY8                            = 0xc,
+	DBG_BLOCK_ID_TCP8_BY8                            = 0xd,
+	DBG_BLOCK_ID_TCP16_BY8                           = 0xe,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY8                   = 0xf,
+	DBG_BLOCK_ID_DB00_BY8                            = 0x10,
+	DBG_BLOCK_ID_DB10_BY8                            = 0x11,
+	DBG_BLOCK_ID_TCC0_BY8                            = 0x12,
+	DBG_BLOCK_ID_SPS00_BY8                           = 0x13,
+	DBG_BLOCK_ID_TA00_BY8                            = 0x14,
+	DBG_BLOCK_ID_TA08_BY8                            = 0x15,
+	DBG_BLOCK_ID_TA10_BY8                            = 0x16,
+	DBG_BLOCK_ID_TA18_BY8                            = 0x17,
+	DBG_BLOCK_ID_TD00_BY8                            = 0x18,
+	DBG_BLOCK_ID_TD08_BY8                            = 0x19,
+	DBG_BLOCK_ID_TD10_BY8                            = 0x1a,
+	DBG_BLOCK_ID_TD18_BY8                            = 0x1b,
+	DBG_BLOCK_ID_MCD0_BY8                            = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+	DBG_BLOCK_ID_RESERVED_BY16                       = 0x0,
+	DBG_BLOCK_ID_DMA0_BY16                           = 0x1,
+	DBG_BLOCK_ID_VGT0_BY16                           = 0x2,
+	DBG_BLOCK_ID_SX0_BY16                            = 0x3,
+	DBG_BLOCK_ID_SCB0_BY16                           = 0x4,
+	DBG_BLOCK_ID_CB00_BY16                           = 0x5,
+	DBG_BLOCK_ID_TCP0_BY16                           = 0x6,
+	DBG_BLOCK_ID_TCP16_BY16                          = 0x7,
+	DBG_BLOCK_ID_DB00_BY16                           = 0x8,
+	DBG_BLOCK_ID_TCC0_BY16                           = 0x9,
+	DBG_BLOCK_ID_TA00_BY16                           = 0xa,
+	DBG_BLOCK_ID_TA10_BY16                           = 0xb,
+	DBG_BLOCK_ID_TD00_BY16                           = 0xc,
+	DBG_BLOCK_ID_TD10_BY16                           = 0xd,
+	DBG_BLOCK_ID_MCD0_BY16                           = 0xe,
+} DebugBlockId_BY16;
+typedef enum ColorTransform {
+	DCC_CT_AUTO                                      = 0x0,
+	DCC_CT_NONE                                      = 0x1,
+	ABGR_TO_A_BG_G_RB                                = 0x2,
+	BGRA_TO_BG_G_RB_A                                = 0x3,
+} ColorTransform;
+typedef enum CompareRef {
+	REF_NEVER                                        = 0x0,
+	REF_LESS                                         = 0x1,
+	REF_EQUAL                                        = 0x2,
+	REF_LEQUAL                                       = 0x3,
+	REF_GREATER                                      = 0x4,
+	REF_NOTEQUAL                                     = 0x5,
+	REF_GEQUAL                                       = 0x6,
+	REF_ALWAYS                                       = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+	READ_256_BITS                                    = 0x0,
+	READ_512_BITS                                    = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+	DEPTH_INVALID                                    = 0x0,
+	DEPTH_16                                         = 0x1,
+	DEPTH_X8_24                                      = 0x2,
+	DEPTH_8_24                                       = 0x3,
+	DEPTH_X8_24_FLOAT                                = 0x4,
+	DEPTH_8_24_FLOAT                                 = 0x5,
+	DEPTH_32_FLOAT                                   = 0x6,
+	DEPTH_X24_8_32_FLOAT                             = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+	Z_INVALID                                        = 0x0,
+	Z_16                                             = 0x1,
+	Z_24                                             = 0x2,
+	Z_32_FLOAT                                       = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+	STENCIL_INVALID                                  = 0x0,
+	STENCIL_8                                        = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+	CMASK_CLEAR_NONE                                 = 0x0,
+	CMASK_CLEAR_ONE                                  = 0x1,
+	CMASK_CLEAR_ALL                                  = 0x2,
+	CMASK_ANY_EXPANDED                               = 0x3,
+	CMASK_ALPHA0_FRAG1                               = 0x4,
+	CMASK_ALPHA0_FRAG2                               = 0x5,
+	CMASK_ALPHA0_FRAG4                               = 0x6,
+	CMASK_ALPHA0_FRAGS                               = 0x7,
+	CMASK_ALPHA1_FRAG1                               = 0x8,
+	CMASK_ALPHA1_FRAG2                               = 0x9,
+	CMASK_ALPHA1_FRAG4                               = 0xa,
+	CMASK_ALPHA1_FRAGS                               = 0xb,
+	CMASK_ALPHAX_FRAG1                               = 0xc,
+	CMASK_ALPHAX_FRAG2                               = 0xd,
+	CMASK_ALPHAX_FRAG4                               = 0xe,
+	CMASK_ALPHAX_FRAGS                               = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+	EXPORT_UNUSED                                    = 0x0,
+	EXPORT_32_R                                      = 0x1,
+	EXPORT_32_GR                                     = 0x2,
+	EXPORT_32_AR                                     = 0x3,
+	EXPORT_FP16_ABGR                                 = 0x4,
+	EXPORT_UNSIGNED16_ABGR                           = 0x5,
+	EXPORT_SIGNED16_ABGR                             = 0x6,
+	EXPORT_32_ABGR                                   = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+	EXPORT_4P_32BPC_ABGR                             = 0x0,
+	EXPORT_4P_16BPC_ABGR                             = 0x1,
+	EXPORT_4P_32BPC_GR                               = 0x2,
+	EXPORT_4P_32BPC_AR                               = 0x3,
+	EXPORT_2P_32BPC_ABGR                             = 0x4,
+	EXPORT_8P_32BPC_R                                = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+	COLOR_INVALID                                    = 0x0,
+	COLOR_8                                          = 0x1,
+	COLOR_16                                         = 0x2,
+	COLOR_8_8                                        = 0x3,
+	COLOR_32                                         = 0x4,
+	COLOR_16_16                                      = 0x5,
+	COLOR_10_11_11                                   = 0x6,
+	COLOR_11_11_10                                   = 0x7,
+	COLOR_10_10_10_2                                 = 0x8,
+	COLOR_2_10_10_10                                 = 0x9,
+	COLOR_8_8_8_8                                    = 0xa,
+	COLOR_32_32                                      = 0xb,
+	COLOR_16_16_16_16                                = 0xc,
+	COLOR_RESERVED_13                                = 0xd,
+	COLOR_32_32_32_32                                = 0xe,
+	COLOR_RESERVED_15                                = 0xf,
+	COLOR_5_6_5                                      = 0x10,
+	COLOR_1_5_5_5                                    = 0x11,
+	COLOR_5_5_5_1                                    = 0x12,
+	COLOR_4_4_4_4                                    = 0x13,
+	COLOR_8_24                                       = 0x14,
+	COLOR_24_8                                       = 0x15,
+	COLOR_X24_8_32_FLOAT                             = 0x16,
+	COLOR_RESERVED_23                                = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+	FMT_INVALID                                      = 0x0,
+	FMT_8                                            = 0x1,
+	FMT_16                                           = 0x2,
+	FMT_8_8                                          = 0x3,
+	FMT_32                                           = 0x4,
+	FMT_16_16                                        = 0x5,
+	FMT_10_11_11                                     = 0x6,
+	FMT_11_11_10                                     = 0x7,
+	FMT_10_10_10_2                                   = 0x8,
+	FMT_2_10_10_10                                   = 0x9,
+	FMT_8_8_8_8                                      = 0xa,
+	FMT_32_32                                        = 0xb,
+	FMT_16_16_16_16                                  = 0xc,
+	FMT_32_32_32                                     = 0xd,
+	FMT_32_32_32_32                                  = 0xe,
+	FMT_RESERVED_4                                   = 0xf,
+	FMT_5_6_5                                        = 0x10,
+	FMT_1_5_5_5                                      = 0x11,
+	FMT_5_5_5_1                                      = 0x12,
+	FMT_4_4_4_4                                      = 0x13,
+	FMT_8_24                                         = 0x14,
+	FMT_24_8                                         = 0x15,
+	FMT_X24_8_32_FLOAT                               = 0x16,
+	FMT_RESERVED_33                                  = 0x17,
+	FMT_11_11_10_FLOAT                               = 0x18,
+	FMT_16_FLOAT                                     = 0x19,
+	FMT_32_FLOAT                                     = 0x1a,
+	FMT_16_16_FLOAT                                  = 0x1b,
+	FMT_8_24_FLOAT                                   = 0x1c,
+	FMT_24_8_FLOAT                                   = 0x1d,
+	FMT_32_32_FLOAT                                  = 0x1e,
+	FMT_10_11_11_FLOAT                               = 0x1f,
+	FMT_16_16_16_16_FLOAT                            = 0x20,
+	FMT_3_3_2                                        = 0x21,
+	FMT_6_5_5                                        = 0x22,
+	FMT_32_32_32_32_FLOAT                            = 0x23,
+	FMT_RESERVED_36                                  = 0x24,
+	FMT_1                                            = 0x25,
+	FMT_1_REVERSED                                   = 0x26,
+	FMT_GB_GR                                        = 0x27,
+	FMT_BG_RG                                        = 0x28,
+	FMT_32_AS_8                                      = 0x29,
+	FMT_32_AS_8_8                                    = 0x2a,
+	FMT_5_9_9_9_SHAREDEXP                            = 0x2b,
+	FMT_8_8_8                                        = 0x2c,
+	FMT_16_16_16                                     = 0x2d,
+	FMT_16_16_16_FLOAT                               = 0x2e,
+	FMT_4_4                                          = 0x2f,
+	FMT_32_32_32_FLOAT                               = 0x30,
+	FMT_BC1                                          = 0x31,
+	FMT_BC2                                          = 0x32,
+	FMT_BC3                                          = 0x33,
+	FMT_BC4                                          = 0x34,
+	FMT_BC5                                          = 0x35,
+	FMT_BC6                                          = 0x36,
+	FMT_BC7                                          = 0x37,
+	FMT_32_AS_32_32_32_32                            = 0x38,
+	FMT_APC3                                         = 0x39,
+	FMT_APC4                                         = 0x3a,
+	FMT_APC5                                         = 0x3b,
+	FMT_APC6                                         = 0x3c,
+	FMT_APC7                                         = 0x3d,
+	FMT_CTX1                                         = 0x3e,
+	FMT_RESERVED_63                                  = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+	BUF_DATA_FORMAT_INVALID                          = 0x0,
+	BUF_DATA_FORMAT_8                                = 0x1,
+	BUF_DATA_FORMAT_16                               = 0x2,
+	BUF_DATA_FORMAT_8_8                              = 0x3,
+	BUF_DATA_FORMAT_32                               = 0x4,
+	BUF_DATA_FORMAT_16_16                            = 0x5,
+	BUF_DATA_FORMAT_10_11_11                         = 0x6,
+	BUF_DATA_FORMAT_11_11_10                         = 0x7,
+	BUF_DATA_FORMAT_10_10_10_2                       = 0x8,
+	BUF_DATA_FORMAT_2_10_10_10                       = 0x9,
+	BUF_DATA_FORMAT_8_8_8_8                          = 0xa,
+	BUF_DATA_FORMAT_32_32                            = 0xb,
+	BUF_DATA_FORMAT_16_16_16_16                      = 0xc,
+	BUF_DATA_FORMAT_32_32_32                         = 0xd,
+	BUF_DATA_FORMAT_32_32_32_32                      = 0xe,
+	BUF_DATA_FORMAT_RESERVED_15                      = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+	IMG_DATA_FORMAT_INVALID                          = 0x0,
+	IMG_DATA_FORMAT_8                                = 0x1,
+	IMG_DATA_FORMAT_16                               = 0x2,
+	IMG_DATA_FORMAT_8_8                              = 0x3,
+	IMG_DATA_FORMAT_32                               = 0x4,
+	IMG_DATA_FORMAT_16_16                            = 0x5,
+	IMG_DATA_FORMAT_10_11_11                         = 0x6,
+	IMG_DATA_FORMAT_11_11_10                         = 0x7,
+	IMG_DATA_FORMAT_10_10_10_2                       = 0x8,
+	IMG_DATA_FORMAT_2_10_10_10                       = 0x9,
+	IMG_DATA_FORMAT_8_8_8_8                          = 0xa,
+	IMG_DATA_FORMAT_32_32                            = 0xb,
+	IMG_DATA_FORMAT_16_16_16_16                      = 0xc,
+	IMG_DATA_FORMAT_32_32_32                         = 0xd,
+	IMG_DATA_FORMAT_32_32_32_32                      = 0xe,
+	IMG_DATA_FORMAT_RESERVED_15                      = 0xf,
+	IMG_DATA_FORMAT_5_6_5                            = 0x10,
+	IMG_DATA_FORMAT_1_5_5_5                          = 0x11,
+	IMG_DATA_FORMAT_5_5_5_1                          = 0x12,
+	IMG_DATA_FORMAT_4_4_4_4                          = 0x13,
+	IMG_DATA_FORMAT_8_24                             = 0x14,
+	IMG_DATA_FORMAT_24_8                             = 0x15,
+	IMG_DATA_FORMAT_X24_8_32                         = 0x16,
+	IMG_DATA_FORMAT_RESERVED_23                      = 0x17,
+	IMG_DATA_FORMAT_RESERVED_24                      = 0x18,
+	IMG_DATA_FORMAT_RESERVED_25                      = 0x19,
+	IMG_DATA_FORMAT_RESERVED_26                      = 0x1a,
+	IMG_DATA_FORMAT_RESERVED_27                      = 0x1b,
+	IMG_DATA_FORMAT_RESERVED_28                      = 0x1c,
+	IMG_DATA_FORMAT_RESERVED_29                      = 0x1d,
+	IMG_DATA_FORMAT_RESERVED_30                      = 0x1e,
+	IMG_DATA_FORMAT_RESERVED_31                      = 0x1f,
+	IMG_DATA_FORMAT_GB_GR                            = 0x20,
+	IMG_DATA_FORMAT_BG_RG                            = 0x21,
+	IMG_DATA_FORMAT_5_9_9_9                          = 0x22,
+	IMG_DATA_FORMAT_BC1                              = 0x23,
+	IMG_DATA_FORMAT_BC2                              = 0x24,
+	IMG_DATA_FORMAT_BC3                              = 0x25,
+	IMG_DATA_FORMAT_BC4                              = 0x26,
+	IMG_DATA_FORMAT_BC5                              = 0x27,
+	IMG_DATA_FORMAT_BC6                              = 0x28,
+	IMG_DATA_FORMAT_BC7                              = 0x29,
+	IMG_DATA_FORMAT_RESERVED_42                      = 0x2a,
+	IMG_DATA_FORMAT_RESERVED_43                      = 0x2b,
+	IMG_DATA_FORMAT_FMASK8_S2_F1                     = 0x2c,
+	IMG_DATA_FORMAT_FMASK8_S4_F1                     = 0x2d,
+	IMG_DATA_FORMAT_FMASK8_S8_F1                     = 0x2e,
+	IMG_DATA_FORMAT_FMASK8_S2_F2                     = 0x2f,
+	IMG_DATA_FORMAT_FMASK8_S4_F2                     = 0x30,
+	IMG_DATA_FORMAT_FMASK8_S4_F4                     = 0x31,
+	IMG_DATA_FORMAT_FMASK16_S16_F1                   = 0x32,
+	IMG_DATA_FORMAT_FMASK16_S8_F2                    = 0x33,
+	IMG_DATA_FORMAT_FMASK32_S16_F2                   = 0x34,
+	IMG_DATA_FORMAT_FMASK32_S8_F4                    = 0x35,
+	IMG_DATA_FORMAT_FMASK32_S8_F8                    = 0x36,
+	IMG_DATA_FORMAT_FMASK64_S16_F4                   = 0x37,
+	IMG_DATA_FORMAT_FMASK64_S16_F8                   = 0x38,
+	IMG_DATA_FORMAT_4_4                              = 0x39,
+	IMG_DATA_FORMAT_6_5_5                            = 0x3a,
+	IMG_DATA_FORMAT_1                                = 0x3b,
+	IMG_DATA_FORMAT_1_REVERSED                       = 0x3c,
+	IMG_DATA_FORMAT_32_AS_8                          = 0x3d,
+	IMG_DATA_FORMAT_32_AS_8_8                        = 0x3e,
+	IMG_DATA_FORMAT_32_AS_32_32_32_32                = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+	BUF_NUM_FORMAT_UNORM                             = 0x0,
+	BUF_NUM_FORMAT_SNORM                             = 0x1,
+	BUF_NUM_FORMAT_USCALED                           = 0x2,
+	BUF_NUM_FORMAT_SSCALED                           = 0x3,
+	BUF_NUM_FORMAT_UINT                              = 0x4,
+	BUF_NUM_FORMAT_SINT                              = 0x5,
+	BUF_NUM_FORMAT_RESERVED_6                        = 0x6,
+	BUF_NUM_FORMAT_FLOAT                             = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+	IMG_NUM_FORMAT_UNORM                             = 0x0,
+	IMG_NUM_FORMAT_SNORM                             = 0x1,
+	IMG_NUM_FORMAT_USCALED                           = 0x2,
+	IMG_NUM_FORMAT_SSCALED                           = 0x3,
+	IMG_NUM_FORMAT_UINT                              = 0x4,
+	IMG_NUM_FORMAT_SINT                              = 0x5,
+	IMG_NUM_FORMAT_RESERVED_6                        = 0x6,
+	IMG_NUM_FORMAT_FLOAT                             = 0x7,
+	IMG_NUM_FORMAT_RESERVED_8                        = 0x8,
+	IMG_NUM_FORMAT_SRGB                              = 0x9,
+	IMG_NUM_FORMAT_RESERVED_10                       = 0xa,
+	IMG_NUM_FORMAT_RESERVED_11                       = 0xb,
+	IMG_NUM_FORMAT_RESERVED_12                       = 0xc,
+	IMG_NUM_FORMAT_RESERVED_13                       = 0xd,
+	IMG_NUM_FORMAT_RESERVED_14                       = 0xe,
+	IMG_NUM_FORMAT_RESERVED_15                       = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+	ARRAY_COLOR_TILE                                 = 0x0,
+	ARRAY_DEPTH_TILE                                 = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+	ADDR_SURF_MICRO_TILING_DISPLAY                   = 0x0,
+	ADDR_SURF_MICRO_TILING_NON_DISPLAY               = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+	ADDR_SURF_DISPLAY_MICRO_TILING                   = 0x0,
+	ADDR_SURF_THIN_MICRO_TILING                      = 0x1,
+	ADDR_SURF_DEPTH_MICRO_TILING                     = 0x2,
+	ADDR_SURF_ROTATED_MICRO_TILING                   = 0x3,
+	ADDR_SURF_THICK_MICRO_TILING                     = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+	ADDR_SURF_TILE_SPLIT_64B                         = 0x0,
+	ADDR_SURF_TILE_SPLIT_128B                        = 0x1,
+	ADDR_SURF_TILE_SPLIT_256B                        = 0x2,
+	ADDR_SURF_TILE_SPLIT_512B                        = 0x3,
+	ADDR_SURF_TILE_SPLIT_1KB                         = 0x4,
+	ADDR_SURF_TILE_SPLIT_2KB                         = 0x5,
+	ADDR_SURF_TILE_SPLIT_4KB                         = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+	ADDR_SURF_SAMPLE_SPLIT_1                         = 0x0,
+	ADDR_SURF_SAMPLE_SPLIT_2                         = 0x1,
+	ADDR_SURF_SAMPLE_SPLIT_4                         = 0x2,
+	ADDR_SURF_SAMPLE_SPLIT_8                         = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+	ADDR_SURF_P2                                     = 0x0,
+	ADDR_SURF_P2_RESERVED0                           = 0x1,
+	ADDR_SURF_P2_RESERVED1                           = 0x2,
+	ADDR_SURF_P2_RESERVED2                           = 0x3,
+	ADDR_SURF_P4_8x16                                = 0x4,
+	ADDR_SURF_P4_16x16                               = 0x5,
+	ADDR_SURF_P4_16x32                               = 0x6,
+	ADDR_SURF_P4_32x32                               = 0x7,
+	ADDR_SURF_P8_16x16_8x16                          = 0x8,
+	ADDR_SURF_P8_16x32_8x16                          = 0x9,
+	ADDR_SURF_P8_32x32_8x16                          = 0xa,
+	ADDR_SURF_P8_16x32_16x16                         = 0xb,
+	ADDR_SURF_P8_32x32_16x16                         = 0xc,
+	ADDR_SURF_P8_32x32_16x32                         = 0xd,
+	ADDR_SURF_P8_32x64_32x32                         = 0xe,
+	ADDR_SURF_P8_RESERVED0                           = 0xf,
+	ADDR_SURF_P16_32x32_8x16                         = 0x10,
+	ADDR_SURF_P16_32x32_16x16                        = 0x11,
+} PipeConfig;
+typedef enum NumBanks {
+	ADDR_SURF_2_BANK                                 = 0x0,
+	ADDR_SURF_4_BANK                                 = 0x1,
+	ADDR_SURF_8_BANK                                 = 0x2,
+	ADDR_SURF_16_BANK                                = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+	ADDR_SURF_BANK_WIDTH_1                           = 0x0,
+	ADDR_SURF_BANK_WIDTH_2                           = 0x1,
+	ADDR_SURF_BANK_WIDTH_4                           = 0x2,
+	ADDR_SURF_BANK_WIDTH_8                           = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+	ADDR_SURF_BANK_HEIGHT_1                          = 0x0,
+	ADDR_SURF_BANK_HEIGHT_2                          = 0x1,
+	ADDR_SURF_BANK_HEIGHT_4                          = 0x2,
+	ADDR_SURF_BANK_HEIGHT_8                          = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+	ADDR_SURF_BANK_WH_1                              = 0x0,
+	ADDR_SURF_BANK_WH_2                              = 0x1,
+	ADDR_SURF_BANK_WH_4                              = 0x2,
+	ADDR_SURF_BANK_WH_8                              = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+	ADDR_SURF_MACRO_ASPECT_1                         = 0x0,
+	ADDR_SURF_MACRO_ASPECT_2                         = 0x1,
+	ADDR_SURF_MACRO_ASPECT_4                         = 0x2,
+	ADDR_SURF_MACRO_ASPECT_8                         = 0x3,
+} MacroTileAspect;
+typedef enum GATCL1RequestType {
+	GATCL1_TYPE_NORMAL                               = 0x0,
+	GATCL1_TYPE_SHOOTDOWN                            = 0x1,
+	GATCL1_TYPE_BYPASS                               = 0x2,
+} GATCL1RequestType;
+typedef enum TCC_CACHE_POLICIES {
+	TCC_CACHE_POLICY_LRU                             = 0x0,
+	TCC_CACHE_POLICY_STREAM                          = 0x1,
+} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+	MTYPE_NC_NV                                      = 0x0,
+	MTYPE_NC                                         = 0x1,
+	MTYPE_CC                                         = 0x2,
+	MTYPE_UC                                         = 0x3,
+} MTYPE;
+typedef enum PERFMON_COUNTER_MODE {
+	PERFMON_COUNTER_MODE_ACCUM                       = 0x0,
+	PERFMON_COUNTER_MODE_ACTIVE_CYCLES               = 0x1,
+	PERFMON_COUNTER_MODE_MAX                         = 0x2,
+	PERFMON_COUNTER_MODE_DIRTY                       = 0x3,
+	PERFMON_COUNTER_MODE_SAMPLE                      = 0x4,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT    = 0x5,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT     = 0x6,
+	PERFMON_COUNTER_MODE_CYCLES_GE_HI                = 0x7,
+	PERFMON_COUNTER_MODE_CYCLES_EQ_HI                = 0x8,
+	PERFMON_COUNTER_MODE_INACTIVE_CYCLES             = 0x9,
+	PERFMON_COUNTER_MODE_RESERVED                    = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+	PERFMON_SPM_MODE_OFF                             = 0x0,
+	PERFMON_SPM_MODE_16BIT_CLAMP                     = 0x1,
+	PERFMON_SPM_MODE_16BIT_NO_CLAMP                  = 0x2,
+	PERFMON_SPM_MODE_32BIT_CLAMP                     = 0x3,
+	PERFMON_SPM_MODE_32BIT_NO_CLAMP                  = 0x4,
+	PERFMON_SPM_MODE_RESERVED_5                      = 0x5,
+	PERFMON_SPM_MODE_RESERVED_6                      = 0x6,
+	PERFMON_SPM_MODE_RESERVED_7                      = 0x7,
+	PERFMON_SPM_MODE_TEST_MODE_0                     = 0x8,
+	PERFMON_SPM_MODE_TEST_MODE_1                     = 0x9,
+	PERFMON_SPM_MODE_TEST_MODE_2                     = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+	ARRAY_LINEAR                                     = 0x0,
+	ARRAY_TILED                                      = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+	ARRAY_1D                                         = 0x0,
+	ARRAY_2D                                         = 0x1,
+	ARRAY_3D                                         = 0x2,
+	ARRAY_3D_SLICE                                   = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+	ARRAY_2D_ALT_COLOR                               = 0x0,
+	ARRAY_2D_COLOR                                   = 0x1,
+	ARRAY_3D_SLICE_COLOR                             = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+	ARRAY_2D_ALT_DEPTH                               = 0x0,
+	ARRAY_2D_DEPTH                                   = 0x1,
+} DepthArray;
+typedef enum ENUM_NUM_SIMD_PER_CU {
+	NUM_SIMD_PER_CU                                  = 0x4,
+} ENUM_NUM_SIMD_PER_CU;
+typedef enum MEM_PWR_FORCE_CTRL {
+	NO_FORCE_REQUEST                                 = 0x0,
+	FORCE_LIGHT_SLEEP_REQUEST                        = 0x1,
+	FORCE_DEEP_SLEEP_REQUEST                         = 0x2,
+	FORCE_SHUT_DOWN_REQUEST                          = 0x3,
+} MEM_PWR_FORCE_CTRL;
+typedef enum MEM_PWR_FORCE_CTRL2 {
+	NO_FORCE_REQ                                     = 0x0,
+	FORCE_LIGHT_SLEEP_REQ                            = 0x1,
+} MEM_PWR_FORCE_CTRL2;
+typedef enum MEM_PWR_DIS_CTRL {
+	ENABLE_MEM_PWR_CTRL                              = 0x0,
+	DISABLE_MEM_PWR_CTRL                             = 0x1,
+} MEM_PWR_DIS_CTRL;
+typedef enum MEM_PWR_SEL_CTRL {
+	DYNAMIC_SHUT_DOWN_ENABLE                         = 0x0,
+	DYNAMIC_DEEP_SLEEP_ENABLE                        = 0x1,
+	DYNAMIC_LIGHT_SLEEP_ENABLE                       = 0x2,
+} MEM_PWR_SEL_CTRL;
+typedef enum MEM_PWR_SEL_CTRL2 {
+	DYNAMIC_DEEP_SLEEP_EN                            = 0x0,
+	DYNAMIC_LIGHT_SLEEP_EN                           = 0x1,
+} MEM_PWR_SEL_CTRL2;
+typedef enum HPD_INT_CONTROL_ACK {
+	HPD_INT_CONTROL_ACK_0                            = 0x0,
+	HPD_INT_CONTROL_ACK_1                            = 0x1,
+} HPD_INT_CONTROL_ACK;
+typedef enum HPD_INT_CONTROL_POLARITY {
+	HPD_INT_CONTROL_GEN_INT_ON_DISCON                = 0x0,
+	HPD_INT_CONTROL_GEN_INT_ON_CON                   = 0x1,
+} HPD_INT_CONTROL_POLARITY;
+typedef enum HPD_INT_CONTROL_RX_INT_ACK {
+	HPD_INT_CONTROL_RX_INT_ACK_0                     = 0x0,
+	HPD_INT_CONTROL_RX_INT_ACK_1                     = 0x1,
+} HPD_INT_CONTROL_RX_INT_ACK;
+typedef enum DPDBG_EN {
+	DPDBG_DISABLE                                    = 0x0,
+	DPDBG_ENABLE                                     = 0x1,
+} DPDBG_EN;
+typedef enum DPDBG_INPUT_EN {
+	DPDBG_INPUT_DISABLE                              = 0x0,
+	DPDBG_INPUT_ENABLE                               = 0x1,
+} DPDBG_INPUT_EN;
+typedef enum DPDBG_ERROR_DETECTION_MODE {
+	DPDBG_ERROR_DETECTION_MODE_CSC                   = 0x0,
+	DPDBG_ERROR_DETECTION_MODE_RS_ENCODING           = 0x1,
+} DPDBG_ERROR_DETECTION_MODE;
+typedef enum DPDBG_FIFO_OVERFLOW_INTERRUPT_MASK {
+	DPDBG_FIFO_OVERFLOW_INT_DISABLE                  = 0x0,
+	DPDBG_FIFO_OVERFLOW_INT_ENABLE                   = 0x1,
+} DPDBG_FIFO_OVERFLOW_INTERRUPT_MASK;
+typedef enum DPDBG_FIFO_OVERFLOW_INTERRUPT_TYPE {
+	DPDBG_FIFO_OVERFLOW_INT_LEVEL_BASED              = 0x0,
+	DPDBG_FIFO_OVERFLOW_INT_PULSE_BASED              = 0x1,
+} DPDBG_FIFO_OVERFLOW_INTERRUPT_TYPE;
+typedef enum DPDBG_FIFO_OVERFLOW_INTERRUPT_ACK {
+	DPDBG_FIFO_OVERFLOW_INT_NO_ACK                   = 0x0,
+	DPDBG_FIFO_OVERFLOW_INT_CLEAR                    = 0x1,
+} DPDBG_FIFO_OVERFLOW_INTERRUPT_ACK;
+typedef enum PM_ASSERT_RESET {
+	PM_ASSERT_RESET_0                                = 0x0,
+	PM_ASSERT_RESET_1                                = 0x1,
+} PM_ASSERT_RESET;
+typedef enum DAC_MUX_SELECT {
+	DAC_MUX_SELECT_DACA                              = 0x0,
+	DAC_MUX_SELECT_DACB                              = 0x1,
+} DAC_MUX_SELECT;
+typedef enum TMDS_DVO_MUX_SELECT {
+	TMDS_DVO_MUX_SELECT_B                            = 0x0,
+	TMDS_DVO_MUX_SELECT_G                            = 0x1,
+	TMDS_DVO_MUX_SELECT_R                            = 0x2,
+	TMDS_DVO_MUX_SELECT_RESERVED                     = 0x3,
+} TMDS_DVO_MUX_SELECT;
+typedef enum DACA_SOFT_RESET {
+	DACA_SOFT_RESET_0                                = 0x0,
+	DACA_SOFT_RESET_1                                = 0x1,
+} DACA_SOFT_RESET;
+typedef enum I2S0_SPDIF0_SOFT_RESET {
+	I2S0_SPDIF0_SOFT_RESET_0                         = 0x0,
+	I2S0_SPDIF0_SOFT_RESET_1                         = 0x1,
+} I2S0_SPDIF0_SOFT_RESET;
+typedef enum I2S1_SOFT_RESET {
+	I2S1_SOFT_RESET_0                                = 0x0,
+	I2S1_SOFT_RESET_1                                = 0x1,
+} I2S1_SOFT_RESET;
+typedef enum SPDIF1_SOFT_RESET {
+	SPDIF1_SOFT_RESET_0                              = 0x0,
+	SPDIF1_SOFT_RESET_1                              = 0x1,
+} SPDIF1_SOFT_RESET;
+typedef enum DB_CLK_SOFT_RESET {
+	DB_CLK_SOFT_RESET_0                              = 0x0,
+	DB_CLK_SOFT_RESET_1                              = 0x1,
+} DB_CLK_SOFT_RESET;
+typedef enum FMT0_SOFT_RESET {
+	FMT0_SOFT_RESET_0                                = 0x0,
+	FMT0_SOFT_RESET_1                                = 0x1,
+} FMT0_SOFT_RESET;
+typedef enum FMT1_SOFT_RESET {
+	FMT1_SOFT_RESET_0                                = 0x0,
+	FMT1_SOFT_RESET_1                                = 0x1,
+} FMT1_SOFT_RESET;
+typedef enum FMT2_SOFT_RESET {
+	FMT2_SOFT_RESET_0                                = 0x0,
+	FMT2_SOFT_RESET_1                                = 0x1,
+} FMT2_SOFT_RESET;
+typedef enum FMT3_SOFT_RESET {
+	FMT3_SOFT_RESET_0                                = 0x0,
+	FMT3_SOFT_RESET_1                                = 0x1,
+} FMT3_SOFT_RESET;
+typedef enum FMT4_SOFT_RESET {
+	FMT4_SOFT_RESET_0                                = 0x0,
+	FMT4_SOFT_RESET_1                                = 0x1,
+} FMT4_SOFT_RESET;
+typedef enum FMT5_SOFT_RESET {
+	FMT5_SOFT_RESET_0                                = 0x0,
+	FMT5_SOFT_RESET_1                                = 0x1,
+} FMT5_SOFT_RESET;
+typedef enum MVP_SOFT_RESET {
+	MVP_SOFT_RESET_0                                 = 0x0,
+	MVP_SOFT_RESET_1                                 = 0x1,
+} MVP_SOFT_RESET;
+typedef enum ABM_SOFT_RESET {
+	ABM_SOFT_RESET_0                                 = 0x0,
+	ABM_SOFT_RESET_1                                 = 0x1,
+} ABM_SOFT_RESET;
+typedef enum DVO_SOFT_RESET {
+	DVO_SOFT_RESET_0                                 = 0x0,
+	DVO_SOFT_RESET_1                                 = 0x1,
+} DVO_SOFT_RESET;
+typedef enum DIGA_FE_SOFT_RESET {
+	DIGA_FE_SOFT_RESET_0                             = 0x0,
+	DIGA_FE_SOFT_RESET_1                             = 0x1,
+} DIGA_FE_SOFT_RESET;
+typedef enum DIGA_BE_SOFT_RESET {
+	DIGA_BE_SOFT_RESET_0                             = 0x0,
+	DIGA_BE_SOFT_RESET_1                             = 0x1,
+} DIGA_BE_SOFT_RESET;
+typedef enum DIGB_FE_SOFT_RESET {
+	DIGB_FE_SOFT_RESET_0                             = 0x0,
+	DIGB_FE_SOFT_RESET_1                             = 0x1,
+} DIGB_FE_SOFT_RESET;
+typedef enum DIGB_BE_SOFT_RESET {
+	DIGB_BE_SOFT_RESET_0                             = 0x0,
+	DIGB_BE_SOFT_RESET_1                             = 0x1,
+} DIGB_BE_SOFT_RESET;
+typedef enum DIGC_FE_SOFT_RESET {
+	DIGC_FE_SOFT_RESET_0                             = 0x0,
+	DIGC_FE_SOFT_RESET_1                             = 0x1,
+} DIGC_FE_SOFT_RESET;
+typedef enum DIGC_BE_SOFT_RESET {
+	DIGC_BE_SOFT_RESET_0                             = 0x0,
+	DIGC_BE_SOFT_RESET_1                             = 0x1,
+} DIGC_BE_SOFT_RESET;
+typedef enum DIGD_FE_SOFT_RESET {
+	DIGD_FE_SOFT_RESET_0                             = 0x0,
+	DIGD_FE_SOFT_RESET_1                             = 0x1,
+} DIGD_FE_SOFT_RESET;
+typedef enum DIGD_BE_SOFT_RESET {
+	DIGD_BE_SOFT_RESET_0                             = 0x0,
+	DIGD_BE_SOFT_RESET_1                             = 0x1,
+} DIGD_BE_SOFT_RESET;
+typedef enum DIGE_FE_SOFT_RESET {
+	DIGE_FE_SOFT_RESET_0                             = 0x0,
+	DIGE_FE_SOFT_RESET_1                             = 0x1,
+} DIGE_FE_SOFT_RESET;
+typedef enum DIGE_BE_SOFT_RESET {
+	DIGE_BE_SOFT_RESET_0                             = 0x0,
+	DIGE_BE_SOFT_RESET_1                             = 0x1,
+} DIGE_BE_SOFT_RESET;
+typedef enum DIGF_FE_SOFT_RESET {
+	DIGF_FE_SOFT_RESET_0                             = 0x0,
+	DIGF_FE_SOFT_RESET_1                             = 0x1,
+} DIGF_FE_SOFT_RESET;
+typedef enum DIGF_BE_SOFT_RESET {
+	DIGF_BE_SOFT_RESET_0                             = 0x0,
+	DIGF_BE_SOFT_RESET_1                             = 0x1,
+} DIGF_BE_SOFT_RESET;
+typedef enum DIGG_FE_SOFT_RESET {
+	DIGG_FE_SOFT_RESET_0                             = 0x0,
+	DIGG_FE_SOFT_RESET_1                             = 0x1,
+} DIGG_FE_SOFT_RESET;
+typedef enum DIGG_BE_SOFT_RESET {
+	DIGG_BE_SOFT_RESET_0                             = 0x0,
+	DIGG_BE_SOFT_RESET_1                             = 0x1,
+} DIGG_BE_SOFT_RESET;
+typedef enum DPDBG_SOFT_RESET {
+	DPDBG_SOFT_RESET_0                               = 0x0,
+	DPDBG_SOFT_RESET_1                               = 0x1,
+} DPDBG_SOFT_RESET;
+typedef enum DIGLPA_FE_SOFT_RESET {
+	DIGLPA_FE_SOFT_RESET_0                           = 0x0,
+	DIGLPA_FE_SOFT_RESET_1                           = 0x1,
+} DIGLPA_FE_SOFT_RESET;
+typedef enum DIGLPA_BE_SOFT_RESET {
+	DIGLPA_BE_SOFT_RESET_0                           = 0x0,
+	DIGLPA_BE_SOFT_RESET_1                           = 0x1,
+} DIGLPA_BE_SOFT_RESET;
+typedef enum DIGLPB_FE_SOFT_RESET {
+	DIGLPB_FE_SOFT_RESET_0                           = 0x0,
+	DIGLPB_FE_SOFT_RESET_1                           = 0x1,
+} DIGLPB_FE_SOFT_RESET;
+typedef enum DIGLPB_BE_SOFT_RESET {
+	DIGLPB_BE_SOFT_RESET_0                           = 0x0,
+	DIGLPB_BE_SOFT_RESET_1                           = 0x1,
+} DIGLPB_BE_SOFT_RESET;
+typedef enum GENERICA_STEREOSYNC_SEL {
+	GENERICA_STEREOSYNC_SEL_D1                       = 0x0,
+	GENERICA_STEREOSYNC_SEL_D2                       = 0x1,
+	GENERICA_STEREOSYNC_SEL_D3                       = 0x2,
+	GENERICA_STEREOSYNC_SEL_D4                       = 0x3,
+	GENERICA_STEREOSYNC_SEL_D5                       = 0x4,
+	GENERICA_STEREOSYNC_SEL_D6                       = 0x5,
+	GENERICA_STEREOSYNC_SEL_RESERVED                 = 0x6,
+} GENERICA_STEREOSYNC_SEL;
+typedef enum GENERICB_STEREOSYNC_SEL {
+	GENERICB_STEREOSYNC_SEL_D1                       = 0x0,
+	GENERICB_STEREOSYNC_SEL_D2                       = 0x1,
+	GENERICB_STEREOSYNC_SEL_D3                       = 0x2,
+	GENERICB_STEREOSYNC_SEL_D4                       = 0x3,
+	GENERICB_STEREOSYNC_SEL_D5                       = 0x4,
+	GENERICB_STEREOSYNC_SEL_D6                       = 0x5,
+	GENERICB_STEREOSYNC_SEL_RESERVED                 = 0x6,
+} GENERICB_STEREOSYNC_SEL;
+typedef enum DCO_DBG_BLOCK_SEL {
+	DCO_DBG_BLOCK_SEL_DCO                            = 0x0,
+	DCO_DBG_BLOCK_SEL_ABM                            = 0x1,
+	DCO_DBG_BLOCK_SEL_DVO                            = 0x2,
+	DCO_DBG_BLOCK_SEL_DAC                            = 0x3,
+	DCO_DBG_BLOCK_SEL_MVP                            = 0x4,
+	DCO_DBG_BLOCK_SEL_FMT0                           = 0x5,
+	DCO_DBG_BLOCK_SEL_FMT1                           = 0x6,
+	DCO_DBG_BLOCK_SEL_FMT2                           = 0x7,
+	DCO_DBG_BLOCK_SEL_FMT3                           = 0x8,
+	DCO_DBG_BLOCK_SEL_FMT4                           = 0x9,
+	DCO_DBG_BLOCK_SEL_FMT5                           = 0xa,
+	DCO_DBG_BLOCK_SEL_DIGFE_A                        = 0xb,
+	DCO_DBG_BLOCK_SEL_DIGFE_B                        = 0xc,
+	DCO_DBG_BLOCK_SEL_DIGFE_C                        = 0xd,
+	DCO_DBG_BLOCK_SEL_DIGFE_D                        = 0xe,
+	DCO_DBG_BLOCK_SEL_DIGFE_E                        = 0xf,
+	DCO_DBG_BLOCK_SEL_DIGFE_F                        = 0x10,
+	DCO_DBG_BLOCK_SEL_DIGFE_G                        = 0x11,
+	DCO_DBG_BLOCK_SEL_DIGA                           = 0x12,
+	DCO_DBG_BLOCK_SEL_DIGB                           = 0x13,
+	DCO_DBG_BLOCK_SEL_DIGC                           = 0x14,
+	DCO_DBG_BLOCK_SEL_DIGD                           = 0x15,
+	DCO_DBG_BLOCK_SEL_DIGE                           = 0x16,
+	DCO_DBG_BLOCK_SEL_DIGF                           = 0x17,
+	DCO_DBG_BLOCK_SEL_DIGG                           = 0x18,
+	DCO_DBG_BLOCK_SEL_DPFE_A                         = 0x19,
+	DCO_DBG_BLOCK_SEL_DPFE_B                         = 0x1a,
+	DCO_DBG_BLOCK_SEL_DPFE_C                         = 0x1b,
+	DCO_DBG_BLOCK_SEL_DPFE_D                         = 0x1c,
+	DCO_DBG_BLOCK_SEL_DPFE_E                         = 0x1d,
+	DCO_DBG_BLOCK_SEL_DPFE_F                         = 0x1e,
+	DCO_DBG_BLOCK_SEL_DPFE_G                         = 0x1f,
+	DCO_DBG_BLOCK_SEL_DPA                            = 0x20,
+	DCO_DBG_BLOCK_SEL_DPB                            = 0x21,
+	DCO_DBG_BLOCK_SEL_DPC                            = 0x22,
+	DCO_DBG_BLOCK_SEL_DPD                            = 0x23,
+	DCO_DBG_BLOCK_SEL_DPE                            = 0x24,
+	DCO_DBG_BLOCK_SEL_DPF                            = 0x25,
+	DCO_DBG_BLOCK_SEL_DPG                            = 0x26,
+	DCO_DBG_BLOCK_SEL_AUX0                           = 0x27,
+	DCO_DBG_BLOCK_SEL_AUX1                           = 0x28,
+	DCO_DBG_BLOCK_SEL_AUX2                           = 0x29,
+	DCO_DBG_BLOCK_SEL_AUX3                           = 0x2a,
+	DCO_DBG_BLOCK_SEL_AUX4                           = 0x2b,
+	DCO_DBG_BLOCK_SEL_AUX5                           = 0x2c,
+	DCO_DBG_BLOCK_SEL_PERFMON_DCO                    = 0x2d,
+	DCO_DBG_BLOCK_SEL_AUDIO_OUT                      = 0x2e,
+	DCO_DBG_BLOCK_SEL_DIGLPFEA                       = 0x2f,
+	DCO_DBG_BLOCK_SEL_DIGLPFEB                       = 0x30,
+	DCO_DBG_BLOCK_SEL_DIGLPA                         = 0x31,
+	DCO_DBG_BLOCK_SEL_DIGLPB                         = 0x32,
+	DCO_DBG_BLOCK_SEL_DPLPFEA                        = 0x33,
+	DCO_DBG_BLOCK_SEL_DPLPFEB                        = 0x34,
+	DCO_DBG_BLOCK_SEL_DPLPA                          = 0x35,
+	DCO_DBG_BLOCK_SEL_DPLPB                          = 0x36,
+} DCO_DBG_BLOCK_SEL;
+typedef enum DCO_DBG_CLOCK_SEL {
+	DCO_DBG_CLOCK_SEL_DISPCLK                        = 0x0,
+	DCO_DBG_CLOCK_SEL_SCLK                           = 0x1,
+	DCO_DBG_CLOCK_SEL_MVPCLK                         = 0x2,
+	DCO_DBG_CLOCK_SEL_DVOCLK                         = 0x3,
+	DCO_DBG_CLOCK_SEL_DACCLK                         = 0x4,
+	DCO_DBG_CLOCK_SEL_REFCLK                         = 0x5,
+	DCO_DBG_CLOCK_SEL_SYMCLKA                        = 0x6,
+	DCO_DBG_CLOCK_SEL_SYMCLKB                        = 0x7,
+	DCO_DBG_CLOCK_SEL_SYMCLKC                        = 0x8,
+	DCO_DBG_CLOCK_SEL_SYMCLKD                        = 0x9,
+	DCO_DBG_CLOCK_SEL_SYMCLKE                        = 0xa,
+	DCO_DBG_CLOCK_SEL_SYMCLKF                        = 0xb,
+	DCO_DBG_CLOCK_SEL_SYMCLKG                        = 0xc,
+	DCO_DBG_CLOCK_SEL_RESERVED                       = 0xd,
+	DCO_DBG_CLOCK_SEL_AM0CLK                         = 0xe,
+	DCO_DBG_CLOCK_SEL_AM1CLK                         = 0xf,
+	DCO_DBG_CLOCK_SEL_AM2CLK                         = 0x10,
+	DCO_DBG_CLOCK_SEL_SYMCLKLPA                      = 0x11,
+	DCO_DBG_CLOCK_SEL_SYMCLKLPB                      = 0x12,
+} DCO_DBG_CLOCK_SEL;
+typedef enum DCO_HDMI_RXSTATUS_TIMER_CONTROL_DCO_HDMI_RXSTATUS_TIMER_TYPE {
+	DCO_HDMI_RXSTATUS_TIMER_TYPE_LEVEL               = 0x0,
+	DCO_HDMI_RXSTATUS_TIMER_TYPE_PULSE               = 0x1,
+} DCO_HDMI_RXSTATUS_TIMER_CONTROL_DCO_HDMI_RXSTATUS_TIMER_TYPE;
+typedef enum FMT420_MEMORY_SOURCE_SEL {
+	FMT420_MEMORY_SOURCE_SEL_FMT0                    = 0x0,
+	FMT420_MEMORY_SOURCE_SEL_FMT1                    = 0x1,
+	FMT420_MEMORY_SOURCE_SEL_FMT2                    = 0x2,
+	FMT420_MEMORY_SOURCE_SEL_FMT3                    = 0x3,
+	FMT420_MEMORY_SOURCE_SEL_FMT4                    = 0x4,
+	FMT420_MEMORY_SOURCE_SEL_FMT5                    = 0x5,
+	FMT420_MEMORY_SOURCE_SEL_FMT_RESERVED            = 0x6,
+} FMT420_MEMORY_SOURCE_SEL;
+typedef enum DOUT_I2C_CONTROL_GO {
+	DOUT_I2C_CONTROL_STOP_TRANSFER                   = 0x0,
+	DOUT_I2C_CONTROL_START_TRANSFER                  = 0x1,
+} DOUT_I2C_CONTROL_GO;
+typedef enum DOUT_I2C_CONTROL_SOFT_RESET {
+	DOUT_I2C_CONTROL_NOT_RESET_I2C_CONTROLLER        = 0x0,
+	DOUT_I2C_CONTROL_RESET_I2C_CONTROLLER            = 0x1,
+} DOUT_I2C_CONTROL_SOFT_RESET;
+typedef enum DOUT_I2C_CONTROL_SEND_RESET {
+	DOUT_I2C_CONTROL__NOT_SEND_RESET                 = 0x0,
+	DOUT_I2C_CONTROL__SEND_RESET                     = 0x1,
+} DOUT_I2C_CONTROL_SEND_RESET;
+typedef enum DOUT_I2C_CONTROL_SW_STATUS_RESET {
+	DOUT_I2C_CONTROL_NOT_RESET_SW_STATUS             = 0x0,
+	DOUT_I2C_CONTROL_RESET_SW_STATUS                 = 0x1,
+} DOUT_I2C_CONTROL_SW_STATUS_RESET;
+typedef enum DOUT_I2C_CONTROL_DDC_SELECT {
+	DOUT_I2C_CONTROL_SELECT_DDC1                     = 0x0,
+	DOUT_I2C_CONTROL_SELECT_DDC2                     = 0x1,
+	DOUT_I2C_CONTROL_SELECT_DDC3                     = 0x2,
+	DOUT_I2C_CONTROL_SELECT_DDC4                     = 0x3,
+	DOUT_I2C_CONTROL_SELECT_DDC5                     = 0x4,
+	DOUT_I2C_CONTROL_SELECT_DDC6                     = 0x5,
+	DOUT_I2C_CONTROL_SELECT_DDCVGA                   = 0x6,
+} DOUT_I2C_CONTROL_DDC_SELECT;
+typedef enum DOUT_I2C_CONTROL_TRANSACTION_COUNT {
+	DOUT_I2C_CONTROL_TRANS0                          = 0x0,
+	DOUT_I2C_CONTROL_TRANS0_TRANS1                   = 0x1,
+	DOUT_I2C_CONTROL_TRANS0_TRANS1_TRANS2            = 0x2,
+	DOUT_I2C_CONTROL_TRANS0_TRANS1_TRANS2_TRANS3     = 0x3,
+} DOUT_I2C_CONTROL_TRANSACTION_COUNT;
+typedef enum DOUT_I2C_CONTROL_DBG_REF_SEL {
+	DOUT_I2C_CONTROL_NORMAL_DEBUG                    = 0x0,
+	DOUT_I2C_CONTROL_FAST_REFERENCE_DEBUG            = 0x1,
+} DOUT_I2C_CONTROL_DBG_REF_SEL;
+typedef enum DOUT_I2C_ARBITRATION_SW_PRIORITY {
+	DOUT_I2C_ARBITRATION_SW_PRIORITY_NORMAL          = 0x0,
+	DOUT_I2C_ARBITRATION_SW_PRIORITY_HIGH            = 0x1,
+	DOUT_I2C_ARBITRATION_SW_PRIORITY_0_RESERVED      = 0x2,
+	DOUT_I2C_ARBITRATION_SW_PRIORITY_1_RESERVED      = 0x3,
+} DOUT_I2C_ARBITRATION_SW_PRIORITY;
+typedef enum DOUT_I2C_ARBITRATION_NO_QUEUED_SW_GO {
+	DOUT_I2C_ARBITRATION_SW_QUEUE_ENABLED            = 0x0,
+	DOUT_I2C_ARBITRATION_SW_QUEUE_DISABLED           = 0x1,
+} DOUT_I2C_ARBITRATION_NO_QUEUED_SW_GO;
+typedef enum DOUT_I2C_ARBITRATION_ABORT_XFER {
+	DOUT_I2C_ARBITRATION_NOT_ABORT_CURRENT_TRANSFER  = 0x0,
+	DOUT_I2C_ARBITRATION_ABORT_CURRENT_TRANSFER      = 0x1,
+} DOUT_I2C_ARBITRATION_ABORT_XFER;
+typedef enum DOUT_I2C_ARBITRATION_USE_I2C_REG_REQ {
+	DOUT_I2C_ARBITRATION__NOT_USE_I2C_REG_REQ        = 0x0,
+	DOUT_I2C_ARBITRATION__USE_I2C_REG_REQ            = 0x1,
+} DOUT_I2C_ARBITRATION_USE_I2C_REG_REQ;
+typedef enum DOUT_I2C_ARBITRATION_DONE_USING_I2C_REG {
+	DOUT_I2C_ARBITRATION_DONE__NOT_USING_I2C_REG     = 0x0,
+	DOUT_I2C_ARBITRATION_DONE__USING_I2C_REG         = 0x1,
+} DOUT_I2C_ARBITRATION_DONE_USING_I2C_REG;
+typedef enum DOUT_I2C_ACK {
+	DOUT_I2C_NO_ACK                                  = 0x0,
+	DOUT_I2C_ACK_TO_CLEAN                            = 0x1,
+} DOUT_I2C_ACK;
+typedef enum DOUT_I2C_DDC_SPEED_THRESHOLD {
+	DOUT_I2C_DDC_SPEED_THRESHOLD_BIG_THAN_ZERO       = 0x0,
+	DOUT_I2C_DDC_SPEED_THRESHOLD_QUATER_OF_TOTAL_SAMPLE= 0x1,
+	DOUT_I2C_DDC_SPEED_THRESHOLD_HALF_OF_TOTAL_SAMPLE= 0x2,
+	DOUT_I2C_DDC_SPEED_THRESHOLD_THREE_QUATERS_OF_TOTAL_SAMPLE= 0x3,
+} DOUT_I2C_DDC_SPEED_THRESHOLD;
+typedef enum DOUT_I2C_DDC_SETUP_DATA_DRIVE_EN {
+	DOUT_I2C_DDC_SETUP_DATA_DRIVE_BY_EXTERNAL_RESISTOR= 0x0,
+	DOUT_I2C_DDC_SETUP_I2C_PAD_DRIVE_SDA             = 0x1,
+} DOUT_I2C_DDC_SETUP_DATA_DRIVE_EN;
+typedef enum DOUT_I2C_DDC_SETUP_DATA_DRIVE_SEL {
+	DOUT_I2C_DDC_SETUP_DATA_DRIVE_FOR_10MCLKS        = 0x0,
+	DOUT_I2C_DDC_SETUP_DATA_DRIVE_FOR_20MCLKS        = 0x1,
+} DOUT_I2C_DDC_SETUP_DATA_DRIVE_SEL;
+typedef enum DOUT_I2C_DDC_SETUP_EDID_DETECT_MODE {
+	DOUT_I2C_DDC_SETUP_EDID_DETECT_CONNECT           = 0x0,
+	DOUT_I2C_DDC_SETUP_EDID_DETECT_DISCONNECT        = 0x1,
+} DOUT_I2C_DDC_SETUP_EDID_DETECT_MODE;
+typedef enum DOUT_I2C_DDC_SETUP_CLK_DRIVE_EN {
+	DOUT_I2C_DDC_SETUP_CLK_DRIVE_BY_EXTERNAL_RESISTOR= 0x0,
+	DOUT_I2C_DDC_SETUP_I2C_PAD_DRIVE_SCL             = 0x1,
+} DOUT_I2C_DDC_SETUP_CLK_DRIVE_EN;
+typedef enum DOUT_I2C_TRANSACTION_STOP_ON_NACK {
+	DOUT_I2C_TRANSACTION_STOP_CURRENT_TRANS          = 0x0,
+	DOUT_I2C_TRANSACTION_STOP_ALL_TRANS              = 0x1,
+} DOUT_I2C_TRANSACTION_STOP_ON_NACK;
+typedef enum DOUT_I2C_DATA_INDEX_WRITE {
+	DOUT_I2C_DATA__NOT_INDEX_WRITE                   = 0x0,
+	DOUT_I2C_DATA__INDEX_WRITE                       = 0x1,
+} DOUT_I2C_DATA_INDEX_WRITE;
+typedef enum DOUT_I2C_EDID_DETECT_CTRL_SEND_RESET {
+	DOUT_I2C_EDID_NOT_SEND_RESET_BEFORE_EDID_READ_TRACTION= 0x0,
+	DOUT_I2C_EDID_SEND_RESET_BEFORE_EDID_READ_TRACTION= 0x1,
+} DOUT_I2C_EDID_DETECT_CTRL_SEND_RESET;
+typedef enum DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE {
+	DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE__LEVEL      = 0x0,
+	DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE__PULSE      = 0x1,
+} DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE;
+typedef enum BLNDV_CONTROL_BLND_MODE {
+	BLNDV_CONTROL_BLND_MODE_CURRENT_PIPE_ONLY        = 0x0,
+	BLNDV_CONTROL_BLND_MODE_OTHER_PIPE_ONLY          = 0x1,
+	BLNDV_CONTROL_BLND_MODE_ALPHA_BLENDING_MODE      = 0x2,
+	BLNDV_CONTROL_BLND_MODE_OTHER_STEREO_TYPE        = 0x3,
+} BLNDV_CONTROL_BLND_MODE;
+typedef enum BLNDV_CONTROL_BLND_STEREO_TYPE {
+	BLNDV_CONTROL_BLND_STEREO_TYPE_NON_SINGLE_PIPE_STEREO= 0x0,
+	BLNDV_CONTROL_BLND_STEREO_TYPE_SIDE_BY_SIDE_SINGLE_PIPE_STEREO= 0x1,
+	BLNDV_CONTROL_BLND_STEREO_TYPE_TOP_BOTTOM_SINGLE_PIPE_STEREO= 0x2,
+	BLNDV_CONTROL_BLND_STEREO_TYPE_UNUSED            = 0x3,
+} BLNDV_CONTROL_BLND_STEREO_TYPE;
+typedef enum BLNDV_CONTROL_BLND_STEREO_POLARITY {
+	BLNDV_CONTROL_BLND_STEREO_POLARITY_LOW           = 0x0,
+	BLNDV_CONTROL_BLND_STEREO_POLARITY_HIGH          = 0x1,
+} BLNDV_CONTROL_BLND_STEREO_POLARITY;
+typedef enum BLNDV_CONTROL_BLND_FEEDTHROUGH_EN {
+	BLNDV_CONTROL_BLND_FEEDTHROUGH_EN_FALSE          = 0x0,
+	BLNDV_CONTROL_BLND_FEEDTHROUGH_EN_TRUE           = 0x1,
+} BLNDV_CONTROL_BLND_FEEDTHROUGH_EN;
+typedef enum BLNDV_CONTROL_BLND_ALPHA_MODE {
+	BLNDV_CONTROL_BLND_ALPHA_MODE_CURRENT_PIXEL_ALPHA= 0x0,
+	BLNDV_CONTROL_BLND_ALPHA_MODE_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN= 0x1,
+	BLNDV_CONTROL_BLND_ALPHA_MODE_GLOBAL_ALPHA_ONLY  = 0x2,
+	BLNDV_CONTROL_BLND_ALPHA_MODE_UNUSED             = 0x3,
+} BLNDV_CONTROL_BLND_ALPHA_MODE;
+typedef enum BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY {
+	BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY_FALSE     = 0x0,
+	BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY_TRUE      = 0x1,
+} BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY;
+typedef enum BLNDV_CONTROL_BLND_MULTIPLIED_MODE {
+	BLNDV_CONTROL_BLND_MULTIPLIED_MODE_FALSE         = 0x0,
+	BLNDV_CONTROL_BLND_MULTIPLIED_MODE_TRUE          = 0x1,
+} BLNDV_CONTROL_BLND_MULTIPLIED_MODE;
+typedef enum BLNDV_SM_CONTROL2_SM_MODE {
+	BLNDV_SM_CONTROL2_SM_MODE_SINGLE_PLANE           = 0x0,
+	BLNDV_SM_CONTROL2_SM_MODE_ROW_SUBSAMPLING        = 0x2,
+	BLNDV_SM_CONTROL2_SM_MODE_COLUMN_SUBSAMPLING     = 0x4,
+	BLNDV_SM_CONTROL2_SM_MODE_CHECKERBOARD_SUBSAMPLING= 0x6,
+} BLNDV_SM_CONTROL2_SM_MODE;
+typedef enum BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE {
+	BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE_FALSE       = 0x0,
+	BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE_TRUE        = 0x1,
+} BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE;
+typedef enum BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE {
+	BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE_FALSE       = 0x0,
+	BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE_TRUE        = 0x1,
+} BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE;
+typedef enum BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL {
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_NO_FORCE= 0x0,
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_RESERVED= 0x1,
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_LOW= 0x2,
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_HIGH= 0x3,
+} BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL;
+typedef enum BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL {
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_NO_FORCE = 0x0,
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_RESERVED = 0x1,
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_LOW= 0x2,
+	BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_HIGH= 0x3,
+} BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL;
+typedef enum BLNDV_CONTROL2_PTI_ENABLE {
+	BLNDV_CONTROL2_PTI_ENABLE_FALSE                  = 0x0,
+	BLNDV_CONTROL2_PTI_ENABLE_TRUE                   = 0x1,
+} BLNDV_CONTROL2_PTI_ENABLE;
+typedef enum BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN {
+	BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_FALSE     = 0x0,
+	BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_TRUE      = 0x1,
+} BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN;
+typedef enum BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN {
+	BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN_FALSE     = 0x0,
+	BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN_TRUE      = 0x1,
+} BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN;
+typedef enum BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK {
+	BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_FALSE= 0x0,
+	BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_TRUE= 0x1,
+} BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK;
+typedef enum BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK {
+	BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_FALSE= 0x0,
+	BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_TRUE= 0x1,
+} BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK {
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_FALSE= 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK {
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_FALSE= 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK {
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_FALSE= 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK {
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_FALSE= 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK {
+	BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_FALSE = 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_TRUE  = 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK {
+	BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_FALSE= 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_TRUE = 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE {
+	BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_FALSE= 0x0,
+	BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_TRUE = 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE;
+typedef enum BLNDV_DEBUG_BLND_CNV_MUX_SELECT {
+	BLNDV_DEBUG_BLND_CNV_MUX_SELECT_LOW              = 0x0,
+	BLNDV_DEBUG_BLND_CNV_MUX_SELECT_HIGH             = 0x1,
+} BLNDV_DEBUG_BLND_CNV_MUX_SELECT;
+typedef enum BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN {
+	BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_FALSE= 0x0,
+	BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_TRUE= 0x1,
+} BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN;
+typedef enum DPCSTX_DBG_CFGCLK_SEL {
+	DPCSTX_DBG_CFGCLK_SEL_DC_DPCS_INF                = 0x0,
+	DPCSTX_DBG_CFGCLK_SEL_DPCS_BPHY_INF              = 0x1,
+	DPCSTX_DBG_CFGCLK_SEL_CBUS_SLAVE                 = 0x2,
+	DPCSTX_DBG_CFGCLK_SEL_CBUS_MASTER                = 0x3,
+} DPCSTX_DBG_CFGCLK_SEL;
+typedef enum DPCSTX_TX_SYMCLK_SEL {
+	DPCSTX_DBG_TX_SYMCLK_SEL_IN0                     = 0x0,
+	DPCSTX_DBG_TX_SYMCLK_SEL_IN1                     = 0x1,
+	DPCSTX_DBG_TX_SYMCLK_SEL_FIFO_WR                 = 0x2,
+} DPCSTX_TX_SYMCLK_SEL;
+typedef enum DPCSTX_TX_SYMCLK_DIV2_SEL {
+	DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT0               = 0x0,
+	DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT1               = 0x1,
+	DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT2               = 0x2,
+	DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT3               = 0x3,
+	DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_FIFO_RD            = 0x4,
+	DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_INT                = 0x5,
+} DPCSTX_TX_SYMCLK_DIV2_SEL;
+typedef enum DPCSTX_DBG_CLOCK_SEL {
+	DPCSTX_DBG_CLOCK_SEL_DC_CFGCLK                   = 0x0,
+	DPCSTX_DBG_CLOCK_SEL_PHY_CFGCLK                  = 0x1,
+	DPCSTX_DBG_CLOCK_SEL_TXSYMCLK                    = 0x2,
+} DPCSTX_DBG_CLOCK_SEL;
+typedef enum DPCSTX_DVI_LINK_MODE {
+	DPCSTX_DVI_LINK_MODE_NORMAL                      = 0x0,
+	DPCSTX_DVI_LINK_MODE_DUAL_LINK_MASTER            = 0x1,
+	DPCSTX_DVI_LINK_MODE_DUAL_LINK_SLAVER            = 0x2,
+} DPCSTX_DVI_LINK_MODE;
+
+#endif /* DCE_11_2_ENUM_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
new file mode 100755
index 0000000..1ddc418
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
@@ -0,0 +1,18687 @@
+/*
+ * DCE_11_2 Register documentation
+ *
+ * Copyright (C) 2016  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_11_2_SH_MASK_H
+#define DCE_11_2_SH_MASK_H
+
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON_MASK 0x1
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON__SHIFT 0x0
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE_MASK 0x1
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE__SHIFT 0x0
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE0_PG_STATUS__PIPE0_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE0_PG_STATUS__PIPE0_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON_MASK 0x1
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON__SHIFT 0x0
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE_MASK 0x1
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE__SHIFT 0x0
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE1_PG_STATUS__PIPE1_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE1_PG_STATUS__PIPE1_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON_MASK 0x1
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON__SHIFT 0x0
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE_MASK 0x1
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE__SHIFT 0x0
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE2_PG_STATUS__PIPE2_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE2_PG_STATUS__PIPE2_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON_MASK 0x1
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON__SHIFT 0x0
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE_MASK 0x1
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE__SHIFT 0x0
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE3_PG_STATUS__PIPE3_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE3_PG_STATUS__PIPE3_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON_MASK 0x1
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON__SHIFT 0x0
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE_MASK 0x1
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE__SHIFT 0x0
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE4_PG_STATUS__PIPE4_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE4_PG_STATUS__PIPE4_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON_MASK 0x1
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON__SHIFT 0x0
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE_MASK 0x1
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE__SHIFT 0x0
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE5_PG_STATUS__PIPE5_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE5_PG_STATUS__PIPE5_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_UP_INT_OCCURRED_MASK 0x1
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_DOWN_INT_OCCURRED_MASK 0x2
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_UP_INT_OCCURRED_MASK 0x4
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_DOWN_INT_OCCURRED_MASK 0x8
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_UP_INT_OCCURRED_MASK 0x10
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_DOWN_INT_OCCURRED_MASK 0x20
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_UP_INT_OCCURRED_MASK 0x40
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_DOWN_INT_OCCURRED_MASK 0x80
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_UP_INT_OCCURRED_MASK 0x100
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_UP_INT_OCCURRED__SHIFT 0x8
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_DOWN_INT_OCCURRED_MASK 0x200
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_DOWN_INT_OCCURRED__SHIFT 0x9
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_UP_INT_OCCURRED_MASK 0x400
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_UP_INT_OCCURRED__SHIFT 0xa
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_DOWN_INT_OCCURRED_MASK 0x800
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_DOWN_INT_OCCURRED__SHIFT 0xb
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_UP_INT_OCCURRED_MASK 0x1000
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_UP_INT_OCCURRED__SHIFT 0xc
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_DOWN_INT_OCCURRED_MASK 0x2000
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_DOWN_INT_OCCURRED__SHIFT 0xd
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_UP_INT_OCCURRED_MASK 0x4000
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_UP_INT_OCCURRED__SHIFT 0xe
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_DOWN_INT_OCCURRED_MASK 0x8000
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_DOWN_INT_OCCURRED__SHIFT 0xf
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_UP_INT_OCCURRED_MASK 0x10000
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_UP_INT_OCCURRED__SHIFT 0x10
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_DOWN_INT_OCCURRED_MASK 0x20000
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_DOWN_INT_OCCURRED__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_MASK_MASK 0x1
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_CLEAR_MASK 0x2
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_MASK_MASK 0x4
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_CLEAR_MASK 0x8
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_MASK_MASK 0x10
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_CLEAR_MASK 0x20
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_MASK_MASK 0x40
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_CLEAR_MASK 0x80
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_MASK_MASK 0x100
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_CLEAR_MASK 0x200
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_MASK_MASK 0x400
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_CLEAR_MASK 0x800
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_MASK_MASK 0x1000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_CLEAR_MASK 0x2000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_MASK_MASK 0x4000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_CLEAR_MASK 0x8000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_MASK_MASK 0x10000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_MASK__SHIFT 0x10
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_CLEAR_MASK 0x20000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_MASK_MASK 0x40000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_MASK__SHIFT 0x12
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_CLEAR_MASK 0x80000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_MASK_MASK 0x100000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_MASK__SHIFT 0x14
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_CLEAR_MASK 0x200000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_CLEAR__SHIFT 0x15
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_MASK_MASK 0x400000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_MASK__SHIFT 0x16
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_CLEAR_MASK 0x800000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_MASK_MASK 0x1000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_MASK__SHIFT 0x18
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_CLEAR_MASK 0x2000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_CLEAR__SHIFT 0x19
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_MASK_MASK 0x4000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_MASK__SHIFT 0x1a
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_CLEAR_MASK 0x8000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_CLEAR__SHIFT 0x1b
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_MASK_MASK 0x10000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_MASK__SHIFT 0x1c
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_CLEAR_MASK 0x20000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_CLEAR__SHIFT 0x1d
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_MASK_MASK 0x40000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_MASK__SHIFT 0x1e
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_CLEAR_MASK 0x80000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_CLEAR__SHIFT 0x1f
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_MASK_MASK 0x1000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_MASK__SHIFT 0x18
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_CLEAR_MASK 0x2000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_CLEAR__SHIFT 0x19
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_MASK_MASK 0x4000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_MASK__SHIFT 0x1a
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_CLEAR_MASK 0x8000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_CLEAR__SHIFT 0x1b
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN_MASK 0x1
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN__SHIFT 0x0
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG_MASK 0xffffffff
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG__SHIFT 0x0
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG_MASK 0xffffffff
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG__SHIFT 0x0
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY_MASK 0x1
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY__SHIFT 0x0
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE_MASK 0x2
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE__SHIFT 0x1
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS_MASK 0x4
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS__SHIFT 0x2
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG_MASK 0xffff0000
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG__SHIFT 0x10
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX_MASK 0xff
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA__SHIFT 0x0
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x1ffff
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x1ffff
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x1ffff
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x1ffff
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x1ffff
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x1ffff
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x1
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x2
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x4
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x8
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xffff0000
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x1
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x2
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0xff00
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0xff0000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x1
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x100
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x10000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0xe0000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x1000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define DC_ABM1_CNTL__ABM1_EN_MASK 0x1
+#define DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT_MASK 0x700
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT__SHIFT 0x8
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE_MASK 0x80000000
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE__SHIFT 0x1f
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0xf
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0xf00
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0xf0000
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x3ff
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x3ff0000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x3ff
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x3ff0000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x1
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x100
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT_MASK 0x1
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT__SHIFT 0x0
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT_MASK 0x100
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT__SHIFT 0x8
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT_MASK 0x10000
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT__SHIFT 0x10
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x1
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x2
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x4
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x100
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x200
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x400
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x10000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x1000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x3
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x100
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x1000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x30000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x100000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x800000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x7000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xffffffff
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x3ff
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x3ff0000
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x3ff
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x3ff0000
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0xffffff
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN_MASK 0xffffff
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x3ff
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x3ff0000
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0xffffff
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0xffffff
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x1
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x2
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0xff00
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0xff0000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x1
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x2
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0xff00
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0xff0000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE_MASK 0x3ff
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE__SHIFT 0x0
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE_MASK 0xffc00
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE__SHIFT 0xa
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE_MASK 0x3ff00000
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE__SHIFT 0x14
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX_MASK 0xff
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA_MASK 0xffffffff
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_MASK 0x3ff
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM__SHIFT 0x0
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS_MASK 0x10000
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS__SHIFT 0x10
+#define CRTC_H_TOTAL__CRTC_H_TOTAL_MASK 0x3fff
+#define CRTC_H_TOTAL__CRTC_H_TOTAL__SHIFT 0x0
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START_MASK 0x3fff
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START__SHIFT 0x0
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END_MASK 0x3fff0000
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END__SHIFT 0x10
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START_MASK 0x3fff
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START__SHIFT 0x0
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END_MASK 0x3fff0000
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END__SHIFT 0x10
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL_MASK 0x1
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL__SHIFT 0x0
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN_MASK 0x10000
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN__SHIFT 0x10
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF_MASK 0x20000
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START_MASK 0x3fff
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START__SHIFT 0x0
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END_MASK 0x3fff0000
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END__SHIFT 0x10
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL_MASK 0x1
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL__SHIFT 0x0
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN_MASK 0x10000
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN__SHIFT 0x10
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF_MASK 0x20000
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF__SHIFT 0x11
+#define CRTC_VBI_END__CRTC_VBI_V_END_MASK 0x3fff
+#define CRTC_VBI_END__CRTC_VBI_V_END__SHIFT 0x0
+#define CRTC_VBI_END__CRTC_VBI_H_END_MASK 0x3fff0000
+#define CRTC_VBI_END__CRTC_VBI_H_END__SHIFT 0x10
+#define CRTC_V_TOTAL__CRTC_V_TOTAL_MASK 0x3fff
+#define CRTC_V_TOTAL__CRTC_V_TOTAL__SHIFT 0x0
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN_MASK 0x3fff
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN__SHIFT 0x0
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX_MASK 0x3fff
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX__SHIFT 0x0
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING_MASK 0x10000
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING__SHIFT 0x10
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL_MASK 0x1
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL_MASK 0x10
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL__SHIFT 0x4
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT_MASK 0x100
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT__SHIFT 0x8
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC_MASK 0x1000
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC__SHIFT 0xc
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN_MASK 0x8000
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0xf
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_MASK 0xffff0000
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x1
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x10
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x100
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x1000
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_MASK 0x1
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM__SHIFT 0x0
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR_MASK 0x10
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK 0x3fff
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START__SHIFT 0x0
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END_MASK 0x3fff0000
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END__SHIFT 0x10
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START_MASK 0x3fff
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START__SHIFT 0x0
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END_MASK 0x3fff0000
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END__SHIFT 0x10
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL_MASK 0x1
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL__SHIFT 0x0
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START_MASK 0x3fff
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START__SHIFT 0x0
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END_MASK 0x3fff0000
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END__SHIFT 0x10
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL_MASK 0x1
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL__SHIFT 0x0
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN_MASK 0x1
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN__SHIFT 0x0
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV_MASK 0x1e
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV__SHIFT 0x1
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT_MASK 0x3fff
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT__SHIFT 0x0
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT_MASK 0x3fff0000
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT__SHIFT 0x10
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT_MASK 0x1f
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT_MASK 0xe0
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT__SHIFT 0x5
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN_MASK 0x100
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN__SHIFT 0x8
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS_MASK 0x200
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS__SHIFT 0x9
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS_MASK 0x400
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS__SHIFT 0xa
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED_MASK 0x800
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED__SHIFT 0xb
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x3000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0xc
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x30000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT_MASK 0x300000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY_MASK 0x1f000000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY__SHIFT 0x18
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR_MASK 0x80000000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR__SHIFT 0x1f
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG_MASK 0x1
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT_MASK 0x1f
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT_MASK 0xe0
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT__SHIFT 0x5
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN_MASK 0x100
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN__SHIFT 0x8
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS_MASK 0x200
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS__SHIFT 0x9
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS_MASK 0x400
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS__SHIFT 0xa
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED_MASK 0x800
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED__SHIFT 0xb
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x3000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0xc
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x30000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT_MASK 0x300000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY_MASK 0x1f000000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY__SHIFT 0x18
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR_MASK 0x80000000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR__SHIFT 0x1f
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG_MASK 0x1
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE_MASK 0x3
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK_MASK 0x10
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x100
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED_MASK 0x10000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR_MASK 0x1000000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT_MASK 0x1f
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY_MASK 0x100
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY_MASK 0x10000
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS_MASK 0x1000000
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE_MASK 0x3
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_FRAME_COUNTER_MASK 0xff00
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_LINE_COUNTER_MASK 0x1fff0000
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define CRTC_AVSYNC_COUNTER__CRTC_AVSYNC_COUNTER_MASK 0xffffffff
+#define CRTC_AVSYNC_COUNTER__CRTC_AVSYNC_COUNTER__SHIFT 0x0
+#define CRTC_CONTROL__CRTC_MASTER_EN_MASK 0x1
+#define CRTC_CONTROL__CRTC_MASTER_EN__SHIFT 0x0
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x10
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x4
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL_MASK 0x300
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL__SHIFT 0x8
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL_MASK 0x1000
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0xc
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL_MASK 0x2000
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_POLARITY_MASK 0x4000
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE_MASK 0x10000
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL_MASK 0x700000
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL__SHIFT 0x14
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE_MASK 0x1000000
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN_MASK 0x20000000
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN__SHIFT 0x1d
+#define CRTC_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000
+#define CRTC_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define CRTC_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000
+#define CRTC_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE_MASK 0x1
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE__SHIFT 0x0
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK 0x100
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN__SHIFT 0x8
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE_MASK 0x10000
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE__SHIFT 0x10
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE_MASK 0x1
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE__SHIFT 0x0
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD_MASK 0x30000
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD_MASK 0x1
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD_MASK 0x2
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_INDICATION_OUTPUT_POLARITY_MASK 0x1
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_INDICATION_OUTPUT_POLARITY__SHIFT 0x0
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_ALIGNMENT_MASK 0x2
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_ALIGNMENT__SHIFT 0x1
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_BLUE_CB_MASK 0xfff
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_GREEN_Y_MASK 0xfff0000
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define CRTC_PIXEL_DATA_READBACK1__CRTC_PIXEL_DATA_RED_CR_MASK 0xfff
+#define CRTC_PIXEL_DATA_READBACK1__CRTC_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define CRTC_STATUS__CRTC_V_BLANK_MASK 0x1
+#define CRTC_STATUS__CRTC_V_BLANK__SHIFT 0x0
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP_MASK 0x2
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP__SHIFT 0x1
+#define CRTC_STATUS__CRTC_V_SYNC_A_MASK 0x4
+#define CRTC_STATUS__CRTC_V_SYNC_A__SHIFT 0x2
+#define CRTC_STATUS__CRTC_V_UPDATE_MASK 0x8
+#define CRTC_STATUS__CRTC_V_UPDATE__SHIFT 0x3
+#define CRTC_STATUS__CRTC_V_START_LINE_MASK 0x10
+#define CRTC_STATUS__CRTC_V_START_LINE__SHIFT 0x4
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE_MASK 0x20
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define CRTC_STATUS__CRTC_H_BLANK_MASK 0x10000
+#define CRTC_STATUS__CRTC_H_BLANK__SHIFT 0x10
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP_MASK 0x20000
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP__SHIFT 0x11
+#define CRTC_STATUS__CRTC_H_SYNC_A_MASK 0x40000
+#define CRTC_STATUS__CRTC_H_SYNC_A__SHIFT 0x12
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT_MASK 0x3fff
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT__SHIFT 0x0
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT_MASK 0x3fff0000
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT__SHIFT 0x10
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM_MASK 0x3fff
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM__SHIFT 0x0
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT_MASK 0xffffff
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT__SHIFT 0x0
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT_MASK 0x3fffffff
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT__SHIFT 0x0
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT_MASK 0x3fffffff
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT__SHIFT 0x0
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN_MASK 0x1
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT_MASK 0x1e
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT_MASK 0x1
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT__SHIFT 0x0
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x1
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x1
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x100
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE_MASK 0x30000
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE_MASK 0x1
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE__SHIFT 0x0
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT_MASK 0x100
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT_MASK 0x10000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT__SHIFT 0x10
+#define CRTC_STEREO_STATUS__CRTC_STEREO_EYE_FLAG_MASK 0x100000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_EYE_FLAG__SHIFT 0x14
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x3000000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x3fff
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x8000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY_MASK 0x10000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY__SHIFT 0x10
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EYE_FLAG_POLARITY_MASK 0x20000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x40000
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_FIELD_NUM_MASK 0x80000
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_FIELD_NUM__SHIFT 0x13
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x100000
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN_MASK 0x1000000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN__SHIFT 0x18
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED_MASK 0x1
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR_MASK 0x2
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR__SHIFT 0x1
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER_MASK 0x4
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x3
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT_MASK 0x3fff
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT_MASK 0x3fff0000
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT_MASK 0xffffff
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY_MASK 0x1
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY__SHIFT 0x0
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY_MASK 0x2
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY__SHIFT 0x1
+#define CRTC_START_LINE_CONTROL__CRTC_PREFETCH_EN_MASK 0x4
+#define CRTC_START_LINE_CONTROL__CRTC_PREFETCH_EN__SHIFT 0x2
+#define CRTC_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN_MASK 0x100
+#define CRTC_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN__SHIFT 0x8
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION_MASK 0xff000
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION__SHIFT 0xc
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK_MASK 0x1
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE_MASK 0x2
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK 0x10
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK__SHIFT 0x4
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE_MASK 0x20
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE__SHIFT 0x5
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK_MASK 0x100
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE_MASK 0x200
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x10000
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x20000
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK_MASK 0x1000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK__SHIFT 0x18
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK_MASK 0x2000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK__SHIFT 0x19
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE_MASK 0x4000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE__SHIFT 0x1a
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE_MASK 0x8000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE__SHIFT 0x1b
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK_MASK 0x10000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE_MASK 0x20000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK_MASK 0x1
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK__SHIFT 0x0
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING_MASK 0x1
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING__SHIFT 0x0
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY_MASK 0x100
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY__SHIFT 0x8
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x10000
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE_MASK 0x1
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE__SHIFT 0x0
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN_MASK 0x1
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN__SHIFT 0x0
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE_MASK 0x700
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE__SHIFT 0x8
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE_MASK 0x10000
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE__SHIFT 0x10
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT_MASK 0xff000000
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT__SHIFT 0x18
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0_MASK 0xf
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0__SHIFT 0x0
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1_MASK 0xf0
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1__SHIFT 0x4
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES_MASK 0xf00
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES__SHIFT 0x8
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES_MASK 0xf000
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES__SHIFT 0xc
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET_MASK 0xffff0000
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET__SHIFT 0x10
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA_MASK 0xffff
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA__SHIFT 0x0
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK_MASK 0x3f0000
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK__SHIFT 0x10
+#define CRTC_MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK_MASK 0x1
+#define CRTC_MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK__SHIFT 0x0
+#define CRTC_MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK_MASK 0x100
+#define CRTC_MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK__SHIFT 0x8
+#define CRTC_MASTER_UPDATE_LOCK__UNDERFLOW_UPDATE_LOCK_MASK 0x10000
+#define CRTC_MASTER_UPDATE_LOCK__UNDERFLOW_UPDATE_LOCK__SHIFT 0x10
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_MODE_MASK 0x7
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_MODE__SHIFT 0x0
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x30000
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x10
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE_MASK 0x3
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE__SHIFT 0x0
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_MASK 0xffffff00
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT__SHIFT 0x8
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER_MASK 0xff
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER__SHIFT 0x0
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED_MASK 0x1
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED__SHIFT 0x0
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED_MASK 0x10
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED__SHIFT 0x4
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR_MASK 0x10000
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR__SHIFT 0x10
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_MASK 0x100000
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR__SHIFT 0x14
+#define CRTC_MASTER_EN__CRTC_MASTER_EN_MASK 0x1
+#define CRTC_MASTER_EN__CRTC_MASTER_EN__SHIFT 0x0
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT_MASK 0xff
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT__SHIFT 0x0
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT_MASK 0x10000
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT__SHIFT 0x10
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED_MASK 0x1
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED__SHIFT 0x0
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK 0x100
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR__SHIFT 0x8
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE_MASK 0x3ff
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE__SHIFT 0x0
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN_MASK 0xffc00
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN__SHIFT 0xa
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED_MASK 0x3ff00000
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED__SHIFT 0x14
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT_MASK 0x3
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT__SHIFT 0x0
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT_MASK 0x300
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT__SHIFT 0x8
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT_MASK 0x30000
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT__SHIFT 0x10
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB_MASK 0x3ff
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y_MASK 0xffc00
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR_MASK 0x3ff00000
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x3
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x300
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x30000
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB_MASK 0x3ff
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB__SHIFT 0x0
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y_MASK 0xffc00
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y__SHIFT 0xa
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR_MASK 0x3ff00000
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR__SHIFT 0x14
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT_MASK 0x3
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT_MASK 0x300
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT_MASK 0x30000
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_START_MASK 0x3fff
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_END_MASK 0x3fff0000
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x10
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x100
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_STATUS_MASK 0x1000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x10000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK 0x100000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x1000000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define CRTC_VERTICAL_INTERRUPT1_POSITION__CRTC_VERTICAL_INTERRUPT1_LINE_START_MASK 0x3fff
+#define CRTC_VERTICAL_INTERRUPT1_POSITION__CRTC_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x100
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_STATUS_MASK 0x1000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x10000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_CLEAR_MASK 0x100000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x1000000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define CRTC_VERTICAL_INTERRUPT2_POSITION__CRTC_VERTICAL_INTERRUPT2_LINE_START_MASK 0x3fff
+#define CRTC_VERTICAL_INTERRUPT2_POSITION__CRTC_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x100
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_STATUS_MASK 0x1000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x10000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_CLEAR_MASK 0x100000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x1000000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define CRTC_CRC_CNTL__CRTC_CRC_EN_MASK 0x1
+#define CRTC_CRC_CNTL__CRTC_CRC_EN__SHIFT 0x0
+#define CRTC_CRC_CNTL__CRTC_CRC_CONT_EN_MASK 0x10
+#define CRTC_CRC_CNTL__CRTC_CRC_CONT_EN__SHIFT 0x4
+#define CRTC_CRC_CNTL__CRTC_CRC_STEREO_MODE_MASK 0x300
+#define CRTC_CRC_CNTL__CRTC_CRC_STEREO_MODE__SHIFT 0x8
+#define CRTC_CRC_CNTL__CRTC_CRC_INTERLACE_MODE_MASK 0x3000
+#define CRTC_CRC_CNTL__CRTC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define CRTC_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x10000
+#define CRTC_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x10
+#define CRTC_CRC_CNTL__CRTC_CRC0_SELECT_MASK 0x700000
+#define CRTC_CRC_CNTL__CRTC_CRC0_SELECT__SHIFT 0x14
+#define CRTC_CRC_CNTL__CRTC_CRC1_SELECT_MASK 0x7000000
+#define CRTC_CRC_CNTL__CRTC_CRC1_SELECT__SHIFT 0x18
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define CRTC_CRC0_DATA_RG__CRC0_R_CR_MASK 0xffff
+#define CRTC_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define CRTC_CRC0_DATA_RG__CRC0_G_Y_MASK 0xffff0000
+#define CRTC_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define CRTC_CRC0_DATA_B__CRC0_B_CB_MASK 0xffff
+#define CRTC_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define CRTC_CRC1_DATA_RG__CRC1_R_CR_MASK 0xffff
+#define CRTC_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define CRTC_CRC1_DATA_RG__CRC1_G_Y_MASK 0xffff0000
+#define CRTC_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define CRTC_CRC1_DATA_B__CRC1_B_CB_MASK 0xffff
+#define CRTC_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_ENABLE_MASK 0x3
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE_MASK 0x8
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE__SHIFT 0x3
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_MASK 0x60
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW__SHIFT 0x5
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE_MASK 0x200
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE__SHIFT 0x9
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY_MASK 0x1000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY__SHIFT 0xc
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY_MASK 0x2000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY__SHIFT 0xd
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_INTERLACE_MODE_MASK 0x4000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_INTERLACE_MODE__SHIFT 0xe
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_MASTER_FRAME_RATE_MASK 0x7000000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_MASTER_FRAME_RATE__SHIFT 0x18
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_SLAVE_FRAME_RATE_MASK 0x70000000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_SLAVE_FRAME_RATE__SHIFT 0x1c
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_X_MASK 0x3fff
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_X__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_Y_MASK 0x3fff0000
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_Y__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_X_MASK 0x3fff
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_X__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_Y_MASK 0x3fff0000
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_Y__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE_MASK 0x1
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_STATUS_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_STATUS__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_STATUS_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_STATUS__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_CLEAR_MASK 0x10000
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_CLEAR__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE_MASK 0x100000
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE__SHIFT 0x14
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_MASK 0xe0000000
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT__SHIFT 0x1d
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_ENABLE_MASK 0x1
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_STATUS_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_STATUS__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_STATUS_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_STATUS__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_CLEAR_MASK 0x10000
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_CLEAR__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_TYPE_MASK 0x100000
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_TYPE__SHIFT 0x14
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE_MASK 0x1
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_STATUS_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_STATUS__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_STATUS_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_STATUS__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR_MASK 0x10000
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE_MASK 0x100000
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE__SHIFT 0x14
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_EVENT_MASK_MASK 0xffff
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_FRAME_COUNT_MASK 0xff0000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_ENABLE_MASK 0x1000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_SS_STATUS_MASK 0x2000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_SS_STATUS__SHIFT 0x19
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_STATUS_MASK 0x4000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_CLEAR_MASK 0x8000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_TYPE_MASK 0x10000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE_MASK 0x40000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_MASK 0x1
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN__SHIFT 0x0
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB_MASK 0x10
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB__SHIFT 0x4
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x300
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x1000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_MASK 0x10000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x20000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_MASK 0xc0000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT_MASK 0xff
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY_MASK 0xff00
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x10000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE_MASK 0x60000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR_MASK 0x80000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED_MASK 0x100000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x800000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASK 0xff000000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP__SHIFT 0x18
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START_MASK 0x3fff
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START__SHIFT 0x0
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END_MASK 0x3fff0000
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END__SHIFT 0x10
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM_MASK 0x3fff
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM__SHIFT 0x0
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY_MASK 0x1f0000
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY__SHIFT 0x10
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS_MASK 0x10000000
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX_MASK 0xff
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA__SHIFT 0x0
+#define DAC_ENABLE__DAC_ENABLE_MASK 0x1
+#define DAC_ENABLE__DAC_ENABLE__SHIFT 0x0
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE_MASK 0x2
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE__SHIFT 0x1
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW_MASK 0xc
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW__SHIFT 0x2
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_MASK 0x10
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR__SHIFT 0x4
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK_MASK 0x20
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK__SHIFT 0x5
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM_MASK 0x100
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM__SHIFT 0x8
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT_MASK 0x7
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT__SHIFT 0x0
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT_MASK 0x8
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT__SHIFT 0x3
+#define DAC_CRC_EN__DAC_CRC_EN_MASK 0x1
+#define DAC_CRC_EN__DAC_CRC_EN__SHIFT 0x0
+#define DAC_CRC_EN__DAC_CRC_CONT_EN_MASK 0x10000
+#define DAC_CRC_EN__DAC_CRC_CONT_EN__SHIFT 0x10
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD_MASK 0x1
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD__SHIFT 0x0
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKB_MASK 0x100
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKB__SHIFT 0x8
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK_MASK 0x3ff
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK_MASK 0xffc00
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK__SHIFT 0xa
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK_MASK 0x3ff00000
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK__SHIFT 0x14
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK_MASK 0x3f
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK__SHIFT 0x0
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE_MASK 0x3ff
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE__SHIFT 0x0
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN_MASK 0xffc00
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN__SHIFT 0xa
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED_MASK 0x3ff00000
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED__SHIFT 0x14
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL_MASK 0x3f
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL__SHIFT 0x0
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE_MASK 0x1
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE__SHIFT 0x0
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE_MASK 0x100
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE__SHIFT 0x8
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE_MASK 0x10000
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE__SHIFT 0x10
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT_MASK 0x7
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE_MASK 0x3
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER_MASK 0xff00
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER__SHIFT 0x8
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK_MASK 0x70000
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK__SHIFT 0x10
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER_MASK 0xff
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE_MASK 0x100
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE__SHIFT 0x8
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY_MASK 0xff
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY_MASK 0xff00
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY__SHIFT 0x8
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS_MASK 0x1
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS__SHIFT 0x0
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT_MASK 0x10
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT__SHIFT 0x4
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE_MASK 0x300
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE__SHIFT 0x8
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE_MASK 0x30000
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE__SHIFT 0x10
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE_MASK 0x3000000
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE__SHIFT 0x18
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK_MASK 0x1
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK__SHIFT 0x0
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE_MASK 0x10000
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE__SHIFT 0x10
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN_MASK 0x1
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN__SHIFT 0x0
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL_MASK 0x700
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL__SHIFT 0x8
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKB_ONLY_MASK 0x1000000
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKB_ONLY__SHIFT 0x18
+#define DAC_FORCE_DATA__DAC_FORCE_DATA_MASK 0x3ff
+#define DAC_FORCE_DATA__DAC_FORCE_DATA__SHIFT 0x0
+#define DAC_POWERDOWN__DAC_POWERDOWN_MASK 0x1
+#define DAC_POWERDOWN__DAC_POWERDOWN__SHIFT 0x0
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE_MASK 0x100
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE__SHIFT 0x8
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN_MASK 0x10000
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN__SHIFT 0x10
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED_MASK 0x1000000
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED__SHIFT 0x18
+#define DAC_CONTROL__DAC_DFORCE_EN_MASK 0x1
+#define DAC_CONTROL__DAC_DFORCE_EN__SHIFT 0x0
+#define DAC_CONTROL__DAC_TV_ENABLE_MASK 0x100
+#define DAC_CONTROL__DAC_TV_ENABLE__SHIFT 0x8
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT_MASK 0x10000
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT__SHIFT 0x10
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN_MASK 0x1
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN__SHIFT 0x0
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN_MASK 0x100
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN__SHIFT 0x8
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE_MASK 0x10000
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE__SHIFT 0x10
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE_MASK 0x20000
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE__SHIFT 0x11
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE_MASK 0x40000
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE__SHIFT 0x12
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_MASK 0x1
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT__SHIFT 0x0
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE_MASK 0x2
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE__SHIFT 0x1
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN_MASK 0x4
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN__SHIFT 0x2
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED_MASK 0x8
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED__SHIFT 0x3
+#define DAC_PWR_CNTL__DAC_BG_MODE_MASK 0x3
+#define DAC_PWR_CNTL__DAC_BG_MODE__SHIFT 0x0
+#define DAC_PWR_CNTL__DAC_PWRCNTL_MASK 0x30000
+#define DAC_PWR_CNTL__DAC_PWRCNTL__SHIFT 0x10
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG_MASK 0xffffffff
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG__SHIFT 0x0
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL_MASK 0x2
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL_MASK 0xfc
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL_MASK 0xfc00
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL_MASK 0xf0000
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL_MASK 0x3c00000
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED_MASK 0x20000000
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED__SHIFT 0x1d
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_INDEX_MASK 0xff
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DAC_TEST_DEBUG_DATA__DAC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DAC_TEST_DEBUG_DATA__DAC_TEST_DEBUG_DATA__SHIFT 0x0
+#define PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x1ff
+#define PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0xe00
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x3000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x4000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xe
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x8000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0xf
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_SEL_MASK 0x1f0000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x10
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x200000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x15
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x400000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x16
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x800000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x17
+#define PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x1000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x18
+#define PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x2000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x19
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_TYPE_MASK 0x4000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_TYPE__SHIFT 0x1a
+#define PERFCOUNTER_CNTL__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x8000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x1b
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xe0000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x3
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x4
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x30
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x40
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x300
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x400
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x3000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x4000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x30000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x40000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x300000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x400000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x3000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x4000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define PERFMON_CNTL__PERFMON_STATE_MASK 0x3
+#define PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define PERFMON_CNTL__PERFMON_RUN_ENABLE_SEL_MASK 0xfc
+#define PERFMON_CNTL__PERFMON_RUN_ENABLE_SEL__SHIFT 0x2
+#define PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0xfffff00
+#define PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x1
+#define PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x2
+#define PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x1
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x2
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x4
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x8
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x10
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x20
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x40
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x80
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x100
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x200
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x400
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x800
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x1000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x2000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x4000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x8000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xffff0000
+#define PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xffffffff
+#define PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define PERFMON_HI__PERFMON_HI_MASK 0xffff
+#define PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define PERFMON_HI__PERFMON_READ_SEL_MASK 0xe0000000
+#define PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define PERFMON_LOW__PERFMON_LOW_MASK 0xffffffff
+#define PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_INDEX_MASK 0xff
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_INDEX__SHIFT 0x0
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define PERFMON_TEST_DEBUG_DATA__PERFMON_TEST_DEBUG_DATA_MASK 0xffffffff
+#define PERFMON_TEST_DEBUG_DATA__PERFMON_TEST_DEBUG_DATA__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_CLOCK_EN_MASK 0x1
+#define REFCLK_CNTL__REFCLK_CLOCK_EN__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_SRC_SEL_MASK 0x2
+#define REFCLK_CNTL__REFCLK_SRC_SEL__SHIFT 0x1
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P0PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x1
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P0PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x0
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P1PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x2
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P1PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x1
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P2PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x4
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P2PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x2
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P3PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x8
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P3PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x3
+#define DCCG_CBUS_SPARE__P0PLL_CBUS_SPARE_MASK 0xff
+#define DCCG_CBUS_SPARE__P0PLL_CBUS_SPARE__SHIFT 0x0
+#define DCCG_CBUS_SPARE__P1PLL_CBUS_SPARE_MASK 0xff00
+#define DCCG_CBUS_SPARE__P1PLL_CBUS_SPARE__SHIFT 0x8
+#define DCCG_CBUS_SPARE__P2PLL_CBUS_SPARE_MASK 0xff0000
+#define DCCG_CBUS_SPARE__P2PLL_CBUS_SPARE__SHIFT 0x10
+#define DCCG_CBUS_SPARE__P3PLL_CBUS_SPARE_MASK 0xff000000
+#define DCCG_CBUS_SPARE__P3PLL_CBUS_SPARE__SHIFT 0x18
+#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY_MASK 0xf
+#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x7
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__UNB_DB_CLK_ENABLE_MASK 0x100
+#define DPREFCLK_CNTL__UNB_DB_CLK_ENABLE__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0xff
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION_MASK 0xff00
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define AVSYNC_COUNTER_WRITE__AVSYNC_COUNTER_WRVALUE_MASK 0xffffffff
+#define AVSYNC_COUNTER_WRITE__AVSYNC_COUNTER_WRVALUE__SHIFT 0x0
+#define AVSYNC_COUNTER_CONTROL__AVSYNC_COUNTER_ENABLE_MASK 0x1
+#define AVSYNC_COUNTER_CONTROL__AVSYNC_COUNTER_ENABLE__SHIFT 0x0
+#define AVSYNC_COUNTER_READ__AVSYNC_COUNTER_RDVALUE_MASK 0xffffffff
+#define AVSYNC_COUNTER_READ__AVSYNC_COUNTER_RDVALUE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x1
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xffffffff
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xffffffff
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xffffffff
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xffffffff
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xffffffff
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x1
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x30
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x100
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x200
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x30000
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x1000000
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x2000000
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xffffffff
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_ENABLE_MASK 0x1
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_ENABLE__SHIFT 0x0
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_VALUE_MASK 0x1ff0
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_VALUE__SHIFT 0x4
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_OCCURRED_MASK 0x10000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_OCCURRED__SHIFT 0x10
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_CLEAR_MASK 0x20000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_CLEAR__SHIFT 0x11
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_ENABLE_MASK 0x100000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_ENABLE__SHIFT 0x14
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_SRC_SEL_MASK 0x200000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_SRC_SEL__SHIFT 0x15
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_MASK 0xff000000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT__SHIFT 0x18
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_INT_MASK 0x1
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_INT__SHIFT 0x0
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_STATUS_MASK 0xffff0000
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_STATUS__SHIFT 0x10
+#define SMU_CONTROL__DISPLAY0_FORCE_VBI_MASK 0x1
+#define SMU_CONTROL__DISPLAY0_FORCE_VBI__SHIFT 0x0
+#define SMU_CONTROL__DISPLAY1_FORCE_VBI_MASK 0x2
+#define SMU_CONTROL__DISPLAY1_FORCE_VBI__SHIFT 0x1
+#define SMU_CONTROL__DISPLAY2_FORCE_VBI_MASK 0x4
+#define SMU_CONTROL__DISPLAY2_FORCE_VBI__SHIFT 0x2
+#define SMU_CONTROL__DISPLAY3_FORCE_VBI_MASK 0x8
+#define SMU_CONTROL__DISPLAY3_FORCE_VBI__SHIFT 0x3
+#define SMU_CONTROL__DISPLAY4_FORCE_VBI_MASK 0x10
+#define SMU_CONTROL__DISPLAY4_FORCE_VBI__SHIFT 0x4
+#define SMU_CONTROL__DISPLAY5_FORCE_VBI_MASK 0x20
+#define SMU_CONTROL__DISPLAY5_FORCE_VBI__SHIFT 0x5
+#define SMU_CONTROL__DISPLAY_V0_FORCE_VBI_MASK 0x40
+#define SMU_CONTROL__DISPLAY_V0_FORCE_VBI__SHIFT 0x6
+#define SMU_CONTROL__DISPLAY_V1_FORCE_VBI_MASK 0x80
+#define SMU_CONTROL__DISPLAY_V1_FORCE_VBI__SHIFT 0x7
+#define SMU_CONTROL__MCIF_WB_FORCE_VBI_MASK 0x100
+#define SMU_CONTROL__MCIF_WB_FORCE_VBI__SHIFT 0x8
+#define SMU_CONTROL__SMU_DC_INT_CLEAR_MASK 0x10000
+#define SMU_CONTROL__SMU_DC_INT_CLEAR__SHIFT 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE_MASK 0x1
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE__SHIFT 0x0
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS_MASK 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS__SHIFT 0x4
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT_MASK 0xffff0000
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT__SHIFT 0x10
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE_MASK 0x1
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE__SHIFT 0x0
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE_MASK 0x10
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE__SHIFT 0x4
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE_MASK 0x1
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x1
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x2
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE_MASK 0x4
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x10
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE_MASK 0x20
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x40
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPDBG_CLK_GATE_DISABLE_MASK 0x80
+#define DCCG_GATE_DISABLE_CNTL__DPDBG_CLK_GATE_DISABLE__SHIFT 0x7
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x100
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x20000
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x40000
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x80000
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE_MASK 0x200000
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x400000
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__UNB_DB_CLK_GATE_DISABLE_MASK 0x800000
+#define DCCG_GATE_DISABLE_CNTL__UNB_DB_CLK_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x4000000
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x8000000
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x8
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x20
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x40
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_FE_GATE_DISABLE_MASK 0x100
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_FE_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_FE_GATE_DISABLE_MASK 0x200
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_FE_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x10000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x20000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x40000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x80000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x100000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x200000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x400000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_GATE_DISABLE_MASK 0x1000000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_GATE_DISABLE_MASK 0x2000000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_GATE_DISABLE__SHIFT 0x19
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0xf
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0xff0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY_MASK 0xf
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY_MASK 0xff0
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SCLK_CGTT_BLK_CTRL_REG__CGTT_SCLK_OVERRIDE_MASK 0x1000
+#define SCLK_CGTT_BLK_CTRL_REG__CGTT_SCLK_OVERRIDE__SHIFT 0xc
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0xf
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0xff0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY_MASK 0xf
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY_MASK 0xff0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0xf
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0xff0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xffffffff
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE_MASK 0x1
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE__SHIFT 0x0
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0_MASK 0x30
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x7f
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x7f00
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x10000
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x20000
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x100000
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x100
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x1ffff
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x100000
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x3fff
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0xf0000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x100000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0xe000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x1
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x10
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x20
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x40
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x80
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL_MASK 0x700
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xfffff800
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE_MASK 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE_MASK 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x8
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE_MASK 0x10
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE_MASK 0x20
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE__SHIFT 0x5
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE_MASK 0x40
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE__SHIFT 0x6
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE_MASK 0x80
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE__SHIFT 0x7
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE_MASK 0x100
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE__SHIFT 0x8
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x10
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x20
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL_MASK 0x100
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL__SHIFT 0x8
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL_MASK 0x200
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL__SHIFT 0x9
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xffffffff
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xffffffff
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x10
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x20
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL_MASK 0x100
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL__SHIFT 0x8
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL_MASK 0x200
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL__SHIFT 0x9
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xffffffff
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xffffffff
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x10
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x4
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE_MASK 0x20
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE__SHIFT 0x5
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL_MASK 0x100
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL__SHIFT 0x8
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL_MASK 0x200
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL__SHIFT 0x9
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xffffffff
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x0
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xffffffff
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x0
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x10
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x4
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE_MASK 0x20
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE__SHIFT 0x5
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL_MASK 0x100
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL__SHIFT 0x8
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL_MASK 0x200
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL__SHIFT 0x9
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xffffffff
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x0
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xffffffff
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x0
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE_MASK 0x10
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE__SHIFT 0x4
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_DS_DISABLE_MASK 0x20
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_DS_DISABLE__SHIFT 0x5
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL_MASK 0x100
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL__SHIFT 0x8
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL_MASK 0x200
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL__SHIFT 0x9
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO4_PHASE__DP_DTO4_PHASE_MASK 0xffffffff
+#define DP_DTO4_PHASE__DP_DTO4_PHASE__SHIFT 0x0
+#define DP_DTO4_MODULO__DP_DTO4_MODULO_MASK 0xffffffff
+#define DP_DTO4_MODULO__DP_DTO4_MODULO__SHIFT 0x0
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE_MASK 0x10
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE__SHIFT 0x4
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_DS_DISABLE_MASK 0x20
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_DS_DISABLE__SHIFT 0x5
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL_MASK 0x100
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL__SHIFT 0x8
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL_MASK 0x200
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL__SHIFT 0x9
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO5_PHASE__DP_DTO5_PHASE_MASK 0xffffffff
+#define DP_DTO5_PHASE__DP_DTO5_PHASE__SHIFT 0x0
+#define DP_DTO5_MODULO__DP_DTO5_MODULO_MASK 0xffffffff
+#define DP_DTO5_MODULO__DP_DTO5_MODULO__SHIFT 0x0
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x1
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x0
+#define DCCG_SOFT_RESET__PCIE_REFCLK_SOFT_RESET_MASK 0x2
+#define DCCG_SOFT_RESET__PCIE_REFCLK_SOFT_RESET__SHIFT 0x1
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO_MASK 0x4
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x2
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST_MASK 0x8
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x3
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET_MASK 0x10
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET__SHIFT 0x4
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET_MASK 0x100
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET__SHIFT 0x8
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET_MASK 0x1000
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET__SHIFT 0xc
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET_MASK 0x2000
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET__SHIFT 0xd
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET_MASK 0x4000
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET__SHIFT 0xe
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET_MASK 0x8000
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET__SHIFT 0xf
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET_MASK 0x10000
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET__SHIFT 0x10
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET_MASK 0x20000
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET__SHIFT 0x11
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET_MASK 0x40000
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET__SHIFT 0x12
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET_MASK 0x80000
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET__SHIFT 0x13
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET_MASK 0x100000
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET__SHIFT 0x14
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET_MASK 0x200000
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET__SHIFT 0x15
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x10
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x10
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN_MASK 0x10
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN_MASK 0x10
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN_MASK 0x10
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN_MASK 0x10
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC__SHIFT 0x8
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_EN_MASK 0x10
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_EN__SHIFT 0x4
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_SRC_MASK 0x700
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_SRC__SHIFT 0x8
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x7
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x30
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL_MASK 0x3000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL__SHIFT 0xc
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN_MASK 0x10000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN__SHIFT 0x10
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x100000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x1000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX_MASK 0xff
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL_MASK 0x1ff
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL__SHIFT 0x0
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV_MASK 0x1000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV__SHIFT 0xc
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL_MASK 0x1ff0000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL__SHIFT 0x10
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV_MASK 0x10000000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV__SHIFT 0x1c
+#define CPLL_MACRO_CNTL_RESERVED0__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED0__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED1__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED1__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED2__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED2__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED3__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED3__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED4__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED4__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED5__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED5__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED6__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED6__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED7__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED7__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED8__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED8__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED9__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED9__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED10__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED10__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED11__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED11__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED0__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED0__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED1__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED1__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED2__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED2__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED3__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED3__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED4__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED4__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED5__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED5__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED6__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED6__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED7__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED7__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED8__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED8__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED9__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED9__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED10__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED10__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED11__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED11__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED12__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED12__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED13__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED13__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED14__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED14__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED15__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED15__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED16__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED16__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED17__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED17__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED18__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED18__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED19__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED19__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED20__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED20__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED21__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED21__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED22__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED22__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED23__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED23__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED24__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED24__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED25__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED25__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED26__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED26__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED27__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED27__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED28__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED28__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED29__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED29__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED30__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED30__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED31__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED31__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED32__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED32__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED33__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED33__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED34__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED34__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED35__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED35__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED36__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED36__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED37__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED37__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED38__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED38__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED39__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED39__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED40__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED40__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED41__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED41__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x7f
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x7f00
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x18000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x20000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x40000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x80000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE_MASK 0x100000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG_MASK 0x200000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG_MASK 0x400000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER_MASK 0x7f000000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER__SHIFT 0x18
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK5_SEL__DCDEBUG_BUS_CLK5_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK5_SEL__DCDEBUG_BUS_CLK5_SEL__SHIFT 0x0
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL_MASK 0x1f
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL__SHIFT 0x0
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL_MASK 0x3e0
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL__SHIFT 0x5
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN_MASK 0x1000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN__SHIFT 0xc
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL_MASK 0xf8000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL__SHIFT 0xf
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL_MASK 0x1f00000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL__SHIFT 0x14
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN_MASK 0x10000000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN__SHIFT 0x1c
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL_MASK 0x1f
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL__SHIFT 0x0
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_24BIT_SEL_MASK 0x800000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_24BIT_SEL__SHIFT 0x17
+#define DCDEBUG_OUT_CNTL__DCDEBUG_CLK_SEL_MASK 0x1f000000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_CLK_SEL__SHIFT 0x18
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA_MASK 0xffffffff
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA__SHIFT 0x0
+#define DMIF_CONTROL__DMIF_BUFF_SIZE_MASK 0x3
+#define DMIF_CONTROL__DMIF_BUFF_SIZE__SHIFT 0x0
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK_MASK 0x4
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK__SHIFT 0x2
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT_MASK 0x10
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT__SHIFT 0x4
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE_MASK 0x700
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE__SHIFT 0x8
+#define DMIF_CONTROL__DMIF_UNDERFLOW_RECOVERY_EN_MASK 0x800
+#define DMIF_CONTROL__DMIF_UNDERFLOW_RECOVERY_EN__SHIFT 0xb
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE_MASK 0x1f000
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE__SHIFT 0xc
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS_MASK 0x7e0000
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS__SHIFT 0x11
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION_MASK 0x1f000000
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION__SHIFT 0x18
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN_MASK 0x60000000
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN__SHIFT 0x1d
+#define DMIF_CONTROL__DMIF_PSTATE_URGENT_DISABLE_MASK 0x80000000
+#define DMIF_CONTROL__DMIF_PSTATE_URGENT_DISABLE__SHIFT 0x1f
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE_MASK 0x3f
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE__SHIFT 0x0
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE_MASK 0x3f00
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE__SHIFT 0x8
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x10000
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x10
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x20000
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x11
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT_MASK 0xf00000
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT__SHIFT 0x14
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT_MASK 0xf000000
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT__SHIFT 0x18
+#define DMIF_STATUS__DMIF_UNDERFLOW_MASK 0x10000000
+#define DMIF_STATUS__DMIF_UNDERFLOW__SHIFT 0x1c
+#define DMIF_STATUS__DMIF_MC_LATENCY_TAP_POINT_MASK 0x60000000
+#define DMIF_STATUS__DMIF_MC_LATENCY_TAP_POINT__SHIFT 0x1d
+#define DMIF_STATUS__DMIF_MC_LATENCY_REQ_TYPE_MASK 0x80000000
+#define DMIF_STATUS__DMIF_MC_LATENCY_REQ_TYPE__SHIFT 0x1f
+#define DMIFV_STATUS__DMIFV_MC_SEND_ON_IDLE_MASK 0xf
+#define DMIFV_STATUS__DMIFV_MC_SEND_ON_IDLE__SHIFT 0x0
+#define DMIFV_STATUS__DMIFV_CLEAR_MC_SEND_ON_IDLE_MASK 0xf00
+#define DMIFV_STATUS__DMIFV_CLEAR_MC_SEND_ON_IDLE__SHIFT 0x8
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG_MASK 0xffffffff
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG__SHIFT 0x0
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD_MASK 0xffff
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD__SHIFT 0x0
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT_MASK 0xffff0000
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT__SHIFT 0x10
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE6_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE6_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE7_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE7_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define DMIF_P_VMID__P_VMID_PIPE0_MASK 0xf
+#define DMIF_P_VMID__P_VMID_PIPE0__SHIFT 0x0
+#define DMIF_P_VMID__P_VMID_PIPE1_MASK 0xf0
+#define DMIF_P_VMID__P_VMID_PIPE1__SHIFT 0x4
+#define DMIF_P_VMID__P_VMID_PIPE2_MASK 0xf00
+#define DMIF_P_VMID__P_VMID_PIPE2__SHIFT 0x8
+#define DMIF_P_VMID__P_VMID_PIPE3_MASK 0xf000
+#define DMIF_P_VMID__P_VMID_PIPE3__SHIFT 0xc
+#define DMIF_P_VMID__P_VMID_PIPE4_MASK 0xf0000
+#define DMIF_P_VMID__P_VMID_PIPE4__SHIFT 0x10
+#define DMIF_P_VMID__P_VMID_PIPE5_MASK 0xf00000
+#define DMIF_P_VMID__P_VMID_PIPE5__SHIFT 0x14
+#define DMIF_P_VMID__P_VMID_PIPE6_MASK 0xf000000
+#define DMIF_P_VMID__P_VMID_PIPE6__SHIFT 0x18
+#define DMIF_P_VMID__P_VMID_PIPE7_MASK 0xf0000000
+#define DMIF_P_VMID__P_VMID_PIPE7__SHIFT 0x1c
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_EN_MASK 0x1
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_EN__SHIFT 0x0
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_LEVEL_MASK 0xf0
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_LEVEL__SHIFT 0x4
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX_MASK 0xff
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA__SHIFT 0x0
+#define DMIF_DEBUG02_CORE0__DB_DATA_MASK 0xffff
+#define DMIF_DEBUG02_CORE0__DB_DATA__SHIFT 0x0
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN_MASK 0x10000
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN__SHIFT 0x10
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER_MASK 0xffe0000
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER__SHIFT 0x11
+#define DMIF_DEBUG02_CORE1__DB_DATA_MASK 0xffff
+#define DMIF_DEBUG02_CORE1__DB_DATA__SHIFT 0x0
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN_MASK 0x10000
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN__SHIFT 0x10
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER_MASK 0xffe0000
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER__SHIFT 0x11
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE_MASK 0x70
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE_MASK 0x30000000
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE__SHIFT 0x1c
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS_MASK 0x1
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS__SHIFT 0x0
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS_MASK 0x2
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS__SHIFT 0x1
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS_MASK 0x4
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS__SHIFT 0x2
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS_MASK 0x8
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS__SHIFT 0x3
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS_MASK 0x10
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS__SHIFT 0x4
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS_MASK 0x20
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS__SHIFT 0x5
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS_MASK 0x100
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS__SHIFT 0x8
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS_MASK 0x200
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS__SHIFT 0x9
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE6_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE6_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE7_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE7_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define DVMM_REG_RD_STATUS__DVMM_REG_RD_STATUS_MASK 0x1
+#define DVMM_REG_RD_STATUS__DVMM_REG_RD_STATUS__SHIFT 0x0
+#define DVMM_REG_RD_DATA__DVMM_REG_RD_DATA_MASK 0xffffffff
+#define DVMM_REG_RD_DATA__DVMM_REG_RD_DATA__SHIFT 0x0
+#define DVMM_PTE_REQ__MAX_PTEREQ_TO_ISSUE_MASK 0xff
+#define DVMM_PTE_REQ__MAX_PTEREQ_TO_ISSUE__SHIFT 0x0
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_INT_MASK 0xff00
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_INT__SHIFT 0x8
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER_MASK 0x3f0000
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER__SHIFT 0x10
+#define DVMM_CNTL__PDE_CACHE_INVALIDATE_CNTL_MASK 0x3
+#define DVMM_CNTL__PDE_CACHE_INVALIDATE_CNTL__SHIFT 0x0
+#define DVMM_CNTL__DEBUG_SYSTEM_ACCESS_MODE_MASK 0x30
+#define DVMM_CNTL__DEBUG_SYSTEM_ACCESS_MODE__SHIFT 0x4
+#define DVMM_CNTL__FORCE_SYSTEM_ACCESS_MODE_MASK 0x80
+#define DVMM_CNTL__FORCE_SYSTEM_ACCESS_MODE__SHIFT 0x7
+#define DVMM_CNTL__DBG_DCE_VMID_MASK 0xf00
+#define DVMM_CNTL__DBG_DCE_VMID__SHIFT 0x8
+#define DVMM_CNTL__FORCE_DBG_DCE_VMID_MASK 0x8000
+#define DVMM_CNTL__FORCE_DBG_DCE_VMID__SHIFT 0xf
+#define DVMM_CNTL__OVERRIDE_SNOOP_MASK 0x20000
+#define DVMM_CNTL__OVERRIDE_SNOOP__SHIFT 0x11
+#define DVMM_CNTL__ENABLE_PDE_INVALIDATE_MASK 0x40000
+#define DVMM_CNTL__ENABLE_PDE_INVALIDATE__SHIFT 0x12
+#define DVMM_FAULT_STATUS__DVMM_FAULT_STATUS_MASK 0xffffffff
+#define DVMM_FAULT_STATUS__DVMM_FAULT_STATUS__SHIFT 0x0
+#define DVMM_FAULT_ADDR__DVMM_FAULT_ADDR_MASK 0xffffffff
+#define DVMM_FAULT_ADDR__DVMM_FAULT_ADDR__SHIFT 0x0
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE_MASK 0x1
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE__SHIFT 0x0
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE_MASK 0x18
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE__SHIFT 0x3
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES_MASK 0xe0
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES__SHIFT 0x5
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS_MASK 0x700
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS__SHIFT 0x8
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE_MASK 0x800
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE__SHIFT 0xb
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE_MASK 0x7000
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE__SHIFT 0xc
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN_MASK 0xfff0000
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN__SHIFT 0x10
+#define MCIF_CONTROL__MCIF_BUFF_SIZE_MASK 0x3
+#define MCIF_CONTROL__MCIF_BUFF_SIZE__SHIFT 0x0
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE_MASK 0x10
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE__SHIFT 0x4
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE_MASK 0x100
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE__SHIFT 0x8
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL_MASK 0xf000
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL__SHIFT 0xc
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL_MASK 0xff0000
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL__SHIFT 0x10
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY_MASK 0x3f000000
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY__SHIFT 0x18
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x40000000
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x1e
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x80000000
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x1f
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT_MASK 0xff
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT__SHIFT 0x0
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT_MASK 0xff00
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT__SHIFT 0x8
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX_MASK 0xff
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA_MASK 0xffffffff
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA__SHIFT 0x0
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffff
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x0
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E_MASK 0xffffffff
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E__SHIFT 0x0
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F_MASK 0xffffffff
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F__SHIFT 0x0
+#define MCIF_VMID__MCIF_WR_VMID_MASK 0xf
+#define MCIF_VMID__MCIF_WR_VMID__SHIFT 0x0
+#define MCIF_VMID__VIP_WR_VMID_MASK 0xf0
+#define MCIF_VMID__VIP_WR_VMID__SHIFT 0x4
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS_MASK 0x1
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS__SHIFT 0x0
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_MASK 0x30
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE__SHIFT 0x4
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE_MASK 0xff00
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE__SHIFT 0x8
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE_MASK 0x70000
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE__SHIFT 0x10
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE_MASK 0x180000
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE__SHIFT 0x13
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x7e
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x1
+#define CC_DC_PIPE_DIS__DC_UNDERLAY_PIPE_DIS_MASK 0x3f0000
+#define CC_DC_PIPE_DIS__DC_UNDERLAY_PIPE_DIS__SHIFT 0x10
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED_MASK 0x1
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED__SHIFT 0x0
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR_MASK 0x10
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR__SHIFT 0x4
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED_MASK 0x100
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED__SHIFT 0x8
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR_MASK 0x1000
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR__SHIFT 0xc
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED_MASK 0x10000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED__SHIFT 0x10
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR_MASK 0x100000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR__SHIFT 0x14
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED_MASK 0x1000000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED__SHIFT 0x18
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR_MASK 0x10000000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR__SHIFT 0x1c
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY_MASK 0xfffff
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD_MASK 0xfff00000
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD__SHIFT 0x14
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC_MASK 0xffff
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC__SHIFT 0x0
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_OP_MASK 0x10000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_OP__SHIFT 0x1c
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS_MASK 0x20000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS__SHIFT 0x1d
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_ACK_MASK 0x40000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_ACK__SHIFT 0x1e
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_MASK_MASK 0x80000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_MASK__SHIFT 0x1f
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS_MASK 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS_MASK 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS_MASK 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS_MASK 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS_MASK 0x10
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS_MASK 0x20
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS_MASK 0x40
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS_MASK 0x80
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS__SHIFT 0x7
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS_MASK 0x100
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS__SHIFT 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS_MASK 0x200
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS__SHIFT 0x9
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS_MASK 0x400
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS__SHIFT 0xa
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS_MASK 0x800
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS__SHIFT 0xb
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS_MASK 0x1000
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS__SHIFT 0xc
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS_MASK 0x2000
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS__SHIFT 0xd
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS_MASK 0x4000
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS__SHIFT 0xe
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS_MASK 0x8000
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS__SHIFT 0xf
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE_MASK 0x3
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE__SHIFT 0x0
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT_MASK 0x10
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT__SHIFT 0x4
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY_MASK 0x20
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY__SHIFT 0x5
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL_MASK 0x40
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL__SHIFT 0x6
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM1_PWR_STATE_MASK 0x3
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM1_PWR_STATE__SHIFT 0x0
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM2_PWR_STATE_MASK 0xc
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM2_PWR_STATE__SHIFT 0x2
+#define DCI_MEM_PWR_STATUS__MCIF_RDREQ_MEM_PWR_STATE_MASK 0x10
+#define DCI_MEM_PWR_STATUS__MCIF_RDREQ_MEM_PWR_STATE__SHIFT 0x4
+#define DCI_MEM_PWR_STATUS__MCIF_WRREQ_MEM_PWR_STATE_MASK 0x40
+#define DCI_MEM_PWR_STATUS__MCIF_WRREQ_MEM_PWR_STATE__SHIFT 0x6
+#define DCI_MEM_PWR_STATUS__VGA_MEM_PWR_STATE_MASK 0x100
+#define DCI_MEM_PWR_STATUS__VGA_MEM_PWR_STATE__SHIFT 0x8
+#define DCI_MEM_PWR_STATUS__DMCU_ERAM_MEM_PWR_STATE_MASK 0x600
+#define DCI_MEM_PWR_STATUS__DMCU_ERAM_MEM_PWR_STATE__SHIFT 0x9
+#define DCI_MEM_PWR_STATUS__DMCU_IRAM_MEM_PWR_STATE_MASK 0x800
+#define DCI_MEM_PWR_STATUS__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xb
+#define DCI_MEM_PWR_STATUS__FBC_MEM_PWR_STATE_MASK 0x3000
+#define DCI_MEM_PWR_STATUS__FBC_MEM_PWR_STATE__SHIFT 0xc
+#define DCI_MEM_PWR_STATUS__MCIF_MEM_PWR_STATE_MASK 0xc000
+#define DCI_MEM_PWR_STATUS__MCIF_MEM_PWR_STATE__SHIFT 0xe
+#define DCI_MEM_PWR_STATUS__VIP_MEM_PWR_STATE_MASK 0x400000
+#define DCI_MEM_PWR_STATUS__VIP_MEM_PWR_STATE__SHIFT 0x16
+#define DCI_MEM_PWR_STATUS__DMIF0_ASYNC_MEM_PWR_STATE_MASK 0x3000000
+#define DCI_MEM_PWR_STATUS__DMIF0_ASYNC_MEM_PWR_STATE__SHIFT 0x18
+#define DCI_MEM_PWR_STATUS__DMIF0_DATA_MEM_PWR_STATE_MASK 0xc000000
+#define DCI_MEM_PWR_STATUS__DMIF0_DATA_MEM_PWR_STATE__SHIFT 0x1a
+#define DCI_MEM_PWR_STATUS__DMIF0_CHUNK_MEM_PWR_STATE_MASK 0x10000000
+#define DCI_MEM_PWR_STATUS__DMIF0_CHUNK_MEM_PWR_STATE__SHIFT 0x1c
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM3_PWR_STATE_MASK 0xc0000000
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM3_PWR_STATE__SHIFT 0x1e
+#define DCI_MEM_PWR_STATUS2__DMIF1_ASYNC_MEM_PWR_STATE_MASK 0x3
+#define DCI_MEM_PWR_STATUS2__DMIF1_ASYNC_MEM_PWR_STATE__SHIFT 0x0
+#define DCI_MEM_PWR_STATUS2__DMIF1_DATA_MEM_PWR_STATE_MASK 0xc
+#define DCI_MEM_PWR_STATUS2__DMIF1_DATA_MEM_PWR_STATE__SHIFT 0x2
+#define DCI_MEM_PWR_STATUS2__DMIF1_CHUNK_MEM_PWR_STATE_MASK 0x10
+#define DCI_MEM_PWR_STATUS2__DMIF1_CHUNK_MEM_PWR_STATE__SHIFT 0x4
+#define DCI_MEM_PWR_STATUS2__DMIF2_ASYNC_MEM_PWR_STATE_MASK 0x60
+#define DCI_MEM_PWR_STATUS2__DMIF2_ASYNC_MEM_PWR_STATE__SHIFT 0x5
+#define DCI_MEM_PWR_STATUS2__DMIF2_DATA_MEM_PWR_STATE_MASK 0x180
+#define DCI_MEM_PWR_STATUS2__DMIF2_DATA_MEM_PWR_STATE__SHIFT 0x7
+#define DCI_MEM_PWR_STATUS2__DMIF2_CHUNK_MEM_PWR_STATE_MASK 0x200
+#define DCI_MEM_PWR_STATUS2__DMIF2_CHUNK_MEM_PWR_STATE__SHIFT 0x9
+#define DCI_MEM_PWR_STATUS2__DMIF3_ASYNC_MEM_PWR_STATE_MASK 0xc00
+#define DCI_MEM_PWR_STATUS2__DMIF3_ASYNC_MEM_PWR_STATE__SHIFT 0xa
+#define DCI_MEM_PWR_STATUS2__DMIF3_DATA_MEM_PWR_STATE_MASK 0x3000
+#define DCI_MEM_PWR_STATUS2__DMIF3_DATA_MEM_PWR_STATE__SHIFT 0xc
+#define DCI_MEM_PWR_STATUS2__DMIF3_CHUNK_MEM_PWR_STATE_MASK 0x4000
+#define DCI_MEM_PWR_STATUS2__DMIF3_CHUNK_MEM_PWR_STATE__SHIFT 0xe
+#define DCI_MEM_PWR_STATUS2__DMIF4_ASYNC_MEM_PWR_STATE_MASK 0x18000
+#define DCI_MEM_PWR_STATUS2__DMIF4_ASYNC_MEM_PWR_STATE__SHIFT 0xf
+#define DCI_MEM_PWR_STATUS2__DMIF4_DATA_MEM_PWR_STATE_MASK 0x60000
+#define DCI_MEM_PWR_STATUS2__DMIF4_DATA_MEM_PWR_STATE__SHIFT 0x11
+#define DCI_MEM_PWR_STATUS2__DMIF4_CHUNK_MEM_PWR_STATE_MASK 0x80000
+#define DCI_MEM_PWR_STATUS2__DMIF4_CHUNK_MEM_PWR_STATE__SHIFT 0x13
+#define DCI_MEM_PWR_STATUS2__DMIF5_ASYNC_MEM_PWR_STATE_MASK 0x300000
+#define DCI_MEM_PWR_STATUS2__DMIF5_ASYNC_MEM_PWR_STATE__SHIFT 0x14
+#define DCI_MEM_PWR_STATUS2__DMIF5_DATA_MEM_PWR_STATE_MASK 0xc00000
+#define DCI_MEM_PWR_STATUS2__DMIF5_DATA_MEM_PWR_STATE__SHIFT 0x16
+#define DCI_MEM_PWR_STATUS2__DMIF5_CHUNK_MEM_PWR_STATE_MASK 0x1000000
+#define DCI_MEM_PWR_STATUS2__DMIF5_CHUNK_MEM_PWR_STATE__SHIFT 0x18
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM0_PWR_STATE_MASK 0x3
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM0_PWR_STATE__SHIFT 0x0
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM1_PWR_STATE_MASK 0xc
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM1_PWR_STATE__SHIFT 0x2
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM0_PWR_STATE_MASK 0x30
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM0_PWR_STATE__SHIFT 0x4
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM1_PWR_STATE_MASK 0xc0
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM1_PWR_STATE__SHIFT 0x6
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM0_PWR_STATE_MASK 0x300
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM0_PWR_STATE__SHIFT 0x8
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM1_PWR_STATE_MASK 0xc00
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM1_PWR_STATE__SHIFT 0xa
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM0_PWR_STATE_MASK 0x3000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM0_PWR_STATE__SHIFT 0xc
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM1_PWR_STATE_MASK 0xc000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM1_PWR_STATE__SHIFT 0xe
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM0_PWR_STATE_MASK 0x30000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM0_PWR_STATE__SHIFT 0x10
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM1_PWR_STATE_MASK 0xc0000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM1_PWR_STATE__SHIFT 0x12
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM0_PWR_STATE_MASK 0x300000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM0_PWR_STATE__SHIFT 0x14
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM1_PWR_STATE_MASK 0xc00000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM1_PWR_STATE__SHIFT 0x16
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL_MASK 0x1f
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL__SHIFT 0x0
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS_MASK 0x20
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS__SHIFT 0x5
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS_MASK 0x40
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS__SHIFT 0x6
+#define DCI_CLK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x80
+#define DCI_CLK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x7
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x100
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x8
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS_MASK 0x200
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS__SHIFT 0x9
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_L_GATE_DIS_MASK 0x400
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_L_GATE_DIS__SHIFT 0xa
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS_MASK 0x800
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS__SHIFT 0xb
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_C_GATE_DIS_MASK 0x1000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_C_GATE_DIS__SHIFT 0xc
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS_MASK 0x2000
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS__SHIFT 0xd
+#define DCI_CLK_CNTL__VPCLK_POL_MASK 0x4000
+#define DCI_CLK_CNTL__VPCLK_POL__SHIFT 0xe
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS_MASK 0x8000
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS__SHIFT 0xf
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS_MASK 0x10000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS__SHIFT 0x10
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS_MASK 0x20000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS__SHIFT 0x11
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS_MASK 0x40000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS__SHIFT 0x12
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS_MASK 0x80000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS__SHIFT 0x13
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS_MASK 0x100000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS__SHIFT 0x14
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS_MASK 0x200000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS__SHIFT 0x15
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS_MASK 0x400000
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS__SHIFT 0x16
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS_MASK 0x800000
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS__SHIFT 0x17
+#define DCI_CLK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x1000000
+#define DCI_CLK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x18
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_L_GATE_DIS_MASK 0x2000000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_L_GATE_DIS__SHIFT 0x19
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_C_GATE_DIS_MASK 0x4000000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_C_GATE_DIS__SHIFT 0x1a
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL_MASK 0xf8000000
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL__SHIFT 0x1b
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_DWB_GATE_DIS_MASK 0x1
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_DWB_GATE_DIS__SHIFT 0x0
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_DWB_GATE_DIS_MASK 0x2
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_DWB_GATE_DIS__SHIFT 0x1
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB0_GATE_DIS_MASK 0x4
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB0_GATE_DIS__SHIFT 0x2
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB0_GATE_DIS_MASK 0x8
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB0_GATE_DIS__SHIFT 0x3
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB1_GATE_DIS_MASK 0x10
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB1_GATE_DIS__SHIFT 0x4
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB1_GATE_DIS_MASK 0x80000000
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB1_GATE_DIS__SHIFT 0x1f
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_FORCE_MASK 0x3
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_FORCE__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_DIS_MASK 0x4
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_DIS__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_FORCE_MASK 0x8
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_FORCE__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_DIS_MASK 0x10
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_DIS__SHIFT 0x4
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_FORCE_MASK 0x20
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_FORCE__SHIFT 0x5
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_DIS_MASK 0x40
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_DIS__SHIFT 0x6
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_FORCE_MASK 0x80
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_FORCE__SHIFT 0x7
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_DIS_MASK 0x100
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_DIS__SHIFT 0x8
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_FORCE_MASK 0x600
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_FORCE__SHIFT 0x9
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_DIS_MASK 0x800
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_DIS__SHIFT 0xb
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_FORCE_MASK 0x1000
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_FORCE__SHIFT 0xc
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_DIS_MASK 0x2000
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_DIS__SHIFT 0xd
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_FORCE_MASK 0xc000
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_FORCE__SHIFT 0xe
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_DIS_MASK 0x10000
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_DIS__SHIFT 0x10
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_FORCE_MASK 0x60000
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_FORCE__SHIFT 0x11
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_DIS_MASK 0x80000
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_DIS__SHIFT 0x13
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_FORCE_MASK 0x300000
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_FORCE__SHIFT 0x14
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_DIS_MASK 0x400000
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_DIS__SHIFT 0x16
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_FORCE_MASK 0x1800000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_FORCE__SHIFT 0x17
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_DIS_MASK 0x2000000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_DIS__SHIFT 0x19
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_FORCE_MASK 0xc000000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_FORCE__SHIFT 0x1a
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_DIS_MASK 0x10000000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_DIS__SHIFT 0x1c
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_FORCE_MASK 0x20000000
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_FORCE__SHIFT 0x1d
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_DIS_MASK 0x40000000
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_DIS__SHIFT 0x1e
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_FORCE_MASK 0x3
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_FORCE__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_DIS_MASK 0x4
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_DIS__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_FORCE_MASK 0x18
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_DIS_MASK 0x20
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_DIS__SHIFT 0x5
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_FORCE_MASK 0x40
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_FORCE__SHIFT 0x6
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_DIS_MASK 0x80
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_DIS__SHIFT 0x7
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_FORCE_MASK 0x300
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_FORCE__SHIFT 0x8
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_DIS_MASK 0x400
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_DIS__SHIFT 0xa
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_FORCE_MASK 0x1800
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_FORCE__SHIFT 0xb
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_DIS_MASK 0x2000
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_DIS__SHIFT 0xd
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_FORCE_MASK 0x4000
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_FORCE__SHIFT 0xe
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_DIS_MASK 0x8000
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_DIS__SHIFT 0xf
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_FORCE_MASK 0x30000
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_FORCE__SHIFT 0x10
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_DIS_MASK 0x40000
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_DIS__SHIFT 0x12
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_FORCE_MASK 0x180000
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_FORCE__SHIFT 0x13
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_DIS_MASK 0x200000
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_DIS__SHIFT 0x15
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_FORCE_MASK 0x400000
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_FORCE__SHIFT 0x16
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_DIS_MASK 0x800000
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_DIS__SHIFT 0x17
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_FORCE_MASK 0x3000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_FORCE__SHIFT 0x18
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_DIS_MASK 0x4000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_DIS__SHIFT 0x1a
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_FORCE_MASK 0x18000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_FORCE__SHIFT 0x1b
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_DIS_MASK 0x20000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_DIS__SHIFT 0x1d
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_FORCE_MASK 0x40000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_FORCE__SHIFT 0x1e
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_DIS_MASK 0x80000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_DIS__SHIFT 0x1f
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_FORCE_MASK 0x3
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_FORCE__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_DIS_MASK 0x4
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_DIS__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_FORCE_MASK 0x18
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_DIS_MASK 0x20
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_DIS__SHIFT 0x5
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_FORCE_MASK 0x40
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_FORCE__SHIFT 0x6
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_DIS_MASK 0x80
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_DIS__SHIFT 0x7
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_FORCE_MASK 0x300
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_FORCE__SHIFT 0x8
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_DIS_MASK 0x400
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_DIS__SHIFT 0xa
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_FORCE_MASK 0x1800
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_FORCE__SHIFT 0xb
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_DIS_MASK 0x2000
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_DIS__SHIFT 0xd
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_FORCE_MASK 0x4000
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_FORCE__SHIFT 0xe
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_DIS_MASK 0x8000
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_DIS__SHIFT 0xf
+#define DCI_MEM_PWR_CNTL3__DMIF_RDREQ_MEM_PWR_MODE_SEL_MASK 0x30000
+#define DCI_MEM_PWR_CNTL3__DMIF_RDREQ_MEM_PWR_MODE_SEL__SHIFT 0x10
+#define DCI_MEM_PWR_CNTL3__DMIF_ASYNC_MEM_PWR_MODE_SEL_MASK 0xc0000
+#define DCI_MEM_PWR_CNTL3__DMIF_ASYNC_MEM_PWR_MODE_SEL__SHIFT 0x12
+#define DCI_MEM_PWR_CNTL3__DMIF_DATA_MEM_PWR_MODE_SEL_MASK 0x300000
+#define DCI_MEM_PWR_CNTL3__DMIF_DATA_MEM_PWR_MODE_SEL__SHIFT 0x14
+#define DCI_MEM_PWR_CNTL3__DMCU_ERAM_MEM_PWR_MODE_SEL_MASK 0x400000
+#define DCI_MEM_PWR_CNTL3__DMCU_ERAM_MEM_PWR_MODE_SEL__SHIFT 0x16
+#define DCI_MEM_PWR_CNTL3__FBC_MEM_PWR_MODE_SEL_MASK 0x1800000
+#define DCI_MEM_PWR_CNTL3__FBC_MEM_PWR_MODE_SEL__SHIFT 0x17
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB0_MEM_PWR_MODE_SEL_MASK 0x6000000
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB0_MEM_PWR_MODE_SEL__SHIFT 0x19
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB1_MEM_PWR_MODE_SEL_MASK 0x18000000
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB1_MEM_PWR_MODE_SEL__SHIFT 0x1b
+#define DCI_MEM_PWR_CNTL3__MCIF_DWB_MEM_PWR_MODE_SEL_MASK 0x60000000
+#define DCI_MEM_PWR_CNTL3__MCIF_DWB_MEM_PWR_MODE_SEL__SHIFT 0x1d
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_LUMA_MEM_EN_NUM_MASK 0x1
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_LUMA_MEM_EN_NUM__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_CHROMA_MEM_EN_NUM_MASK 0x2
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_CHROMA_MEM_EN_NUM__SHIFT 0x1
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_LUMA_MEM_EN_NUM_MASK 0x4
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_LUMA_MEM_EN_NUM__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_CHROMA_MEM_EN_NUM_MASK 0x8
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_CHROMA_MEM_EN_NUM__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_LUMA_MEM_EN_NUM_MASK 0x10
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_LUMA_MEM_EN_NUM__SHIFT 0x4
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_CHROMA_MEM_EN_NUM_MASK 0x20
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_CHROMA_MEM_EN_NUM__SHIFT 0x5
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_FORCE_MASK 0x3
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_FORCE__SHIFT 0x0
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_DIS_MASK 0x4
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_DIS__SHIFT 0x2
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_FORCE_MASK 0x18
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_FORCE__SHIFT 0x3
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_DIS_MASK 0x20
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_DIS__SHIFT 0x5
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_FORCE_MASK 0xc0
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_FORCE__SHIFT 0x6
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_DIS_MASK 0x100
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_DIS__SHIFT 0x8
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_FORCE_MASK 0x600
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_FORCE__SHIFT 0x9
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_DIS_MASK 0x800
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_DIS__SHIFT 0xb
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_FORCE_MASK 0x3000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_FORCE__SHIFT 0xc
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_DIS_MASK 0x4000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_DIS__SHIFT 0xe
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_FORCE_MASK 0x18000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_FORCE__SHIFT 0xf
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_DIS_MASK 0x20000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_DIS__SHIFT 0x11
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_FORCE_MASK 0xc0000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_FORCE__SHIFT 0x12
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_DIS_MASK 0x100000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_DIS__SHIFT 0x14
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_FORCE_MASK 0x600000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_FORCE__SHIFT 0x15
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_DIS_MASK 0x800000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_DIS__SHIFT 0x17
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE_MEM_PWR_MODE_SEL_MASK 0x3000000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE_MEM_PWR_MODE_SEL__SHIFT 0x18
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE0_PTE_PGMEM_STATE_MASK 0x3
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE0_PTE_PGMEM_STATE__SHIFT 0x0
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE1_PTE_PGMEM_STATE_MASK 0xc
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE1_PTE_PGMEM_STATE__SHIFT 0x2
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE2_PTE_PGMEM_STATE_MASK 0x30
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE2_PTE_PGMEM_STATE__SHIFT 0x4
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE3_PTE_PGMEM_STATE_MASK 0xc0
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE3_PTE_PGMEM_STATE__SHIFT 0x6
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE4_PTE_PGMEM_STATE_MASK 0x300
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE4_PTE_PGMEM_STATE__SHIFT 0x8
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE5_PTE_PGMEM_STATE_MASK 0xc00
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE5_PTE_PGMEM_STATE__SHIFT 0xa
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE6_PTE_PGMEM_STATE_MASK 0x3000
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE6_PTE_PGMEM_STATE__SHIFT 0xc
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE7_PTE_PGMEM_STATE_MASK 0xc000
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE7_PTE_PGMEM_STATE__SHIFT 0xe
+#define DCI_SOFT_RESET__VGA_SOFT_RESET_MASK 0x1
+#define DCI_SOFT_RESET__VGA_SOFT_RESET__SHIFT 0x0
+#define DCI_SOFT_RESET__VIP_SOFT_RESET_MASK 0x2
+#define DCI_SOFT_RESET__VIP_SOFT_RESET__SHIFT 0x1
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET_MASK 0x4
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET__SHIFT 0x2
+#define DCI_SOFT_RESET__FBC_SOFT_RESET_MASK 0x8
+#define DCI_SOFT_RESET__FBC_SOFT_RESET__SHIFT 0x3
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET_MASK 0x10
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET__SHIFT 0x4
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET_MASK 0x20
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET__SHIFT 0x5
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET_MASK 0x40
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET__SHIFT 0x6
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET_MASK 0x80
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET__SHIFT 0x7
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET_MASK 0x100
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET__SHIFT 0x8
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET_MASK 0x200
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET__SHIFT 0x9
+#define DCI_SOFT_RESET__DCFEV0_L_SOFT_RESET_MASK 0x400
+#define DCI_SOFT_RESET__DCFEV0_L_SOFT_RESET__SHIFT 0xa
+#define DCI_SOFT_RESET__DCFEV0_C_SOFT_RESET_MASK 0x800
+#define DCI_SOFT_RESET__DCFEV0_C_SOFT_RESET__SHIFT 0xb
+#define DCI_SOFT_RESET__DCFEV1_L_SOFT_RESET_MASK 0x1000
+#define DCI_SOFT_RESET__DCFEV1_L_SOFT_RESET__SHIFT 0xc
+#define DCI_SOFT_RESET__DCFEV1_C_SOFT_RESET_MASK 0x2000
+#define DCI_SOFT_RESET__DCFEV1_C_SOFT_RESET__SHIFT 0xd
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET_MASK 0x4000
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET__SHIFT 0xe
+#define DCI_SOFT_RESET__MCIF_DWB_SOFT_RESET_MASK 0x10000
+#define DCI_SOFT_RESET__MCIF_DWB_SOFT_RESET__SHIFT 0x10
+#define DCI_SOFT_RESET__MCIF_CWB0_SOFT_RESET_MASK 0x20000
+#define DCI_SOFT_RESET__MCIF_CWB0_SOFT_RESET__SHIFT 0x11
+#define DCI_SOFT_RESET__MCIF_CWB1_SOFT_RESET_MASK 0x40000
+#define DCI_SOFT_RESET__MCIF_CWB1_SOFT_RESET__SHIFT 0x12
+#define DCI_SOFT_RESET__MCIF_WB_SOFT_RESET_MASK 0x80000
+#define DCI_SOFT_RESET__MCIF_WB_SOFT_RESET__SHIFT 0x13
+#define DCI_MISC__MCIF_WB_URG_OVRD_MASK 0x1
+#define DCI_MISC__MCIF_WB_URG_OVRD__SHIFT 0x0
+#define DCI_MISC__MCIF_WB_URG_LVL_MASK 0x1e
+#define DCI_MISC__MCIF_WB_URG_LVL__SHIFT 0x1
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX_MASK 0xff
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCI_DEBUG_CONFIG__DCI_DBG_EN_MASK 0x1
+#define DCI_DEBUG_CONFIG__DCI_DBG_EN__SHIFT 0x0
+#define DCI_DEBUG_CONFIG__DCI_DBG_BLOCK_SEL_MASK 0xf0
+#define DCI_DEBUG_CONFIG__DCI_DBG_BLOCK_SEL__SHIFT 0x4
+#define DCI_DEBUG_CONFIG__DCI_DBG_CLOCK_SEL_MASK 0xf00
+#define DCI_DEBUG_CONFIG__DCI_DBG_CLOCK_SEL__SHIFT 0x8
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define DC_GENERICA__GENERICA_EN_MASK 0x1
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL_MASK 0xf80
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0xf000
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0xf0000
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0xf00000
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0xf000000
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_GENERICB__GENERICB_EN_MASK 0x1
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x0
+#define DC_GENERICB__GENERICB_SEL_MASK 0xf00
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x8
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL_MASK 0xf000
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL_MASK 0xf0000
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0xf00000
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0xf000000
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL_MASK 0xf
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL__SHIFT 0x0
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS_MASK 0x30
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS__SHIFT 0x4
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL_MASK 0x3
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL__SHIFT 0x0
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x300
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x8
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG_MASK 0x1
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG__SHIFT 0x0
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG_MASK 0x300
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG__SHIFT 0x8
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_MASK 0x10000
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL__SHIFT 0x10
+#define DC_GPIO_DEBUG__DC_GPIO_DEBUG_BUS_FLOP_EN_MASK 0x20000
+#define DC_GPIO_DEBUG__DC_GPIO_DEBUG_BUS_FLOP_EN__SHIFT 0x11
+#define DC_GPIO_DEBUG__DPRX_LOOPBACK_ENABLE_MASK 0x80000000
+#define DC_GPIO_DEBUG__DPRX_LOOPBACK_ENABLE__SHIFT 0x1f
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYC_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYC_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYC_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYC_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYC_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYC_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYC_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYC_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYC_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYC_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYD_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYD_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYD_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYD_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYD_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYD_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYD_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYD_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYD_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYD_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYE_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYE_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYE_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYE_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYE_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYE_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYE_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYE_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYE_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYE_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYF_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYF_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYF_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYF_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYF_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYF_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYF_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYF_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYF_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYF_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYG_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYG_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYG_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYG_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYG_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYG_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYG_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYG_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYG_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYG_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PFREQCHG_MASK 0x1
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PFREQCHG__SHIFT 0x0
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PIXVLD_RESET_MASK 0x10
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PFREQCHG_MASK 0x1
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PFREQCHG__SHIFT 0x0
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PIXVLD_RESET_MASK 0x10
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA_MASK 0x1
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA_MASK 0x100
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_MASK 0x200
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB_MASK 0x1
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB_MASK 0x100
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_MASK 0x200
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC_MASK 0x1
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC_MASK 0x100
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_MASK 0x200
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD_MASK 0x1
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD_MASK 0x100
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_MASK 0x200
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE_MASK 0x1
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE_MASK 0x100
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_MASK 0x200
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF_MASK 0x1
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF_MASK 0x100
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_MASK 0x200
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF__SHIFT 0x1e
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD_MASK 0xffffffff
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD__SHIFT 0x0
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE_MASK 0x1
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE__SHIFT 0x0
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT_MASK 0x100
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT__SHIFT 0x8
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_MASK 0x200
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR__SHIFT 0x9
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK_MASK 0x400
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK__SHIFT 0xa
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE_MASK 0xf0000
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE__SHIFT 0x10
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY_MASK 0xf00000
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY__SHIFT 0x14
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_MASK 0xf000000
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE__SHIFT 0x18
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x1c
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE_MASK 0x1
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE__SHIFT 0x0
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT_MASK 0x100
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT__SHIFT 0x8
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_MASK 0x200
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR__SHIFT 0x9
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK_MASK 0x400
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK__SHIFT 0xa
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE_MASK 0xf0000
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE__SHIFT 0x10
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY_MASK 0xf00000
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY__SHIFT 0x14
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_MASK 0xf000000
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE__SHIFT 0x18
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x1c
+#define DCIO_IMPCAL_CNTL__CALR_CNTL_OVERRIDE_MASK 0xf
+#define DCIO_IMPCAL_CNTL__CALR_CNTL_OVERRIDE__SHIFT 0x0
+#define DCIO_IMPCAL_CNTL__IMPCAL_SOFT_RESET_MASK 0x20
+#define DCIO_IMPCAL_CNTL__IMPCAL_SOFT_RESET__SHIFT 0x5
+#define DCIO_IMPCAL_CNTL__IMPCAL_STATUS_MASK 0x300
+#define DCIO_IMPCAL_CNTL__IMPCAL_STATUS__SHIFT 0x8
+#define DCIO_IMPCAL_CNTL__IMPCAL_ARB_STATE_MASK 0x7000
+#define DCIO_IMPCAL_CNTL__IMPCAL_ARB_STATE__SHIFT 0xc
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_INTERVAL_MASK 0x78000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_INTERVAL__SHIFT 0xf
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_BIASENTST_MASK 0x380000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_BIASENTST__SHIFT 0x13
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_RESBIASEN_MASK 0x400000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_RESBIASEN__SHIFT 0x16
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_SPARE_CONTROL_MASK 0x1800000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_SPARE_CONTROL__SHIFT 0x17
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA_MASK 0x7fff
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA__SHIFT 0x0
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB_MASK 0x7fff0000
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB__SHIFT 0x10
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE_MASK 0xf
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE__SHIFT 0x0
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET_MASK 0x20
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET__SHIFT 0x5
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS_MASK 0x300
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS__SHIFT 0x8
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE_MASK 0x7000
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE__SHIFT 0xc
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC_MASK 0x7fff
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC__SHIFT 0x0
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD_MASK 0x7fff0000
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD__SHIFT 0x10
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE_MASK 0xf
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE__SHIFT 0x0
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET_MASK 0x20
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET__SHIFT 0x5
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS_MASK 0x300
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS__SHIFT 0x8
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE_MASK 0x7000
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE__SHIFT 0xc
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE_MASK 0x7fff
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE__SHIFT 0x0
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF_MASK 0x7fff0000
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF__SHIFT 0x10
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY_MASK 0xf
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY__SHIFT 0x0
+#define DCIO_WRCMD_DELAY__DAC_DELAY_MASK 0xf0
+#define DCIO_WRCMD_DELAY__DAC_DELAY__SHIFT 0x4
+#define DCIO_WRCMD_DELAY__DPHY_DELAY_MASK 0xf00
+#define DCIO_WRCMD_DELAY__DPHY_DELAY__SHIFT 0x8
+#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY_MASK 0xf000
+#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY__SHIFT 0xc
+#define DCIO_WRCMD_DELAY__ZCAL_DELAY_MASK 0xf0000
+#define DCIO_WRCMD_DELAY__ZCAL_DELAY__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS_MASK 0x400
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS__SHIFT 0xa
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x2000
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0xc000
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x10000
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY_MASK 0xe0000
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY__SHIFT 0x11
+#define DC_DVODATA_CONFIG__VIP_MUX_EN_MASK 0x80000
+#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x13
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN_MASK 0x100000
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x14
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN_MASK 0x200000
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN__SHIFT 0x15
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN_MASK 0x1
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN__SHIFT 0x0
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN_MASK 0x2
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN__SHIFT 0x1
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE_MASK 0x10
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_MASK 0x100
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN__SHIFT 0x8
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD_MASK 0x200
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD__SHIFT 0x9
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL_MASK 0x400
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL__SHIFT 0xa
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_MASK 0x10000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON__SHIFT 0x10
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD_MASK 0x20000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD__SHIFT 0x11
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL_MASK 0x40000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL__SHIFT 0x12
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_MASK 0x1000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT 0x18
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD_MASK 0x2000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD__SHIFT 0x19
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL_MASK 0x4000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL__SHIFT 0x1a
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R_MASK 0x1
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON_MASK 0x2
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON__SHIFT 0x1
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN_MASK 0x4
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN__SHIFT 0x2
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON_MASK 0x8
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON__SHIFT 0x3
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE_MASK 0x10
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE__SHIFT 0x4
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE_MASK 0xf00
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE__SHIFT 0x8
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV_MASK 0xfff
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV__SHIFT 0x0
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV_MASK 0xffff0000
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1_MASK 0xff
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1__SHIFT 0x0
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2_MASK 0xff00
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2__SHIFT 0x8
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1_MASK 0xff0000
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2_MASK 0xff000000
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2__SHIFT 0x18
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH_MASK 0xff
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3_MASK 0xff00
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3__SHIFT 0x8
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3_MASK 0xff0000
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN_MASK 0x1000000
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0xffff
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000
+#define BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0xffff
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT_MASK 0x30000000
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT__SHIFT 0x1c
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_MASK 0x80000000
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN__SHIFT 0x1f
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0xffff
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0xf0000
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x1
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x100
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x10000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL_MASK 0xe0000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL__SHIFT 0x11
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x1000000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL_MASK 0x3
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL__SHIFT 0x0
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL_MASK 0x30
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL__SHIFT 0x4
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x300
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL_MASK 0x30000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL_MASK 0x300000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL__SHIFT 0x14
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x3000000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL_MASK 0x3
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL__SHIFT 0x0
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL_MASK 0x30
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL__SHIFT 0x4
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x300
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL_MASK 0x30000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL_MASK 0x300000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL__SHIFT 0x14
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x3000000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL_MASK 0x7
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL__SHIFT 0x0
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL_MASK 0x700
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL__SHIFT 0x8
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL_MASK 0x70000
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL__SHIFT 0x10
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL_MASK 0x7
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL__SHIFT 0x0
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL_MASK 0x700
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL__SHIFT 0x8
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL_MASK 0x70000
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL__SHIFT 0x10
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL_MASK 0x7
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL__SHIFT 0x0
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL_MASK 0x700
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL__SHIFT 0x8
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL_MASK 0x70000
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x7
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x70
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x700
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x7000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x70000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x700000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP_MASK 0x7
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP_MASK 0x70
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP_MASK 0x700
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP_MASK 0x7000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP_MASK 0x70000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP_MASK 0x700000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV0_P_FLIP_MASK 0x3800000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV0_P_FLIP__SHIFT 0x17
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV1_P_FLIP_MASK 0x1c000000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV1_P_FLIP__SHIFT 0x1a
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xffffffff
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x3f
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x700
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x8
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x3800
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0xb
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x1c000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0xe
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0xe0000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x11
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x700000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x14
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x3800000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x17
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL_MASK 0x1f
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL__SHIFT 0x0
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x20
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_DEBUG__DCIO_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG__DCIO_DEBUG__SHIFT 0x0
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE0_EXT_VSYNC_MUX_MASK 0x7
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE0_EXT_VSYNC_MUX__SHIFT 0x0
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE1_EXT_VSYNC_MUX_MASK 0x70
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE1_EXT_VSYNC_MUX__SHIFT 0x4
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE2_EXT_VSYNC_MUX_MASK 0x700
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE2_EXT_VSYNC_MUX__SHIFT 0x8
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE3_EXT_VSYNC_MUX_MASK 0x7000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE3_EXT_VSYNC_MUX__SHIFT 0xc
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE4_EXT_VSYNC_MUX_MASK 0x70000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE4_EXT_VSYNC_MUX__SHIFT 0x10
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE5_EXT_VSYNC_MUX_MASK 0x700000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE5_EXT_VSYNC_MUX__SHIFT 0x14
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_SWAPLOCKB_EXT_VSYNC_MASK_MASK 0x7000000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_SWAPLOCKB_EXT_VSYNC_MASK__SHIFT 0x18
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_GENERICB_EXT_VSYNC_MASK_MASK 0x70000000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_GENERICB_EXT_VSYNC_MASK__SHIFT 0x1c
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_CRTC_MANUAL_FLOW_CONTROL_MASK 0x80000000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_CRTC_MANUAL_FLOW_CONTROL__SHIFT 0x1f
+#define DBG_OUT_CNTL__DBG_OUT_PIN_EN_MASK 0x1
+#define DBG_OUT_CNTL__DBG_OUT_PIN_EN__SHIFT 0x0
+#define DBG_OUT_CNTL__DBG_OUT_PIN_SEL_MASK 0x10
+#define DBG_OUT_CNTL__DBG_OUT_PIN_SEL__SHIFT 0x4
+#define DBG_OUT_CNTL__DBG_OUT_12BIT_SEL_MASK 0x300
+#define DBG_OUT_CNTL__DBG_OUT_12BIT_SEL__SHIFT 0x8
+#define DBG_OUT_CNTL__DBG_OUT_TEST_DATA_MASK 0xfff000
+#define DBG_OUT_CNTL__DBG_OUT_TEST_DATA__SHIFT 0xc
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_EN_MASK 0x1
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_EN__SHIFT 0x0
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_SEL_MASK 0xf00
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_SEL__SHIFT 0x8
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET_MASK 0x1
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET__SHIFT 0x0
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x2
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x1
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET_MASK 0x4
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET__SHIFT 0x2
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x8
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x3
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET_MASK 0x10
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET__SHIFT 0x4
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x20
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0x5
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET_MASK 0x40
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET__SHIFT 0x6
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x80
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0x7
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET_MASK 0x100
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET__SHIFT 0x8
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x200
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0x9
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET_MASK 0x400
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET__SHIFT 0xa
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x800
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0xb
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET_MASK 0x1000
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET__SHIFT 0xc
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET_MASK 0x2000
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET__SHIFT 0xd
+#define DCIO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x10000
+#define DCIO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x10
+#define DCIO_SOFT_RESET__DCRXPHY_SOFT_RESET_MASK 0x100000
+#define DCIO_SOFT_RESET__DCRXPHY_SOFT_RESET__SHIFT 0x14
+#define DCIO_SOFT_RESET__DPHY_SOFT_RESET_MASK 0x1000000
+#define DCIO_SOFT_RESET__DPHY_SOFT_RESET__SHIFT 0x18
+#define DCIO_SOFT_RESET__ZCAL_SOFT_RESET_MASK 0x4000000
+#define DCIO_SOFT_RESET__ZCAL_SOFT_RESET__SHIFT 0x1a
+#define DCIO_SOFT_RESET__UNIPHYLPA_SOFT_RESET_MASK 0x10000000
+#define DCIO_SOFT_RESET__UNIPHYLPA_SOFT_RESET__SHIFT 0x1c
+#define DCIO_SOFT_RESET__DSYNCLPA_SOFT_RESET_MASK 0x20000000
+#define DCIO_SOFT_RESET__DSYNCLPA_SOFT_RESET__SHIFT 0x1d
+#define DCIO_SOFT_RESET__UNIPHYLPB_SOFT_RESET_MASK 0x40000000
+#define DCIO_SOFT_RESET__UNIPHYLPB_SOFT_RESET__SHIFT 0x1e
+#define DCIO_SOFT_RESET__DSYNCLPB_SOFT_RESET_MASK 0x80000000
+#define DCIO_SOFT_RESET__DSYNCLPB_SOFT_RESET__SHIFT 0x1f
+#define DCIO_DPHY_SEL__DPHY_LANE0_SEL_MASK 0x3
+#define DCIO_DPHY_SEL__DPHY_LANE0_SEL__SHIFT 0x0
+#define DCIO_DPHY_SEL__DPHY_LANE1_SEL_MASK 0xc
+#define DCIO_DPHY_SEL__DPHY_LANE1_SEL__SHIFT 0x2
+#define DCIO_DPHY_SEL__DPHY_LANE2_SEL_MASK 0x30
+#define DCIO_DPHY_SEL__DPHY_LANE2_SEL__SHIFT 0x4
+#define DCIO_DPHY_SEL__DPHY_LANE3_SEL_MASK 0xc0
+#define DCIO_DPHY_SEL__DPHY_LANE3_SEL__SHIFT 0x6
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_TYPE_MASK 0x1
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_TYPE__SHIFT 0x0
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_MASK_MASK 0x2
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_MASK__SHIFT 0x1
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_OCCUR_MASK 0x4
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_OCCUR__SHIFT 0x2
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_TYPE_MASK 0x8
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_TYPE__SHIFT 0x3
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_MASK_MASK 0x10
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_MASK__SHIFT 0x4
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_OCCUR_MASK 0x20
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_OCCUR__SHIFT 0x5
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_TYPE_MASK 0x40
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_TYPE__SHIFT 0x6
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_MASK_MASK 0x80
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_MASK__SHIFT 0x7
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_OCCUR_MASK 0x100
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_OCCUR__SHIFT 0x8
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_TYPE_MASK 0x200
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_TYPE__SHIFT 0x9
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_MASK_MASK 0x400
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_MASK__SHIFT 0xa
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_OCCUR_MASK 0x800
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_OCCUR__SHIFT 0xb
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_TYPE_MASK 0x1000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_TYPE__SHIFT 0xc
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_MASK_MASK 0x2000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_MASK__SHIFT 0xd
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_OCCUR_MASK 0x4000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_OCCUR__SHIFT 0xe
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_TYPE_MASK 0x8000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_TYPE__SHIFT 0xf
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_MASK_MASK 0x10000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_MASK__SHIFT 0x10
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_OCCUR_MASK 0x20000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_OCCUR__SHIFT 0x11
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_TYPE_MASK 0x40000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_TYPE__SHIFT 0x12
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_MASK_MASK 0x80000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_MASK__SHIFT 0x13
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_OCCUR_MASK 0x100000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_OCCUR__SHIFT 0x14
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_TYPE_MASK 0x1000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_TYPE__SHIFT 0x18
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_MASK_MASK 0x2000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_MASK__SHIFT 0x19
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_OCCUR_MASK 0x4000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_OCCUR__SHIFT 0x1a
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_TYPE_MASK 0x8000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_TYPE__SHIFT 0x1b
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_MASK_MASK 0x10000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_MASK__SHIFT 0x1c
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_OCCUR_MASK 0x20000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_OCCUR__SHIFT 0x1d
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_TYPE_MASK 0x1
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_TYPE__SHIFT 0x0
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_MASK_MASK 0x2
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_MASK__SHIFT 0x1
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_OCCUR_MASK 0x4
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_OCCUR__SHIFT 0x2
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_GNT__SHIFT 0x10
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX_MASK 0xff
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0_REG_MASK 0x3
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0_REG__SHIFT 0x0
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_MASK_REG_MASK 0xc
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_MASK_REG__SHIFT 0x2
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN_REG_MASK 0x30
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN_REG__SHIFT 0x4
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0_MASK 0xc0
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0__SHIFT 0x6
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_SEL0_MASK 0x300
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_SEL0__SHIFT 0x8
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN_MASK 0xc00
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN__SHIFT 0xa
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCLK_C_MASK 0x1000
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCLK_C__SHIFT 0xc
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_REG_MASK 0x2000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_REG__SHIFT 0xd
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_PREMUX_MASK 0x4000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_PREMUX__SHIFT 0xe
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_MASK 0x8000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0__SHIFT 0xf
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_REG_MASK 0x10000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_REG__SHIFT 0x10
+#define DCIO_DEBUG1__DCO_DCIO_DVO_HSYNC_TRISTATE_MASK 0x20000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_HSYNC_TRISTATE__SHIFT 0x11
+#define DCIO_DEBUG1__DCO_DCIO_DVO_CLK_TRISTATE_MASK 0x40000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_CLK_TRISTATE__SHIFT 0x12
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_PREMUX_MASK 0x80000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_PREMUX__SHIFT 0x13
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_MASK 0x100000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN__SHIFT 0x14
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MUX_MASK 0x200000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MUX__SHIFT 0x15
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MASK_REG_MASK 0x400000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MASK_REG__SHIFT 0x16
+#define DCIO_DEBUG1__DCO_DCIO_DVO_ENABLE_MASK 0x800000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_ENABLE__SHIFT 0x17
+#define DCIO_DEBUG1__DCO_DCIO_DVO_VSYNC_TRISTATE_MASK 0x1000000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_VSYNC_TRISTATE__SHIFT 0x18
+#define DCIO_DEBUG1__DCO_DCIO_DVO_RATE_SEL_MASK 0x2000000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_RATE_SEL__SHIFT 0x19
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0_PREMUX_MASK 0x4000000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0_PREMUX__SHIFT 0x1a
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0_MASK 0x8000000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0__SHIFT 0x1b
+#define DCIO_DEBUG2__DCIO_DEBUG2_MASK 0xffffffff
+#define DCIO_DEBUG2__DCIO_DEBUG2__SHIFT 0x0
+#define DCIO_DEBUG3__DCIO_DEBUG3_MASK 0xffffffff
+#define DCIO_DEBUG3__DCIO_DEBUG3__SHIFT 0x0
+#define DCIO_DEBUG4__DCIO_DEBUG4_MASK 0xffffffff
+#define DCIO_DEBUG4__DCIO_DEBUG4__SHIFT 0x0
+#define DCIO_DEBUG5__DCIO_DEBUG5_MASK 0xffffffff
+#define DCIO_DEBUG5__DCIO_DEBUG5__SHIFT 0x0
+#define DCIO_DEBUG6__DCIO_DEBUG6_MASK 0xffffffff
+#define DCIO_DEBUG6__DCIO_DEBUG6__SHIFT 0x0
+#define DCIO_DEBUG7__DCIO_DEBUG7_MASK 0xffffffff
+#define DCIO_DEBUG7__DCIO_DEBUG7__SHIFT 0x0
+#define DCIO_DEBUG8__DCIO_DEBUG8_MASK 0xffffffff
+#define DCIO_DEBUG8__DCIO_DEBUG8__SHIFT 0x0
+#define DCIO_DEBUG9__DCIO_DEBUG9_MASK 0xffffffff
+#define DCIO_DEBUG9__DCIO_DEBUG9__SHIFT 0x0
+#define DCIO_DEBUGA__DCIO_DEBUGA_MASK 0xffffffff
+#define DCIO_DEBUGA__DCIO_DEBUGA__SHIFT 0x0
+#define DCIO_DEBUGB__DCIO_DEBUGB_MASK 0xffffffff
+#define DCIO_DEBUGB__DCIO_DEBUGB__SHIFT 0x0
+#define DCIO_DEBUGC__DCIO_DEBUGC_MASK 0xffffffff
+#define DCIO_DEBUGC__DCIO_DEBUGC__SHIFT 0x0
+#define DCIO_DEBUGD__DCIO_DEBUGD_MASK 0xffffffff
+#define DCIO_DEBUGD__DCIO_DEBUGD__SHIFT 0x0
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG__SHIFT 0x0
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG14__DCIO_DIGG_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG14__DCIO_DIGG_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG15__DCIO_DEBUG15_MASK 0xffffffff
+#define DCIO_DEBUG15__DCIO_DEBUG15__SHIFT 0x0
+#define DCIO_DEBUG16__DCIO_DEBUG16_MASK 0xffffffff
+#define DCIO_DEBUG16__DCIO_DEBUG16__SHIFT 0x0
+#define DCIO_DEBUG17__DCIO_DEBUG17_MASK 0xffffffff
+#define DCIO_DEBUG17__DCIO_DEBUG17__SHIFT 0x0
+#define DCIO_DEBUG18__DCIO_DEBUG18_MASK 0xffffffff
+#define DCIO_DEBUG18__DCIO_DEBUG18__SHIFT 0x0
+#define DCIO_DEBUG19__DCIO_DIGLPA_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG19__DCIO_DIGLPA_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG1A__DCIO_DIGLPB_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG1A__DCIO_DIGLPB_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG1B__DCIO_DEBUGHPD_MASK 0xffffffff
+#define DCIO_DEBUG1B__DCIO_DEBUGHPD__SHIFT 0x0
+#define DCIO_DEBUG1C__DCIO_DEBUG_UNIPHYA_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1C__DCIO_DEBUG_UNIPHYA_CFG__SHIFT 0x0
+#define DCIO_DEBUG1D__DCIO_DEBUG_UNIPHYB_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1D__DCIO_DEBUG_UNIPHYB_CFG__SHIFT 0x0
+#define DCIO_DEBUG1E__DCIO_DEBUG_UNIPHYC_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1E__DCIO_DEBUG_UNIPHYC_CFG__SHIFT 0x0
+#define DCIO_DEBUG1F__DCIO_DEBUG_UNIPHYD_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1F__DCIO_DEBUG_UNIPHYD_CFG__SHIFT 0x0
+#define DCIO_DEBUG20__DCIO_DEBUG_UNIPHYE_CFG_MASK 0xffffffff
+#define DCIO_DEBUG20__DCIO_DEBUG_UNIPHYE_CFG__SHIFT 0x0
+#define DCIO_DEBUG21__DCIO_DEBUG_UNIPHYF_CFG_MASK 0xffffffff
+#define DCIO_DEBUG21__DCIO_DEBUG_UNIPHYF_CFG__SHIFT 0x0
+#define DCIO_DEBUG22__DCIO_DEBUG_UNIPHYG_CFG_MASK 0xffffffff
+#define DCIO_DEBUG22__DCIO_DEBUG_UNIPHYG_CFG__SHIFT 0x0
+#define DCIO_DEBUG23__DCIO_DEBUG_UNIPHYLPA_CFG_MASK 0xffffffff
+#define DCIO_DEBUG23__DCIO_DEBUG_UNIPHYLPA_CFG__SHIFT 0x0
+#define DCIO_DEBUG24__DCIO_DEBUG_UNIPHYLPB_CFG_MASK 0xffffffff
+#define DCIO_DEBUG24__DCIO_DEBUG_UNIPHYLPB_CFG__SHIFT 0x0
+#define DCIO_DEBUG25__DCIO_DEBUG_DCRXPHY_CFG_MASK 0xffffffff
+#define DCIO_DEBUG25__DCIO_DEBUG_DCRXPHY_CFG__SHIFT 0x0
+#define DCIO_DEBUG26__DCIO_DEBUG_DPHY_CFG_MASK 0xffffffff
+#define DCIO_DEBUG26__DCIO_DEBUG_DPHY_CFG__SHIFT 0x0
+#define DCIO_DEBUG27__DCIO_DEBUG_DACA_CFG_MASK 0xffffffff
+#define DCIO_DEBUG27__DCIO_DEBUG_DACA_CFG__SHIFT 0x0
+#define DCIO_DEBUG28__DCIO_DEBUG_ZCAL_CFG_MASK 0xffffffff
+#define DCIO_DEBUG28__DCIO_DEBUG_ZCAL_CFG__SHIFT 0x0
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID_MASK 0xffffffff
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV1_MASK 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV1__SHIFT 0x3
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x20
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x5
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x40
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x6
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV1_MASK 0x80
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV1__SHIFT 0x7
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x100
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x200
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x400
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0xa
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV1_MASK 0x800
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV1__SHIFT 0xb
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x1000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0xc
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x2000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0xd
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x4000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0xe
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV1_MASK 0x8000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV1__SHIFT 0xf
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x10000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x20000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x40000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x12
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV1_MASK 0x80000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV1__SHIFT 0x13
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x100000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x14
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x200000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x15
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x400000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x16
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV1_MASK 0x800000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV1__SHIFT 0x17
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x1000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x18
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x2000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x4000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x1a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV1_MASK 0x8000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV1__SHIFT 0x1b
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x1
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x0
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x100
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x8
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x10000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x10
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x100000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x14
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x200000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x15
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x400000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x16
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x800000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x17
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x1
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x0
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x100
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x8
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x10000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x10
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x100000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x14
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x200000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x15
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x400000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x16
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x800000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x17
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x1
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x0
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x100
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x8
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x10000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x10
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x100000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x14
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x200000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x15
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x400000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x16
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x800000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x17
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x10000
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x100000
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x1
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x100
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x1
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x100
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x1
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x100
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x10000
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x100000
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x1
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x100
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x1
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x100
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x1
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x100
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x10000
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x10
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x100000
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x14
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x1
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x100
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x1
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x100
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x1
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x100
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x10000
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x10
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x100000
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x14
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x1
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x100
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x1
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x100
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x1
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x100
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x10000
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x10
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x100000
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x14
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x1
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x100
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x1
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x100
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x1
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x100
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE_MASK 0x10000
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE__SHIFT 0x10
+#define DC_GPIO_DDC6_MASK__AUX6_POL_MASK 0x100000
+#define DC_GPIO_DDC6_MASK__AUX6_POL__SHIFT 0x14
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK 0x1
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK 0x100
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN_MASK 0x1
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN_MASK 0x100
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y_MASK 0x1
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y_MASK 0x100
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x1
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x40
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV1_MASK 0x80
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x100
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x4000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x10000
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x10
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x100000
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x14
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x400000
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0xf000000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x18
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x1
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x0
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x100
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x1
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x0
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x100
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RXSEL_MASK 0x30000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RXSEL__SHIFT 0x10
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPARE_MASK 0xc0000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPARE__SHIFT 0x12
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_BIASCRTEN_MASK 0x100000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL0P9_MASK 0x200000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL0P9__SHIFT 0x15
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL1P1_MASK 0x400000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL1P1__SHIFT 0x16
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_COMPSEL_MASK 0x800000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_COMPSEL__SHIFT 0x17
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL0P9_MASK 0x1000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL0P9__SHIFT 0x18
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL1P1_MASK 0x2000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL1P1__SHIFT 0x19
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCEN_MASK 0x4000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCEN__SHIFT 0x1a
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCSEL_MASK 0x8000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_FALLSLEWSEL_MASK 0x30000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_FALLSLEWSEL__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RESBIASEN_MASK 0x40000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RESBIASEN__SHIFT 0x1e
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SLEWN_MASK 0x80000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SLEWN__SHIFT 0x1f
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x1
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x0
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x100
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x8
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK_MASK 0x1
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK__SHIFT 0x0
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS_MASK 0x10
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS__SHIFT 0x4
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV_MASK 0x40
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV__SHIFT 0x6
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV1_MASK 0x80
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV1__SHIFT 0x7
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK_MASK 0x100
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK__SHIFT 0x8
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS_MASK 0x1000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS__SHIFT 0xc
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV_MASK 0x4000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV__SHIFT 0xe
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV1_MASK 0x8000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV1__SHIFT 0xf
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK_MASK 0x7000000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK__SHIFT 0x18
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK_MASK 0x70000000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK__SHIFT 0x1c
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK 0x1
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A__SHIFT 0x0
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK 0x100
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A__SHIFT 0x8
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN_MASK 0x1
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN__SHIFT 0x0
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN_MASK 0x100
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN__SHIFT 0x8
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y_MASK 0x1
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y__SHIFT 0x0
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y_MASK 0x100
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x0
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x2
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x2
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x3
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV1_MASK 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV1__SHIFT 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV1_MASK 0x20
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV1__SHIFT 0x5
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x100
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x200
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x400
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0xa
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x800
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0xb
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x10000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x20000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x40000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x12
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x80000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x13
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV1_MASK 0x100000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV1__SHIFT 0x14
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV1_MASK 0x800000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV1__SHIFT 0x17
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x1000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x18
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x2000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x4000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x1a
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x8000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x1b
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x1
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x0
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x100
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x8
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x10000
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x10
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x1000000
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x18
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x1
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x0
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x100
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x8
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x10000
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x10
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x1000000
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x18
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x1
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x0
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x100
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x10000
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x10
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x1000000
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK_MASK 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK__SHIFT 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS_MASK 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS__SHIFT 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV_MASK 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV__SHIFT 0x3
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV1_MASK 0x20
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV1__SHIFT 0x5
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x40
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV1_MASK 0x80
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV1__SHIFT 0x7
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x100
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x200
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x400
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV1_MASK 0x800
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV1__SHIFT 0xb
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x10000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x20000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x40000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV1_MASK 0x80000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV1__SHIFT 0x13
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x100000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x200000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x400000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV1_MASK 0x800000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV1__SHIFT 0x17
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x1000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x2000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x4000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV1_MASK 0x8000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV1__SHIFT 0x1b
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0x40000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV1_MASK 0x80000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV1__SHIFT 0x1f
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x1
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x100
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x10000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x1000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x4000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x1
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x2
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x4
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI_MASK 0x8
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI__SHIFT 0x3
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE_MASK 0x10
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE__SHIFT 0x4
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x20
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x40
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0_MASK 0x80
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0__SHIFT 0x7
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x100
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x200
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__HPD12_SPARE1_MASK 0x400
+#define DC_GPIO_HPD_EN__HPD12_SPARE1__SHIFT 0xa
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x10000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x10
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI_MASK 0x20000
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI__SHIFT 0x11
+#define DC_GPIO_HPD_EN__HPD34_SPARE0_MASK 0x40000
+#define DC_GPIO_HPD_EN__HPD34_SPARE0__SHIFT 0x12
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x100000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x14
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI_MASK 0x200000
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI__SHIFT 0x15
+#define DC_GPIO_HPD_EN__HPD34_SPARE1_MASK 0x400000
+#define DC_GPIO_HPD_EN__HPD34_SPARE1__SHIFT 0x16
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x1000000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x18
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI_MASK 0x2000000
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI__SHIFT 0x19
+#define DC_GPIO_HPD_EN__HPD56_SPARE0_MASK 0x4000000
+#define DC_GPIO_HPD_EN__HPD56_SPARE0__SHIFT 0x1a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI_MASK 0x20000000
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI__SHIFT 0x1d
+#define DC_GPIO_HPD_EN__HPD56_SPARE1_MASK 0x40000000
+#define DC_GPIO_HPD_EN__HPD56_SPARE1__SHIFT 0x1e
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x1
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x100
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x10000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x10
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x1000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x18
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x4000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x1a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x1c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x1
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x10
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x4
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x40
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x6
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV1_MASK 0x80
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV1__SHIFT 0x7
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x100
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x1000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x4000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV1_MASK 0x8000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV1__SHIFT 0xf
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK_MASK 0x10000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS_MASK 0x100000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS__SHIFT 0x14
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV_MASK 0x400000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV__SHIFT 0x16
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV1_MASK 0x800000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV1__SHIFT 0x17
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_MASK_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_MASK__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_PD_DIS_MASK 0x2000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_PD_DIS__SHIFT 0x19
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV_MASK 0x4000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV1_MASK 0x8000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV1__SHIFT 0x1b
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_MASK_MASK 0x10000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_MASK__SHIFT 0x1c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_PD_DIS_MASK 0x20000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV_MASK 0x40000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV__SHIFT 0x1e
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV1_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV1__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A_MASK 0x1
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A_MASK 0x100
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A_MASK 0x10000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_VSYNC_IN_A_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_VSYNC_IN_A__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_HSYNC_IN_A_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_HSYNC_IN_A__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x1
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x2
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x100
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN_MASK 0x10000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VSYNC_IN_EN_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VSYNC_IN_EN__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_HSYNC_IN_EN_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_HSYNC_IN_EN__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y_MASK 0x1
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y_MASK 0x100
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y_MASK 0x10000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_VSYNC_IN_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_VSYNC_IN__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_HSYNC_IN_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_HSYNC_IN__SHIFT 0x1f
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0xf
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0xf0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN_MASK 0xf00
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP_MASK 0xf000
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0xf0000
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0xf00000
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0xf000000
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xf0000000
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0xf
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0xf0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH_MASK 0x700
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH_MASK 0x7000
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN_MASK 0xf0000
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP_MASK 0xf00000
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL_MASK 0xc0000000
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL__SHIFT 0x1e
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN_MASK 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN__SHIFT 0x0
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE_MASK 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE__SHIFT 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL_MASK 0x4
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL__SHIFT 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE_MASK 0x8
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE__SHIFT 0x3
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN_MASK 0x10
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN__SHIFT 0x4
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN_MASK 0x20
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN__SHIFT 0x5
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN_MASK 0x40
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN__SHIFT 0x6
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN_MASK 0x80
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN__SHIFT 0x7
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN_MASK 0x1000
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN__SHIFT 0xc
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN_MASK 0x2000
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN__SHIFT 0xd
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x4000
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x30000
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10
+#define PHY_AUX_CNTL__AUX_PAD_RESBIASEN_MASK 0x40000
+#define PHY_AUX_CNTL__AUX_PAD_RESBIASEN__SHIFT 0x12
+#define PHY_AUX_CNTL__AUX_PAD_COMPSEL_MASK 0x80000
+#define PHY_AUX_CNTL__AUX_PAD_COMPSEL__SHIFT 0x13
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A__SHIFT 0x1
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN_MASK 0x1
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN__SHIFT 0x0
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN_MASK 0x2
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN__SHIFT 0x1
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_DATA_PD_EN_MASK 0x4
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_DATA_PD_EN__SHIFT 0x2
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RXSEL_MASK 0x30000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RXSEL__SHIFT 0x10
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPARE_MASK 0xc0000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPARE__SHIFT 0x12
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_BIASCRTEN_MASK 0x100000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL0P9_MASK 0x200000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL0P9__SHIFT 0x15
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL1P1_MASK 0x400000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL1P1__SHIFT 0x16
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_COMPSEL_MASK 0x800000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_COMPSEL__SHIFT 0x17
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL0P9_MASK 0x1000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL0P9__SHIFT 0x18
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL1P1_MASK 0x2000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL1P1__SHIFT 0x19
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCEN_MASK 0x4000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCEN__SHIFT 0x1a
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCSEL_MASK 0x8000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_FALLSLEWSEL_MASK 0x30000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_FALLSLEWSEL__SHIFT 0x1c
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RESBIASEN_MASK 0x40000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RESBIASEN__SHIFT 0x1e
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SLEWN_MASK 0x80000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SLEWN__SHIFT 0x1f
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y_MASK 0x1
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y__SHIFT 0x0
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y_MASK 0x2
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y__SHIFT 0x1
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN_MASK 0xf
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP_MASK 0xf0
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP__SHIFT 0x4
+#define DVO_VREF_CONTROL__DVO_VREFPON_MASK 0x1
+#define DVO_VREF_CONTROL__DVO_VREFPON__SHIFT 0x0
+#define DVO_VREF_CONTROL__DVO_VREFSEL_MASK 0x2
+#define DVO_VREF_CONTROL__DVO_VREFSEL__SHIFT 0x1
+#define DVO_VREF_CONTROL__DVO_VREFCAL_MASK 0xf0
+#define DVO_VREF_CONTROL__DVO_VREFCAL__SHIFT 0x4
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST_MASK 0xffffffff
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST__SHIFT 0x0
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SCL_RECEN_MASK 0x1
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SCL_RECEN__SHIFT 0x0
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SDA_RECEN_MASK 0x2
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SDA_RECEN__SHIFT 0x1
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_RX_HPD_RECEN_MASK 0x10000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_RX_HPD_RECEN__SHIFT 0x10
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HPD1_RECEN_MASK 0x20000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HPD1_RECEN__SHIFT 0x11
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_VSYNC_RECEN_MASK 0x40000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_VSYNC_RECEN__SHIFT 0x12
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_CLK_RECEN_MASK 0x80000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_CLK_RECEN__SHIFT 0x13
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_VSYNCA_RECEN_MASK 0x100000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_VSYNCA_RECEN__SHIFT 0x14
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HSYNCA_RECEN_MASK 0x200000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HSYNCA_RECEN__SHIFT 0x15
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICG_RECEN_MASK 0x400000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICG_RECEN__SHIFT 0x16
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICF_RECEN_MASK 0x800000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICF_RECEN__SHIFT 0x17
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICE_RECEN_MASK 0x1000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICE_RECEN__SHIFT 0x18
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICD_RECEN_MASK 0x2000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICD_RECEN__SHIFT 0x19
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICC_RECEN_MASK 0x4000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICC_RECEN__SHIFT 0x1a
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICB_RECEN_MASK 0x8000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICB_RECEN__SHIFT 0x1b
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICA_RECEN_MASK 0x10000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICA_RECEN__SHIFT 0x1c
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_BLON_RECEN_MASK 0x20000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_BLON_RECEN__SHIFT 0x1d
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DIGON_RECEN_MASK 0x40000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DIGON_RECEN__SHIFT 0x1e
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DDC2DATA_RECEN_MASK 0x80000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DDC2DATA_RECEN__SHIFT 0x1f
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC2CLK_RECEN_MASK 0x1
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC2CLK_RECEN__SHIFT 0x0
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1DATA_RECEN_MASK 0x2
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1DATA_RECEN__SHIFT 0x1
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1CLK_RECEN_MASK 0x4
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1CLK_RECEN__SHIFT 0x2
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3DATA_RECEN_MASK 0x8
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3DATA_RECEN__SHIFT 0x3
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3CLK_RECEN_MASK 0x10
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3CLK_RECEN__SHIFT 0x4
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4DATA_RECEN_MASK 0x20
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4DATA_RECEN__SHIFT 0x5
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4CLK_RECEN_MASK 0x40
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4CLK_RECEN__SHIFT 0x6
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5DATA_RECEN_MASK 0x80
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5DATA_RECEN__SHIFT 0x7
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5CLK_RECEN_MASK 0x100
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5CLK_RECEN__SHIFT 0x8
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6DATA_RECEN_MASK 0x200
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6DATA_RECEN__SHIFT 0x9
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6CLK_RECEN_MASK 0x400
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6CLK_RECEN__SHIFT 0xa
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD2_RECEN_MASK 0x800
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD2_RECEN__SHIFT 0xb
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD3_RECEN_MASK 0x1000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD3_RECEN__SHIFT 0xc
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD4_RECEN_MASK 0x2000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD4_RECEN__SHIFT 0xd
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD5_RECEN_MASK 0x4000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD5_RECEN__SHIFT 0xe
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD6_RECEN_MASK 0x8000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD6_RECEN__SHIFT 0xf
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_ENA_BL_RECEN_MASK 0x10000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_ENA_BL_RECEN__SHIFT 0x10
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_A_RECEN_MASK 0x20000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_A_RECEN__SHIFT 0x11
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_B_RECEN_MASK 0x40000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_B_RECEN__SHIFT 0x12
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA0_MASK_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA0_MASK__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK0_MASK_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK0_MASK__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK0_MASK_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK0_MASK__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK0_MASK_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK0_MASK__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF0_MASK_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF0_MASK__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA1_MASK_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA1_MASK__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK1_MASK_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK1_MASK__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK1_MASK_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK1_MASK__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK1_MASK_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK1_MASK__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF1_MASK_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF1_MASK__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA0_A_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA0_A__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK0_A_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK0_A__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK0_A_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK0_A__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK0_A_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK0_A__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF0_A_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF0_A__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA1_A_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA1_A__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK1_A_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK1_A__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK1_A_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK1_A__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK1_A_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK1_A__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF1_A_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF1_A__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA0_EN_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA0_EN__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK0_EN_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK0_EN__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK0_EN_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK0_EN__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK0_EN_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK0_EN__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF0_EN_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF0_EN__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA1_EN_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA1_EN__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK1_EN_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK1_EN__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK1_EN_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK1_EN__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK1_EN_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK1_EN__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF1_EN_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF1_EN__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_APORT_MASK 0x2000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_APORT__SHIFT 0xd
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_PU_MASK 0x4000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_PU__SHIFT 0xe
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_RXSEL_MASK 0x8000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_RXSEL__SHIFT 0xf
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SCHMEN_MASK 0x10000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SCHMEN__SHIFT 0x10
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SMODE_EN_MASK 0x20000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SMODE_EN__SHIFT 0x11
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_IMODE_MASK 0x40000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_IMODE__SHIFT 0x12
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA0_Y_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA0_Y__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK0_Y_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK0_Y__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK0_Y_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK0_Y__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK0_Y_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK0_Y__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF0_Y_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF0_Y__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA1_Y_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA1_Y__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK1_Y_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK1_Y__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK1_Y_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK1_Y__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK1_Y_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK1_Y__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF1_Y_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF1_Y__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S0_DRVSTRENGTH_MASK 0x7
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S0_DRVSTRENGTH__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SN_MASK 0x700
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SP_MASK 0x3800
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SP__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S1_DRVSTRENGTH_MASK 0x70000
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S1_DRVSTRENGTH__SHIFT 0x10
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SN_MASK 0x7000000
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SP_MASK 0x38000000
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SP__SHIFT 0x1b
+#define DC_GPIO_TX12_EN__DC_GPIO_BLON_TX12_EN_MASK 0x1
+#define DC_GPIO_TX12_EN__DC_GPIO_BLON_TX12_EN__SHIFT 0x0
+#define DC_GPIO_TX12_EN__DC_GPIO_DIGON_TX12_EN_MASK 0x2
+#define DC_GPIO_TX12_EN__DC_GPIO_DIGON_TX12_EN__SHIFT 0x1
+#define DC_GPIO_TX12_EN__DC_GPIO_ENA_BL_TX12_EN_MASK 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_ENA_BL_TX12_EN__SHIFT 0x2
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN_MASK 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN__SHIFT 0x3
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN_MASK 0x10
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN__SHIFT 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN_MASK 0x20
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN__SHIFT 0x5
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN_MASK 0x40
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN__SHIFT 0x6
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN_MASK 0x80
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN__SHIFT 0x7
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN_MASK 0x100
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN__SHIFT 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN_MASK 0x200
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL_MASK 0x3
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL_MASK 0xc
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL_MASK 0x30
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL_MASK 0xc0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL_MASK 0x300
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL_MASK 0xc00
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN_MASK 0x10000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN_MASK 0x20000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN_MASK 0x40000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN_MASK 0x80000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN_MASK 0x100000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN_MASK 0x200000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL_MASK 0x1000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL_MASK 0x2000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL_MASK 0x4000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL_MASK 0x8000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL_MASK 0x10000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL_MASK 0x20000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_0P9_MASK 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_0P9_MASK 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_0P9__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_0P9_MASK 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_0P9_MASK 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_0P9__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_0P9_MASK 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_0P9_MASK 0x20
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_0P9__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_1P1_MASK 0x100
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_1P1__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_1P1_MASK 0x200
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_1P1__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_1P1_MASK 0x400
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_1P1__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_1P1_MASK 0x800
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_1P1__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_1P1_MASK 0x1000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_1P1__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_1P1_MASK 0x2000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_1P1__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_0P9_MASK 0x10000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_0P9_MASK 0x20000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_0P9__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_0P9_MASK 0x40000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_0P9_MASK 0x80000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_0P9__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_0P9_MASK 0x100000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_0P9__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_0P9_MASK 0x200000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_0P9__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_1P1_MASK 0x1000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_1P1__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_1P1_MASK 0x2000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_1P1__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_1P1_MASK 0x4000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_1P1__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_1P1_MASK 0x8000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_1P1__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_1P1_MASK 0x10000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_1P1__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_1P1_MASK 0x20000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_1P1__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX1_BIASCRTEN_MASK 0x1
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX1_BIASCRTEN__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX2_BIASCRTEN_MASK 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX2_BIASCRTEN__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX3_BIASCRTEN_MASK 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX3_BIASCRTEN__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX4_BIASCRTEN_MASK 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX4_BIASCRTEN__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX5_BIASCRTEN_MASK 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX5_BIASCRTEN__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX6_BIASCRTEN_MASK 0x20
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX6_BIASCRTEN__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX1_SPARE_MASK 0xc0
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX1_SPARE__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX2_SPARE_MASK 0x300
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX2_SPARE__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX3_SPARE_MASK 0xc00
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX3_SPARE__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX4_SPARE_MASK 0x3000
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX4_SPARE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX5_SPARE_MASK 0xc000
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX5_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX6_SPARE_MASK 0x30000
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX6_SPARE__SHIFT 0x10
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x3
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_FALLSLEWSEL_MASK 0xc
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_FALLSLEWSEL_MASK 0x30
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCEN_MASK 0x100
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCEN_MASK 0x200
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCEN__SHIFT 0x9
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCEN_MASK 0x400
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCEN__SHIFT 0xa
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x1000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCSEL_MASK 0x2000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCSEL__SHIFT 0xd
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCSEL_MASK 0x4000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCSEL__SHIFT 0xe
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_0P9_MASK 0x10000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_0P9_MASK 0x20000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_0P9__SHIFT 0x11
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_0P9_MASK 0x40000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_0P9__SHIFT 0x12
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_1P1_MASK 0x100000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_1P1__SHIFT 0x14
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_1P1_MASK 0x200000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_1P1__SHIFT 0x15
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_1P1_MASK 0x400000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_1P1__SHIFT 0x16
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_0P9_MASK 0x1000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_0P9__SHIFT 0x18
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_0P9_MASK 0x2000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_0P9__SHIFT 0x19
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_0P9_MASK 0x4000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_0P9__SHIFT 0x1a
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_1P1_MASK 0x10000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_1P1__SHIFT 0x1c
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_1P1_MASK 0x20000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_1P1__SHIFT 0x1d
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_1P1_MASK 0x40000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_1P1__SHIFT 0x1e
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_BIASCRTEN_MASK 0x1
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_BIASCRTEN__SHIFT 0x0
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_BIASCRTEN_MASK 0x2
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_BIASCRTEN__SHIFT 0x1
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_BIASCRTEN_MASK 0x4
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_BIASCRTEN__SHIFT 0x2
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_SLEWN_MASK 0x10
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_SLEWN__SHIFT 0x4
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_SLEWN_MASK 0x20
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_SLEWN__SHIFT 0x5
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_SLEWN_MASK 0x40
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_SLEWN__SHIFT 0x6
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED58__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED58__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED59__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED59__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED60__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED60__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED61__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED61__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED62__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED62__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED63__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED63__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED64__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED64__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED65__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED65__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED66__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED66__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED67__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED67__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED68__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED68__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED69__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED69__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED70__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED70__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED71__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED71__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED72__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED72__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED73__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED73__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED74__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED74__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED75__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED75__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED76__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED76__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED77__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED77__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED78__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED78__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED79__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED79__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED80__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED80__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED81__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED81__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED82__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED82__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED83__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED83__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED84__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED84__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED85__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED85__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED86__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED86__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED87__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED87__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED88__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED88__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED89__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED89__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED90__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED90__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED91__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED91__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED92__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED92__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED93__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED93__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED94__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED94__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED95__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED95__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED96__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED96__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED97__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED97__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED98__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED98__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED99__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED99__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED100__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED100__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED101__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED101__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED102__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED102__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED103__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED103__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED104__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED104__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED105__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED105__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED106__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED106__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED107__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED107__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED108__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED108__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED109__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED109__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED110__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED110__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED111__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED111__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED112__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED112__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED113__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED113__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED114__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED114__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED115__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED115__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED116__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED116__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED117__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED117__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED118__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED118__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED119__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED119__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED120__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED120__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED121__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED121__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED122__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED122__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED123__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED123__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED124__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED124__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED125__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED125__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED126__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED126__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED127__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED127__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED128__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED128__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED129__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED129__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED130__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED130__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED131__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED131__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED132__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED132__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED133__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED133__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED134__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED134__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED135__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED135__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED136__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED136__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED137__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED137__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED138__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED138__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED139__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED139__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED140__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED140__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED141__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED141__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED142__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED142__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED143__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED143__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED144__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED144__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED145__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED145__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED146__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED146__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED147__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED147__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED148__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED148__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED149__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED149__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED150__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED150__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED151__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED151__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED152__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED152__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED153__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED153__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED154__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED154__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED155__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED155__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED156__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED156__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED157__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED157__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED158__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED158__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED159__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED159__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED0__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED0__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED1__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED1__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED2__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED2__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED3__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED3__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED4__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED4__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED5__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED5__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED6__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED6__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED7__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED7__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED8__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED8__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED9__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED9__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED10__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED10__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED11__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED11__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED12__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED12__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED13__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED13__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED14__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED14__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED15__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED15__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED16__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED16__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED17__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED17__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED18__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED18__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED19__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED19__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED20__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED20__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED21__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED21__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED22__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED22__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED23__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED23__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED24__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED24__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED25__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED25__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED26__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED26__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED27__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED27__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED28__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED28__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED29__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED29__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED30__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED30__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED31__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED31__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED32__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED32__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED33__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED33__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED34__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED34__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED35__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED35__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED36__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED36__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED37__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED37__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED38__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED38__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED39__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED39__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED40__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED40__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED41__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED41__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED42__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED42__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED43__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED43__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED44__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED44__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED45__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED45__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED46__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED46__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED47__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED47__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED48__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED48__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED49__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED49__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED50__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED50__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED51__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED51__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED52__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED52__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED53__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED53__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED54__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED54__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED55__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED55__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED56__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED56__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED57__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED57__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED58__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED58__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED59__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED59__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED60__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED60__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED61__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED61__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED62__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED62__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED63__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED63__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED64__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED64__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED65__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED65__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED66__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED66__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED67__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED67__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED68__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED68__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED69__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED69__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED70__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED70__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED71__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED71__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED72__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED72__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED73__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED73__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED74__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED74__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED75__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED75__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED76__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED76__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED77__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED77__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED78__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED78__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED79__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED79__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED80__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED80__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED81__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED81__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED82__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED82__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED83__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED83__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED84__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED84__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED85__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED85__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED86__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED86__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED87__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED87__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED88__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED88__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED89__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED89__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED90__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED90__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED91__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED91__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED92__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED92__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED93__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED93__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED94__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED94__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED95__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED95__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED96__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED96__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED97__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED97__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED98__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED98__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED99__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED99__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED100__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED100__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED101__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED101__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED102__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED102__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED103__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED103__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED104__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED104__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED105__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED105__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED106__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED106__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED107__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED107__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED108__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED108__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED109__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED109__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED110__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED110__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED111__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED111__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED112__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED112__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED113__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED113__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED114__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED114__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED115__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED115__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED116__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED116__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED117__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED117__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED118__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED118__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED119__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED119__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED120__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED120__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED121__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED121__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED122__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED122__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED123__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED123__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED124__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED124__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED125__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED125__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED126__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED126__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED127__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED127__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED128__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED128__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED129__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED129__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED130__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED130__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED131__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED131__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED132__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED132__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED133__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED133__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED134__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED134__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED135__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED135__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED136__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED136__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED137__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED137__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED138__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED138__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED139__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED139__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED140__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED140__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED141__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED141__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED142__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED142__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED143__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED143__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED144__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED144__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED145__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED145__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED146__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED146__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED147__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED147__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED148__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED148__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED149__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED149__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED150__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED150__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED151__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED151__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED152__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED152__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED153__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED153__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED154__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED154__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED155__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED155__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED156__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED156__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED157__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED157__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED158__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED158__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED159__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED159__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED160__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED160__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED161__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED161__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED162__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED162__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED163__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED163__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED164__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED164__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED165__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED165__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED166__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED166__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED167__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED167__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED168__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED168__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED169__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED169__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED170__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED170__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED171__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED171__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED172__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED172__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED173__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED173__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED174__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED174__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED175__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED175__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED176__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED176__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED177__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED177__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED178__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED178__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED179__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED179__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED180__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED180__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED181__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED181__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED182__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED182__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED183__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED183__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED184__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED184__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED185__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED185__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED186__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED186__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED187__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED187__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED188__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED188__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED189__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED189__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED190__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED190__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED191__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED191__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED192__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED192__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED193__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED193__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED194__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED194__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED195__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED195__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED196__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED196__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED197__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED197__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED198__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED198__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED199__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED199__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED200__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED200__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED201__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED201__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED202__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED202__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED203__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED203__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED204__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED204__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED205__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED205__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED206__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED206__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED207__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED207__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED208__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED208__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED209__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED209__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED210__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED210__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED211__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED211__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED212__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED212__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED213__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED213__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED214__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED214__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED215__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED215__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED216__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED216__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED217__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED217__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED218__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED218__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED219__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED219__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED220__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED220__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED221__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED221__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED222__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED222__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED223__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED223__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED224__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED224__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED225__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED225__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED226__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED226__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED227__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED227__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED228__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED228__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED229__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED229__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED230__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED230__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED231__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED231__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED232__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED232__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED233__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED233__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED234__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED234__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED235__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED235__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED236__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED236__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED237__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED237__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED238__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED238__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED239__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED239__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED240__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED240__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED241__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED241__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED242__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED242__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED243__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED243__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED244__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED244__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED245__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED245__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED246__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED246__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED247__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED247__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED248__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED248__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED249__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED249__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED250__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED250__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED251__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED251__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED252__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED252__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED253__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED253__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED254__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED254__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED255__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED255__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED256__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED256__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED257__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED257__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED258__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED258__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED259__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED259__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED260__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED260__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED261__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED261__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED262__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED262__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED263__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED263__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED264__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED264__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED265__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED265__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED266__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED266__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED267__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED267__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED268__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED268__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED269__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED269__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED270__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED270__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED271__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED271__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED272__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED272__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED273__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED273__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED274__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED274__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED275__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED275__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED276__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED276__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED277__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED277__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED278__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED278__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED279__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED279__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED280__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED280__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED281__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED281__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED282__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED282__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED283__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED283__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED284__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED284__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED285__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED285__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED286__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED286__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED287__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED287__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED288__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED288__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED289__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED289__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED290__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED290__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED291__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED291__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED292__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED292__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED293__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED293__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED294__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED294__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED295__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED295__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED296__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED296__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED297__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED297__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED298__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED298__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED299__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED299__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED300__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED300__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED301__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED301__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED302__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED302__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED303__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED303__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED304__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED304__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED305__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED305__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED306__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED306__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED307__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED307__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED308__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED308__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED309__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED309__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED310__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED310__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED311__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED311__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED312__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED312__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED313__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED313__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED314__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED314__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED315__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED315__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED316__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED316__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED317__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED317__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED318__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED318__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED319__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED319__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED320__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED320__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED321__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED321__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED322__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED322__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED323__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED323__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED324__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED324__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED325__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED325__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED326__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED326__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED327__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED327__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED328__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED328__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED329__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED329__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED330__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED330__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED331__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED331__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED332__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED332__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED333__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED333__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED334__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED334__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED335__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED335__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED336__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED336__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED337__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED337__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED338__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED338__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED339__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED339__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED340__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED340__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED341__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED341__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED342__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED342__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED343__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED343__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED344__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED344__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED345__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED345__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED346__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED346__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED347__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED347__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED348__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED348__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED349__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED349__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED350__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED350__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED351__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED351__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED352__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED352__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED353__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED353__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED354__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED354__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED355__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED355__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED356__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED356__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED357__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED357__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED358__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED358__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED359__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED359__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED360__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED360__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED361__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED361__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED362__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED362__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED363__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED363__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED364__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED364__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED365__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED365__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED366__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED366__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED367__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED367__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED368__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED368__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED369__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED369__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED370__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED370__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED371__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED371__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED372__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED372__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED373__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED373__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED374__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED374__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED375__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED375__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED376__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED376__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED377__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED377__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED378__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED378__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED379__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED379__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED0__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED0__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED1__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED1__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED2__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED2__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED3__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED3__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED4__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED4__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED5__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED5__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED6__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED6__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED7__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED7__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED8__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED8__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED9__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED9__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED10__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED10__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED11__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED11__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED12__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED12__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED13__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED13__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED14__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED14__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED15__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED15__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED16__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED16__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED17__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED17__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED18__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED18__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED19__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED19__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED20__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED20__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED21__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED21__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED22__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED22__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED23__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED23__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED24__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED24__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED25__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED25__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED26__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED26__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED27__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED27__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED28__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED28__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED29__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED29__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED30__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED30__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED31__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED31__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED32__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED32__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED33__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED33__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED34__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED34__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED35__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED35__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED36__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED36__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED37__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED37__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED38__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED38__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED39__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED39__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED40__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED40__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED41__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED41__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED42__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED42__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED43__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED43__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED44__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED44__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED45__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED45__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED46__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED46__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED47__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED47__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED48__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED48__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED49__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED49__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED50__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED50__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED51__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED51__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED52__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED52__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED53__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED53__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED54__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED54__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED55__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED55__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED56__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED56__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED57__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED57__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED58__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED58__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED59__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED59__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED60__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED60__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED61__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED61__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED62__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED62__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED63__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED63__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define GRPH_ENABLE__GRPH_ENABLE_MASK 0x1
+#define GRPH_ENABLE__GRPH_ENABLE__SHIFT 0x0
+#define GRPH_ENABLE__GRPH_KEYER_ALPHA_SEL_MASK 0x2
+#define GRPH_ENABLE__GRPH_KEYER_ALPHA_SEL__SHIFT 0x1
+#define GRPH_CONTROL__GRPH_DEPTH_MASK 0x3
+#define GRPH_CONTROL__GRPH_DEPTH__SHIFT 0x0
+#define GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0xc
+#define GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x2
+#define GRPH_CONTROL__GRPH_Z_MASK 0x30
+#define GRPH_CONTROL__GRPH_Z__SHIFT 0x4
+#define GRPH_CONTROL__GRPH_BANK_WIDTH_MASK 0xc0
+#define GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT 0x6
+#define GRPH_CONTROL__GRPH_FORMAT_MASK 0x700
+#define GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x8
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT_MASK 0x1800
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT 0xb
+#define GRPH_CONTROL__GRPH_TILE_SPLIT_MASK 0xe000
+#define GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT 0xd
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE_MASK 0x10000
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x10
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE_MASK 0x20000
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x11
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_MASK 0xc0000
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT 0x12
+#define GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0xf00000
+#define GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x14
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT 0x18
+#define GRPH_CONTROL__GRPH_MICRO_TILE_MODE_MASK 0x60000000
+#define GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT 0x1d
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE_MASK 0x80000000
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE__SHIFT 0x1f
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK 0x100
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN__SHIFT 0x8
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_MASK 0x10000
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN__SHIFT 0x10
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP_MASK 0x3
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT 0x0
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR_MASK 0x30
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT 0x4
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR_MASK 0xc0
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR__SHIFT 0x6
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR_MASK 0x300
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT 0x8
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR_MASK 0xc00
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR__SHIFT 0xa
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE_MASK 0x1
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE__SHIFT 0x0
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE_MASK 0x1
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE__SHIFT 0x0
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_PITCH__GRPH_PITCH_MASK 0x7fff
+#define GRPH_PITCH__GRPH_PITCH__SHIFT 0x0
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X_MASK 0x3fff
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X__SHIFT 0x0
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y_MASK 0x3fff
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y__SHIFT 0x0
+#define GRPH_X_START__GRPH_X_START_MASK 0x3fff
+#define GRPH_X_START__GRPH_X_START__SHIFT 0x0
+#define GRPH_Y_START__GRPH_Y_START_MASK 0x3fff
+#define GRPH_Y_START__GRPH_Y_START__SHIFT 0x0
+#define GRPH_X_END__GRPH_X_END_MASK 0x7fff
+#define GRPH_X_END__GRPH_X_END__SHIFT 0x0
+#define GRPH_Y_END__GRPH_Y_END_MASK 0x7fff
+#define GRPH_Y_END__GRPH_Y_END__SHIFT 0x0
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE_MASK 0x1
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT 0x0
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING_MASK 0x1
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING__SHIFT 0x0
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN_MASK 0x2
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN__SHIFT 0x1
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK 0x4
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING__SHIFT 0x2
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN_MASK 0x8
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN__SHIFT 0x3
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE_MASK 0x100
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE__SHIFT 0x8
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK 0x10000
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT 0x10
+#define GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK_MASK 0x100000
+#define GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK__SHIFT 0x14
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_MASK 0x10000000
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x1c
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK 0x1
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN__SHIFT 0x0
+#define GRPH_FLIP_CONTROL__GRPH_XDMA_SUPER_AA_EN_MASK 0x2
+#define GRPH_FLIP_CONTROL__GRPH_XDMA_SUPER_AA_EN__SHIFT 0x1
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_IMMEDIATE_EN_MASK 0x10
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_IMMEDIATE_EN__SHIFT 0x4
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE_MASK 0x20
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE__SHIFT 0x5
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE_MASK 0xffffff00
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE__SHIFT 0x8
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET_MASK 0x1
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET__SHIFT 0x0
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE_MASK 0x70
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE__SHIFT 0x4
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES_MASK 0x700
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES__SHIFT 0x8
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES_MASK 0xf
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES__SHIFT 0x0
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES_MASK 0xf0
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES__SHIFT 0x4
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG_MASK 0x100
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG__SHIFT 0x8
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK_MASK 0x200
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK__SHIFT 0x9
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED__SHIFT 0x0
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR__SHIFT 0x8
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK__SHIFT 0x0
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE_MASK 0x100
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE__SHIFT 0x8
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE_MASK 0xff
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE__SHIFT 0x0
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH_MASK 0x1ffc0
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH__SHIFT 0x6
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT__GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_MASK 0xff
+#define GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT__GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT__SHIFT 0x0
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT_MASK 0x1
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT__SHIFT 0x0
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN_MASK 0x2
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN__SHIFT 0x1
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN_MASK 0x4
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN__SHIFT 0x2
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN_MASK 0x8
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN__SHIFT 0x3
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK 0x10
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS__SHIFT 0x4
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R_MASK 0xffff
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R__SHIFT 0x0
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R_MASK 0xffff0000
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R__SHIFT 0x10
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G_MASK 0xffff
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G__SHIFT 0x0
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G_MASK 0xffff0000
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G__SHIFT 0x10
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B_MASK 0xffff
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B__SHIFT 0x0
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B_MASK 0xffff0000
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B__SHIFT 0x10
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE_MASK 0x3
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT 0x0
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11_MASK 0xffff
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11__SHIFT 0x0
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12_MASK 0xffff0000
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12__SHIFT 0x10
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13_MASK 0xffff
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13__SHIFT 0x0
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14_MASK 0xffff0000
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14__SHIFT 0x10
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21_MASK 0xffff
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21__SHIFT 0x0
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22_MASK 0xffff0000
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22__SHIFT 0x10
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23_MASK 0xffff
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23__SHIFT 0x0
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24_MASK 0xffff0000
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24__SHIFT 0x10
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31_MASK 0xffff
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31__SHIFT 0x0
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32_MASK 0xffff0000
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32__SHIFT 0x10
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33_MASK 0xffff
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33__SHIFT 0x0
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34_MASK 0xffff0000
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34__SHIFT 0x10
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE_MASK 0x7
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11_MASK 0xffff
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12_MASK 0xffff0000
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12__SHIFT 0x10
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13_MASK 0xffff
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13__SHIFT 0x0
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14_MASK 0xffff0000
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14__SHIFT 0x10
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21_MASK 0xffff
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21__SHIFT 0x0
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22_MASK 0xffff0000
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22__SHIFT 0x10
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23_MASK 0xffff
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23__SHIFT 0x0
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24_MASK 0xffff0000
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24__SHIFT 0x10
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31_MASK 0xffff
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31__SHIFT 0x0
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32_MASK 0xffff0000
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32__SHIFT 0x10
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33_MASK 0xffff
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33__SHIFT 0x0
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34_MASK 0xffff0000
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34__SHIFT 0x10
+#define DENORM_CONTROL__DENORM_MODE_MASK 0x7
+#define DENORM_CONTROL__DENORM_MODE__SHIFT 0x0
+#define DENORM_CONTROL__DENORM_14BIT_OUT_MASK 0x10
+#define DENORM_CONTROL__DENORM_14BIT_OUT__SHIFT 0x4
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE_MASK 0xf
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MAX_R_CR_MASK 0x3fff
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MAX_R_CR__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MIN_R_CR_MASK 0x3fff0000
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MIN_R_CR__SHIFT 0x10
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MAX_G_Y_MASK 0x3fff
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MAX_G_Y__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MIN_G_Y_MASK 0x3fff0000
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MIN_G_Y__SHIFT 0x10
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MAX_B_CB_MASK 0x3fff
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MAX_B_CB__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MIN_B_CB_MASK 0x3fff0000
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MIN_B_CB__SHIFT 0x10
+#define KEY_CONTROL__KEY_MODE_MASK 0x6
+#define KEY_CONTROL__KEY_MODE__SHIFT 0x1
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW_MASK 0xffff
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW__SHIFT 0x0
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH_MASK 0xffff0000
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH__SHIFT 0x10
+#define KEY_RANGE_RED__KEY_RED_LOW_MASK 0xffff
+#define KEY_RANGE_RED__KEY_RED_LOW__SHIFT 0x0
+#define KEY_RANGE_RED__KEY_RED_HIGH_MASK 0xffff0000
+#define KEY_RANGE_RED__KEY_RED_HIGH__SHIFT 0x10
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW_MASK 0xffff
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW__SHIFT 0x0
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH_MASK 0xffff0000
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH__SHIFT 0x10
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW_MASK 0xffff
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW__SHIFT 0x0
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH_MASK 0xffff0000
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH__SHIFT 0x10
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x3
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x0
+#define DEGAMMA_CONTROL__CURSOR2_DEGAMMA_MODE_MASK 0x300
+#define DEGAMMA_CONTROL__CURSOR2_DEGAMMA_MODE__SHIFT 0x8
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE_MASK 0x3000
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0xc
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE_MASK 0x3
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT 0x0
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11_MASK 0xffff
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11__SHIFT 0x0
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12_MASK 0xffff0000
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12__SHIFT 0x10
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13_MASK 0xffff
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13__SHIFT 0x0
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14_MASK 0xffff0000
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14__SHIFT 0x10
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21_MASK 0xffff
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21__SHIFT 0x0
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22_MASK 0xffff0000
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22__SHIFT 0x10
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23_MASK 0xffff
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23__SHIFT 0x0
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24_MASK 0xffff0000
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24__SHIFT 0x10
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31_MASK 0xffff
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31__SHIFT 0x0
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32_MASK 0xffff0000
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32__SHIFT 0x10
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33_MASK 0xffff
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33__SHIFT 0x0
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34_MASK 0xffff0000
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34__SHIFT 0x10
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN_MASK 0x1
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN__SHIFT 0x0
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE_MASK 0x30
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE__SHIFT 0x4
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH_MASK 0xc0
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH__SHIFT 0x6
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE_MASK 0x100
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE__SHIFT 0x8
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE_MASK 0x200
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE__SHIFT 0x9
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE_MASK 0x400
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE__SHIFT 0xa
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED_MASK 0xff
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED__SHIFT 0x0
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED_MASK 0xff00
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED__SHIFT 0x8
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED_MASK 0xff0000
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED__SHIFT 0x10
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA_MASK 0x3ffff
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA__SHIFT 0x0
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX_MASK 0x7f00000
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX__SHIFT 0x14
+#define CUR_CONTROL__CURSOR_EN_MASK 0x1
+#define CUR_CONTROL__CURSOR_EN__SHIFT 0x0
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP_MASK 0x10
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP__SHIFT 0x4
+#define CUR_CONTROL__CURSOR_MODE_MASK 0x300
+#define CUR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CUR_CONTROL__CURSOR_BUSY_START_LINE_POSITION_MASK 0xf000
+#define CUR_CONTROL__CURSOR_BUSY_START_LINE_POSITION__SHIFT 0xc
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x10000
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x10
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON_MASK 0x100000
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON__SHIFT 0x14
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL_MASK 0x7000000
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT 0x18
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xffffffff
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CUR_SIZE__CURSOR_HEIGHT_MASK 0x7f
+#define CUR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CUR_SIZE__CURSOR_WIDTH_MASK 0x7f0000
+#define CUR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CUR_POSITION__CURSOR_Y_POSITION_MASK 0x3fff
+#define CUR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CUR_POSITION__CURSOR_X_POSITION_MASK 0x3fff0000
+#define CUR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x7f
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x7f0000
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CUR_COLOR1__CUR_COLOR1_BLUE_MASK 0xff
+#define CUR_COLOR1__CUR_COLOR1_BLUE__SHIFT 0x0
+#define CUR_COLOR1__CUR_COLOR1_GREEN_MASK 0xff00
+#define CUR_COLOR1__CUR_COLOR1_GREEN__SHIFT 0x8
+#define CUR_COLOR1__CUR_COLOR1_RED_MASK 0xff0000
+#define CUR_COLOR1__CUR_COLOR1_RED__SHIFT 0x10
+#define CUR_COLOR2__CUR_COLOR2_BLUE_MASK 0xff
+#define CUR_COLOR2__CUR_COLOR2_BLUE__SHIFT 0x0
+#define CUR_COLOR2__CUR_COLOR2_GREEN_MASK 0xff00
+#define CUR_COLOR2__CUR_COLOR2_GREEN__SHIFT 0x8
+#define CUR_COLOR2__CUR_COLOR2_RED_MASK 0xff0000
+#define CUR_COLOR2__CUR_COLOR2_RED__SHIFT 0x10
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING_MASK 0x1
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING__SHIFT 0x0
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN_MASK 0x2
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN__SHIFT 0x1
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK 0x10000
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK__SHIFT 0x10
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define CUR_UPDATE__CURSOR_UPDATE_STEREO_MODE_MASK 0x6000000
+#define CUR_UPDATE__CURSOR_UPDATE_STEREO_MODE__SHIFT 0x19
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS_MASK 0x1
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS__SHIFT 0x0
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x1
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_OFFSET_YNX_MASK 0x2
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_OFFSET_YNX__SHIFT 0x1
+#define CUR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x3ff0
+#define CUR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CUR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0x3ff0000
+#define CUR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x10
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE_MASK 0x1
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE__SHIFT 0x0
+#define DC_LUT_RW_MODE__DC_LUT_ERROR_MASK 0x10000
+#define DC_LUT_RW_MODE__DC_LUT_ERROR__SHIFT 0x10
+#define DC_LUT_RW_MODE__DC_LUT_ERROR_RST_MASK 0x20000
+#define DC_LUT_RW_MODE__DC_LUT_ERROR_RST__SHIFT 0x11
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX_MASK 0xff
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX__SHIFT 0x0
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR_MASK 0xffff
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR__SHIFT 0x0
+#define DC_LUT_PWL_DATA__DC_LUT_BASE_MASK 0xffff
+#define DC_LUT_PWL_DATA__DC_LUT_BASE__SHIFT 0x0
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA_MASK 0xffff0000
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA__SHIFT 0x10
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE_MASK 0x3ff
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE__SHIFT 0x0
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN_MASK 0xffc00
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN__SHIFT 0xa
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED_MASK 0x3ff00000
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED__SHIFT 0x14
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE_MASK 0x1
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE__SHIFT 0x0
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK_MASK 0x7
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_MASK 0x1
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL__SHIFT 0x0
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE_MASK 0x2
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE__SHIFT 0x1
+#define DC_LUT_CONTROL__DC_LUT_INC_B_MASK 0xf
+#define DC_LUT_CONTROL__DC_LUT_INC_B__SHIFT 0x0
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN_MASK 0x10
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN__SHIFT 0x4
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN_MASK 0x20
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN__SHIFT 0x5
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT_MASK 0xc0
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT__SHIFT 0x6
+#define DC_LUT_CONTROL__DC_LUT_INC_G_MASK 0xf00
+#define DC_LUT_CONTROL__DC_LUT_INC_G__SHIFT 0x8
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN_MASK 0x1000
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN__SHIFT 0xc
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN_MASK 0x2000
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN__SHIFT 0xd
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT_MASK 0xc000
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT__SHIFT 0xe
+#define DC_LUT_CONTROL__DC_LUT_INC_R_MASK 0xf0000
+#define DC_LUT_CONTROL__DC_LUT_INC_R__SHIFT 0x10
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN_MASK 0x100000
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN__SHIFT 0x14
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN_MASK 0x200000
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN__SHIFT 0x15
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT_MASK 0xc00000
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT__SHIFT 0x16
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE_MASK 0xffff
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE__SHIFT 0x0
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN_MASK 0xffff
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN__SHIFT 0x0
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED_MASK 0xffff
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED__SHIFT 0x0
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE_MASK 0xffff
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE__SHIFT 0x0
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN_MASK 0xffff
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN__SHIFT 0x0
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED_MASK 0xffff
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED__SHIFT 0x0
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE_MASK 0x1
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE__SHIFT 0x0
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL_MASK 0x1c
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL__SHIFT 0x2
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL_MASK 0x300
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL__SHIFT 0x8
+#define DCP_CRC_MASK__DCP_CRC_MASK_MASK 0xffffffff
+#define DCP_CRC_MASK__DCP_CRC_MASK__SHIFT 0x0
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT_MASK 0xffffffff
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT__SHIFT 0x0
+#define DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE_MASK 0x1
+#define DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE__SHIFT 0x0
+#define DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH_MASK 0x1e
+#define DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH__SHIFT 0x1
+#define DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT_MASK 0x1e0
+#define DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT__SHIFT 0x5
+#define DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP_MASK 0x7fe00
+#define DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP__SHIFT 0x9
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0_MASK 0x100000
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0__SHIFT 0x14
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1_MASK 0x200000
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1__SHIFT 0x15
+#define DCP_CRC_LAST__DCP_CRC_LAST_MASK 0xffffffff
+#define DCP_CRC_LAST__DCP_CRC_LAST__SHIFT 0x0
+#define DCP_DEBUG__DCP_DEBUG_MASK 0xffffffff
+#define DCP_DEBUG__DCP_DEBUG__SHIFT 0x0
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE_MASK 0x7
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE__SHIFT 0x0
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE_ENABLE_MASK 0x8
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE_ENABLE__SHIFT 0x3
+#define DCP_GSL_CONTROL__DCP_GSL0_EN_MASK 0x1
+#define DCP_GSL_CONTROL__DCP_GSL0_EN__SHIFT 0x0
+#define DCP_GSL_CONTROL__DCP_GSL1_EN_MASK 0x2
+#define DCP_GSL_CONTROL__DCP_GSL1_EN__SHIFT 0x1
+#define DCP_GSL_CONTROL__DCP_GSL2_EN_MASK 0x4
+#define DCP_GSL_CONTROL__DCP_GSL2_EN__SHIFT 0x2
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY_MASK 0xf000
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY__SHIFT 0xc
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN_MASK 0x10000
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN__SHIFT 0x10
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP_MASK 0x60000
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP__SHIFT 0x11
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP_UNDERFLOW_EN_MASK 0x80000
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP_UNDERFLOW_EN__SHIFT 0x13
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE_MASK 0x3000000
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE__SHIFT 0x18
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_MASK 0x8000000
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING__SHIFT 0x1b
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY_MASK 0xf0000000
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY__SHIFT 0x1c
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP_MASK 0xf
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP__SHIFT 0x0
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP_MASK 0x1f0
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP__SHIFT 0x4
+#define DCP_DEBUG_SG__DCP_DEBUG_SG_MASK 0xffffffff
+#define DCP_DEBUG_SG__DCP_DEBUG_SG__SHIFT 0x0
+#define DCP_DEBUG_SG2__DCP_DEBUG_SG2_MASK 0xffffffff
+#define DCP_DEBUG_SG2__DCP_DEBUG_SG2__SHIFT 0x0
+#define DCP_DVMM_DEBUG__DCP_DVMM_DEBUG_MASK 0xffffffff
+#define DCP_DVMM_DEBUG__DCP_DVMM_DEBUG__SHIFT 0x0
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX_MASK 0xff
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA__SHIFT 0x0
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN_MASK 0x1
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN__SHIFT 0x0
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE_MASK 0x300
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE__SHIFT 0x8
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING_MASK 0x10000
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING__SHIFT 0x10
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING_MASK 0x20000
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING__SHIFT 0x11
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE__SHIFT 0x1c
+#define DCP_DEBUG2__DCP_DEBUG2_MASK 0xffffffff
+#define DCP_DEBUG2__DCP_DEBUG2__SHIFT 0x0
+#define HW_ROTATION__GRPH_ROTATION_ANGLE_MASK 0x7
+#define HW_ROTATION__GRPH_ROTATION_ANGLE__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN_MASK 0x1
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE_MASK 0x2
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE__SHIFT 0x1
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_CNT_MASK 0x1fff0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_CNT__SHIFT 0x4
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE_MASK 0x7
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT 0x0
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX_MASK 0x1ff
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x0
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA_MASK 0x7ffff
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA__SHIFT 0x0
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x7
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_MASK 0x3ffff
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START__SHIFT 0x0
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END_MASK 0xffff
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END__SHIFT 0x0
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE_MASK 0xffff
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE_MASK 0xffff0000
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_MASK 0x3ffff
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START__SHIFT 0x0
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END_MASK 0xffff
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END__SHIFT 0x0
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE_MASK 0xffff
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE_MASK 0xffff0000
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define ALPHA_CONTROL__ALPHA_ROUND_TRUNC_MODE_MASK 0x1
+#define ALPHA_CONTROL__ALPHA_ROUND_TRUNC_MODE__SHIFT 0x0
+#define ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK 0x2
+#define ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA__SHIFT 0x1
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT_MASK 0xfffff
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT_STATUS_MASK 0x1000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT_STATUS__SHIFT 0x18
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK_MASK 0x2000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK__SHIFT 0x19
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK_MASK 0x4000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK__SHIFT 0x1a
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK 0x10000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT__SHIFT 0x1c
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK_MASK 0x20000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK__SHIFT 0x1d
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK_MASK 0x40000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK__SHIFT 0x1e
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EN_MASK 0x1
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EN__SHIFT 0x0
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EVENT_SELECT_MASK 0x1e
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EVENT_SELECT__SHIFT 0x1
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED_MASK 0x200
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED__SHIFT 0x9
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MIN_MASK 0xffff
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MIN__SHIFT 0x0
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MAX_MASK 0xffff0000
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MAX__SHIFT 0x10
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x7
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x70
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x100
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG_FE_CNTL__DIG_START_MASK 0x400
+#define DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x1000000
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000
+#define DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xc0000000
+#define DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x1
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x10
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x300
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3fffffff
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x3ff
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x1
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x2
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x10
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x20
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x40
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x3ff0000
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0xffffff
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x1000000
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x1
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x2
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0xfc
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x100
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0xfc00
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x1f0000
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x3c00000
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x4000000
+#define DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT_MASK 0x1
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT__SHIFT 0x0
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_MASK 0x1
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED__SHIFT 0x0
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK 0x10
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT__SHIFT 0x4
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK_MASK 0x100
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK__SHIFT 0x8
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK_MASK 0x1000
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4
+#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8
+#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x10
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x100
+#define HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x200
+#define HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x1000000
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x1
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x10000
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x100000
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define HDMI_STATUS__HDMI_ERROR_INT_MASK 0x8000000
+#define HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x30
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x100
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x1f0000
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x1
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x2
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x30
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x100
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x1000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x70000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x1
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x10
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x20
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x100
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT 0x0
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK 0x2
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT__SHIFT 0x1
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x10
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x20
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x100
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x200
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK 0x3f
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT 0x0
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x3f00
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x3f0000
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x1
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x2
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x10
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x20
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_MASK 0x3f0000
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE__SHIFT 0x10
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_MASK 0x3f000000
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE__SHIFT 0x18
+#define HDMI_GC__HDMI_GC_AVMUTE_MASK 0x1
+#define HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x4
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x10
+#define HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define HDMI_GC__HDMI_PACKING_PHASE_MASK 0xf00
+#define HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x1000
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x1
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x2
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0xff00
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0xff0000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x1000000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x7
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x40
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x80
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0xff
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0xff00
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0xff0000
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xff000000
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0xff
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0xff00
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0xff0000
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xff000000
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0xff
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0xff00
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0xff0000
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xff000000
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0xff
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0xff00
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0xff0000
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xff000000
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0xff
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0xff00
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0xff0000
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xff000000
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0xff
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0xff00
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0xff0000
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xff000000
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0xff
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0xff00
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0xff0000
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xff000000
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0xff
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0xff00
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0xff0000
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xff000000
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM_MASK 0xff
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S_MASK 0x300
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S__SHIFT 0x8
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B_MASK 0xc00
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B__SHIFT 0xa
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A_MASK 0x1000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A__SHIFT 0xc
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y_MASK 0xe000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y__SHIFT 0xd
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R_MASK 0xf0000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R__SHIFT 0x10
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M_MASK 0x300000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M__SHIFT 0x14
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C_MASK 0xc00000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C__SHIFT 0x16
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC_MASK 0x3000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC__SHIFT 0x18
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q_MASK 0xc000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q__SHIFT 0x1a
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC_MASK 0x70000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC__SHIFT 0x1c
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC_MASK 0x80000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC__SHIFT 0x1f
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC_MASK 0xff
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC__SHIFT 0x0
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR_MASK 0xf00
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR__SHIFT 0x8
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN_MASK 0x3000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN__SHIFT 0xc
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ_MASK 0xc000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ__SHIFT 0xe
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP_MASK 0xffff0000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP__SHIFT 0x10
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM_MASK 0xffff
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM__SHIFT 0x0
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT_MASK 0xffff0000
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT__SHIFT 0x10
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT_MASK 0xffff
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT__SHIFT 0x0
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION_MASK 0xff000000
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION__SHIFT 0x18
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0xff
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0xff00
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0xff0000
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xff000000
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0xff
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x300
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x1000
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0xff
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0xff00
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0xff0000
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xff000000
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0xff
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0xff00
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0xff0000
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xff000000
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0xff
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0xff00
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0xff0000
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xff000000
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0xff
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0xff00
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0xff0000
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xff000000
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0xff
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0xff00
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0xff0000
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xff000000
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0xff
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0xff00
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0xff0000
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xff000000
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0xff
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0xff00
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0xff0000
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xff000000
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0xff
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0xff00
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0xff0000
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xff000000
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0xff
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0xff00
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0xff0000
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xff000000
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND_MASK 0x1
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND__SHIFT 0x0
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT_MASK 0x2
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT__SHIFT 0x1
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND_MASK 0x10
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND__SHIFT 0x4
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT_MASK 0x20
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT__SHIFT 0x5
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE_MASK 0x3f0000
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE__SHIFT 0x10
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE_MASK 0x3f000000
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE__SHIFT 0x18
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xfffff000
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0xfffff
+#define HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xfffff000
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0xfffff
+#define HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xfffff000
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0xfffff
+#define HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xfffff000
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0xfffff
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0xff
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x700
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x7800
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0xff0000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1f000000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0xff
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x7800
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x8000
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x30000
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT_60958_0__AFMT_60958_CS_A_MASK 0x1
+#define AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT_60958_0__AFMT_60958_CS_B_MASK 0x2
+#define AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT_60958_0__AFMT_60958_CS_C_MASK 0x4
+#define AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT_60958_0__AFMT_60958_CS_D_MASK 0x38
+#define AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0xc0
+#define AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0xff00
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0xf0000
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0xf00000
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0xf000000
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0xf
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0xf0
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x10000
+#define AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x40000
+#define AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0xf00000
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x1
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x10
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x100
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0xf000
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xffff0000
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xff000000
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0xf
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0xf0
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0xf00
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0xf000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0xf0000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0xf00000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x1
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xffffff00
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x10
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x100
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x1000000
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x1
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x800
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x1000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x4000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x800000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x1000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x4000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE_MASK 0x4
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE__SHIFT 0x2
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE_MASK 0x8
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE__SHIFT 0x3
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xc0000000
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1e
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x40
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x80
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x400
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x7
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL_MASK 0x7
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL__SHIFT 0x0
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE_MASK 0x100
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE__SHIFT 0x8
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI_MASK 0x7000
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI__SHIFT 0xc
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV_MASK 0x70000
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV__SHIFT 0x10
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x1
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x100
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x1
+#define DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG_BE_CNTL__DIG_SWAP_MASK 0x2
+#define DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x4
+#define DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x7f00
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG_BE_CNTL__DIG_MODE_MASK 0x70000
+#define DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000
+#define DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x1
+#define DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x100
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x1
+#define TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x1
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x2
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x4
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x8
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x3
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x300
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x3
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x3ff
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x3ff0000
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x3ff
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x3ff0000
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define TMDS_DEBUG__TMDS_DEBUG_EN_MASK 0x1
+#define TMDS_DEBUG__TMDS_DEBUG_EN__SHIFT 0x0
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_MASK 0x100
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC__SHIFT 0x8
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN_MASK 0x200
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN__SHIFT 0x9
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_MASK 0x10000
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC__SHIFT 0x10
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN_MASK 0x20000
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN__SHIFT 0x11
+#define TMDS_DEBUG__TMDS_DEBUG_DE_MASK 0x1000000
+#define TMDS_DEBUG__TMDS_DEBUG_DE__SHIFT 0x18
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN_MASK 0x2000000
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN__SHIFT 0x19
+#define TMDS_CTL_BITS__TMDS_CTL0_MASK 0x1
+#define TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define TMDS_CTL_BITS__TMDS_CTL1_MASK 0x100
+#define TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define TMDS_CTL_BITS__TMDS_CTL2_MASK 0x10000
+#define TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define TMDS_CTL_BITS__TMDS_CTL3_MASK 0x1000000
+#define TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x1
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x70
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x100
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0xf0000
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x1000000
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0xf
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x70
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x80
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x300
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x400
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x800
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x1000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0xf0000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x700000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x800000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x3000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x4000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x8000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0xf
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x70
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x80
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x300
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x400
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x800
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x1000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0xf0000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x700000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x800000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x3000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x4000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x8000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG_VERSION__DIG_TYPE_MASK 0x1
+#define DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x1
+#define DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x2
+#define DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x4
+#define DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x8
+#define DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x100
+#define DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_INDEX_MASK 0xff
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DIG_TEST_DEBUG_DATA__DIG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DIG_TEST_DEBUG_DATA__DIG_TEST_DEBUG_DATA__SHIFT 0x0
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_INDEX_MASK 0xff
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DIG_FE_TEST_DEBUG_DATA__DIG_FE_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DIG_FE_TEST_DEBUG_DATA__DIG_FE_TEST_DEBUG_DATA__SHIFT 0x0
+#define DMCU_CTRL__RESET_UC_MASK 0x1
+#define DMCU_CTRL__RESET_UC__SHIFT 0x0
+#define DMCU_CTRL__IGNORE_PWRMGT_MASK 0x2
+#define DMCU_CTRL__IGNORE_PWRMGT__SHIFT 0x1
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC_MASK 0x4
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC__SHIFT 0x2
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC_MASK 0x8
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC__SHIFT 0x3
+#define DMCU_CTRL__DMCU_ENABLE_MASK 0x10
+#define DMCU_CTRL__DMCU_ENABLE__SHIFT 0x4
+#define DMCU_CTRL__DMCU_DYN_CLK_GATING_EN_MASK 0x100
+#define DMCU_CTRL__DMCU_DYN_CLK_GATING_EN__SHIFT 0x8
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT_MASK 0xffff0000
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT__SHIFT 0x10
+#define DMCU_STATUS__UC_IN_RESET_MASK 0x1
+#define DMCU_STATUS__UC_IN_RESET__SHIFT 0x0
+#define DMCU_STATUS__UC_IN_WAIT_MODE_MASK 0x2
+#define DMCU_STATUS__UC_IN_WAIT_MODE__SHIFT 0x1
+#define DMCU_STATUS__UC_IN_STOP_MODE_MASK 0x4
+#define DMCU_STATUS__UC_IN_STOP_MODE__SHIFT 0x2
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB_MASK 0xff
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB_MASK 0xff00
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB_MASK 0xff
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB_MASK 0xff00
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB_MASK 0xff
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB_MASK 0xff00
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB_MASK 0xff
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB_MASK 0xff00
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI_MASK 0xffffffff
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI__SHIFT 0x0
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO_MASK 0xffffffff
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO__SHIFT 0x0
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC_MASK 0x1
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC__SHIFT 0x0
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC_MASK 0x2
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC__SHIFT 0x1
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC_MASK 0x4
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC__SHIFT 0x2
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC_MASK 0x8
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC__SHIFT 0x3
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN_MASK 0x10
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN__SHIFT 0x4
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN_MASK 0x20
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN__SHIFT 0x5
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR_MASK 0xffff
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR__SHIFT 0x0
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE_MASK 0xf0000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE__SHIFT 0x10
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE_MASK 0x100000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE__SHIFT 0x14
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA_MASK 0xffffffff
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA__SHIFT 0x0
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR_MASK 0xffff
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR__SHIFT 0x0
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE_MASK 0xf0000
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE__SHIFT 0x10
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE_MASK 0x100000
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE__SHIFT 0x14
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA_MASK 0xffffffff
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA__SHIFT 0x0
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR_MASK 0x3ff
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR__SHIFT 0x0
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA_MASK 0xff
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA__SHIFT 0x0
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR_MASK 0x3ff
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR__SHIFT 0x0
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA_MASK 0xff
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA__SHIFT 0x0
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC_MASK 0x1
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC__SHIFT 0x0
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE_MASK 0x7f0000
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE__SHIFT 0x10
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST_MASK 0x800000
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST__SHIFT 0x17
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN_MASK 0x1
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN__SHIFT 0x0
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN_MASK 0x2
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN__SHIFT 0x1
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT_MASK 0x4
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT__SHIFT 0x2
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP_MASK 0x8
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP__SHIFT 0x3
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4_MASK 0x10
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4__SHIFT 0x4
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3_MASK 0x20
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3__SHIFT 0x5
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2_MASK 0x40
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2__SHIFT 0x6
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1_MASK 0x80
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1__SHIFT 0x7
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW_MASK 0x100
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW__SHIFT 0x8
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT_MASK 0x200
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT__SHIFT 0x9
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5_MASK 0x400
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5__SHIFT 0xa
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3_MASK 0x800
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3__SHIFT 0xb
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2_MASK 0x1000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2__SHIFT 0xc
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1_MASK 0x2000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1__SHIFT 0xd
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE_MASK 0x4000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE__SHIFT 0xe
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW_MASK 0x8000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW__SHIFT 0xf
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_STATUS_MASK 0x2000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_STATUS__SHIFT 0xd
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_OCCURRED_MASK 0x4000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_OCCURRED__SHIFT 0xe
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_CLEAR_MASK 0x4000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_CLEAR__SHIFT 0xe
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_STATUS_MASK 0x8000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_STATUS__SHIFT 0xf
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_OCCURRED_MASK 0x10000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_OCCURRED__SHIFT 0x10
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_CLEAR_MASK 0x10000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_CLEAR__SHIFT 0x10
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_STATUS_MASK 0x20000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_STATUS__SHIFT 0x11
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_OCCURRED_MASK 0x40000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_OCCURRED__SHIFT 0x12
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_CLEAR_MASK 0x40000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_CLEAR__SHIFT 0x12
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_STATUS_MASK 0x80000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_STATUS__SHIFT 0x13
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_OCCURRED_MASK 0x100000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_CLEAR_MASK 0x100000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_CLEAR__SHIFT 0x14
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_STATUS_MASK 0x200000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_STATUS__SHIFT 0x15
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_OCCURRED_MASK 0x400000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_OCCURRED__SHIFT 0x16
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_CLEAR_MASK 0x400000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_CLEAR__SHIFT 0x16
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_STATUS_MASK 0x800000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_STATUS__SHIFT 0x17
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_OCCURRED_MASK 0x1000000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_OCCURRED__SHIFT 0x18
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_CLEAR_MASK 0x1000000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_CLEAR__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED_MASK 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR_MASK 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED_MASK 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR_MASK 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED_MASK 0x4
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR_MASK 0x4
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED_MASK 0x8
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_OCCURRED_MASK 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_CLEAR_MASK 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_CLEAR__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_OCCURRED_MASK 0x20
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_CLEAR_MASK 0x20
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_CLEAR__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED_MASK 0x100
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR_MASK 0x100
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED_MASK 0x200
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED_MASK 0x400
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR_MASK 0x400
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED_MASK 0x800
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR_MASK 0x800
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED_MASK 0x1000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR_MASK 0x1000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED_MASK 0x4000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR_MASK 0x4000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED_MASK 0x8000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR_MASK 0x8000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED_MASK 0x10000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR_MASK 0x10000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED_MASK 0x20000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR_MASK 0x20000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED_MASK 0x40000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR_MASK 0x40000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED_MASK 0x80000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR_MASK 0x80000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED_MASK 0x100000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR_MASK 0x100000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED_MASK 0x200000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR_MASK 0x200000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED_MASK 0x400000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR_MASK 0x400000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED_MASK 0x800000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR_MASK 0x800000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED_MASK 0x1000000
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR_MASK 0x1000000
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED_MASK 0x2000000
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR_MASK 0x2000000
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED_MASK 0x4000000
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR_MASK 0x4000000
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED_MASK 0x8000000
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR_MASK 0x8000000
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED_MASK 0x10000000
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR_MASK 0x10000000
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED_MASK 0x20000000
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED__SHIFT 0x1d
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR_MASK 0x20000000
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR__SHIFT 0x1d
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_OCCURRED_MASK 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_CLEAR_MASK 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_OCCURRED_MASK 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_CLEAR_MASK 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_OCCURRED_MASK 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_CLEAR_MASK 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_OCCURRED_MASK 0x8
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_CLEAR_MASK 0x8
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_OCCURRED_MASK 0x10
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_OCCURRED__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_CLEAR_MASK 0x10
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_CLEAR__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_OCCURRED_MASK 0x20
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_OCCURRED__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_CLEAR_MASK 0x20
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_CLEAR__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_OCCURRED_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_CLEAR_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK_MASK 0x1
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK_MASK 0x2
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK_MASK 0x4
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK_MASK 0x200
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK_MASK 0x400
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK_MASK 0x800
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_UP_INT_TO_UC_EN_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_UP_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_DOWN_INT_TO_UC_EN_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN1_INT_TO_UC_EN_MASK 0x40
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN1_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN2_INT_TO_UC_EN_MASK 0x80
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN2_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN_MASK 0x100
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN3_INT_TO_UC_EN_MASK 0x200
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN3_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN4_INT_TO_UC_EN_MASK 0x400
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN4_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN5_INT_TO_UC_EN_MASK 0x800
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN5_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN_MASK 0x8000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN_MASK 0x10000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN_MASK 0x20000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN__SHIFT 0x1d
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN6_INT_TO_UC_EN_MASK 0x40000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN6_INT_TO_UC_EN__SHIFT 0x1e
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_UP_INT_TO_UC_EN_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_UP_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_TO_UC_EN_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV0_VBLANK_INT_TO_UC_EN_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV0_VBLANK_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_UP_INT_TO_UC_EN_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_UP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_TO_UC_EN_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV1_VBLANK_INT_TO_UC_EN_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV1_VBLANK_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DMCU_GENERIC_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DMCU_GENERIC_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN1_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN1_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN2_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN2_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN3_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN3_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN4_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN4_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN5_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN5_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL_MASK 0x8000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL_MASK 0x10000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL_MASK 0x20000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL__SHIFT 0x1d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN6_INT_XIRQ_IRQ_SEL_MASK 0x40000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN6_INT_XIRQ_IRQ_SEL__SHIFT 0x1e
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV0_VBLANK_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV0_VBLANK_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV1_VBLANK_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV1_VBLANK_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DMCU_GENERIC_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DMCU_GENERIC_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH_MASK 0xffffffff
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH__SHIFT 0x0
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT_MASK 0xff
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT__SHIFT 0x0
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT_MASK 0xff00
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT__SHIFT 0x8
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT_MASK 0xff0000
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT__SHIFT 0x10
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS_MASK 0x3
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS__SHIFT 0x0
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS_MASK 0xc
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS__SHIFT 0x2
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY_MASK 0x7
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY__SHIFT 0x0
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY_MASK 0x700
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY__SHIFT 0x8
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN_MASK 0x10000
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN__SHIFT 0x10
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0_MASK 0xff
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1_MASK 0xff00
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2_MASK 0xff0000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3_MASK 0xff000000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0_MASK 0xff
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1_MASK 0xff00
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2_MASK 0xff0000
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3_MASK 0xff000000
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0_MASK 0xff
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1_MASK 0xff00
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2_MASK 0xff0000
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3_MASK 0xff000000
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3__SHIFT 0x18
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0_MASK 0xff
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0__SHIFT 0x0
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1_MASK 0xff00
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1__SHIFT 0x8
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2_MASK 0xff0000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2__SHIFT 0x10
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3_MASK 0xff000000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3__SHIFT 0x18
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x1
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0_MASK 0xff
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1_MASK 0xff00
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0_MASK 0xff
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1_MASK 0xff00
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0_MASK 0xff
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1_MASK 0xff00
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0_MASK 0xff
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1_MASK 0xff00
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT_MASK 0x1
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT__SHIFT 0x0
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS_MASK 0x100
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS__SHIFT 0x8
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX_MASK 0xff
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_OCCURRED_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_OCCURRED__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_CLEAR_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_CLEAR__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_OCCURRED_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_OCCURRED__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_CLEAR_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_CLEAR__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_OCCURRED_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_OCCURRED__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_CLEAR_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_CLEAR__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_OCCURRED_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_OCCURRED__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_CLEAR_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_CLEAR__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_OCCURRED_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_OCCURRED__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_CLEAR_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_CLEAR__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_OCCURRED_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_OCCURRED__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_CLEAR_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_CLEAR__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_OCCURRED_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_OCCURRED__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_CLEAR_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_CLEAR__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_OCCURRED_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_OCCURRED__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_CLEAR_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_CLEAR__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_OCCURRED_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_OCCURRED__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_CLEAR_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_CLEAR__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_OCCURRED_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_CLEAR_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_CLEAR__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_OCCURRED_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_OCCURRED__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_CLEAR_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_CLEAR__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_OCCURRED_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_OCCURRED__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_CLEAR_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_CLEAR__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_OCCURRED_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_OCCURRED__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_CLEAR_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_CLEAR__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_OCCURRED_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_OCCURRED__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_CLEAR_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_CLEAR__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_OCCURRED_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_OCCURRED__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_CLEAR_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_CLEAR__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_OCCURRED_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_OCCURRED__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_CLEAR_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_CLEAR__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_OCCURRED_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_OCCURRED__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_CLEAR_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_CLEAR__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_OCCURRED_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_CLEAR_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_CLEAR__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_OCCURRED_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_CLEAR_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_CLEAR__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_OCCURRED_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_CLEAR_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_CLEAR__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_MSA_RECEIVED_INT_TO_UC_EN_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_MSA_RECEIVED_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT0_TO_UC_EN_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT0_TO_UC_EN__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT1_TO_UC_EN_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT1_TO_UC_EN__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_SDP_RECEIVED_INT_TO_UC_EN_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_SDP_RECEIVED_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_MSA_RECEIVED_INT_TO_UC_EN_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_MSA_RECEIVED_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT0_TO_UC_EN_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT0_TO_UC_EN__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT1_TO_UC_EN_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT1_TO_UC_EN__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_SDP_RECEIVED_INT_TO_UC_EN_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_SDP_RECEIVED_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_AUX_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_AUX_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_I2C_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_I2C_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_CPU_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_CPU_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_TO_UC_EN_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_TO_UC_EN_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT0_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT0_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT1_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT1_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT0_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT0_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT1_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT1_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_AUX_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_AUX_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_I2C_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_I2C_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_CPU_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_CPU_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x10
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x100
+#define DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x20000
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x7
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE_MASK 0x100
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE__SHIFT 0x8
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE_MASK 0x10000
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE__SHIFT 0x10
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x7000000
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_MASK 0xff
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE__SHIFT 0x0
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE_MASK 0x100
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE__SHIFT 0x8
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE_MASK 0x200
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE__SHIFT 0x9
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE_MASK 0x20000
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE__SHIFT 0x11
+#define DP_CONFIG__DP_UDI_LANES_MASK 0x3
+#define DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x1
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x300
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x10000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x100000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x1
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x10
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x20
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x40
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x80
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x100
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x1000
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP_MSA_MISC__DP_MSA_MISC1_MASK 0x78
+#define DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x3
+#define DP_MSA_MISC__DP_MSA_MISC2_MASK 0xff00
+#define DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP_MSA_MISC__DP_MSA_MISC3_MASK 0xff0000
+#define DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP_MSA_MISC__DP_MSA_MISC4_MASK 0xff000000
+#define DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP_VID_TIMING__DP_VID_TIMING_MODE_MASK 0x1
+#define DP_VID_TIMING__DP_VID_TIMING_MODE__SHIFT 0x0
+#define DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x10
+#define DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x100
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP_VID_TIMING__DP_VID_M_DOUBLE_VALUE_EN_MASK 0x200
+#define DP_VID_TIMING__DP_VID_M_DOUBLE_VALUE_EN__SHIFT 0x9
+#define DP_VID_TIMING__DP_VID_N_DIV_MASK 0xff000000
+#define DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP_VID_N__DP_VID_N_MASK 0xffffff
+#define DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP_VID_M__DP_VID_M_MASK 0xffffff
+#define DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x3ffff
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x1000000
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x1
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0xfff
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE_MASK 0x10000
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE__SHIFT 0x10
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x1000000
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x1
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x2
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x4
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x1
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x2
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x4
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x8
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x10000
+#define DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x1000000
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x3
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP_DPHY_SYM0__DPHY_SYM1_MASK 0x3ff
+#define DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP_DPHY_SYM0__DPHY_SYM2_MASK 0xffc00
+#define DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3ff00000
+#define DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP_DPHY_SYM1__DPHY_SYM4_MASK 0x3ff
+#define DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP_DPHY_SYM1__DPHY_SYM5_MASK 0xffc00
+#define DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3ff00000
+#define DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP_DPHY_SYM2__DPHY_SYM7_MASK 0x3ff
+#define DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP_DPHY_SYM2__DPHY_SYM8_MASK 0xffc00
+#define DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x100
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x10000
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x1000000
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x1
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x30
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x10000
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x100
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x1
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x30
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0xff0000
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0xff
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0xff00
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0xff0000
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xff000000
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x3f
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x3f00
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x1
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x100
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x10000
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x1
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x2
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x4
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0xfff00
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xfff00000
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x7
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x10
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x100
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x1000
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x7
+#define DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN_MASK 0x1
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN__SHIFT 0x0
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE_MASK 0x3fff0
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE__SHIFT 0x4
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE_MASK 0x3fff
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE__SHIFT 0x0
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE_MASK 0x3fff0000
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE__SHIFT 0x10
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x1
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x10
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x100
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x1000
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x10000
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x100000
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x200000
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x400000
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x800000
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE_MASK 0x1000000
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE__SHIFT 0x18
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x1
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x10
+#define DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x20
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x40
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x80
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xffff0000
+#define DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0xfff
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xffff0000
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0xffff
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xffff0000
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x3fff
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xffff0000
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x100000
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x1000000
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0xffffff
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0xffffff
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0xffffff
+#define DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0xffffff
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x1
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0xe
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x10
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x3f00
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x10000
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x3ffffff
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xfc000000
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x1
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x7
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x3f00
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x70000
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3f000000
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x7
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x3f00
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x70000
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3f000000
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x7
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x3f00
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x70000
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3f000000
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x3
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x100
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x3ff
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x30000
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x1
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x10
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x100
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP_MSE_MISC_CNTL__DP_MSE_OUTPUT_DPDBG_DATA_MASK 0x10000
+#define DP_MSE_MISC_CNTL__DP_MSE_OUTPUT_DPDBG_DATA__SHIFT 0x10
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x7
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x3f00
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x70000
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3f000000
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x7
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x3f00
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x70000
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3f000000
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x7
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x3f00
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x70000
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3f000000
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX_MASK 0xff
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA__SHIFT 0x0
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_INDEX_MASK 0xff
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DP_FE_TEST_DEBUG_DATA__DP_FE_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DP_FE_TEST_DEBUG_DATA__DP_FE_TEST_DEBUG_DATA__SHIFT 0x0
+#define AUX_CONTROL__AUX_EN_MASK 0x1
+#define AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define AUX_CONTROL__AUX_RESET_MASK 0x10
+#define AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define AUX_CONTROL__AUX_RESET_DONE_MASK 0x20
+#define AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define AUX_CONTROL__AUX_LS_READ_EN_MASK 0x100
+#define AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x1000
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x10000
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x40000
+#define AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define AUX_CONTROL__AUX_HPD_SEL_MASK 0x700000
+#define AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x1000000
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000
+#define AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000
+#define AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define AUX_CONTROL__SPARE_0_MASK 0x40000000
+#define AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define AUX_CONTROL__SPARE_1_MASK 0x80000000
+#define AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define AUX_SW_CONTROL__AUX_SW_GO_MASK 0x1
+#define AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x4
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0xf0
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x1f0000
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x3
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0xc
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x100
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x400
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x10000
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x10000
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x20000
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x1000000
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x1000000
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x2000000
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x1
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x2
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x4
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x10
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x20
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x40
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x100
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x200
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x400
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x1000
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x2000
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x4000
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define AUX_SW_STATUS__AUX_SW_DONE_MASK 0x1
+#define AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define AUX_SW_STATUS__AUX_SW_REQ_MASK 0x2
+#define AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x70
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x80
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x100
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x200
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x400
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x800
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x1000
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x4000
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x20000
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x40000
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x80000
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x100000
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x400000
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x800000
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1f000000
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xc0000000
+#define AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1e
+#define AUX_LS_STATUS__AUX_LS_DONE_MASK 0x1
+#define AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define AUX_LS_STATUS__AUX_LS_REQ_MASK 0x2
+#define AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x70
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x80
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x100
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x200
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x400
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x800
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x1000
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x4000
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x20000
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x40000
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x80000
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x100000
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x400000
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x800000
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1f000000
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000
+#define AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x1
+#define AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define AUX_SW_DATA__AUX_SW_DATA_MASK 0xff00
+#define AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define AUX_SW_DATA__AUX_SW_INDEX_MASK 0x1f0000
+#define AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define AUX_LS_DATA__AUX_LS_DATA_MASK 0xff00
+#define AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define AUX_LS_DATA__AUX_LS_INDEX_MASK 0x1f0000
+#define AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x1
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x30
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x1ff0000
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x7
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x3f00
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x70000
+#define AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x70
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x700
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x3000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x10000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x20000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x40000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x80000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x300000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN_MASK 0x7000000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN__SHIFT 0x18
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0xff
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x1
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x70
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x1ff0000
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x7
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x1f00
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x1f0000
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3fe00000
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x1f
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x1f00
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x30000
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x300000
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x1
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x10
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x100
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x1e00
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x10000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x100000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x200000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x400000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x800000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x1000000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x2000000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xf0000000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x1
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x2
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x70
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x80
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x100
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x200
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x400
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x800
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x1000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x4000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x20000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x40000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x80000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x100000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x400000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x800000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1f000000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_INDEX_MASK 0xff
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_INDEX__SHIFT 0x0
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define AUX_TEST_DEBUG_DATA__AUX_TEST_DEBUG_DATA_MASK 0xffffffff
+#define AUX_TEST_DEBUG_DATA__AUX_TEST_DEBUG_DATA__SHIFT 0x0
+#define DP_AUX_DEBUG_A__DP_AUX_DEBUG_A_MASK 0xffffffff
+#define DP_AUX_DEBUG_A__DP_AUX_DEBUG_A__SHIFT 0x0
+#define DP_AUX_DEBUG_B__DP_AUX_DEBUG_B_MASK 0xffffffff
+#define DP_AUX_DEBUG_B__DP_AUX_DEBUG_B__SHIFT 0x0
+#define DP_AUX_DEBUG_C__DP_AUX_DEBUG_C_MASK 0xffffffff
+#define DP_AUX_DEBUG_C__DP_AUX_DEBUG_C__SHIFT 0x0
+#define DP_AUX_DEBUG_D__DP_AUX_DEBUG_D_MASK 0xffffffff
+#define DP_AUX_DEBUG_D__DP_AUX_DEBUG_D__SHIFT 0x0
+#define DP_AUX_DEBUG_E__DP_AUX_DEBUG_E_MASK 0xffffffff
+#define DP_AUX_DEBUG_E__DP_AUX_DEBUG_E__SHIFT 0x0
+#define DP_AUX_DEBUG_F__DP_AUX_DEBUG_F_MASK 0xffffffff
+#define DP_AUX_DEBUG_F__DP_AUX_DEBUG_F__SHIFT 0x0
+#define DP_AUX_DEBUG_G__DP_AUX_DEBUG_G_MASK 0xffffffff
+#define DP_AUX_DEBUG_G__DP_AUX_DEBUG_G__SHIFT 0x0
+#define DP_AUX_DEBUG_H__DP_AUX_DEBUG_H_MASK 0xffffffff
+#define DP_AUX_DEBUG_H__DP_AUX_DEBUG_H__SHIFT 0x0
+#define DP_AUX_DEBUG_I__DP_AUX_DEBUG_I_MASK 0xffffffff
+#define DP_AUX_DEBUG_I__DP_AUX_DEBUG_I__SHIFT 0x0
+#define DP_AUX_DEBUG_J__DP_AUX_DEBUG_J_MASK 0xffffffff
+#define DP_AUX_DEBUG_J__DP_AUX_DEBUG_J__SHIFT 0x0
+#define DP_AUX_DEBUG_K__DP_AUX_DEBUG_K_MASK 0xffffffff
+#define DP_AUX_DEBUG_K__DP_AUX_DEBUG_K__SHIFT 0x0
+#define DP_AUX_DEBUG_L__DP_AUX_DEBUG_L_MASK 0xffffffff
+#define DP_AUX_DEBUG_L__DP_AUX_DEBUG_L__SHIFT 0x0
+#define DP_AUX_DEBUG_M__DP_AUX_DEBUG_M_MASK 0xffffffff
+#define DP_AUX_DEBUG_M__DP_AUX_DEBUG_M__SHIFT 0x0
+#define DP_AUX_DEBUG_N__DP_AUX_DEBUG_N_MASK 0xffffffff
+#define DP_AUX_DEBUG_N__DP_AUX_DEBUG_N__SHIFT 0x0
+#define DP_AUX_DEBUG_O__DP_AUX_DEBUG_O_MASK 0xffffffff
+#define DP_AUX_DEBUG_O__DP_AUX_DEBUG_O__SHIFT 0x0
+#define DP_AUX_DEBUG_P__DP_AUX_DEBUG_P_MASK 0xffffffff
+#define DP_AUX_DEBUG_P__DP_AUX_DEBUG_P__SHIFT 0x0
+#define DP_AUX_DEBUG_Q__DP_AUX_DEBUG_Q_MASK 0xffffffff
+#define DP_AUX_DEBUG_Q__DP_AUX_DEBUG_Q__SHIFT 0x0
+#define DVO_ENABLE__DVO_ENABLE_MASK 0x1
+#define DVO_ENABLE__DVO_ENABLE__SHIFT 0x0
+#define DVO_ENABLE__DVO_PIXEL_WIDTH_MASK 0x30
+#define DVO_ENABLE__DVO_PIXEL_WIDTH__SHIFT 0x4
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT_MASK 0x7
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT__SHIFT 0x0
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT_MASK 0x70000
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT__SHIFT 0x10
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE_MASK 0x3
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE__SHIFT 0x0
+#define DVO_OUTPUT__DVO_CLOCK_MODE_MASK 0x100
+#define DVO_OUTPUT__DVO_CLOCK_MODE__SHIFT 0x8
+#define DVO_CONTROL__DVO_RATE_SELECT_MASK 0x1
+#define DVO_CONTROL__DVO_RATE_SELECT__SHIFT 0x0
+#define DVO_CONTROL__DVO_SDRCLK_SEL_MASK 0x2
+#define DVO_CONTROL__DVO_SDRCLK_SEL__SHIFT 0x1
+#define DVO_CONTROL__DVO_DVPDATA_WIDTH_MASK 0x30
+#define DVO_CONTROL__DVO_DVPDATA_WIDTH__SHIFT 0x4
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN_MASK 0x100
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN__SHIFT 0x8
+#define DVO_CONTROL__DVO_RESET_FIFO_MASK 0x10000
+#define DVO_CONTROL__DVO_RESET_FIFO__SHIFT 0x10
+#define DVO_CONTROL__DVO_SYNC_PHASE_MASK 0x20000
+#define DVO_CONTROL__DVO_SYNC_PHASE__SHIFT 0x11
+#define DVO_CONTROL__DVO_INVERT_DVOCLK_MASK 0x40000
+#define DVO_CONTROL__DVO_INVERT_DVOCLK__SHIFT 0x12
+#define DVO_CONTROL__DVO_HSYNC_POLARITY_MASK 0x100000
+#define DVO_CONTROL__DVO_HSYNC_POLARITY__SHIFT 0x14
+#define DVO_CONTROL__DVO_VSYNC_POLARITY_MASK 0x200000
+#define DVO_CONTROL__DVO_VSYNC_POLARITY__SHIFT 0x15
+#define DVO_CONTROL__DVO_DE_POLARITY_MASK 0x400000
+#define DVO_CONTROL__DVO_DE_POLARITY__SHIFT 0x16
+#define DVO_CONTROL__DVO_COLOR_FORMAT_MASK 0x3000000
+#define DVO_CONTROL__DVO_COLOR_FORMAT__SHIFT 0x18
+#define DVO_CONTROL__DVO_CTL3_MASK 0x80000000
+#define DVO_CONTROL__DVO_CTL3__SHIFT 0x1f
+#define DVO_CRC_EN__DVO_CRC2_EN_MASK 0x10000
+#define DVO_CRC_EN__DVO_CRC2_EN__SHIFT 0x10
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK_MASK 0x7ffffff
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK__SHIFT 0x0
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT_MASK 0x7ffffff
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT__SHIFT 0x0
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR_MASK 0x1
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL_MASK 0x2
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL_MASK 0xfc
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK_MASK 0x100
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK__SHIFT 0x8
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL_MASK 0xfc00
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL_MASK 0xf0000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL_MASK 0x3c00000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED_MASK 0x20000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED__SHIFT 0x1d
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_INDEX_MASK 0xff
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DVO_TEST_DEBUG_DATA__DVO_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DVO_TEST_DEBUG_DATA__DVO_TEST_DEBUG_DATA__SHIFT 0x0
+#define FBC_CNTL__FBC_GRPH_COMP_EN_MASK 0x1
+#define FBC_CNTL__FBC_GRPH_COMP_EN__SHIFT 0x0
+#define FBC_CNTL__FBC_SRC_SEL_MASK 0xe
+#define FBC_CNTL__FBC_SRC_SEL__SHIFT 0x1
+#define FBC_CNTL__FBC_COMP_CLK_GATE_EN_MASK 0x100
+#define FBC_CNTL__FBC_COMP_CLK_GATE_EN__SHIFT 0x8
+#define FBC_CNTL__FBC_DECOMP_CLK_GATE_EN_MASK 0x400
+#define FBC_CNTL__FBC_DECOMP_CLK_GATE_EN__SHIFT 0xa
+#define FBC_CNTL__FBC_COHERENCY_MODE_MASK 0x30000
+#define FBC_CNTL__FBC_COHERENCY_MODE__SHIFT 0x10
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN_MASK 0x2000000
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN__SHIFT 0x19
+#define FBC_CNTL__FBC_EN_MASK 0x80000000
+#define FBC_CNTL__FBC_EN__SHIFT 0x1f
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK_MASK 0xffffffff
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK__SHIFT 0x0
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY_MASK 0x1f
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY__SHIFT 0x0
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY_MASK 0x80
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY__SHIFT 0x7
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY_MASK 0x1f00
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY__SHIFT 0x8
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION_MASK 0xf
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION__SHIFT 0x0
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN_MASK 0x10000
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN__SHIFT 0x10
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN_MASK 0x20000
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN__SHIFT 0x11
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN_MASK 0x40000
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN__SHIFT 0x12
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN_MASK 0x80000
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN__SHIFT 0x13
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN_MASK 0x100000
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN__SHIFT 0x14
+#define FBC_COMP_MODE__FBC_RLE_EN_MASK 0x1
+#define FBC_COMP_MODE__FBC_RLE_EN__SHIFT 0x0
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN_MASK 0x100
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN__SHIFT 0x8
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN_MASK 0x200
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN__SHIFT 0x9
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN_MASK 0x400
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN__SHIFT 0xa
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN_MASK 0x800
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN__SHIFT 0xb
+#define FBC_COMP_MODE__FBC_IND_EN_MASK 0x10000
+#define FBC_COMP_MODE__FBC_IND_EN__SHIFT 0x10
+#define FBC_DEBUG0__FBC_PERF_MUX0_MASK 0xff
+#define FBC_DEBUG0__FBC_PERF_MUX0__SHIFT 0x0
+#define FBC_DEBUG0__FBC_PERF_MUX1_MASK 0xff00
+#define FBC_DEBUG0__FBC_PERF_MUX1__SHIFT 0x8
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS_MASK 0x10000
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS__SHIFT 0x10
+#define FBC_DEBUG0__FBC_DEBUG0_MASK 0xfe0000
+#define FBC_DEBUG0__FBC_DEBUG0__SHIFT 0x11
+#define FBC_DEBUG0__FBC_DEBUG_MUX_MASK 0xff000000
+#define FBC_DEBUG0__FBC_DEBUG_MUX__SHIFT 0x18
+#define FBC_DEBUG1__FBC_DEBUG1_MASK 0xffffffff
+#define FBC_DEBUG1__FBC_DEBUG1__SHIFT 0x0
+#define FBC_DEBUG2__FBC_DEBUG2_MASK 0xffffffff
+#define FBC_DEBUG2__FBC_DEBUG2__SHIFT 0x0
+#define FBC_IND_LUT0__FBC_IND_LUT0_MASK 0xffffffff
+#define FBC_IND_LUT0__FBC_IND_LUT0__SHIFT 0x0
+#define FBC_IND_LUT1__FBC_IND_LUT1_MASK 0xffffffff
+#define FBC_IND_LUT1__FBC_IND_LUT1__SHIFT 0x0
+#define FBC_IND_LUT2__FBC_IND_LUT2_MASK 0xffffffff
+#define FBC_IND_LUT2__FBC_IND_LUT2__SHIFT 0x0
+#define FBC_IND_LUT3__FBC_IND_LUT3_MASK 0xffffffff
+#define FBC_IND_LUT3__FBC_IND_LUT3__SHIFT 0x0
+#define FBC_IND_LUT4__FBC_IND_LUT4_MASK 0xffffffff
+#define FBC_IND_LUT4__FBC_IND_LUT4__SHIFT 0x0
+#define FBC_IND_LUT5__FBC_IND_LUT5_MASK 0xffffffff
+#define FBC_IND_LUT5__FBC_IND_LUT5__SHIFT 0x0
+#define FBC_IND_LUT6__FBC_IND_LUT6_MASK 0xffffffff
+#define FBC_IND_LUT6__FBC_IND_LUT6__SHIFT 0x0
+#define FBC_IND_LUT7__FBC_IND_LUT7_MASK 0xffffffff
+#define FBC_IND_LUT7__FBC_IND_LUT7__SHIFT 0x0
+#define FBC_IND_LUT8__FBC_IND_LUT8_MASK 0xffffffff
+#define FBC_IND_LUT8__FBC_IND_LUT8__SHIFT 0x0
+#define FBC_IND_LUT9__FBC_IND_LUT9_MASK 0xffffffff
+#define FBC_IND_LUT9__FBC_IND_LUT9__SHIFT 0x0
+#define FBC_IND_LUT10__FBC_IND_LUT10_MASK 0xffffffff
+#define FBC_IND_LUT10__FBC_IND_LUT10__SHIFT 0x0
+#define FBC_IND_LUT11__FBC_IND_LUT11_MASK 0xffffffff
+#define FBC_IND_LUT11__FBC_IND_LUT11__SHIFT 0x0
+#define FBC_IND_LUT12__FBC_IND_LUT12_MASK 0xffffffff
+#define FBC_IND_LUT12__FBC_IND_LUT12__SHIFT 0x0
+#define FBC_IND_LUT13__FBC_IND_LUT13_MASK 0xffffffff
+#define FBC_IND_LUT13__FBC_IND_LUT13__SHIFT 0x0
+#define FBC_IND_LUT14__FBC_IND_LUT14_MASK 0xffffffff
+#define FBC_IND_LUT14__FBC_IND_LUT14__SHIFT 0x0
+#define FBC_IND_LUT15__FBC_IND_LUT15_MASK 0xffffffff
+#define FBC_IND_LUT15__FBC_IND_LUT15__SHIFT 0x0
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0_MASK 0xfff
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0__SHIFT 0x0
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1_MASK 0xfff0000
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1__SHIFT 0x10
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2_MASK 0xfff
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2__SHIFT 0x0
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3_MASK 0xfff0000
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3__SHIFT 0x10
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK_MASK 0xf0000
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK__SHIFT 0x10
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP_MASK 0x3
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP__SHIFT 0x0
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE_MASK 0x8
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE__SHIFT 0x3
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS_MASK 0xf0
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS__SHIFT 0x4
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL_MASK 0x300
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL__SHIFT 0x8
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE_MASK 0x400
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE__SHIFT 0xa
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE_MASK 0x800
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE__SHIFT 0xb
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR_MASK 0xfff
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR__SHIFT 0x0
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA_MASK 0x10000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA__SHIFT 0x10
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA_MASK 0x20000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA__SHIFT 0x11
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN_MASK 0x80000000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN__SHIFT 0x1f
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA_MASK 0xffffffff
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA__SHIFT 0x0
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA_MASK 0xffffffff
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA__SHIFT 0x0
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI_MASK 0xff
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI__SHIFT 0x0
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI_MASK 0xff
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI__SHIFT 0x0
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_MASK 0x3
+#define FBC_MISC__FBC_DECOMPRESS_ERROR__SHIFT 0x0
+#define FBC_MISC__FBC_STOP_ON_ERROR_MASK 0x4
+#define FBC_MISC__FBC_STOP_ON_ERROR__SHIFT 0x2
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR_MASK 0x8
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR__SHIFT 0x3
+#define FBC_MISC__FBC_ERROR_PIXEL_MASK 0xf0
+#define FBC_MISC__FBC_ERROR_PIXEL__SHIFT 0x4
+#define FBC_MISC__FBC_DIVIDE_X_MASK 0x300
+#define FBC_MISC__FBC_DIVIDE_X__SHIFT 0x8
+#define FBC_MISC__FBC_DIVIDE_Y_MASK 0x400
+#define FBC_MISC__FBC_DIVIDE_Y__SHIFT 0xa
+#define FBC_MISC__FBC_RSM_WRITE_VALUE_MASK 0x800
+#define FBC_MISC__FBC_RSM_WRITE_VALUE__SHIFT 0xb
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY_MASK 0x1000
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY__SHIFT 0xc
+#define FBC_MISC__FBC_STOP_ON_HFLIP_EVENT_MASK 0x2000
+#define FBC_MISC__FBC_STOP_ON_HFLIP_EVENT__SHIFT 0xd
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR_MASK 0x10000
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR__SHIFT 0x10
+#define FBC_MISC__FBC_RESET_AT_ENABLE_MASK 0x100000
+#define FBC_MISC__FBC_RESET_AT_ENABLE__SHIFT 0x14
+#define FBC_MISC__FBC_RESET_AT_DISABLE_MASK 0x200000
+#define FBC_MISC__FBC_RESET_AT_DISABLE__SHIFT 0x15
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL_MASK 0x1f000000
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL__SHIFT 0x18
+#define FBC_MISC__FBC_FORCE_DECOMPRESSOR_EN_MASK 0x80000000
+#define FBC_MISC__FBC_FORCE_DECOMPRESSOR_EN__SHIFT 0x1f
+#define FBC_STATUS__FBC_ENABLE_STATUS_MASK 0x1
+#define FBC_STATUS__FBC_ENABLE_STATUS__SHIFT 0x0
+#define FBC_ALPHA_CNTL__FBC_ALPHA_COMP_EN_MASK 0x1
+#define FBC_ALPHA_CNTL__FBC_ALPHA_COMP_EN__SHIFT 0x0
+#define FBC_ALPHA_CNTL__FBC_FORCE_COPY_TO_COMP_BUF_MASK 0x10
+#define FBC_ALPHA_CNTL__FBC_FORCE_COPY_TO_COMP_BUF__SHIFT 0x4
+#define FBC_ALPHA_CNTL__FBC_ZERO_ALPHA_CHUNK_SKIP_EN_MASK 0x100
+#define FBC_ALPHA_CNTL__FBC_ZERO_ALPHA_CHUNK_SKIP_EN__SHIFT 0x8
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_R_VAL_MASK 0xff
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_R_VAL__SHIFT 0x0
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_G_VAL_MASK 0xff000
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_G_VAL__SHIFT 0xc
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_B_VAL_MASK 0xff000000
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_B_VAL__SHIFT 0x18
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX_MASK 0xff
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0xffff
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xffff0000
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0xffff
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xffff0000
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0xffff
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xffff0000
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x1
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x10
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x1
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL_MASK 0x10
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL__SHIFT 0x4
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0xf00
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x3000
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x30000
+#define FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0xc0000
+#define FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x100000
+#define FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x200000
+#define FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT_CONTROL__FMT_SRC_SELECT_MASK 0x7000000
+#define FMT_CONTROL__FMT_SRC_SELECT__SHIFT 0x18
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED_MASK 0x40000000
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED__SHIFT 0x1e
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED_CLEAR_MASK 0x80000000
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED_CLEAR__SHIFT 0x1f
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x1
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x2
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x30
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x100
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x600
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x1800
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x2000
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x4000
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x8000
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x10000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x60000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x600000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x1000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x2000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0xc000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xc0000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0xff
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xffff0000
+#define FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0xff
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xffff0000
+#define FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0xff
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xffff0000
+#define FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x1
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x70000
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT_CRC_CNTL__FMT_CRC_EN_MASK 0x1
+#define FMT_CRC_CNTL__FMT_CRC_EN__SHIFT 0x0
+#define FMT_CRC_CNTL__FMT_DTMTEST_CRC_EN_MASK 0x2
+#define FMT_CRC_CNTL__FMT_DTMTEST_CRC_EN__SHIFT 0x1
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN_MASK 0x10
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN__SHIFT 0x4
+#define FMT_CRC_CNTL__FMT_ONE_SHOT_CRC_PENDING_MASK 0x20
+#define FMT_CRC_CNTL__FMT_ONE_SHOT_CRC_PENDING__SHIFT 0x5
+#define FMT_CRC_CNTL__FMT_CRC_INCLUDE_OVERSCAN_MASK 0x40
+#define FMT_CRC_CNTL__FMT_CRC_INCLUDE_OVERSCAN__SHIFT 0x6
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKB_MASK 0x100
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKB__SHIFT 0x8
+#define FMT_CRC_CNTL__FMT_CRC_PSR_MODE_ENABLE_MASK 0x200
+#define FMT_CRC_CNTL__FMT_CRC_PSR_MODE_ENABLE__SHIFT 0x9
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE_MASK 0x3000
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE__SHIFT 0xc
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x10000
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x10
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE_MASK 0x100000
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE__SHIFT 0x14
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT_MASK 0x1000000
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT__SHIFT 0x18
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK_MASK 0xffff
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK__SHIFT 0x0
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK_MASK 0xffff0000
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK_MASK 0xffff
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK_MASK 0xffff0000
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED_MASK 0xffff
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED__SHIFT 0x0
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN_MASK 0xffff0000
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN__SHIFT 0x10
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE_MASK 0xffff
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE__SHIFT 0x0
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL_MASK 0xffff0000
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL__SHIFT 0x10
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT_MASK 0x3
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT__SHIFT 0x0
+#define FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x1fff
+#define FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT_420_HBLANK_EARLY_START__FMT_420_HBLANK_EARLY_START_MASK 0xfff
+#define FMT_420_HBLANK_EARLY_START__FMT_420_HBLANK_EARLY_START__SHIFT 0x0
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX_MASK 0xff
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX__SHIFT 0x0
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA_MASK 0xffffffff
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA__SHIFT 0x0
+#define FMT_DEBUG0__FMT_DEBUG0_MASK 0xffffffff
+#define FMT_DEBUG0__FMT_DEBUG0__SHIFT 0x0
+#define FMT_DEBUG1__FMT_DEBUG1_MASK 0xffffffff
+#define FMT_DEBUG1__FMT_DEBUG1__SHIFT 0x0
+#define FMT_DEBUG2__FMT_DEBUG2_MASK 0xffffffff
+#define FMT_DEBUG2__FMT_DEBUG2__SHIFT 0x0
+#define FMT_DEBUG3__FMT_DEBUG3_MASK 0xffffffff
+#define FMT_DEBUG3__FMT_DEBUG3__SHIFT 0x0
+#define FMT_DEBUG_ID__FMT_DEBUG_ID_MASK 0xffffffff
+#define FMT_DEBUG_ID__FMT_DEBUG_ID__SHIFT 0x0
+#define LB_DATA_FORMAT__PIXEL_DEPTH_MASK 0x3
+#define LB_DATA_FORMAT__PIXEL_DEPTH__SHIFT 0x0
+#define LB_DATA_FORMAT__PIXEL_EXPAN_MODE_MASK 0x4
+#define LB_DATA_FORMAT__PIXEL_EXPAN_MODE__SHIFT 0x2
+#define LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x8
+#define LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x3
+#define LB_DATA_FORMAT__PIXEL_REDUCE_MODE_MASK 0x10
+#define LB_DATA_FORMAT__PIXEL_REDUCE_MODE__SHIFT 0x4
+#define LB_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH_MASK 0x20
+#define LB_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH__SHIFT 0x5
+#define LB_DATA_FORMAT__PREFILL_EN_MASK 0x100
+#define LB_DATA_FORMAT__PREFILL_EN__SHIFT 0x8
+#define LB_DATA_FORMAT__PREFETCH_MASK 0x1000
+#define LB_DATA_FORMAT__PREFETCH__SHIFT 0xc
+#define LB_DATA_FORMAT__REQUEST_MODE_MASK 0x1000000
+#define LB_DATA_FORMAT__REQUEST_MODE__SHIFT 0x18
+#define LB_DATA_FORMAT__ALPHA_EN_MASK 0x80000000
+#define LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x1f
+#define LB_MEMORY_CTRL__LB_MEMORY_SIZE_MASK 0x1fff
+#define LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT 0x0
+#define LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0xf0000
+#define LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define LB_MEMORY_CTRL__LB_MEMORY_CONFIG_MASK 0x300000
+#define LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT 0x14
+#define LB_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS_MASK 0x1fff
+#define LB_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS__SHIFT 0x0
+#define LB_DESKTOP_HEIGHT__DESKTOP_HEIGHT_MASK 0x7fff
+#define LB_DESKTOP_HEIGHT__DESKTOP_HEIGHT__SHIFT 0x0
+#define LB_VLINE_START_END__VLINE_START_MASK 0x3fff
+#define LB_VLINE_START_END__VLINE_START__SHIFT 0x0
+#define LB_VLINE_START_END__VLINE_END_MASK 0x7fff0000
+#define LB_VLINE_START_END__VLINE_END__SHIFT 0x10
+#define LB_VLINE_START_END__VLINE_INV_MASK 0x80000000
+#define LB_VLINE_START_END__VLINE_INV__SHIFT 0x1f
+#define LB_VLINE2_START_END__VLINE2_START_MASK 0x3fff
+#define LB_VLINE2_START_END__VLINE2_START__SHIFT 0x0
+#define LB_VLINE2_START_END__VLINE2_END_MASK 0x7fff0000
+#define LB_VLINE2_START_END__VLINE2_END__SHIFT 0x10
+#define LB_VLINE2_START_END__VLINE2_INV_MASK 0x80000000
+#define LB_VLINE2_START_END__VLINE2_INV__SHIFT 0x1f
+#define LB_V_COUNTER__V_COUNTER_MASK 0x7fff
+#define LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define LB_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER_MASK 0x7fff
+#define LB_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER__SHIFT 0x0
+#define LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK 0x1
+#define LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK__SHIFT 0x0
+#define LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK 0x10
+#define LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK__SHIFT 0x4
+#define LB_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK_MASK 0x100
+#define LB_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK__SHIFT 0x8
+#define LB_VLINE_STATUS__VLINE_OCCURRED_MASK 0x1
+#define LB_VLINE_STATUS__VLINE_OCCURRED__SHIFT 0x0
+#define LB_VLINE_STATUS__VLINE_ACK_MASK 0x10
+#define LB_VLINE_STATUS__VLINE_ACK__SHIFT 0x4
+#define LB_VLINE_STATUS__VLINE_STAT_MASK 0x1000
+#define LB_VLINE_STATUS__VLINE_STAT__SHIFT 0xc
+#define LB_VLINE_STATUS__VLINE_INTERRUPT_MASK 0x10000
+#define LB_VLINE_STATUS__VLINE_INTERRUPT__SHIFT 0x10
+#define LB_VLINE_STATUS__VLINE_INTERRUPT_TYPE_MASK 0x20000
+#define LB_VLINE_STATUS__VLINE_INTERRUPT_TYPE__SHIFT 0x11
+#define LB_VLINE2_STATUS__VLINE2_OCCURRED_MASK 0x1
+#define LB_VLINE2_STATUS__VLINE2_OCCURRED__SHIFT 0x0
+#define LB_VLINE2_STATUS__VLINE2_ACK_MASK 0x10
+#define LB_VLINE2_STATUS__VLINE2_ACK__SHIFT 0x4
+#define LB_VLINE2_STATUS__VLINE2_STAT_MASK 0x1000
+#define LB_VLINE2_STATUS__VLINE2_STAT__SHIFT 0xc
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT_MASK 0x10000
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT__SHIFT 0x10
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE_MASK 0x20000
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE__SHIFT 0x11
+#define LB_VBLANK_STATUS__VBLANK_OCCURRED_MASK 0x1
+#define LB_VBLANK_STATUS__VBLANK_OCCURRED__SHIFT 0x0
+#define LB_VBLANK_STATUS__VBLANK_ACK_MASK 0x10
+#define LB_VBLANK_STATUS__VBLANK_ACK__SHIFT 0x4
+#define LB_VBLANK_STATUS__VBLANK_STAT_MASK 0x1000
+#define LB_VBLANK_STATUS__VBLANK_STAT__SHIFT 0xc
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT_MASK 0x10000
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT__SHIFT 0x10
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE_MASK 0x20000
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE__SHIFT 0x11
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL_MASK 0x3
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL__SHIFT 0x0
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2_MASK 0x10
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2__SHIFT 0x4
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY_MASK 0xff00
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY__SHIFT 0x8
+#define LB_SYNC_RESET_SEL__LB_SYNC_DURATION_MASK 0xc00000
+#define LB_SYNC_RESET_SEL__LB_SYNC_DURATION__SHIFT 0x16
+#define LB_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR_MASK 0xfff0
+#define LB_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR__SHIFT 0x4
+#define LB_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y_MASK 0xfff0
+#define LB_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y__SHIFT 0x4
+#define LB_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB_MASK 0xfff0
+#define LB_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB__SHIFT 0x4
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN_MASK 0x1
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN__SHIFT 0x0
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN_MASK 0x100
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN__SHIFT 0x8
+#define LB_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR_MASK 0xfff0
+#define LB_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR__SHIFT 0x4
+#define LB_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y_MASK 0xfff0
+#define LB_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y__SHIFT 0x4
+#define LB_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB_MASK 0xfff0
+#define LB_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB__SHIFT 0x4
+#define LB_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR_MASK 0xfff0
+#define LB_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR__SHIFT 0x4
+#define LB_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y_MASK 0xfff0
+#define LB_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y__SHIFT 0x4
+#define LB_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB_MASK 0xfff0
+#define LB_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB__SHIFT 0x4
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL_MASK 0x3f
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL__SHIFT 0x0
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL_MASK 0xfc00
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL__SHIFT 0xa
+#define LB_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL_MASK 0xfff0000
+#define LB_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL__SHIFT 0x10
+#define LB_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL_MASK 0xf0000000
+#define LB_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL__SHIFT 0x1c
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON_MASK 0xfff
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON__SHIFT 0x0
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF_MASK 0xfff0000
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF__SHIFT 0x10
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL_MASK 0xfff
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL__SHIFT 0x0
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT_MASK 0x10000
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT__SHIFT 0x10
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN_MASK 0xf
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN__SHIFT 0x0
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT_MASK 0x10
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT__SHIFT 0x4
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED_MASK 0x100
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED__SHIFT 0x8
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK_MASK 0x1000
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK__SHIFT 0xc
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_STAT_MASK 0x10000
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_STAT__SHIFT 0x10
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED_MASK 0x100000
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED__SHIFT 0x14
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_ACK_MASK 0x1000000
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_ACK__SHIFT 0x18
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT_MASK 0x1
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT__SHIFT 0x0
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE_MASK 0x3
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE__SHIFT 0x0
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES_MASK 0xf
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES__SHIFT 0x0
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_MASK 0x10
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET__SHIFT 0x4
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG_MASK 0x100
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG__SHIFT 0x8
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK_MASK 0x1000
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK__SHIFT 0xc
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE_MASK 0x3
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE__SHIFT 0x0
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MASK 0x7fff00
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT__SHIFT 0x8
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET_MASK 0x3f000000
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET__SHIFT 0x18
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE_MASK 0x40000000
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE__SHIFT 0x1e
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE_MASK 0x3
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE__SHIFT 0x0
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL_MASK 0x100
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL__SHIFT 0x8
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE_MASK 0x1000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE__SHIFT 0xc
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO_MASK 0x10000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO__SHIFT 0x10
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS_MASK 0x100000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS__SHIFT 0x14
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP_MASK 0x10000000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP__SHIFT 0x1c
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS_MASK 0x80000000
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS__SHIFT 0x1f
+#define LB_DEBUG__LB_DEBUG_MASK 0xffffffff
+#define LB_DEBUG__LB_DEBUG__SHIFT 0x0
+#define LB_DEBUG2__LB_DEBUG2_MASK 0xffffffff
+#define LB_DEBUG2__LB_DEBUG2__SHIFT 0x0
+#define LB_DEBUG3__LB_DEBUG3_MASK 0xffffffff
+#define LB_DEBUG3__LB_DEBUG3__SHIFT 0x0
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX_MASK 0xff
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA_MASK 0xffffffff
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA__SHIFT 0x0
+#define LBV_DATA_FORMAT__PIXEL_DEPTH_MASK 0x3
+#define LBV_DATA_FORMAT__PIXEL_DEPTH__SHIFT 0x0
+#define LBV_DATA_FORMAT__PIXEL_EXPAN_MODE_MASK 0x4
+#define LBV_DATA_FORMAT__PIXEL_EXPAN_MODE__SHIFT 0x2
+#define LBV_DATA_FORMAT__INTERLEAVE_EN_MASK 0x8
+#define LBV_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x3
+#define LBV_DATA_FORMAT__PIXEL_REDUCE_MODE_MASK 0x10
+#define LBV_DATA_FORMAT__PIXEL_REDUCE_MODE__SHIFT 0x4
+#define LBV_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH_MASK 0x20
+#define LBV_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH__SHIFT 0x5
+#define LBV_DATA_FORMAT__DITHER_EN_MASK 0x40
+#define LBV_DATA_FORMAT__DITHER_EN__SHIFT 0x6
+#define LBV_DATA_FORMAT__DOWNSCALE_PREFETCH_EN_MASK 0x80
+#define LBV_DATA_FORMAT__DOWNSCALE_PREFETCH_EN__SHIFT 0x7
+#define LBV_DATA_FORMAT__PREFETCH_MASK 0x1000
+#define LBV_DATA_FORMAT__PREFETCH__SHIFT 0xc
+#define LBV_DATA_FORMAT__REQUEST_MODE_MASK 0x1000000
+#define LBV_DATA_FORMAT__REQUEST_MODE__SHIFT 0x18
+#define LBV_DATA_FORMAT__ALPHA_EN_MASK 0x80000000
+#define LBV_DATA_FORMAT__ALPHA_EN__SHIFT 0x1f
+#define LBV_MEMORY_CTRL__LB_MEMORY_SIZE_MASK 0xfff
+#define LBV_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT 0x0
+#define LBV_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0xf0000
+#define LBV_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define LBV_MEMORY_CTRL__LB_MEMORY_CONFIG_MASK 0x300000
+#define LBV_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT 0x14
+#define LBV_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS_MASK 0xfff
+#define LBV_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS__SHIFT 0x0
+#define LBV_DESKTOP_HEIGHT__DESKTOP_HEIGHT_MASK 0x7fff
+#define LBV_DESKTOP_HEIGHT__DESKTOP_HEIGHT__SHIFT 0x0
+#define LBV_VLINE_START_END__VLINE_START_MASK 0x3fff
+#define LBV_VLINE_START_END__VLINE_START__SHIFT 0x0
+#define LBV_VLINE_START_END__VLINE_END_MASK 0x7fff0000
+#define LBV_VLINE_START_END__VLINE_END__SHIFT 0x10
+#define LBV_VLINE_START_END__VLINE_INV_MASK 0x80000000
+#define LBV_VLINE_START_END__VLINE_INV__SHIFT 0x1f
+#define LBV_VLINE2_START_END__VLINE2_START_MASK 0x3fff
+#define LBV_VLINE2_START_END__VLINE2_START__SHIFT 0x0
+#define LBV_VLINE2_START_END__VLINE2_END_MASK 0x7fff0000
+#define LBV_VLINE2_START_END__VLINE2_END__SHIFT 0x10
+#define LBV_VLINE2_START_END__VLINE2_INV_MASK 0x80000000
+#define LBV_VLINE2_START_END__VLINE2_INV__SHIFT 0x1f
+#define LBV_V_COUNTER__V_COUNTER_MASK 0x7fff
+#define LBV_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define LBV_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER_MASK 0x7fff
+#define LBV_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER__SHIFT 0x0
+#define LBV_V_COUNTER_CHROMA__V_COUNTER_CHROMA_MASK 0x7fff
+#define LBV_V_COUNTER_CHROMA__V_COUNTER_CHROMA__SHIFT 0x0
+#define LBV_SNAPSHOT_V_COUNTER_CHROMA__SNAPSHOT_V_COUNTER_CHROMA_MASK 0x7fff
+#define LBV_SNAPSHOT_V_COUNTER_CHROMA__SNAPSHOT_V_COUNTER_CHROMA__SHIFT 0x0
+#define LBV_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK 0x1
+#define LBV_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK__SHIFT 0x0
+#define LBV_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK 0x10
+#define LBV_INTERRUPT_MASK__VLINE_INTERRUPT_MASK__SHIFT 0x4
+#define LBV_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK_MASK 0x100
+#define LBV_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK__SHIFT 0x8
+#define LBV_VLINE_STATUS__VLINE_OCCURRED_MASK 0x1
+#define LBV_VLINE_STATUS__VLINE_OCCURRED__SHIFT 0x0
+#define LBV_VLINE_STATUS__VLINE_ACK_MASK 0x10
+#define LBV_VLINE_STATUS__VLINE_ACK__SHIFT 0x4
+#define LBV_VLINE_STATUS__VLINE_STAT_MASK 0x1000
+#define LBV_VLINE_STATUS__VLINE_STAT__SHIFT 0xc
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT_MASK 0x10000
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT__SHIFT 0x10
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT_TYPE_MASK 0x20000
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT_TYPE__SHIFT 0x11
+#define LBV_VLINE2_STATUS__VLINE2_OCCURRED_MASK 0x1
+#define LBV_VLINE2_STATUS__VLINE2_OCCURRED__SHIFT 0x0
+#define LBV_VLINE2_STATUS__VLINE2_ACK_MASK 0x10
+#define LBV_VLINE2_STATUS__VLINE2_ACK__SHIFT 0x4
+#define LBV_VLINE2_STATUS__VLINE2_STAT_MASK 0x1000
+#define LBV_VLINE2_STATUS__VLINE2_STAT__SHIFT 0xc
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT_MASK 0x10000
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT__SHIFT 0x10
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE_MASK 0x20000
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE__SHIFT 0x11
+#define LBV_VBLANK_STATUS__VBLANK_OCCURRED_MASK 0x1
+#define LBV_VBLANK_STATUS__VBLANK_OCCURRED__SHIFT 0x0
+#define LBV_VBLANK_STATUS__VBLANK_ACK_MASK 0x10
+#define LBV_VBLANK_STATUS__VBLANK_ACK__SHIFT 0x4
+#define LBV_VBLANK_STATUS__VBLANK_STAT_MASK 0x1000
+#define LBV_VBLANK_STATUS__VBLANK_STAT__SHIFT 0xc
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT_MASK 0x10000
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT__SHIFT 0x10
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE_MASK 0x20000
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE__SHIFT 0x11
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL_MASK 0x3
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL__SHIFT 0x0
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2_MASK 0x10
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2__SHIFT 0x4
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY_MASK 0xff00
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY__SHIFT 0x8
+#define LBV_SYNC_RESET_SEL__LB_SYNC_DURATION_MASK 0xc00000
+#define LBV_SYNC_RESET_SEL__LB_SYNC_DURATION__SHIFT 0x16
+#define LBV_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR_MASK 0xfff0
+#define LBV_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR__SHIFT 0x4
+#define LBV_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y_MASK 0xfff0
+#define LBV_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y__SHIFT 0x4
+#define LBV_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB_MASK 0xfff0
+#define LBV_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB__SHIFT 0x4
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN_MASK 0x1
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN__SHIFT 0x0
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN_MASK 0x100
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN__SHIFT 0x8
+#define LBV_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR_MASK 0xfff0
+#define LBV_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR__SHIFT 0x4
+#define LBV_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y_MASK 0xfff0
+#define LBV_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y__SHIFT 0x4
+#define LBV_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB_MASK 0xfff0
+#define LBV_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB__SHIFT 0x4
+#define LBV_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR_MASK 0xfff0
+#define LBV_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR__SHIFT 0x4
+#define LBV_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y_MASK 0xfff0
+#define LBV_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y__SHIFT 0x4
+#define LBV_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB_MASK 0xfff0
+#define LBV_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB__SHIFT 0x4
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL_MASK 0x3f
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL__SHIFT 0x0
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL_MASK 0xfc00
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL__SHIFT 0xa
+#define LBV_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL_MASK 0xfff0000
+#define LBV_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL__SHIFT 0x10
+#define LBV_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL_MASK 0xf0000000
+#define LBV_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL__SHIFT 0x1c
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON_MASK 0xfff
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON__SHIFT 0x0
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF_MASK 0xfff0000
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF__SHIFT 0x10
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL_MASK 0xfff
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL__SHIFT 0x0
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT_MASK 0x10000
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT__SHIFT 0x10
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN_MASK 0xf
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN__SHIFT 0x0
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT_MASK 0x10
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT__SHIFT 0x4
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED_MASK 0x100
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED__SHIFT 0x8
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK_MASK 0x1000
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK__SHIFT 0xc
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_STAT_MASK 0x10000
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_STAT__SHIFT 0x10
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED_MASK 0x100000
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED__SHIFT 0x14
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_ACK_MASK 0x1000000
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_ACK__SHIFT 0x18
+#define LBV_BUFFER_STATUS__LB_ENABLE_HIGH_THROUGHPUT_MASK 0x2000000
+#define LBV_BUFFER_STATUS__LB_ENABLE_HIGH_THROUGHPUT__SHIFT 0x19
+#define LBV_BUFFER_STATUS__LB_HIGH_THROUGHPUT_CNTL_MASK 0x1c000000
+#define LBV_BUFFER_STATUS__LB_HIGH_THROUGHPUT_CNTL__SHIFT 0x1a
+#define LBV_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT_MASK 0x1
+#define LBV_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT__SHIFT 0x0
+#define LBV_DEBUG__LB_DEBUG_MASK 0xffffffff
+#define LBV_DEBUG__LB_DEBUG__SHIFT 0x0
+#define LBV_DEBUG2__LB_DEBUG2_MASK 0xffffffff
+#define LBV_DEBUG2__LB_DEBUG2__SHIFT 0x0
+#define LBV_DEBUG3__LB_DEBUG3_MASK 0xffffffff
+#define LBV_DEBUG3__LB_DEBUG3__SHIFT 0x0
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX_MASK 0xff
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define LBV_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA_MASK 0xffffffff
+#define LBV_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA__SHIFT 0x0
+#define MVP_CONTROL1__MVP_EN_MASK 0x1
+#define MVP_CONTROL1__MVP_EN__SHIFT 0x0
+#define MVP_CONTROL1__MVP_MIXER_MODE_MASK 0x70
+#define MVP_CONTROL1__MVP_MIXER_MODE__SHIFT 0x4
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_MASK 0x100
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL__SHIFT 0x8
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK_MASK 0x200
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK__SHIFT 0x9
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE_MASK 0x400
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE__SHIFT 0xa
+#define MVP_CONTROL1__MVP_RATE_CONTROL_MASK 0x1000
+#define MVP_CONTROL1__MVP_RATE_CONTROL__SHIFT 0xc
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL_MASK 0x10000
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL__SHIFT 0x10
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION_MASK 0x300000
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION__SHIFT 0x14
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND_MASK 0x1000000
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND__SHIFT 0x18
+#define MVP_CONTROL1__MVP_30BPP_EN_MASK 0x10000000
+#define MVP_CONTROL1__MVP_30BPP_EN__SHIFT 0x1c
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A_MASK 0x40000000
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A__SHIFT 0x1e
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B_MASK 0x80000000
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B__SHIFT 0x1f
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL_MASK 0x1
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL__SHIFT 0x0
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL_MASK 0x10
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL__SHIFT 0x4
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL_MASK 0x100
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL__SHIFT 0x8
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL_MASK 0x1000
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL__SHIFT 0xc
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX_MASK 0x10000
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX__SHIFT 0x10
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN_MASK 0x100000
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN__SHIFT 0x14
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN_MASK 0x1000000
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN__SHIFT 0x18
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR_MASK 0x10000000
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR__SHIFT 0x1c
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM_MASK 0xff
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM__SHIFT 0x0
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM_MASK 0xff00
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM__SHIFT 0x8
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT_MASK 0xff0000
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT__SHIFT 0x10
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL_MASK 0xff
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL__SHIFT 0x0
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_MASK 0x100
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW__SHIFT 0x8
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED_MASK 0x1000
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED__SHIFT 0xc
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK_MASK 0x10000
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK__SHIFT 0x10
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_MASK 0x100000
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW__SHIFT 0x14
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED_MASK 0x1000000
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED__SHIFT 0x18
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK_MASK 0x10000000
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK__SHIFT 0x1c
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK_MASK 0x40000000
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK__SHIFT 0x1e
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS_MASK 0x80000000
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS__SHIFT 0x1f
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED_MASK 0x1fff
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED__SHIFT 0x0
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED_MASK 0x1fff0000
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED__SHIFT 0x10
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL_MASK 0x1
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL__SHIFT 0x0
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN_MASK 0x10
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN__SHIFT 0x4
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP_MASK 0xffffff00
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP__SHIFT 0x8
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R_MASK 0x3ff
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R__SHIFT 0x0
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G_MASK 0xffc00
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G__SHIFT 0xa
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B_MASK 0x3ff00000
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B__SHIFT 0x14
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK_MASK 0xff
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK__SHIFT 0x0
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK_MASK 0xff00
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK__SHIFT 0x8
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK_MASK 0xff0000
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK__SHIFT 0x10
+#define MVP_CRC_CNTL__MVP_CRC_EN_MASK 0x10000000
+#define MVP_CRC_CNTL__MVP_CRC_EN__SHIFT 0x1c
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN_MASK 0x20000000
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN__SHIFT 0x1d
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL_MASK 0x40000000
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL__SHIFT 0x1e
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT_MASK 0xffff
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT__SHIFT 0x0
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT_MASK 0xffff0000
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT__SHIFT 0x10
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT_MASK 0xffff
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT__SHIFT 0x0
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES_MASK 0x1
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES__SHIFT 0x0
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL_MASK 0x10
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL__SHIFT 0x4
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE_MASK 0x100
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE__SHIFT 0x8
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE_MASK 0x1000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE__SHIFT 0xc
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO_MASK 0x10000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO__SHIFT 0x10
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN_MASK 0x100000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN__SHIFT 0x14
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN_MASK 0x1000000
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN__SHIFT 0x18
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP_MASK 0x10000000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP__SHIFT 0x1c
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT_MASK 0x1fff
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT__SHIFT 0x0
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT_MASK 0x1fff0000
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT__SHIFT 0x10
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN_MASK 0x80000000
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN__SHIFT 0x1f
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_MASK 0x1fff
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT__SHIFT 0x0
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET_MASK 0x80000000
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET__SHIFT 0x1f
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN_MASK 0x1
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN__SHIFT 0x0
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN_MASK 0x2
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN__SHIFT 0x1
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL_MASK 0x4
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL__SHIFT 0x2
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL_MASK 0x8
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL__SHIFT 0x3
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP_MASK 0x10
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP__SHIFT 0x4
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP_MASK 0x20
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP__SHIFT 0x5
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR_MASK 0x40
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR__SHIFT 0x6
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY_MASK 0x80
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY__SHIFT 0x7
+#define MVP_DEBUG__MVP_DEBUG_BITS_MASK 0xffffff00
+#define MVP_DEBUG__MVP_DEBUG_BITS__SHIFT 0x8
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX_MASK 0xff
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA__SHIFT 0x0
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H_MASK 0x1
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H__SHIFT 0x0
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_MASK 0x1fffffe
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A__SHIFT 0x1
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H_MASK 0x1
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H__SHIFT 0x0
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_MASK 0x1fffffe
+#define MVP_DEBUG_13__IDED_MVP_DATA_B__SHIFT 0x1
+#define MVP_DEBUG_13__IDED_START_READ_B_MASK 0x2000000
+#define MVP_DEBUG_13__IDED_START_READ_B__SHIFT 0x19
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B_MASK 0x4000000
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B__SHIFT 0x1a
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B_MASK 0x38000000
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B__SHIFT 0x1b
+#define MVP_DEBUG_14__IDEE_READ_ADD_MASK 0x7
+#define MVP_DEBUG_14__IDEE_READ_ADD__SHIFT 0x0
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A_MASK 0x38
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A__SHIFT 0x3
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B_MASK 0x1c0
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B__SHIFT 0x6
+#define MVP_DEBUG_14__IDEE_START_READ_MASK 0x200
+#define MVP_DEBUG_14__IDEE_START_READ__SHIFT 0x9
+#define MVP_DEBUG_14__IDEE_START_READ_B_MASK 0x400
+#define MVP_DEBUG_14__IDEE_START_READ_B__SHIFT 0xa
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A_MASK 0x800
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A__SHIFT 0xb
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B_MASK 0x1000
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B__SHIFT 0xc
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO_MASK 0x2000
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO__SHIFT 0xd
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_MASK 0x4000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE__SHIFT 0xe
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B_MASK 0x8000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B__SHIFT 0xf
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_MASK 0x10000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE__SHIFT 0x10
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B_MASK 0x20000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B__SHIFT 0x11
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE_MASK 0x40000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE__SHIFT 0x12
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A_MASK 0x80000
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A__SHIFT 0x13
+#define MVP_DEBUG_14__IDEE_CRC_PHASE_MASK 0x100000
+#define MVP_DEBUG_14__IDEE_CRC_PHASE__SHIFT 0x14
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN_MASK 0x1
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN__SHIFT 0x0
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA_MASK 0xfffffff0
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA__SHIFT 0x4
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ_MASK 0x1
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ__SHIFT 0x0
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL_MASK 0x2
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL__SHIFT 0x1
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL_MASK 0x4
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL__SHIFT 0x2
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT_MASK 0x8
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT__SHIFT 0x3
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES_MASK 0xff0
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES__SHIFT 0x4
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW_MASK 0x1000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW__SHIFT 0xc
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW_MASK 0x2000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW__SHIFT 0xd
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR_MASK 0xff0000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR__SHIFT 0x10
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR_MASK 0xff000000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR__SHIFT 0x18
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_MASK 0x1
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ__SHIFT 0x0
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE_MASK 0x2
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE__SHIFT 0x1
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA_MASK 0xfffffffc
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA__SHIFT 0x2
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX_MASK 0xf
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE_MASK 0xf00
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE__SHIFT 0x8
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE_MASK 0x70000
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE__SHIFT 0x10
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_MASK 0x3fff
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN_MASK 0x8000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_MASK 0x3fff0000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN_MASK 0x80000000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define SCL_MODE__SCL_MODE_MASK 0x3
+#define SCL_MODE__SCL_MODE__SHIFT 0x0
+#define SCL_MODE__SCL_PSCL_EN_MASK 0x10
+#define SCL_MODE__SCL_PSCL_EN__SHIFT 0x4
+#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS_MASK 0x7
+#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS__SHIFT 0x0
+#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS_MASK 0xf00
+#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS__SHIFT 0x8
+#define SCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x1
+#define SCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define SCL_CONTROL__SCL_EARLY_EOL_MODE_MASK 0x10
+#define SCL_CONTROL__SCL_EARLY_EOL_MODE__SHIFT 0x4
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x3
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE__SHIFT 0x0
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0xf
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0xf00
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN_MASK 0x1
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN__SHIFT 0x0
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN_MASK 0x10000
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN__SHIFT 0x10
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST_MASK 0x1
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST__SHIFT 0x0
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x3ffffff
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0xffffff
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0xf000000
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST_MASK 0x1
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST__SHIFT 0x0
+#define SCL_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCL_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x3ffffff
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0xffffff
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x7000000
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0xffffff
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x7000000
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y_MASK 0xffff
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y__SHIFT 0x0
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR_MASK 0xffff0000
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR__SHIFT 0x10
+#define SCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x1
+#define SCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define SCL_UPDATE__SCL_UPDATE_TAKEN_MASK 0x100
+#define SCL_UPDATE__SCL_UPDATE_TAKEN__SHIFT 0x8
+#define SCL_UPDATE__SCL_UPDATE_LOCK_MASK 0x10000
+#define SCL_UPDATE__SCL_UPDATE_LOCK__SHIFT 0x10
+#define SCL_UPDATE__SCL_COEF_UPDATE_COMPLETE_MASK 0x1000000
+#define SCL_UPDATE__SCL_COEF_UPDATE_COMPLETE__SHIFT 0x18
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR_MASK 0x7
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR__SHIFT 0x0
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN_MASK 0x10
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN__SHIFT 0x4
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR_MASK 0x700
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR__SHIFT 0x8
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN_MASK 0x1000
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN__SHIFT 0xc
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x1
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x0
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG_MASK 0x1
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG__SHIFT 0x0
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK_MASK 0x100
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK__SHIFT 0x8
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK_MASK 0x1000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK__SHIFT 0xc
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS_MASK 0x10000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS__SHIFT 0x10
+#define VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY_MASK 0x3fff
+#define VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY__SHIFT 0x0
+#define VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY_MASK 0x3fff0000
+#define VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY__SHIFT 0x10
+#define VIEWPORT_START__VIEWPORT_Y_START_MASK 0x3fff
+#define VIEWPORT_START__VIEWPORT_Y_START__SHIFT 0x0
+#define VIEWPORT_START__VIEWPORT_X_START_MASK 0x3fff0000
+#define VIEWPORT_START__VIEWPORT_X_START__SHIFT 0x10
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT_MASK 0x3fff
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT__SHIFT 0x0
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK 0x3fff0000
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x10
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x1fff
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1fff0000
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x1fff
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1fff0000
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_MASK 0x1
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE__SHIFT 0x0
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK_MASK 0x10
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK__SHIFT 0x4
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO_MASK 0xfffff80
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO__SHIFT 0x7
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO_MASK 0x1fffff
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO__SHIFT 0x0
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT_MASK 0x3fff
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT__SHIFT 0x0
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH_MASK 0x3fff0000
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH__SHIFT 0x10
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK_MASK 0x1
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK__SHIFT 0x0
+#define SCL_DEBUG2__SCL_DEBUG_REQ_MODE_MASK 0x1
+#define SCL_DEBUG2__SCL_DEBUG_REQ_MODE__SHIFT 0x0
+#define SCL_DEBUG2__SCL_DEBUG_EOF_MODE_MASK 0x6
+#define SCL_DEBUG2__SCL_DEBUG_EOF_MODE__SHIFT 0x1
+#define SCL_DEBUG2__SCL_DEBUG2_MASK 0xfffffff8
+#define SCL_DEBUG2__SCL_DEBUG2__SHIFT 0x3
+#define SCL_DEBUG__SCL_DEBUG_MASK 0xffffffff
+#define SCL_DEBUG__SCL_DEBUG__SHIFT 0x0
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX_MASK 0xff
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX__SHIFT 0x0
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA_MASK 0xffffffff
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA__SHIFT 0x0
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX_MASK 0x3
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_PHASE_MASK 0x7f00
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_PHASE__SHIFT 0x8
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE_MASK 0x30000
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE__SHIFT 0x10
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_MASK 0x3fff
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN_MASK 0x8000
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_MASK 0x3fff0000
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN_MASK 0x80000000
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define SCLV_MODE__SCL_MODE_MASK 0x3
+#define SCLV_MODE__SCL_MODE__SHIFT 0x0
+#define SCLV_MODE__SCL_MODE_C_MASK 0xc
+#define SCLV_MODE__SCL_MODE_C__SHIFT 0x2
+#define SCLV_MODE__SCL_PSCL_EN_MASK 0x10
+#define SCLV_MODE__SCL_PSCL_EN__SHIFT 0x4
+#define SCLV_MODE__SCL_PSCL_EN_C_MASK 0x20
+#define SCLV_MODE__SCL_PSCL_EN_C__SHIFT 0x5
+#define SCLV_MODE__SCL_INTERLACE_SOURCE_MASK 0x300
+#define SCLV_MODE__SCL_INTERLACE_SOURCE__SHIFT 0x8
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS_MASK 0x7
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS__SHIFT 0x0
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS_MASK 0x70
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS__SHIFT 0x4
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS_C_MASK 0x700
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS_C__SHIFT 0x8
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS_C_MASK 0x7000
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS_C__SHIFT 0xc
+#define SCLV_CONTROL__SCL_BOUNDARY_MODE_MASK 0x1
+#define SCLV_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define SCLV_CONTROL__SCL_EARLY_EOL_MODE_MASK 0x10
+#define SCLV_CONTROL__SCL_EARLY_EOL_MODE__SHIFT 0x4
+#define SCLV_CONTROL__SCL_TOTAL_PHASE_MASK 0x100
+#define SCLV_CONTROL__SCL_TOTAL_PHASE__SHIFT 0x8
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0xf
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0xf00
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN_MASK 0x1
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN__SHIFT 0x0
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN_MASK 0x10000
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN__SHIFT 0x10
+#define SCLV_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCLV_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCLV_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x3ffffff
+#define SCLV_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define SCLV_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x3ffffff
+#define SCLV_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define SCLV_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCLV_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCLV_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x3ffffff
+#define SCLV_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define SCLV_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x3ffffff
+#define SCLV_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y_MASK 0xffff
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y__SHIFT 0x0
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR_MASK 0xffff0000
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR__SHIFT 0x10
+#define SCLV_UPDATE__SCL_UPDATE_PENDING_MASK 0x1
+#define SCLV_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define SCLV_UPDATE__SCL_UPDATE_TAKEN_MASK 0x100
+#define SCLV_UPDATE__SCL_UPDATE_TAKEN__SHIFT 0x8
+#define SCLV_UPDATE__SCL_UPDATE_LOCK_MASK 0x10000
+#define SCLV_UPDATE__SCL_UPDATE_LOCK__SHIFT 0x10
+#define SCLV_UPDATE__SCL_COEF_UPDATE_COMPLETE_MASK 0x1000000
+#define SCLV_UPDATE__SCL_COEF_UPDATE_COMPLETE__SHIFT 0x18
+#define SCLV_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x1
+#define SCLV_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x0
+#define SCLV_VIEWPORT_START__VIEWPORT_Y_START_MASK 0x3fff
+#define SCLV_VIEWPORT_START__VIEWPORT_Y_START__SHIFT 0x0
+#define SCLV_VIEWPORT_START__VIEWPORT_X_START_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START__VIEWPORT_X_START__SHIFT 0x10
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY_MASK 0x3fff
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY__SHIFT 0x0
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY__SHIFT 0x10
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_HEIGHT_MASK 0x1fff
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_HEIGHT__SHIFT 0x0
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK 0x1fff0000
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x10
+#define SCLV_VIEWPORT_START_C__VIEWPORT_Y_START_C_MASK 0x3fff
+#define SCLV_VIEWPORT_START_C__VIEWPORT_Y_START_C__SHIFT 0x0
+#define SCLV_VIEWPORT_START_C__VIEWPORT_X_START_C_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START_C__VIEWPORT_X_START_C__SHIFT 0x10
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_Y_START_SECONDARY_C_MASK 0x3fff
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_Y_START_SECONDARY_C__SHIFT 0x0
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_X_START_SECONDARY_C_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_X_START_SECONDARY_C__SHIFT 0x10
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_HEIGHT_C_MASK 0x1fff
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_HEIGHT_C__SHIFT 0x0
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_WIDTH_C_MASK 0x1fff0000
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_WIDTH_C__SHIFT 0x10
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x1fff
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1fff0000
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x1fff
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1fff0000
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE_MASK 0x1
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE__SHIFT 0x0
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK_MASK 0x10
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK__SHIFT 0x4
+#define SCLV_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO_MASK 0xfffff80
+#define SCLV_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO__SHIFT 0x7
+#define SCLV_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO_MASK 0x1fffff
+#define SCLV_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO__SHIFT 0x0
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT_MASK 0x3fff
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT__SHIFT 0x0
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH_MASK 0x3fff0000
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH__SHIFT 0x10
+#define SCLV_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK_MASK 0x1
+#define SCLV_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_FRAC_BOT_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_FRAC_BOT__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_INT_BOT_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_INT_BOT__SHIFT 0x18
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_FRAC_BOT_C_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_FRAC_BOT_C__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_INT_BOT_C_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_INT_BOT_C__SHIFT 0x18
+#define SCLV_DEBUG2__SCL_DEBUG_REQ_MODE_MASK 0x1
+#define SCLV_DEBUG2__SCL_DEBUG_REQ_MODE__SHIFT 0x0
+#define SCLV_DEBUG2__SCL_DEBUG_EOF_MODE_MASK 0x6
+#define SCLV_DEBUG2__SCL_DEBUG_EOF_MODE__SHIFT 0x1
+#define SCLV_DEBUG2__SCL_DEBUG2_MASK 0xfffffff8
+#define SCLV_DEBUG2__SCL_DEBUG2__SHIFT 0x3
+#define SCLV_DEBUG__SCL_DEBUG_MASK 0xffffffff
+#define SCLV_DEBUG__SCL_DEBUG__SHIFT 0x0
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX_MASK 0xff
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX__SHIFT 0x0
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define SCLV_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA_MASK 0xffffffff
+#define SCLV_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA__SHIFT 0x0
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_PENDING_MASK 0x1
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_PENDING__SHIFT 0x0
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_TAKEN_MASK 0x2
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_TAKEN__SHIFT 0x1
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_LOCK_MASK 0x10000
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_LOCK__SHIFT 0x10
+#define COL_MAN_UPDATE__COL_MAN_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define COL_MAN_UPDATE__COL_MAN_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_MODE_MASK 0x3
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_MODE__SHIFT 0x0
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_INPUT_TYPE_MASK 0x300
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_INPUT_TYPE__SHIFT 0x8
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_CONVERSION_MODE_MASK 0x10000
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_CONVERSION_MODE__SHIFT 0x10
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C11_A_MASK 0xffff
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C11_A__SHIFT 0x0
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C12_A_MASK 0xffff0000
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C12_A__SHIFT 0x10
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C13_A_MASK 0xffff
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C13_A__SHIFT 0x0
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C14_A_MASK 0xffff0000
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C14_A__SHIFT 0x10
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C21_A_MASK 0xffff
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C21_A__SHIFT 0x0
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C22_A_MASK 0xffff0000
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C22_A__SHIFT 0x10
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C23_A_MASK 0xffff
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C23_A__SHIFT 0x0
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C24_A_MASK 0xffff0000
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C24_A__SHIFT 0x10
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C31_A_MASK 0xffff
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C31_A__SHIFT 0x0
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C32_A_MASK 0xffff0000
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C32_A__SHIFT 0x10
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C33_A_MASK 0xffff
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C33_A__SHIFT 0x0
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C34_A_MASK 0xffff0000
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C34_A__SHIFT 0x10
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C11_B_MASK 0xffff
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C11_B__SHIFT 0x0
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C12_B_MASK 0xffff0000
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C12_B__SHIFT 0x10
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C13_B_MASK 0xffff
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C13_B__SHIFT 0x0
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C14_B_MASK 0xffff0000
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C14_B__SHIFT 0x10
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C21_B_MASK 0xffff
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C21_B__SHIFT 0x0
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C22_B_MASK 0xffff0000
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C22_B__SHIFT 0x10
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C23_B_MASK 0xffff
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C23_B__SHIFT 0x0
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C24_B_MASK 0xffff0000
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C24_B__SHIFT 0x10
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C31_B_MASK 0xffff
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C31_B__SHIFT 0x0
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C32_B_MASK 0xffff0000
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C32_B__SHIFT 0x10
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C33_B_MASK 0xffff
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C33_B__SHIFT 0x0
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C34_B_MASK 0xffff0000
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C34_B__SHIFT 0x10
+#define PRESCALE_CONTROL__PRESCALE_MODE_MASK 0x3
+#define PRESCALE_CONTROL__PRESCALE_MODE__SHIFT 0x0
+#define PRESCALE_VALUES_R__PRESCALE_BIAS_R_MASK 0xffff
+#define PRESCALE_VALUES_R__PRESCALE_BIAS_R__SHIFT 0x0
+#define PRESCALE_VALUES_R__PRESCALE_SCALE_R_MASK 0xffff0000
+#define PRESCALE_VALUES_R__PRESCALE_SCALE_R__SHIFT 0x10
+#define PRESCALE_VALUES_G__PRESCALE_BIAS_G_MASK 0xffff
+#define PRESCALE_VALUES_G__PRESCALE_BIAS_G__SHIFT 0x0
+#define PRESCALE_VALUES_G__PRESCALE_SCALE_G_MASK 0xffff0000
+#define PRESCALE_VALUES_G__PRESCALE_SCALE_G__SHIFT 0x10
+#define PRESCALE_VALUES_B__PRESCALE_BIAS_B_MASK 0xffff
+#define PRESCALE_VALUES_B__PRESCALE_BIAS_B__SHIFT 0x0
+#define PRESCALE_VALUES_B__PRESCALE_SCALE_B_MASK 0xffff0000
+#define PRESCALE_VALUES_B__PRESCALE_SCALE_B__SHIFT 0x10
+#define COL_MAN_OUTPUT_CSC_CONTROL__OUTPUT_CSC_MODE_MASK 0x7
+#define COL_MAN_OUTPUT_CSC_CONTROL__OUTPUT_CSC_MODE__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C11_A_MASK 0xffff
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C11_A__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C12_A_MASK 0xffff0000
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C12_A__SHIFT 0x10
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C13_A_MASK 0xffff
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C13_A__SHIFT 0x0
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C14_A_MASK 0xffff0000
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C14_A__SHIFT 0x10
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C21_A_MASK 0xffff
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C21_A__SHIFT 0x0
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C22_A_MASK 0xffff0000
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C22_A__SHIFT 0x10
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C23_A_MASK 0xffff
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C23_A__SHIFT 0x0
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C24_A_MASK 0xffff0000
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C24_A__SHIFT 0x10
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C31_A_MASK 0xffff
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C31_A__SHIFT 0x0
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C32_A_MASK 0xffff0000
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C32_A__SHIFT 0x10
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C33_A_MASK 0xffff
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C33_A__SHIFT 0x0
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C34_A_MASK 0xffff0000
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C34_A__SHIFT 0x10
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C11_B_MASK 0xffff
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C11_B__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C12_B_MASK 0xffff0000
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C12_B__SHIFT 0x10
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C13_B_MASK 0xffff
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C13_B__SHIFT 0x0
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C14_B_MASK 0xffff0000
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C14_B__SHIFT 0x10
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C21_B_MASK 0xffff
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C21_B__SHIFT 0x0
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C22_B_MASK 0xffff0000
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C22_B__SHIFT 0x10
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C23_B_MASK 0xffff
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C23_B__SHIFT 0x0
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C24_B_MASK 0xffff0000
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C24_B__SHIFT 0x10
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C31_B_MASK 0xffff
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C31_B__SHIFT 0x0
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C32_B_MASK 0xffff0000
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C32_B__SHIFT 0x10
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C33_B_MASK 0xffff
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C33_B__SHIFT 0x0
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C34_B_MASK 0xffff0000
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C34_B__SHIFT 0x10
+#define DENORM_CLAMP_CONTROL__DENORM_MODE_MASK 0x3
+#define DENORM_CLAMP_CONTROL__DENORM_MODE__SHIFT 0x0
+#define DENORM_CLAMP_CONTROL__DENORM_10BIT_OUT_MASK 0x100
+#define DENORM_CLAMP_CONTROL__DENORM_10BIT_OUT__SHIFT 0x8
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MAX_R_CR_MASK 0xfff
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MAX_R_CR__SHIFT 0x0
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MIN_R_CR_MASK 0xfff000
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MIN_R_CR__SHIFT 0xc
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MAX_G_Y_MASK 0xfff
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MAX_G_Y__SHIFT 0x0
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MIN_G_Y_MASK 0xfff000
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MIN_G_Y__SHIFT 0xc
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MAX_B_CB_MASK 0xfff
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MAX_B_CB__SHIFT 0x0
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MIN_B_CB_MASK 0xfff000
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MIN_B_CB__SHIFT 0xc
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_DATA_MASK 0x3ffff
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_DATA__SHIFT 0x0
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_INDEX_MASK 0x3f00000
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_INDEX__SHIFT 0x14
+#define GAMMA_CORR_CONTROL__GAMMA_CORR_MODE_MASK 0x3
+#define GAMMA_CORR_CONTROL__GAMMA_CORR_MODE__SHIFT 0x0
+#define GAMMA_CORR_LUT_INDEX__GAMMA_CORR_LUT_INDEX_MASK 0xff
+#define GAMMA_CORR_LUT_INDEX__GAMMA_CORR_LUT_INDEX__SHIFT 0x0
+#define GAMMA_CORR_LUT_DATA__GAMMA_CORR_LUT_DATA_MASK 0x7ffff
+#define GAMMA_CORR_LUT_DATA__GAMMA_CORR_LUT_DATA__SHIFT 0x0
+#define GAMMA_CORR_LUT_WRITE_EN_MASK__GAMMA_CORR_LUT_WRITE_EN_MASK_MASK 0x7
+#define GAMMA_CORR_LUT_WRITE_EN_MASK__GAMMA_CORR_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START_MASK 0x3ffff
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define GAMMA_CORR_CNTLA_SLOPE_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define GAMMA_CORR_CNTLA_SLOPE_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_END_CNTL1__GAMMA_CORR_CNTLA_EXP_REGION_END_MASK 0xffff
+#define GAMMA_CORR_CNTLA_END_CNTL1__GAMMA_CORR_CNTLA_EXP_REGION_END__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE_MASK 0xffff
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_BASE_MASK 0xffff0000
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_BASE__SHIFT 0x10
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START_MASK 0x3ffff
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define GAMMA_CORR_CNTLB_SLOPE_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define GAMMA_CORR_CNTLB_SLOPE_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_END_CNTL1__GAMMA_CORR_CNTLB_EXP_REGION_END_MASK 0xffff
+#define GAMMA_CORR_CNTLB_END_CNTL1__GAMMA_CORR_CNTLB_EXP_REGION_END__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_SLOPE_MASK 0xffff
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_BASE_MASK 0xffff0000
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_BASE__SHIFT 0x10
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1b
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_OCCURED_MASK 0x1
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_OCCURED__SHIFT 0x0
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_ACK_MASK 0x2
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_ACK__SHIFT 0x1
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_OCCURED_MASK 0x100
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_OCCURED__SHIFT 0x8
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_ACK_MASK 0x200
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_ACK__SHIFT 0x9
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_OCCURED_MASK 0x10000
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_OCCURED__SHIFT 0x10
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_ACK_MASK 0x20000
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_ACK__SHIFT 0x11
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_OCCURED_MASK 0x1000000
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_OCCURED__SHIFT 0x18
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_ACK_MASK 0x2000000
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_ACK__SHIFT 0x19
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_OCCURED_MASK 0x1
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_OCCURED__SHIFT 0x0
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_ACK_MASK 0x2
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_ACK__SHIFT 0x1
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_OCCURED_MASK 0x100
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_OCCURED__SHIFT 0x8
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_ACK_MASK 0x200
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_ACK__SHIFT 0x9
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL_MASK 0x1
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL__SHIFT 0x0
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL_DONE_MASK 0x2
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL_DONE__SHIFT 0x1
+#define INPUT_GAMMA_LUT_RW_INDEX__INPUT_GAMMA_LUT_RW_INDEX_MASK 0xff
+#define INPUT_GAMMA_LUT_RW_INDEX__INPUT_GAMMA_LUT_RW_INDEX__SHIFT 0x0
+#define INPUT_GAMMA_LUT_SEQ_COLOR__INPUT_GAMMA_LUT_SEQ_COLOR_MASK 0xffff
+#define INPUT_GAMMA_LUT_SEQ_COLOR__INPUT_GAMMA_LUT_SEQ_COLOR__SHIFT 0x0
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_BASE_MASK 0xffff
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_BASE__SHIFT 0x0
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_DELTA_MASK 0xffff0000
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_DELTA__SHIFT 0x10
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_BLUE_MASK 0x3ff
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_BLUE__SHIFT 0x0
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_GREEN_MASK 0xffc00
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_GREEN__SHIFT 0xa
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_RED_MASK 0x3ff00000
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_RED__SHIFT 0x14
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_MODE_MASK 0x3
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_MODE__SHIFT 0x0
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_LUT_10BIT_BYPASS_EN_MASK 0x4000000
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_LUT_10BIT_BYPASS_EN__SHIFT 0x1a
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_B_MASK 0x1e
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_B__SHIFT 0x1
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_SIGNED_EN_MASK 0x20
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_SIGNED_EN__SHIFT 0x5
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_FORMAT_MASK 0xc0
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_FORMAT__SHIFT 0x6
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_G_MASK 0xf00
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_G__SHIFT 0x8
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_SIGNED_EN_MASK 0x1000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_SIGNED_EN__SHIFT 0xc
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_FORMAT_MASK 0x6000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_FORMAT__SHIFT 0xd
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_R_MASK 0x78000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_R__SHIFT 0xf
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_SIGNED_EN_MASK 0x80000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_SIGNED_EN__SHIFT 0x13
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_FORMAT_MASK 0x300000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_FORMAT__SHIFT 0x14
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_RW_MODE_MASK 0x400000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_RW_MODE__SHIFT 0x16
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_WRITE_EN_MASK_MASK 0x3800000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_WRITE_EN_MASK__SHIFT 0x17
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_VGA_ACCESS_ENABLE_MASK 0x4000000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_VGA_ACCESS_ENABLE__SHIFT 0x1a
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_10BIT_BYPASS_DBL_BUF_EN_MASK 0x8000000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_10BIT_BYPASS_DBL_BUF_EN__SHIFT 0x1b
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_BLACK_OFFSET_B_MASK 0xffff
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_BLACK_OFFSET_B__SHIFT 0x0
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_WHITE_OFFSET_B_MASK 0xffff0000
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_WHITE_OFFSET_B__SHIFT 0x10
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_BLACK_OFFSET_G_MASK 0xffff
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_BLACK_OFFSET_G__SHIFT 0x0
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_WHITE_OFFSET_G_MASK 0xffff0000
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_WHITE_OFFSET_G__SHIFT 0x10
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_BLACK_OFFSET_R_MASK 0xffff
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_BLACK_OFFSET_R__SHIFT 0x0
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_WHITE_OFFSET_R_MASK 0xffff0000
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_WHITE_OFFSET_R__SHIFT 0x10
+#define COL_MAN_DEBUG_CONTROL__COL_MAN_GLOBAL_PASSTHROUGH_ENABLE_MASK 0x1
+#define COL_MAN_DEBUG_CONTROL__COL_MAN_GLOBAL_PASSTHROUGH_ENABLE__SHIFT 0x0
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_INDEX_MASK 0xff
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_INDEX__SHIFT 0x0
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define COL_MAN_TEST_DEBUG_DATA__COL_MAN_TEST_DEBUG_DATA_MASK 0xffffffff
+#define COL_MAN_TEST_DEBUG_DATA__COL_MAN_TEST_DEBUG_DATA__SHIFT 0x0
+#define UNP_GRPH_ENABLE__GRPH_ENABLE_MASK 0x1
+#define UNP_GRPH_ENABLE__GRPH_ENABLE__SHIFT 0x0
+#define UNP_GRPH_CONTROL__GRPH_DEPTH_MASK 0x3
+#define UNP_GRPH_CONTROL__GRPH_DEPTH__SHIFT 0x0
+#define UNP_GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0xc
+#define UNP_GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x2
+#define UNP_GRPH_CONTROL__GRPH_Z_MASK 0x30
+#define UNP_GRPH_CONTROL__GRPH_Z__SHIFT 0x4
+#define UNP_GRPH_CONTROL__GRPH_BANK_WIDTH_L_MASK 0xc0
+#define UNP_GRPH_CONTROL__GRPH_BANK_WIDTH_L__SHIFT 0x6
+#define UNP_GRPH_CONTROL__GRPH_FORMAT_MASK 0x700
+#define UNP_GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x8
+#define UNP_GRPH_CONTROL__GRPH_BANK_HEIGHT_L_MASK 0x1800
+#define UNP_GRPH_CONTROL__GRPH_BANK_HEIGHT_L__SHIFT 0xb
+#define UNP_GRPH_CONTROL__GRPH_TILE_SPLIT_L_MASK 0xe000
+#define UNP_GRPH_CONTROL__GRPH_TILE_SPLIT_L__SHIFT 0xd
+#define UNP_GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE_MASK 0x10000
+#define UNP_GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x10
+#define UNP_GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE_MASK 0x20000
+#define UNP_GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x11
+#define UNP_GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_L_MASK 0xc0000
+#define UNP_GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_L__SHIFT 0x12
+#define UNP_GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0xf00000
+#define UNP_GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x14
+#define UNP_GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000
+#define UNP_GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT 0x18
+#define UNP_GRPH_CONTROL__GRPH_MICRO_TILE_MODE_L_MASK 0x60000000
+#define UNP_GRPH_CONTROL__GRPH_MICRO_TILE_MODE_L__SHIFT 0x1d
+#define UNP_GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE_MASK 0x80000000
+#define UNP_GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE__SHIFT 0x1f
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_WIDTH_C_MASK 0xc0
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_WIDTH_C__SHIFT 0x6
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_HEIGHT_C_MASK 0x1800
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_HEIGHT_C__SHIFT 0xb
+#define UNP_GRPH_CONTROL_C__GRPH_TILE_SPLIT_C_MASK 0xe000
+#define UNP_GRPH_CONTROL_C__GRPH_TILE_SPLIT_C__SHIFT 0xd
+#define UNP_GRPH_CONTROL_C__GRPH_MACRO_TILE_ASPECT_C_MASK 0xc0000
+#define UNP_GRPH_CONTROL_C__GRPH_MACRO_TILE_ASPECT_C__SHIFT 0x12
+#define UNP_GRPH_CONTROL_C__GRPH_MICRO_TILE_MODE_C_MASK 0x60000000
+#define UNP_GRPH_CONTROL_C__GRPH_MICRO_TILE_MODE_C__SHIFT 0x1d
+#define UNP_GRPH_CONTROL_EXP__VIDEO_FORMAT_MASK 0x7
+#define UNP_GRPH_CONTROL_EXP__VIDEO_FORMAT__SHIFT 0x0
+#define UNP_GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP_MASK 0x3
+#define UNP_GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT 0x0
+#define UNP_GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR_MASK 0x30
+#define UNP_GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT 0x4
+#define UNP_GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR_MASK 0xc0
+#define UNP_GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR__SHIFT 0x6
+#define UNP_GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR_MASK 0x300
+#define UNP_GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L__GRPH_SECONDARY_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L__GRPH_SECONDARY_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C__GRPH_SECONDARY_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C__GRPH_SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_PITCH_L__GRPH_PITCH_L_MASK 0x7fff
+#define UNP_GRPH_PITCH_L__GRPH_PITCH_L__SHIFT 0x0
+#define UNP_GRPH_PITCH_C__GRPH_PITCH_C_MASK 0x7fff
+#define UNP_GRPH_PITCH_C__GRPH_PITCH_C__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_X_L__GRPH_SURFACE_OFFSET_X_L_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_X_L__GRPH_SURFACE_OFFSET_X_L__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_X_C__GRPH_SURFACE_OFFSET_X_C_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_X_C__GRPH_SURFACE_OFFSET_X_C__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_Y_L__GRPH_SURFACE_OFFSET_Y_L_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_Y_L__GRPH_SURFACE_OFFSET_Y_L__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_Y_C__GRPH_SURFACE_OFFSET_Y_C_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_Y_C__GRPH_SURFACE_OFFSET_Y_C__SHIFT 0x0
+#define UNP_GRPH_X_START_L__GRPH_X_START_L_MASK 0x3fff
+#define UNP_GRPH_X_START_L__GRPH_X_START_L__SHIFT 0x0
+#define UNP_GRPH_X_START_C__GRPH_X_START_C_MASK 0x3fff
+#define UNP_GRPH_X_START_C__GRPH_X_START_C__SHIFT 0x0
+#define UNP_GRPH_Y_START_L__GRPH_Y_START_L_MASK 0x3fff
+#define UNP_GRPH_Y_START_L__GRPH_Y_START_L__SHIFT 0x0
+#define UNP_GRPH_Y_START_C__GRPH_Y_START_C_MASK 0x3fff
+#define UNP_GRPH_Y_START_C__GRPH_Y_START_C__SHIFT 0x0
+#define UNP_GRPH_X_END_L__GRPH_X_END_L_MASK 0x7fff
+#define UNP_GRPH_X_END_L__GRPH_X_END_L__SHIFT 0x0
+#define UNP_GRPH_X_END_C__GRPH_X_END_C_MASK 0x7fff
+#define UNP_GRPH_X_END_C__GRPH_X_END_C__SHIFT 0x0
+#define UNP_GRPH_Y_END_L__GRPH_Y_END_L_MASK 0x7fff
+#define UNP_GRPH_Y_END_L__GRPH_Y_END_L__SHIFT 0x0
+#define UNP_GRPH_Y_END_C__GRPH_Y_END_C_MASK 0x7fff
+#define UNP_GRPH_Y_END_C__GRPH_Y_END_C__SHIFT 0x0
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING_MASK 0x1
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING__SHIFT 0x0
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN_MASK 0x2
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN__SHIFT 0x1
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK 0x4
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING__SHIFT 0x2
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN_MASK 0x8
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN__SHIFT 0x3
+#define UNP_GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK 0x10000
+#define UNP_GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT 0x10
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK_MASK 0x100000
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK__SHIFT 0x14
+#define UNP_GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define UNP_GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_MASK 0x10000000
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x1c
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L_MASK 0xff
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L__SHIFT 0x0
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C_MASK 0xff00
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C__SHIFT 0x8
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_L__GRPH_SURFACE_ADDRESS_INUSE_L_MASK 0xffffff00
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_L__GRPH_SURFACE_ADDRESS_INUSE_L__SHIFT 0x8
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_C__GRPH_SURFACE_ADDRESS_INUSE_C_MASK 0xffffff00
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_C__GRPH_SURFACE_ADDRESS_INUSE_C__SHIFT 0x8
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L__GRPH_SURFACE_ADDRESS_HIGH_INUSE_L_MASK 0xff
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L__GRPH_SURFACE_ADDRESS_HIGH_INUSE_L__SHIFT 0x0
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C__GRPH_SURFACE_ADDRESS_HIGH_INUSE_C_MASK 0xff
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C__GRPH_SURFACE_ADDRESS_HIGH_INUSE_C__SHIFT 0x0
+#define UNP_DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE_MASK 0x1
+#define UNP_DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE__SHIFT 0x0
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH_MASK 0x1e
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH__SHIFT 0x1
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT_MASK 0x1e0
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT__SHIFT 0x5
+#define UNP_DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP_MASK 0x7fe00
+#define UNP_DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP__SHIFT 0x9
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0_MASK 0x100000
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0__SHIFT 0x14
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1_MASK 0x200000
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1__SHIFT 0x15
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED__SHIFT 0x0
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR__SHIFT 0x8
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK__SHIFT 0x0
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE_MASK 0x100
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE__SHIFT 0x8
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN_MASK 0x1
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN__SHIFT 0x0
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE_MASK 0x30
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE__SHIFT 0x4
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_EN_MASK 0x100
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_EN__SHIFT 0x8
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_MODE_MASK 0x3000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_MODE__SHIFT 0xc
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING_MASK 0x10000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING__SHIFT 0x10
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING_MASK 0x20000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING__SHIFT 0x11
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_BOTTOM_SURFACE_PENDING_MASK 0x40000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_BOTTOM_SURFACE_PENDING__SHIFT 0x12
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_BOTTOM_SURFACE_PENDING_MASK 0x80000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_BOTTOM_SURFACE_PENDING__SHIFT 0x13
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE__SHIFT 0x1c
+#define UNP_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE_MASK 0x1
+#define UNP_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE__SHIFT 0x0
+#define UNP_FLIP_CONTROL__UNP_DEBUG_SG_MASK 0xfffffffc
+#define UNP_FLIP_CONTROL__UNP_DEBUG_SG__SHIFT 0x2
+#define UNP_CRC_CONTROL__UNP_CRC_ENABLE_MASK 0x1
+#define UNP_CRC_CONTROL__UNP_CRC_ENABLE__SHIFT 0x0
+#define UNP_CRC_CONTROL__UNP_CRC_SOURCE_SEL_MASK 0x1c
+#define UNP_CRC_CONTROL__UNP_CRC_SOURCE_SEL__SHIFT 0x2
+#define UNP_CRC_CONTROL__UNP_CRC_LINE_SEL_MASK 0x300
+#define UNP_CRC_CONTROL__UNP_CRC_LINE_SEL__SHIFT 0x8
+#define UNP_CRC_MASK__UNP_CRC_MASK_MASK 0xffffffff
+#define UNP_CRC_MASK__UNP_CRC_MASK__SHIFT 0x0
+#define UNP_CRC_CURRENT__UNP_CRC_CURRENT_MASK 0xffffffff
+#define UNP_CRC_CURRENT__UNP_CRC_CURRENT__SHIFT 0x0
+#define UNP_CRC_LAST__UNP_CRC_LAST_MASK 0xffffffff
+#define UNP_CRC_LAST__UNP_CRC_LAST__SHIFT 0x0
+#define UNP_LB_DATA_GAP_BETWEEN_CHUNK__UNP_LB_GAP_BETWEEN_CHUNK_MASK 0x1f0
+#define UNP_LB_DATA_GAP_BETWEEN_CHUNK__UNP_LB_GAP_BETWEEN_CHUNK__SHIFT 0x4
+#define UNP_HW_ROTATION__ROTATION_ANGLE_MASK 0x7
+#define UNP_HW_ROTATION__ROTATION_ANGLE__SHIFT 0x0
+#define UNP_HW_ROTATION__PIXEL_DROP_MASK 0x10
+#define UNP_HW_ROTATION__PIXEL_DROP__SHIFT 0x4
+#define UNP_HW_ROTATION__BUFFER_MODE_MASK 0x100
+#define UNP_HW_ROTATION__BUFFER_MODE__SHIFT 0x8
+#define UNP_DEBUG__UNP_DEBUG_MASK 0xffffffff
+#define UNP_DEBUG__UNP_DEBUG__SHIFT 0x0
+#define UNP_DEBUG2__UNP_DEBUG2_MASK 0xffffffff
+#define UNP_DEBUG2__UNP_DEBUG2__SHIFT 0x0
+#define UNP_DVMM_DEBUG__UNP_L_DVMM_DEBUG_MASK 0xffff
+#define UNP_DVMM_DEBUG__UNP_L_DVMM_DEBUG__SHIFT 0x0
+#define UNP_DVMM_DEBUG__UNP_C_DVMM_DEBUG_MASK 0xffff0000
+#define UNP_DVMM_DEBUG__UNP_C_DVMM_DEBUG__SHIFT 0x10
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_INDEX_MASK 0xff
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define UNP_TEST_DEBUG_DATA__UNP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define UNP_TEST_DEBUG_DATA__UNP_TEST_DEBUG_DATA__SHIFT 0x0
+#define GENMO_WT__GENMO_MONO_ADDRESS_B_MASK 0x1
+#define GENMO_WT__GENMO_MONO_ADDRESS_B__SHIFT 0x0
+#define GENMO_WT__VGA_RAM_EN_MASK 0x2
+#define GENMO_WT__VGA_RAM_EN__SHIFT 0x1
+#define GENMO_WT__VGA_CKSEL_MASK 0xc
+#define GENMO_WT__VGA_CKSEL__SHIFT 0x2
+#define GENMO_WT__ODD_EVEN_MD_PGSEL_MASK 0x20
+#define GENMO_WT__ODD_EVEN_MD_PGSEL__SHIFT 0x5
+#define GENMO_WT__VGA_HSYNC_POL_MASK 0x40
+#define GENMO_WT__VGA_HSYNC_POL__SHIFT 0x6
+#define GENMO_WT__VGA_VSYNC_POL_MASK 0x80
+#define GENMO_WT__VGA_VSYNC_POL__SHIFT 0x7
+#define GENMO_RD__GENMO_MONO_ADDRESS_B_MASK 0x1
+#define GENMO_RD__GENMO_MONO_ADDRESS_B__SHIFT 0x0
+#define GENMO_RD__VGA_RAM_EN_MASK 0x2
+#define GENMO_RD__VGA_RAM_EN__SHIFT 0x1
+#define GENMO_RD__VGA_CKSEL_MASK 0xc
+#define GENMO_RD__VGA_CKSEL__SHIFT 0x2
+#define GENMO_RD__ODD_EVEN_MD_PGSEL_MASK 0x20
+#define GENMO_RD__ODD_EVEN_MD_PGSEL__SHIFT 0x5
+#define GENMO_RD__VGA_HSYNC_POL_MASK 0x40
+#define GENMO_RD__VGA_HSYNC_POL__SHIFT 0x6
+#define GENMO_RD__VGA_VSYNC_POL_MASK 0x80
+#define GENMO_RD__VGA_VSYNC_POL__SHIFT 0x7
+#define GENENB__BLK_IO_BASE_MASK 0xff
+#define GENENB__BLK_IO_BASE__SHIFT 0x0
+#define GENFC_WT__VSYNC_SEL_W_MASK 0x8
+#define GENFC_WT__VSYNC_SEL_W__SHIFT 0x3
+#define GENFC_RD__VSYNC_SEL_R_MASK 0x8
+#define GENFC_RD__VSYNC_SEL_R__SHIFT 0x3
+#define GENS0__SENSE_SWITCH_MASK 0x10
+#define GENS0__SENSE_SWITCH__SHIFT 0x4
+#define GENS0__CRT_INTR_MASK 0x80
+#define GENS0__CRT_INTR__SHIFT 0x7
+#define GENS1__NO_DISPLAY_MASK 0x1
+#define GENS1__NO_DISPLAY__SHIFT 0x0
+#define GENS1__VGA_VSTATUS_MASK 0x8
+#define GENS1__VGA_VSTATUS__SHIFT 0x3
+#define GENS1__PIXEL_READ_BACK_MASK 0x30
+#define GENS1__PIXEL_READ_BACK__SHIFT 0x4
+#define DAC_DATA__DAC_DATA_MASK 0x3f
+#define DAC_DATA__DAC_DATA__SHIFT 0x0
+#define DAC_MASK__DAC_MASK_MASK 0xff
+#define DAC_MASK__DAC_MASK__SHIFT 0x0
+#define DAC_R_INDEX__DAC_R_INDEX_MASK 0xff
+#define DAC_R_INDEX__DAC_R_INDEX__SHIFT 0x0
+#define DAC_W_INDEX__DAC_W_INDEX_MASK 0xff
+#define DAC_W_INDEX__DAC_W_INDEX__SHIFT 0x0
+#define SEQ8_IDX__SEQ_IDX_MASK 0x7
+#define SEQ8_IDX__SEQ_IDX__SHIFT 0x0
+#define SEQ8_DATA__SEQ_DATA_MASK 0xff
+#define SEQ8_DATA__SEQ_DATA__SHIFT 0x0
+#define SEQ00__SEQ_RST0B_MASK 0x1
+#define SEQ00__SEQ_RST0B__SHIFT 0x0
+#define SEQ00__SEQ_RST1B_MASK 0x2
+#define SEQ00__SEQ_RST1B__SHIFT 0x1
+#define SEQ01__SEQ_DOT8_MASK 0x1
+#define SEQ01__SEQ_DOT8__SHIFT 0x0
+#define SEQ01__SEQ_SHIFT2_MASK 0x4
+#define SEQ01__SEQ_SHIFT2__SHIFT 0x2
+#define SEQ01__SEQ_PCLKBY2_MASK 0x8
+#define SEQ01__SEQ_PCLKBY2__SHIFT 0x3
+#define SEQ01__SEQ_SHIFT4_MASK 0x10
+#define SEQ01__SEQ_SHIFT4__SHIFT 0x4
+#define SEQ01__SEQ_MAXBW_MASK 0x20
+#define SEQ01__SEQ_MAXBW__SHIFT 0x5
+#define SEQ02__SEQ_MAP0_EN_MASK 0x1
+#define SEQ02__SEQ_MAP0_EN__SHIFT 0x0
+#define SEQ02__SEQ_MAP1_EN_MASK 0x2
+#define SEQ02__SEQ_MAP1_EN__SHIFT 0x1
+#define SEQ02__SEQ_MAP2_EN_MASK 0x4
+#define SEQ02__SEQ_MAP2_EN__SHIFT 0x2
+#define SEQ02__SEQ_MAP3_EN_MASK 0x8
+#define SEQ02__SEQ_MAP3_EN__SHIFT 0x3
+#define SEQ03__SEQ_FONT_B1_MASK 0x1
+#define SEQ03__SEQ_FONT_B1__SHIFT 0x0
+#define SEQ03__SEQ_FONT_B2_MASK 0x2
+#define SEQ03__SEQ_FONT_B2__SHIFT 0x1
+#define SEQ03__SEQ_FONT_A1_MASK 0x4
+#define SEQ03__SEQ_FONT_A1__SHIFT 0x2
+#define SEQ03__SEQ_FONT_A2_MASK 0x8
+#define SEQ03__SEQ_FONT_A2__SHIFT 0x3
+#define SEQ03__SEQ_FONT_B0_MASK 0x10
+#define SEQ03__SEQ_FONT_B0__SHIFT 0x4
+#define SEQ03__SEQ_FONT_A0_MASK 0x20
+#define SEQ03__SEQ_FONT_A0__SHIFT 0x5
+#define SEQ04__SEQ_256K_MASK 0x2
+#define SEQ04__SEQ_256K__SHIFT 0x1
+#define SEQ04__SEQ_ODDEVEN_MASK 0x4
+#define SEQ04__SEQ_ODDEVEN__SHIFT 0x2
+#define SEQ04__SEQ_CHAIN_MASK 0x8
+#define SEQ04__SEQ_CHAIN__SHIFT 0x3
+#define CRTC8_IDX__VCRTC_IDX_MASK 0x3f
+#define CRTC8_IDX__VCRTC_IDX__SHIFT 0x0
+#define CRTC8_DATA__VCRTC_DATA_MASK 0xff
+#define CRTC8_DATA__VCRTC_DATA__SHIFT 0x0
+#define CRT00__H_TOTAL_MASK 0xff
+#define CRT00__H_TOTAL__SHIFT 0x0
+#define CRT01__H_DISP_END_MASK 0xff
+#define CRT01__H_DISP_END__SHIFT 0x0
+#define CRT02__H_BLANK_START_MASK 0xff
+#define CRT02__H_BLANK_START__SHIFT 0x0
+#define CRT03__H_BLANK_END_MASK 0x1f
+#define CRT03__H_BLANK_END__SHIFT 0x0
+#define CRT03__H_DE_SKEW_MASK 0x60
+#define CRT03__H_DE_SKEW__SHIFT 0x5
+#define CRT03__CR10CR11_R_DIS_B_MASK 0x80
+#define CRT03__CR10CR11_R_DIS_B__SHIFT 0x7
+#define CRT04__H_SYNC_START_MASK 0xff
+#define CRT04__H_SYNC_START__SHIFT 0x0
+#define CRT05__H_SYNC_END_MASK 0x1f
+#define CRT05__H_SYNC_END__SHIFT 0x0
+#define CRT05__H_SYNC_SKEW_MASK 0x60
+#define CRT05__H_SYNC_SKEW__SHIFT 0x5
+#define CRT05__H_BLANK_END_B5_MASK 0x80
+#define CRT05__H_BLANK_END_B5__SHIFT 0x7
+#define CRT06__V_TOTAL_MASK 0xff
+#define CRT06__V_TOTAL__SHIFT 0x0
+#define CRT07__V_TOTAL_B8_MASK 0x1
+#define CRT07__V_TOTAL_B8__SHIFT 0x0
+#define CRT07__V_DISP_END_B8_MASK 0x2
+#define CRT07__V_DISP_END_B8__SHIFT 0x1
+#define CRT07__V_SYNC_START_B8_MASK 0x4
+#define CRT07__V_SYNC_START_B8__SHIFT 0x2
+#define CRT07__V_BLANK_START_B8_MASK 0x8
+#define CRT07__V_BLANK_START_B8__SHIFT 0x3
+#define CRT07__LINE_CMP_B8_MASK 0x10
+#define CRT07__LINE_CMP_B8__SHIFT 0x4
+#define CRT07__V_TOTAL_B9_MASK 0x20
+#define CRT07__V_TOTAL_B9__SHIFT 0x5
+#define CRT07__V_DISP_END_B9_MASK 0x40
+#define CRT07__V_DISP_END_B9__SHIFT 0x6
+#define CRT07__V_SYNC_START_B9_MASK 0x80
+#define CRT07__V_SYNC_START_B9__SHIFT 0x7
+#define CRT08__ROW_SCAN_START_MASK 0x1f
+#define CRT08__ROW_SCAN_START__SHIFT 0x0
+#define CRT08__BYTE_PAN_MASK 0x60
+#define CRT08__BYTE_PAN__SHIFT 0x5
+#define CRT09__MAX_ROW_SCAN_MASK 0x1f
+#define CRT09__MAX_ROW_SCAN__SHIFT 0x0
+#define CRT09__V_BLANK_START_B9_MASK 0x20
+#define CRT09__V_BLANK_START_B9__SHIFT 0x5
+#define CRT09__LINE_CMP_B9_MASK 0x40
+#define CRT09__LINE_CMP_B9__SHIFT 0x6
+#define CRT09__DOUBLE_CHAR_HEIGHT_MASK 0x80
+#define CRT09__DOUBLE_CHAR_HEIGHT__SHIFT 0x7
+#define CRT0A__CURSOR_START_MASK 0x1f
+#define CRT0A__CURSOR_START__SHIFT 0x0
+#define CRT0A__CURSOR_DISABLE_MASK 0x20
+#define CRT0A__CURSOR_DISABLE__SHIFT 0x5
+#define CRT0B__CURSOR_END_MASK 0x1f
+#define CRT0B__CURSOR_END__SHIFT 0x0
+#define CRT0B__CURSOR_SKEW_MASK 0x60
+#define CRT0B__CURSOR_SKEW__SHIFT 0x5
+#define CRT0C__DISP_START_MASK 0xff
+#define CRT0C__DISP_START__SHIFT 0x0
+#define CRT0D__DISP_START_MASK 0xff
+#define CRT0D__DISP_START__SHIFT 0x0
+#define CRT0E__CURSOR_LOC_HI_MASK 0xff
+#define CRT0E__CURSOR_LOC_HI__SHIFT 0x0
+#define CRT0F__CURSOR_LOC_LO_MASK 0xff
+#define CRT0F__CURSOR_LOC_LO__SHIFT 0x0
+#define CRT10__V_SYNC_START_MASK 0xff
+#define CRT10__V_SYNC_START__SHIFT 0x0
+#define CRT11__V_SYNC_END_MASK 0xf
+#define CRT11__V_SYNC_END__SHIFT 0x0
+#define CRT11__V_INTR_CLR_MASK 0x10
+#define CRT11__V_INTR_CLR__SHIFT 0x4
+#define CRT11__V_INTR_EN_MASK 0x20
+#define CRT11__V_INTR_EN__SHIFT 0x5
+#define CRT11__SEL5_REFRESH_CYC_MASK 0x40
+#define CRT11__SEL5_REFRESH_CYC__SHIFT 0x6
+#define CRT11__C0T7_WR_ONLY_MASK 0x80
+#define CRT11__C0T7_WR_ONLY__SHIFT 0x7
+#define CRT12__V_DISP_END_MASK 0xff
+#define CRT12__V_DISP_END__SHIFT 0x0
+#define CRT13__DISP_PITCH_MASK 0xff
+#define CRT13__DISP_PITCH__SHIFT 0x0
+#define CRT14__UNDRLN_LOC_MASK 0x1f
+#define CRT14__UNDRLN_LOC__SHIFT 0x0
+#define CRT14__ADDR_CNT_BY4_MASK 0x20
+#define CRT14__ADDR_CNT_BY4__SHIFT 0x5
+#define CRT14__DOUBLE_WORD_MASK 0x40
+#define CRT14__DOUBLE_WORD__SHIFT 0x6
+#define CRT15__V_BLANK_START_MASK 0xff
+#define CRT15__V_BLANK_START__SHIFT 0x0
+#define CRT16__V_BLANK_END_MASK 0xff
+#define CRT16__V_BLANK_END__SHIFT 0x0
+#define CRT17__RA0_AS_A13B_MASK 0x1
+#define CRT17__RA0_AS_A13B__SHIFT 0x0
+#define CRT17__RA1_AS_A14B_MASK 0x2
+#define CRT17__RA1_AS_A14B__SHIFT 0x1
+#define CRT17__VCOUNT_BY2_MASK 0x4
+#define CRT17__VCOUNT_BY2__SHIFT 0x2
+#define CRT17__ADDR_CNT_BY2_MASK 0x8
+#define CRT17__ADDR_CNT_BY2__SHIFT 0x3
+#define CRT17__WRAP_A15TOA0_MASK 0x20
+#define CRT17__WRAP_A15TOA0__SHIFT 0x5
+#define CRT17__BYTE_MODE_MASK 0x40
+#define CRT17__BYTE_MODE__SHIFT 0x6
+#define CRT17__CRTC_SYNC_EN_MASK 0x80
+#define CRT17__CRTC_SYNC_EN__SHIFT 0x7
+#define CRT18__LINE_CMP_MASK 0xff
+#define CRT18__LINE_CMP__SHIFT 0x0
+#define CRT1E__GRPH_DEC_RD1_MASK 0x2
+#define CRT1E__GRPH_DEC_RD1__SHIFT 0x1
+#define CRT1F__GRPH_DEC_RD0_MASK 0xff
+#define CRT1F__GRPH_DEC_RD0__SHIFT 0x0
+#define CRT22__GRPH_LATCH_DATA_MASK 0xff
+#define CRT22__GRPH_LATCH_DATA__SHIFT 0x0
+#define GRPH8_IDX__GRPH_IDX_MASK 0xf
+#define GRPH8_IDX__GRPH_IDX__SHIFT 0x0
+#define GRPH8_DATA__GRPH_DATA_MASK 0xff
+#define GRPH8_DATA__GRPH_DATA__SHIFT 0x0
+#define GRA00__GRPH_SET_RESET0_MASK 0x1
+#define GRA00__GRPH_SET_RESET0__SHIFT 0x0
+#define GRA00__GRPH_SET_RESET1_MASK 0x2
+#define GRA00__GRPH_SET_RESET1__SHIFT 0x1
+#define GRA00__GRPH_SET_RESET2_MASK 0x4
+#define GRA00__GRPH_SET_RESET2__SHIFT 0x2
+#define GRA00__GRPH_SET_RESET3_MASK 0x8
+#define GRA00__GRPH_SET_RESET3__SHIFT 0x3
+#define GRA01__GRPH_SET_RESET_ENA0_MASK 0x1
+#define GRA01__GRPH_SET_RESET_ENA0__SHIFT 0x0
+#define GRA01__GRPH_SET_RESET_ENA1_MASK 0x2
+#define GRA01__GRPH_SET_RESET_ENA1__SHIFT 0x1
+#define GRA01__GRPH_SET_RESET_ENA2_MASK 0x4
+#define GRA01__GRPH_SET_RESET_ENA2__SHIFT 0x2
+#define GRA01__GRPH_SET_RESET_ENA3_MASK 0x8
+#define GRA01__GRPH_SET_RESET_ENA3__SHIFT 0x3
+#define GRA02__GRPH_CCOMP_MASK 0xf
+#define GRA02__GRPH_CCOMP__SHIFT 0x0
+#define GRA03__GRPH_ROTATE_MASK 0x7
+#define GRA03__GRPH_ROTATE__SHIFT 0x0
+#define GRA03__GRPH_FN_SEL_MASK 0x18
+#define GRA03__GRPH_FN_SEL__SHIFT 0x3
+#define GRA04__GRPH_RMAP_MASK 0x3
+#define GRA04__GRPH_RMAP__SHIFT 0x0
+#define GRA05__GRPH_WRITE_MODE_MASK 0x3
+#define GRA05__GRPH_WRITE_MODE__SHIFT 0x0
+#define GRA05__GRPH_READ1_MASK 0x8
+#define GRA05__GRPH_READ1__SHIFT 0x3
+#define GRA05__CGA_ODDEVEN_MASK 0x10
+#define GRA05__CGA_ODDEVEN__SHIFT 0x4
+#define GRA05__GRPH_OES_MASK 0x20
+#define GRA05__GRPH_OES__SHIFT 0x5
+#define GRA05__GRPH_PACK_MASK 0x40
+#define GRA05__GRPH_PACK__SHIFT 0x6
+#define GRA06__GRPH_GRAPHICS_MASK 0x1
+#define GRA06__GRPH_GRAPHICS__SHIFT 0x0
+#define GRA06__GRPH_ODDEVEN_MASK 0x2
+#define GRA06__GRPH_ODDEVEN__SHIFT 0x1
+#define GRA06__GRPH_ADRSEL_MASK 0xc
+#define GRA06__GRPH_ADRSEL__SHIFT 0x2
+#define GRA07__GRPH_XCARE0_MASK 0x1
+#define GRA07__GRPH_XCARE0__SHIFT 0x0
+#define GRA07__GRPH_XCARE1_MASK 0x2
+#define GRA07__GRPH_XCARE1__SHIFT 0x1
+#define GRA07__GRPH_XCARE2_MASK 0x4
+#define GRA07__GRPH_XCARE2__SHIFT 0x2
+#define GRA07__GRPH_XCARE3_MASK 0x8
+#define GRA07__GRPH_XCARE3__SHIFT 0x3
+#define GRA08__GRPH_BMSK_MASK 0xff
+#define GRA08__GRPH_BMSK__SHIFT 0x0
+#define ATTRX__ATTR_IDX_MASK 0x1f
+#define ATTRX__ATTR_IDX__SHIFT 0x0
+#define ATTRX__ATTR_PAL_RW_ENB_MASK 0x20
+#define ATTRX__ATTR_PAL_RW_ENB__SHIFT 0x5
+#define ATTRDW__ATTR_DATA_MASK 0xff
+#define ATTRDW__ATTR_DATA__SHIFT 0x0
+#define ATTRDR__ATTR_DATA_MASK 0xff
+#define ATTRDR__ATTR_DATA__SHIFT 0x0
+#define ATTR00__ATTR_PAL_MASK 0x3f
+#define ATTR00__ATTR_PAL__SHIFT 0x0
+#define ATTR01__ATTR_PAL_MASK 0x3f
+#define ATTR01__ATTR_PAL__SHIFT 0x0
+#define ATTR02__ATTR_PAL_MASK 0x3f
+#define ATTR02__ATTR_PAL__SHIFT 0x0
+#define ATTR03__ATTR_PAL_MASK 0x3f
+#define ATTR03__ATTR_PAL__SHIFT 0x0
+#define ATTR04__ATTR_PAL_MASK 0x3f
+#define ATTR04__ATTR_PAL__SHIFT 0x0
+#define ATTR05__ATTR_PAL_MASK 0x3f
+#define ATTR05__ATTR_PAL__SHIFT 0x0
+#define ATTR06__ATTR_PAL_MASK 0x3f
+#define ATTR06__ATTR_PAL__SHIFT 0x0
+#define ATTR07__ATTR_PAL_MASK 0x3f
+#define ATTR07__ATTR_PAL__SHIFT 0x0
+#define ATTR08__ATTR_PAL_MASK 0x3f
+#define ATTR08__ATTR_PAL__SHIFT 0x0
+#define ATTR09__ATTR_PAL_MASK 0x3f
+#define ATTR09__ATTR_PAL__SHIFT 0x0
+#define ATTR0A__ATTR_PAL_MASK 0x3f
+#define ATTR0A__ATTR_PAL__SHIFT 0x0
+#define ATTR0B__ATTR_PAL_MASK 0x3f
+#define ATTR0B__ATTR_PAL__SHIFT 0x0
+#define ATTR0C__ATTR_PAL_MASK 0x3f
+#define ATTR0C__ATTR_PAL__SHIFT 0x0
+#define ATTR0D__ATTR_PAL_MASK 0x3f
+#define ATTR0D__ATTR_PAL__SHIFT 0x0
+#define ATTR0E__ATTR_PAL_MASK 0x3f
+#define ATTR0E__ATTR_PAL__SHIFT 0x0
+#define ATTR0F__ATTR_PAL_MASK 0x3f
+#define ATTR0F__ATTR_PAL__SHIFT 0x0
+#define ATTR10__ATTR_GRPH_MODE_MASK 0x1
+#define ATTR10__ATTR_GRPH_MODE__SHIFT 0x0
+#define ATTR10__ATTR_MONO_EN_MASK 0x2
+#define ATTR10__ATTR_MONO_EN__SHIFT 0x1
+#define ATTR10__ATTR_LGRPH_EN_MASK 0x4
+#define ATTR10__ATTR_LGRPH_EN__SHIFT 0x2
+#define ATTR10__ATTR_BLINK_EN_MASK 0x8
+#define ATTR10__ATTR_BLINK_EN__SHIFT 0x3
+#define ATTR10__ATTR_PANTOPONLY_MASK 0x20
+#define ATTR10__ATTR_PANTOPONLY__SHIFT 0x5
+#define ATTR10__ATTR_PCLKBY2_MASK 0x40
+#define ATTR10__ATTR_PCLKBY2__SHIFT 0x6
+#define ATTR10__ATTR_CSEL_EN_MASK 0x80
+#define ATTR10__ATTR_CSEL_EN__SHIFT 0x7
+#define ATTR11__ATTR_OVSC_MASK 0xff
+#define ATTR11__ATTR_OVSC__SHIFT 0x0
+#define ATTR12__ATTR_MAP_EN_MASK 0xf
+#define ATTR12__ATTR_MAP_EN__SHIFT 0x0
+#define ATTR12__ATTR_VSMUX_MASK 0x30
+#define ATTR12__ATTR_VSMUX__SHIFT 0x4
+#define ATTR13__ATTR_PPAN_MASK 0xf
+#define ATTR13__ATTR_PPAN__SHIFT 0x0
+#define ATTR14__ATTR_CSEL1_MASK 0x3
+#define ATTR14__ATTR_CSEL1__SHIFT 0x0
+#define ATTR14__ATTR_CSEL2_MASK 0xc
+#define ATTR14__ATTR_CSEL2__SHIFT 0x2
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE_MASK 0x1f
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE__SHIFT 0x0
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE_MASK 0x60
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE__SHIFT 0x5
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT_MASK 0x80
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT__SHIFT 0x7
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE_MASK 0x100
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE__SHIFT 0x8
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK 0x30000
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL__SHIFT 0x10
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT_MASK 0x1000000
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT__SHIFT 0x18
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL_MASK 0x2000000
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL__SHIFT 0x19
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A_MASK 0x7
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A__SHIFT 0x0
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B_MASK 0x700
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B__SHIFT 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x1
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x0
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x2
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x1
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x4
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x2
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x3
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x10
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x4
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x20
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x5
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x100
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x200
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x9
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x400
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xa
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x800
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xb
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x1000
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xc
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x2000
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xd
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE_MASK 0x10000
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE__SHIFT 0x10
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT_MASK 0x20000
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT__SHIFT 0x11
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT_MASK 0xfc0000
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT__SHIFT 0x12
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR_MASK 0x1
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR__SHIFT 0x0
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE_MASK 0x30
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE__SHIFT 0x4
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING_MASK 0x100
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING__SHIFT 0x8
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN_MASK 0x10000
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN__SHIFT 0x10
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT_MASK 0x3
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT__SHIFT 0x0
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT_MASK 0x300
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT__SHIFT 0x8
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS_MASK 0xffffffff
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS__SHIFT 0x0
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH_MASK 0xff
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH__SHIFT 0x0
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR_MASK 0x1ffffff
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR__SHIFT 0x0
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR_MASK 0x1ffffff
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR__SHIFT 0x0
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN_MASK 0x1
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN__SHIFT 0x0
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK 0x10
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE__SHIFT 0x4
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE_MASK 0x100
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE__SHIFT 0x8
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET_MASK 0x10000
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET__SHIFT 0x10
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL_MASK 0x1000000
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL__SHIFT 0x18
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS_MASK 0x1
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS__SHIFT 0x0
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE_MASK 0x100
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE__SHIFT 0x8
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE_MASK 0x10000
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE__SHIFT 0x10
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY_MASK 0x100000
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY__SHIFT 0x14
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT_MASK 0x3f000000
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT__SHIFT 0x18
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK 0x1
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE__SHIFT 0x0
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK 0x100
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT__SHIFT 0x8
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D1VGA_CONTROL__D1VGA_ROTATE_MASK 0x3000000
+#define D1VGA_CONTROL__D1VGA_ROTATE__SHIFT 0x18
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK 0x1
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE__SHIFT 0x0
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK 0x100
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT__SHIFT 0x8
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D2VGA_CONTROL__D2VGA_ROTATE_MASK 0x3000000
+#define D2VGA_CONTROL__D2VGA_ROTATE__SHIFT 0x18
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE_MASK 0x1
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE__SHIFT 0x0
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT_MASK 0x100
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT__SHIFT 0x8
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D3VGA_CONTROL__D3VGA_ROTATE_MASK 0x3000000
+#define D3VGA_CONTROL__D3VGA_ROTATE__SHIFT 0x18
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE_MASK 0x1
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE__SHIFT 0x0
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT_MASK 0x100
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT__SHIFT 0x8
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D4VGA_CONTROL__D4VGA_ROTATE_MASK 0x3000000
+#define D4VGA_CONTROL__D4VGA_ROTATE__SHIFT 0x18
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE_MASK 0x1
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE__SHIFT 0x0
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT_MASK 0x100
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT__SHIFT 0x8
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D5VGA_CONTROL__D5VGA_ROTATE_MASK 0x3000000
+#define D5VGA_CONTROL__D5VGA_ROTATE__SHIFT 0x18
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE_MASK 0x1
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE__SHIFT 0x0
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT_MASK 0x100
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT__SHIFT 0x8
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D6VGA_CONTROL__D6VGA_ROTATE_MASK 0x3000000
+#define D6VGA_CONTROL__D6VGA_ROTATE__SHIFT 0x18
+#define VGA_HW_DEBUG__VGA_HW_DEBUG_MASK 0xffffffff
+#define VGA_HW_DEBUG__VGA_HW_DEBUG__SHIFT 0x0
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS_MASK 0x1
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS__SHIFT 0x0
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS_MASK 0x2
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS__SHIFT 0x1
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS_MASK 0x4
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS__SHIFT 0x2
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS_MASK 0x8
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS__SHIFT 0x3
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK_MASK 0x1
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK__SHIFT 0x0
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK_MASK 0x100
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK__SHIFT 0x8
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK_MASK 0x10000
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK__SHIFT 0x10
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK_MASK 0x1000000
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK__SHIFT 0x18
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR_MASK 0x1
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR__SHIFT 0x0
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR_MASK 0x100
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR__SHIFT 0x8
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR_MASK 0x10000
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR__SHIFT 0x10
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR_MASK 0x1000000
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR__SHIFT 0x18
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS_MASK 0x1
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS__SHIFT 0x0
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS_MASK 0x2
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS__SHIFT 0x1
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS_MASK 0x4
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS__SHIFT 0x2
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS_MASK 0x8
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS__SHIFT 0x3
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT_MASK 0x3
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT__SHIFT 0x0
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT_MASK 0x18
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT__SHIFT 0x3
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION_MASK 0xe0
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION__SHIFT 0x5
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT_MASK 0x300
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT__SHIFT 0x8
+#define VGA_MAIN_CONTROL__VGA_MC_WRITE_CLEAN_WAIT_DELAY_MASK 0xf000
+#define VGA_MAIN_CONTROL__VGA_MC_WRITE_CLEAN_WAIT_DELAY__SHIFT 0xc
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT_MASK 0x30000
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT__SHIFT 0x10
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT_MASK 0x3000000
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT__SHIFT 0x18
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT_MASK 0x4000000
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT__SHIFT 0x1a
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE_MASK 0x8000000
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE__SHIFT 0x1b
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE_MASK 0x10000000
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE__SHIFT 0x1c
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE_MASK 0x20000000
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE__SHIFT 0x1d
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT_MASK 0x80000000
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT__SHIFT 0x1f
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE_MASK 0x1
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE__SHIFT 0x0
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START_MASK 0x100
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START__SHIFT 0x8
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE_MASK 0x10000
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE__SHIFT 0x10
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT_MASK 0x1000000
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT__SHIFT 0x18
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX_MASK 0xff
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX__SHIFT 0x0
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA_MASK 0xffffffff
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA__SHIFT 0x0
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR_MASK 0x3ff
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR__SHIFT 0x0
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR_MASK 0x3ff0000
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR__SHIFT 0x10
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR_MASK 0x3ff
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR__SHIFT 0x0
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR_MASK 0x3ff0000
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR__SHIFT 0x10
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX_MASK 0xff
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX__SHIFT 0x0
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA_MASK 0xffffffff
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA__SHIFT 0x0
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffff
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x0
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL_MASK 0x3
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT 0x0
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL_MASK 0x3f00
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT 0x8
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT_MASK 0x3f0000
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT__SHIFT 0x10
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR_MASK 0xf000000
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR__SHIFT 0x18
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON_MASK 0x10000000
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON__SHIFT 0x1c
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB_MASK 0x1
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB__SHIFT 0x0
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN_MASK 0x2
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN__SHIFT 0x1
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN_MASK 0x4
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN__SHIFT 0x2
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST_MASK 0x3ff0
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST__SHIFT 0x4
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK_MASK 0x700000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK__SHIFT 0x14
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE_MASK 0x10000000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE__SHIFT 0x1c
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0xffff
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x0
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x10
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0xffff
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x0
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x10
+#define DPG_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x7
+#define DPG_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0
+#define DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK_MASK 0x700
+#define DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT 0x8
+#define DPG_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x70000
+#define DPG_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x10
+#define DPG_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT_MASK 0x1000000
+#define DPG_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT__SHIFT 0x18
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0xffff
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x0
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x10
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x1
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x0
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x10
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x4
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x100
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x8
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x3000
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0xc
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x10
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x1
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x0
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x10
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x4
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x20
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x5
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x40
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x6
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x80
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x7
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x100
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x8
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x200
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x9
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x400
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0xa
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x800
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0xb
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x10
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x1
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x0
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x10
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x4
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x100
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x8
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x200
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x9
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x400
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0xa
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff8000
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0xf
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x1
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x0
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x10
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x4
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x20
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x5
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x40
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x6
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x80
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x7
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x100
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x8
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x200
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x9
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x400
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0xa
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x800
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0xb
+#define DPG_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER_MASK 0x7
+#define DPG_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER__SHIFT 0x0
+#define DPG_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER_MASK 0x70
+#define DPG_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER__SHIFT 0x4
+#define DPG_HW_DEBUG_A__DPG_HW_DEBUG_A_MASK 0xffffffff
+#define DPG_HW_DEBUG_A__DPG_HW_DEBUG_A__SHIFT 0x0
+#define DPG_HW_DEBUG_B__DPG_HW_DEBUG_B_MASK 0xffffffff
+#define DPG_HW_DEBUG_B__DPG_HW_DEBUG_B__SHIFT 0x0
+#define DPG_HW_DEBUG_11__DPG_HW_DEBUG_11_MASK 0x1
+#define DPG_HW_DEBUG_11__DPG_HW_DEBUG_11__SHIFT 0x0
+#define DPG_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK_MASK 0x1
+#define DPG_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK__SHIFT 0x0
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED_MASK 0x1
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED__SHIFT 0x0
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED_MASK 0x2
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED__SHIFT 0x1
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED_CLR_MASK 0x10
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED_CLR__SHIFT 0x4
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED_CLR_MASK 0x20
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED_CLR__SHIFT 0x5
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX_MASK 0xff
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA__SHIFT 0x0
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0xffff
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x0
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x10
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0xffff
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x0
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x10
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0xffff
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x0
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x10
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0xffff
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x0
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x10
+#define DPGV0_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x3
+#define DPGV0_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0
+#define DPGV0_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK_MASK 0x300
+#define DPGV0_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT 0x8
+#define DPGV0_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x30000
+#define DPGV0_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x10
+#define DPGV0_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT_MASK 0x1000000
+#define DPGV0_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT__SHIFT 0x18
+#define DPGV1_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x3
+#define DPGV1_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0
+#define DPGV1_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK_MASK 0x300
+#define DPGV1_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT 0x8
+#define DPGV1_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x30000
+#define DPGV1_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x10
+#define DPGV1_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT_MASK 0x1000000
+#define DPGV1_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT__SHIFT 0x18
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0xffff
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x0
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0xffff
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x0
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x1
+#define DPGV0_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x0
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x10
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x4
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x100
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x8
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x3000
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0xc
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x1
+#define DPGV1_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x0
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x10
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x4
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x100
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x8
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x3000
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0xc
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x1
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x0
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x4
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x20
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x5
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x40
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x6
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x80
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x7
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x100
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x8
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x200
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x9
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x400
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0xa
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x800
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0xb
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x1
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x0
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x10
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x4
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x20
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x5
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x40
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x6
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x80
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x7
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x100
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x8
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x200
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x9
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x400
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0xa
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x800
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0xb
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x1
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x0
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x10
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x4
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x100
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x8
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x200
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x9
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x400
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0xa
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x1
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x0
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x10
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x4
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x100
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x8
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x200
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x9
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x400
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0xa
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x1
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x0
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x4
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x20
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x5
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x40
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x6
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x80
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x7
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x100
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x8
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x200
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x9
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x400
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0xa
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x800
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0xb
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x1
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x0
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x10
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x4
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x20
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x5
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x40
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x6
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x80
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x7
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x100
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x8
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x200
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x9
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x400
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0xa
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x800
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0xb
+#define DPGV0_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER_MASK 0x7
+#define DPGV0_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER__SHIFT 0x0
+#define DPGV0_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER_MASK 0x70
+#define DPGV0_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER__SHIFT 0x4
+#define DPGV1_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER_MASK 0x7
+#define DPGV1_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER__SHIFT 0x0
+#define DPGV1_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER_MASK 0x70
+#define DPGV1_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER__SHIFT 0x4
+#define DPGV0_HW_DEBUG_A__DPG_HW_DEBUG_A_MASK 0xffffffff
+#define DPGV0_HW_DEBUG_A__DPG_HW_DEBUG_A__SHIFT 0x0
+#define DPGV1_HW_DEBUG_A__DPG_HW_DEBUG_A_MASK 0xffffffff
+#define DPGV1_HW_DEBUG_A__DPG_HW_DEBUG_A__SHIFT 0x0
+#define DPGV0_HW_DEBUG_B__DPG_HW_DEBUG_B_MASK 0xffffffff
+#define DPGV0_HW_DEBUG_B__DPG_HW_DEBUG_B__SHIFT 0x0
+#define DPGV1_HW_DEBUG_B__DPG_HW_DEBUG_B_MASK 0xffffffff
+#define DPGV1_HW_DEBUG_B__DPG_HW_DEBUG_B__SHIFT 0x0
+#define DPGV0_HW_DEBUG_11__DPG_HW_DEBUG_11_MASK 0x1
+#define DPGV0_HW_DEBUG_11__DPG_HW_DEBUG_11__SHIFT 0x0
+#define DPGV1_HW_DEBUG_11__DPG_HW_DEBUG_11_MASK 0x1
+#define DPGV1_HW_DEBUG_11__DPG_HW_DEBUG_11__SHIFT 0x0
+#define DPGV0_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK_MASK 0x1
+#define DPGV0_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK__SHIFT 0x0
+#define DPGV1_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK_MASK 0x1
+#define DPGV1_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK__SHIFT 0x0
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX_MASK 0xff
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DPGV_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DPGV_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA__SHIFT 0x0
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x1ffff
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0xf
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0xf0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x200
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x400
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x1
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0xff00
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0xff0000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x7f
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffff
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffff
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x7
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x70
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x3f
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0xf
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0xf0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x200
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x400
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x1
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0xff
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0xff00
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0xff0000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x7f
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x7
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x10
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x7
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x10
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_DEBUG__DISABLE_FORMAT_COMPARISON_MASK 0x3f
+#define AZALIA_F0_CODEC_DEBUG__DISABLE_FORMAT_COMPARISON__SHIFT 0x0
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG_MASK 0xffffffc0
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG__SHIFT 0x6
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED_MASK 0x1
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x6
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED_MASK 0xf8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED__SHIFT 0x3
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED_MASK 0xf00
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED__SHIFT 0x8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED_MASK 0xf000
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED__SHIFT 0xc
+#define MINOR_VERSION__MINOR_VERSION_MASK 0xff
+#define MINOR_VERSION__MINOR_VERSION__SHIFT 0x0
+#define MAJOR_VERSION__MAJOR_VERSION_MASK 0xff
+#define MAJOR_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define GLOBAL_CONTROL__CONTROLLER_RESET_MASK 0x1
+#define GLOBAL_CONTROL__CONTROLLER_RESET__SHIFT 0x0
+#define GLOBAL_CONTROL__FLUSH_CONTROL_MASK 0x2
+#define GLOBAL_CONTROL__FLUSH_CONTROL__SHIFT 0x1
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE_MASK 0x100
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE__SHIFT 0x8
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG_MASK 0x1
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG__SHIFT 0x0
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS_MASK 0x1
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS__SHIFT 0x0
+#define GLOBAL_STATUS__FLUSH_STATUS_MASK 0x2
+#define GLOBAL_STATUS__FLUSH_STATUS__SHIFT 0x1
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xffff
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x0
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xffff
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE_MASK 0x1
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE_MASK 0x2
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE__SHIFT 0x1
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE_MASK 0x4
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE__SHIFT 0x2
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE_MASK 0x8
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE__SHIFT 0x3
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE_MASK 0x10
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE__SHIFT 0x4
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE_MASK 0x20
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE__SHIFT 0x5
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE_MASK 0x40
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE__SHIFT 0x6
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE_MASK 0x80
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE__SHIFT 0x7
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE_MASK 0x100
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE__SHIFT 0x8
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE_MASK 0x200
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE__SHIFT 0x9
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE_MASK 0x400
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE__SHIFT 0xa
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE_MASK 0x800
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE__SHIFT 0xb
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE_MASK 0x1000
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE__SHIFT 0xc
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE_MASK 0x2000
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE__SHIFT 0xd
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE_MASK 0x4000
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE__SHIFT 0xe
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE_MASK 0x8000
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE__SHIFT 0xf
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE_MASK 0x40000000
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE__SHIFT 0x1e
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE_MASK 0x80000000
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE__SHIFT 0x1f
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS_MASK 0x1
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS__SHIFT 0x0
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS_MASK 0x2
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS__SHIFT 0x1
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS_MASK 0x4
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS__SHIFT 0x2
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS_MASK 0x8
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS__SHIFT 0x3
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS_MASK 0x10
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS__SHIFT 0x4
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS_MASK 0x20
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS__SHIFT 0x5
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS_MASK 0x40
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS__SHIFT 0x6
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS_MASK 0x80
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS__SHIFT 0x7
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS_MASK 0x100
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS__SHIFT 0x8
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS_MASK 0x200
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS__SHIFT 0x9
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS_MASK 0x400
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS__SHIFT 0xa
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS_MASK 0x800
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS__SHIFT 0xb
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS_MASK 0x1000
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS__SHIFT 0xc
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS_MASK 0x2000
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS__SHIFT 0xd
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS_MASK 0x4000
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS__SHIFT 0xe
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS_MASK 0x8000
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS__SHIFT 0xf
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS_MASK 0x40000000
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS__SHIFT 0x1e
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS_MASK 0x80000000
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS__SHIFT 0x1f
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER_MASK 0xffffffff
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION_MASK 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION_MASK 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION__SHIFT 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION_MASK 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION__SHIFT 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION_MASK 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION__SHIFT 0x3
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION_MASK 0x10
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION__SHIFT 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION_MASK 0x20
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION__SHIFT 0x5
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION_MASK 0x40
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION__SHIFT 0x6
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION_MASK 0x80
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION__SHIFT 0x7
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION_MASK 0x100
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION__SHIFT 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION_MASK 0x200
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION__SHIFT 0x9
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION_MASK 0x400
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION__SHIFT 0xa
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION_MASK 0x800
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION__SHIFT 0xb
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION_MASK 0x1000
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION__SHIFT 0xc
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION_MASK 0x2000
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION__SHIFT 0xd
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION_MASK 0x4000
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION__SHIFT 0xe
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION_MASK 0x8000
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION__SHIFT 0xf
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x7f
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0xff
+#define CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define CORB_READ_POINTER__CORB_READ_POINTER_MASK 0xff
+#define CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000
+#define CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x1
+#define CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x2
+#define CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x1
+#define CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define CORB_SIZE__CORB_SIZE_MASK 0x3
+#define CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0xf0
+#define CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x7f
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0xff
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0xff
+#define RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x1
+#define RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x2
+#define RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x4
+#define RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x1
+#define RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x4
+#define RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define RIRB_SIZE__RIRB_SIZE_MASK 0x3
+#define RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0xf0
+#define RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0xfffffff
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xf0000000
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0xffff
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xffffffff
+#define IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x1
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x2
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x1
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x7e
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xffffffff
+#define WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RESET_MASK 0x1
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RESET__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RUN_MASK 0x2
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RUN__SHIFT 0x1
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__INTERRUPT_ON_COMPLETION_ENABLE_MASK 0x4
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__INTERRUPT_ON_COMPLETION_ENABLE__SHIFT 0x2
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR_INTERRUPT_ENABLE_MASK 0x8
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR_INTERRUPT_ENABLE__SHIFT 0x3
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR_INTERRUPT_ENABLE_MASK 0x10
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR_INTERRUPT_ENABLE__SHIFT 0x4
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STRIPE_CONTROL_MASK 0x30000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STRIPE_CONTROL__SHIFT 0x10
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__TRAFFIC_PRIORITY_MASK 0x40000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__TRAFFIC_PRIORITY__SHIFT 0x12
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_NUMBER_MASK 0xf00000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_NUMBER__SHIFT 0x14
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__BUFFER_COMPLETION_INTERRUPT_STATUS_MASK 0x4000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__BUFFER_COMPLETION_INTERRUPT_STATUS__SHIFT 0x1a
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR_MASK 0x8000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR__SHIFT 0x1b
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR_MASK 0x10000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR__SHIFT 0x1c
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_READY_MASK 0x20000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_READY__SHIFT 0x1d
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER__LINK_POSITION_IN_BUFFER_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER__LINK_POSITION_IN_BUFFER__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH__CYCLIC_BUFFER_LENGTH_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH__CYCLIC_BUFFER_LENGTH__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX__LAST_VALID_INDEX_MASK 0xff
+#define OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX__LAST_VALID_INDEX__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE__FIFO_SIZE_MASK 0xffff
+#define OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE__FIFO_SIZE__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS_UNIMPLEMENTED_BITS_MASK 0x7f
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS__LINK_POSITION_IN_BUFFER_ALIAS_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS__LINK_POSITION_IN_BUFFER_ALIAS__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x1ffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x8000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x7f
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x80
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x3
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x700000
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0xff
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x70
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x40
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0xf
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0xf
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x3f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0xc0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x7f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x100
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x200
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0xfc00
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL_MASK 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x78
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x78
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0xffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0xffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x0
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x3c
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x78
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x80
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x3f
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x40
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x10
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x10
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x60
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x80
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0xff0000
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x1
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x10
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0xffff
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xffff0000
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x300
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL_MASK 0x30
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL__SHIFT 0x4
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xffffffff
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x3
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0xc
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x30
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0xc0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x10000
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x20000
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x3
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0xc
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x30
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0xc0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x1
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x10
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x1e0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x1
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x10
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER_MASK 0xffffffff
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER__SHIFT 0x0
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE_MASK 0x1
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE__SHIFT 0x0
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x6
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xffff0000
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x10
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0xff
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x0
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x100
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x8
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL_MASK 0xff0000
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL__SHIFT 0x10
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xffff0000
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x10
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG_MASK 0xffffffff
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE_MASK 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS_MASK 0x4
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS__SHIFT 0x2
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE_MASK 0x18
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE__SHIFT 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS_MASK 0x20
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS__SHIFT 0x5
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE_MASK 0xc0
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE__SHIFT 0x6
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS_MASK 0x100
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS__SHIFT 0x8
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE_MASK 0x600
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE__SHIFT 0x9
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS_MASK 0x800
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS__SHIFT 0xb
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE_MASK 0x3000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE__SHIFT 0xc
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS_MASK 0x4000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS__SHIFT 0xe
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE_MASK 0x18000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE__SHIFT 0xf
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS_MASK 0x20000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS__SHIFT 0x11
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE_MASK 0xc0000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE__SHIFT 0x12
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS_MASK 0x100000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS__SHIFT 0x14
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL_MASK 0x30000000
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL__SHIFT 0x1c
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE_MASK 0x3
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE__SHIFT 0x0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE_MASK 0xc
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE__SHIFT 0x2
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE_MASK 0x30
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE__SHIFT 0x4
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE_MASK 0xc0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE__SHIFT 0x6
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE_MASK 0x300
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE__SHIFT 0x8
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE_MASK 0xc00
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE__SHIFT 0xa
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE_MASK 0x3000
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE__SHIFT 0xc
+#define DCI_PG_DEBUG_CONFIG__DCI_PG_DBG_EN_MASK 0x1
+#define DCI_PG_DEBUG_CONFIG__DCI_PG_DBG_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN_MASK 0x1
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x1
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN_MASK 0x1
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x1
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_EN_MASK 0x1
+#define AZALIA_CRC0_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL_MASK 0x1000
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE_MASK 0x1
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC0_RESULT__CRC_RESULT_MASK 0xffffffff
+#define AZALIA_CRC0_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_EN_MASK 0x1
+#define AZALIA_CRC1_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL_MASK 0x1000
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE_MASK 0x1
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC1_RESULT__CRC_RESULT_MASK 0xffffffff
+#define AZALIA_CRC1_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX_MASK 0xff
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX__SHIFT 0x0
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA_MASK 0xffffffff
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA__SHIFT 0x0
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0xff
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x100
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xffffffff
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x7f
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x7f00
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0xff0000
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x1
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xffffffff
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xffffffff
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xffffffff
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA_MASK 0xffffffff
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA__SHIFT 0x0
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x3fff
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xffffffff
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PIN_DEBUG__AZALIA_DEBUG__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x3
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x700000
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0xff
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x70
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG__PRESENTATION_TIME_OFFSET_DEBUG_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG__PRESENTATION_TIME_OFFSET_DEBUG__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x40
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x7f
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x20000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0xfc0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x3000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0xffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xffff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x3ffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x3
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x3c
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x3
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x78
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x80
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x3f
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x40
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x10
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x10
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x60
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x80
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x3
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x1
+#define AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x1
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x10
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x100
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x1
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x10
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x100
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x1
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x10
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x100
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x3fff
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PIN_DEBUG__AZALIA_INPUT_DEBUG_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PIN_DEBUG__AZALIA_INPUT_DEBUG__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0xff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x3ffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x6
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0xff00
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0xff0000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x1ffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x3f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0xc0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0xff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0xff00
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0xff0000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define BLND_CONTROL__BLND_GLOBAL_GAIN_MASK 0xff
+#define BLND_CONTROL__BLND_GLOBAL_GAIN__SHIFT 0x0
+#define BLND_CONTROL__BLND_MODE_MASK 0x300
+#define BLND_CONTROL__BLND_MODE__SHIFT 0x8
+#define BLND_CONTROL__BLND_STEREO_TYPE_MASK 0xc00
+#define BLND_CONTROL__BLND_STEREO_TYPE__SHIFT 0xa
+#define BLND_CONTROL__BLND_STEREO_POLARITY_MASK 0x1000
+#define BLND_CONTROL__BLND_STEREO_POLARITY__SHIFT 0xc
+#define BLND_CONTROL__BLND_FEEDTHROUGH_EN_MASK 0x2000
+#define BLND_CONTROL__BLND_FEEDTHROUGH_EN__SHIFT 0xd
+#define BLND_CONTROL__BLND_ALPHA_MODE_MASK 0x30000
+#define BLND_CONTROL__BLND_ALPHA_MODE__SHIFT 0x10
+#define BLND_CONTROL__BLND_ACTIVE_OVERLAP_ONLY_MASK 0x40000
+#define BLND_CONTROL__BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x12
+#define BLND_CONTROL__BLND_MULTIPLIED_MODE_MASK 0x100000
+#define BLND_CONTROL__BLND_MULTIPLIED_MODE__SHIFT 0x14
+#define BLND_CONTROL__BLND_GLOBAL_ALPHA_MASK 0xff000000
+#define BLND_CONTROL__BLND_GLOBAL_ALPHA__SHIFT 0x18
+#define BLND_SM_CONTROL2__SM_MODE_MASK 0x7
+#define BLND_SM_CONTROL2__SM_MODE__SHIFT 0x0
+#define BLND_SM_CONTROL2__SM_FRAME_ALTERNATE_MASK 0x10
+#define BLND_SM_CONTROL2__SM_FRAME_ALTERNATE__SHIFT 0x4
+#define BLND_SM_CONTROL2__SM_FIELD_ALTERNATE_MASK 0x20
+#define BLND_SM_CONTROL2__SM_FIELD_ALTERNATE__SHIFT 0x5
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL_MASK 0x300
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL_MASK 0x30000
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define BLND_SM_CONTROL2__SM_CURRENT_FRAME_POL_MASK 0x1000000
+#define BLND_SM_CONTROL2__SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define BLND_CONTROL2__PTI_ENABLE_MASK 0x1
+#define BLND_CONTROL2__PTI_ENABLE__SHIFT 0x0
+#define BLND_CONTROL2__PTI_NEW_PIXEL_GAP_MASK 0x30
+#define BLND_CONTROL2__PTI_NEW_PIXEL_GAP__SHIFT 0x4
+#define BLND_CONTROL2__BLND_NEW_PIXEL_MODE_MASK 0x40
+#define BLND_CONTROL2__BLND_NEW_PIXEL_MODE__SHIFT 0x6
+#define BLND_CONTROL2__BLND_SUPERAA_DEGAMMA_EN_MASK 0x80
+#define BLND_CONTROL2__BLND_SUPERAA_DEGAMMA_EN__SHIFT 0x7
+#define BLND_CONTROL2__BLND_SUPERAA_REGAMMA_EN_MASK 0x100
+#define BLND_CONTROL2__BLND_SUPERAA_REGAMMA_EN__SHIFT 0x8
+#define BLND_UPDATE__BLND_UPDATE_PENDING_MASK 0x1
+#define BLND_UPDATE__BLND_UPDATE_PENDING__SHIFT 0x0
+#define BLND_UPDATE__BLND_UPDATE_TAKEN_MASK 0x100
+#define BLND_UPDATE__BLND_UPDATE_TAKEN__SHIFT 0x8
+#define BLND_UPDATE__BLND_UPDATE_LOCK_MASK 0x10000
+#define BLND_UPDATE__BLND_UPDATE_LOCK__SHIFT 0x10
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED_MASK 0x1
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED__SHIFT 0x0
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK_MASK 0x100
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK__SHIFT 0x8
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK_MASK 0x1000
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK__SHIFT 0xc
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX_MASK 0x30000
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX__SHIFT 0x10
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK_MASK 0x1
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK__SHIFT 0x0
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_MASK 0x2
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK__SHIFT 0x1
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK_MASK 0x10000
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK__SHIFT 0x10
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK_MASK 0x1000000
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK__SHIFT 0x18
+#define BLND_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK_MASK 0x10000000
+#define BLND_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK__SHIFT 0x1c
+#define BLND_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK_MASK 0x20000000
+#define BLND_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK__SHIFT 0x1d
+#define BLND_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE_MASK 0x80000000
+#define BLND_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE__SHIFT 0x1f
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING_MASK 0x1
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING__SHIFT 0x0
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING_MASK 0x2
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING__SHIFT 0x1
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING_MASK 0x4
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING__SHIFT 0x2
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING_MASK 0x8
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING__SHIFT 0x3
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING_MASK 0x40
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING__SHIFT 0x6
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING_MASK 0x80
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING__SHIFT 0x7
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING_MASK 0x100
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING__SHIFT 0x8
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING_MASK 0x200
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING__SHIFT 0x9
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING_MASK 0x400
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING__SHIFT 0xa
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING_MASK 0x800
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING__SHIFT 0xb
+#define BLND_DEBUG__BLND_CNV_MUX_SELECT_MASK 0x1
+#define BLND_DEBUG__BLND_CNV_MUX_SELECT__SHIFT 0x0
+#define BLND_DEBUG__BLND_DEBUG_MASK 0xfffffffe
+#define BLND_DEBUG__BLND_DEBUG__SHIFT 0x1
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX_MASK 0xff
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX__SHIFT 0x0
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define BLND_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA_MASK 0xffffffff
+#define BLND_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA__SHIFT 0x0
+#define WB_ENABLE__WB_ENABLE_MASK 0x1
+#define WB_ENABLE__WB_ENABLE__SHIFT 0x0
+#define WB_EC_CONFIG__DISPCLK_R_WB_GATE_DIS_MASK 0x1
+#define WB_EC_CONFIG__DISPCLK_R_WB_GATE_DIS__SHIFT 0x0
+#define WB_EC_CONFIG__DISPCLK_G_WB_GATE_DIS_MASK 0x2
+#define WB_EC_CONFIG__DISPCLK_G_WB_GATE_DIS__SHIFT 0x1
+#define WB_EC_CONFIG__DISPCLK_G_WBSCL_GATE_DIS_MASK 0x4
+#define WB_EC_CONFIG__DISPCLK_G_WBSCL_GATE_DIS__SHIFT 0x2
+#define WB_EC_CONFIG__WB_TEST_CLK_SEL_MASK 0x78
+#define WB_EC_CONFIG__WB_TEST_CLK_SEL__SHIFT 0x3
+#define WB_EC_CONFIG__WB_LB_LS_DIS_MASK 0x80
+#define WB_EC_CONFIG__WB_LB_LS_DIS__SHIFT 0x7
+#define WB_EC_CONFIG__WB_LB_SD_DIS_MASK 0x100
+#define WB_EC_CONFIG__WB_LB_SD_DIS__SHIFT 0x8
+#define WB_EC_CONFIG__WB_LUT_LS_DIS_MASK 0x200
+#define WB_EC_CONFIG__WB_LUT_LS_DIS__SHIFT 0x9
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_MODE_SEL_MASK 0x3000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_MODE_SEL__SHIFT 0xc
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_DIS_MASK 0x4000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_DIS__SHIFT 0xe
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_FORCE_MASK 0x18000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_FORCE__SHIFT 0xf
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_SM_MASK 0x60000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_SM__SHIFT 0x11
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_BG_MASK 0x180000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_BG__SHIFT 0x13
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_MASK 0x600000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE__SHIFT 0x15
+#define WB_EC_CONFIG__WB_RAM_PW_SAVE_MODE_MASK 0x800000
+#define WB_EC_CONFIG__WB_RAM_PW_SAVE_MODE__SHIFT 0x17
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_SM_MASK 0x3000000
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_SM__SHIFT 0x18
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_BG_MASK 0xc000000
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_BG__SHIFT 0x1a
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_MASK 0x30000000
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE__SHIFT 0x1c
+#define WB_EC_CONFIG__LUT_MEM_PWR_STATE_MASK 0xc0000000
+#define WB_EC_CONFIG__LUT_MEM_PWR_STATE__SHIFT 0x1e
+#define CNV_MODE__CNV_FRAME_CAPTURE_RATE_MASK 0x300
+#define CNV_MODE__CNV_FRAME_CAPTURE_RATE__SHIFT 0x8
+#define CNV_MODE__CNV_WINDOW_CROP_EN_MASK 0x1000
+#define CNV_MODE__CNV_WINDOW_CROP_EN__SHIFT 0xc
+#define CNV_MODE__CNV_STEREO_TYPE_MASK 0x6000
+#define CNV_MODE__CNV_STEREO_TYPE__SHIFT 0xd
+#define CNV_MODE__CNV_INTERLACED_MODE_MASK 0x8000
+#define CNV_MODE__CNV_INTERLACED_MODE__SHIFT 0xf
+#define CNV_MODE__CNV_EYE_SELECTION_MASK 0x30000
+#define CNV_MODE__CNV_EYE_SELECTION__SHIFT 0x10
+#define CNV_MODE__CNV_STEREO_POLARITY_MASK 0x40000
+#define CNV_MODE__CNV_STEREO_POLARITY__SHIFT 0x12
+#define CNV_MODE__CNV_INTERLACED_FIELD_ORDER_MASK 0x80000
+#define CNV_MODE__CNV_INTERLACED_FIELD_ORDER__SHIFT 0x13
+#define CNV_MODE__CNV_STEREO_SPLIT_MASK 0x100000
+#define CNV_MODE__CNV_STEREO_SPLIT__SHIFT 0x14
+#define CNV_MODE__CNV_NEW_CONTENT_MASK 0x1000000
+#define CNV_MODE__CNV_NEW_CONTENT__SHIFT 0x18
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN_MASK 0x80000000
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN__SHIFT 0x1f
+#define CNV_WINDOW_START__CNV_WINDOW_START_X_MASK 0xfff
+#define CNV_WINDOW_START__CNV_WINDOW_START_X__SHIFT 0x0
+#define CNV_WINDOW_START__CNV_WINDOW_START_Y_MASK 0xfff0000
+#define CNV_WINDOW_START__CNV_WINDOW_START_Y__SHIFT 0x10
+#define CNV_WINDOW_SIZE__CNV_WINDOW_WIDTH_MASK 0xfff
+#define CNV_WINDOW_SIZE__CNV_WINDOW_WIDTH__SHIFT 0x0
+#define CNV_WINDOW_SIZE__CNV_WINDOW_HEIGHT_MASK 0xfff0000
+#define CNV_WINDOW_SIZE__CNV_WINDOW_HEIGHT__SHIFT 0x10
+#define CNV_UPDATE__CNV_UPDATE_PENDING_MASK 0x1
+#define CNV_UPDATE__CNV_UPDATE_PENDING__SHIFT 0x0
+#define CNV_UPDATE__CNV_UPDATE_TAKEN_MASK 0x100
+#define CNV_UPDATE__CNV_UPDATE_TAKEN__SHIFT 0x8
+#define CNV_UPDATE__CNV_UPDATE_LOCK_MASK 0x10000
+#define CNV_UPDATE__CNV_UPDATE_LOCK__SHIFT 0x10
+#define CNV_SOURCE_SIZE__CNV_SOURCE_WIDTH_MASK 0x7fff
+#define CNV_SOURCE_SIZE__CNV_SOURCE_WIDTH__SHIFT 0x0
+#define CNV_SOURCE_SIZE__CNV_SOURCE_HEIGHT_MASK 0x7fff0000
+#define CNV_SOURCE_SIZE__CNV_SOURCE_HEIGHT__SHIFT 0x10
+#define CNV_CSC_CONTROL__CNV_CSC_BYPASS_MASK 0x1
+#define CNV_CSC_CONTROL__CNV_CSC_BYPASS__SHIFT 0x0
+#define CNV_CSC_C11_C12__CNV_CSC_C11_MASK 0x1fff
+#define CNV_CSC_C11_C12__CNV_CSC_C11__SHIFT 0x0
+#define CNV_CSC_C11_C12__CNV_CSC_C12_MASK 0x1fff0000
+#define CNV_CSC_C11_C12__CNV_CSC_C12__SHIFT 0x10
+#define CNV_CSC_C13_C14__CNV_CSC_C13_MASK 0x1fff
+#define CNV_CSC_C13_C14__CNV_CSC_C13__SHIFT 0x0
+#define CNV_CSC_C13_C14__CNV_CSC_C14_MASK 0x7fff0000
+#define CNV_CSC_C13_C14__CNV_CSC_C14__SHIFT 0x10
+#define CNV_CSC_C21_C22__CNV_CSC_C21_MASK 0x1fff
+#define CNV_CSC_C21_C22__CNV_CSC_C21__SHIFT 0x0
+#define CNV_CSC_C21_C22__CNV_CSC_C22_MASK 0x1fff0000
+#define CNV_CSC_C21_C22__CNV_CSC_C22__SHIFT 0x10
+#define CNV_CSC_C23_C24__CNV_CSC_C23_MASK 0x1fff
+#define CNV_CSC_C23_C24__CNV_CSC_C23__SHIFT 0x0
+#define CNV_CSC_C23_C24__CNV_CSC_C24_MASK 0x7fff0000
+#define CNV_CSC_C23_C24__CNV_CSC_C24__SHIFT 0x10
+#define CNV_CSC_C31_C32__CNV_CSC_C31_MASK 0x1fff
+#define CNV_CSC_C31_C32__CNV_CSC_C31__SHIFT 0x0
+#define CNV_CSC_C31_C32__CNV_CSC_C32_MASK 0x1fff0000
+#define CNV_CSC_C31_C32__CNV_CSC_C32__SHIFT 0x10
+#define CNV_CSC_C33_C34__CNV_CSC_C33_MASK 0x1fff
+#define CNV_CSC_C33_C34__CNV_CSC_C33__SHIFT 0x0
+#define CNV_CSC_C33_C34__CNV_CSC_C34_MASK 0x7fff0000
+#define CNV_CSC_C33_C34__CNV_CSC_C34__SHIFT 0x10
+#define CNV_CSC_ROUND_OFFSET_R__CNV_CSC_ROUND_OFFSET_R_MASK 0xffff
+#define CNV_CSC_ROUND_OFFSET_R__CNV_CSC_ROUND_OFFSET_R__SHIFT 0x0
+#define CNV_CSC_ROUND_OFFSET_G__CNV_CSC_ROUND_OFFSET_G_MASK 0xffff
+#define CNV_CSC_ROUND_OFFSET_G__CNV_CSC_ROUND_OFFSET_G__SHIFT 0x0
+#define CNV_CSC_ROUND_OFFSET_B__CNV_CSC_ROUND_OFFSET_B_MASK 0xffff
+#define CNV_CSC_ROUND_OFFSET_B__CNV_CSC_ROUND_OFFSET_B__SHIFT 0x0
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_UPPER_R_MASK 0xffff
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_UPPER_R__SHIFT 0x0
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_LOWER_R_MASK 0xffff0000
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_LOWER_R__SHIFT 0x10
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_UPPER_G_MASK 0xffff
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_UPPER_G__SHIFT 0x0
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_LOWER_G_MASK 0xffff0000
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_LOWER_G__SHIFT 0x10
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_UPPER_B_MASK 0xffff
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_UPPER_B__SHIFT 0x0
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_LOWER_B_MASK 0xffff0000
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_LOWER_B__SHIFT 0x10
+#define CNV_TEST_CNTL__CNV_TEST_CRC_EN_MASK 0x10
+#define CNV_TEST_CNTL__CNV_TEST_CRC_EN__SHIFT 0x4
+#define CNV_TEST_CNTL__CNV_TEST_CRC_CONT_EN_MASK 0x100
+#define CNV_TEST_CNTL__CNV_TEST_CRC_CONT_EN__SHIFT 0x8
+#define CNV_TEST_CNTL__CNV_TEST_CRC_DE_ONLY_MASK 0x10000
+#define CNV_TEST_CNTL__CNV_TEST_CRC_DE_ONLY__SHIFT 0x10
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_RED_MASK_MASK 0xfff0
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_RED_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_SIG_RED_MASK 0xffff0000
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_SIG_RED__SHIFT 0x10
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_GREEN_MASK_MASK 0xfff0
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_GREEN_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_SIG_GREEN_MASK 0xffff0000
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_SIG_GREEN__SHIFT 0x10
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_BLUE_MASK_MASK 0xfff0
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_BLUE_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_SIG_BLUE_MASK 0xffff0000
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_SIG_BLUE__SHIFT 0x10
+#define WB_DEBUG_CTRL__WB_DEBUG_EN_MASK 0x1
+#define WB_DEBUG_CTRL__WB_DEBUG_EN__SHIFT 0x0
+#define WB_DEBUG_CTRL__WB_DEBUG_SEL_MASK 0xc0
+#define WB_DEBUG_CTRL__WB_DEBUG_SEL__SHIFT 0x6
+#define WB_DBG_MODE__WB_DBG_MODE_EN_MASK 0x1
+#define WB_DBG_MODE__WB_DBG_MODE_EN__SHIFT 0x0
+#define WB_DBG_MODE__WB_DBG_DIN_FMT_MASK 0x2
+#define WB_DBG_MODE__WB_DBG_DIN_FMT__SHIFT 0x1
+#define WB_DBG_MODE__WB_DBG_36MODE_MASK 0x4
+#define WB_DBG_MODE__WB_DBG_36MODE__SHIFT 0x2
+#define WB_DBG_MODE__WB_DBG_CMAP_MASK 0x8
+#define WB_DBG_MODE__WB_DBG_CMAP__SHIFT 0x3
+#define WB_DBG_MODE__WB_DBG_PXLRATE_ERROR_MASK 0x100
+#define WB_DBG_MODE__WB_DBG_PXLRATE_ERROR__SHIFT 0x8
+#define WB_DBG_MODE__WB_DBG_SOURCE_WIDTH_MASK 0x7fff0000
+#define WB_DBG_MODE__WB_DBG_SOURCE_WIDTH__SHIFT 0x10
+#define WB_HW_DEBUG__WB_HW_DEBUG_MASK 0xffffffff
+#define WB_HW_DEBUG__WB_HW_DEBUG__SHIFT 0x0
+#define CNV_INPUT_SELECT__CNV_INPUT_SRC_SELECT_MASK 0x3
+#define CNV_INPUT_SELECT__CNV_INPUT_SRC_SELECT__SHIFT 0x0
+#define CNV_INPUT_SELECT__CNV_INPUT_PIPE_SELECT_MASK 0x1c
+#define CNV_INPUT_SELECT__CNV_INPUT_PIPE_SELECT__SHIFT 0x2
+#define WB_SOFT_RESET__WB_SOFT_RESET_MASK 0x1
+#define WB_SOFT_RESET__WB_SOFT_RESET__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL1__WIDTH_WARMUP_MASK 0x7fff
+#define WB_WARM_UP_MODE_CTL1__WIDTH_WARMUP__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL1__HEIGHT_WARMUP_MASK 0x7fff0000
+#define WB_WARM_UP_MODE_CTL1__HEIGHT_WARMUP__SHIFT 0x10
+#define WB_WARM_UP_MODE_CTL1__GMC_WARM_UP_ENABLE_MASK 0x80000000
+#define WB_WARM_UP_MODE_CTL1__GMC_WARM_UP_ENABLE__SHIFT 0x1f
+#define WB_WARM_UP_MODE_CTL2__DATA_VALUE_WARMUP_MASK 0xff
+#define WB_WARM_UP_MODE_CTL2__DATA_VALUE_WARMUP__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL2__MODE_WARMUP_MASK 0x100
+#define WB_WARM_UP_MODE_CTL2__MODE_WARMUP__SHIFT 0x8
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_INDEX_MASK 0xff
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CNV_TEST_DEBUG_DATA__CNV_TEST_DEBUG_DATA_MASK 0xffffffff
+#define CNV_TEST_DEBUG_DATA__CNV_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCFE_CLOCK_CONTROL__DISPCLK_R_DCFE_GATE_DISABLE_MASK 0x10
+#define DCFE_CLOCK_CONTROL__DISPCLK_R_DCFE_GATE_DISABLE__SHIFT 0x4
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_DCP_GATE_DISABLE_MASK 0x100
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_DCP_GATE_DISABLE__SHIFT 0x8
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_SCL_GATE_DISABLE_MASK 0x1000
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_SCL_GATE_DISABLE__SHIFT 0xc
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PSCL_GATE_DISABLE_MASK 0x8000
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PSCL_GATE_DISABLE__SHIFT 0xf
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PIPE_REQUEST_DIS_GATE_DISABLE_MASK 0x20000
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PIPE_REQUEST_DIS_GATE_DISABLE__SHIFT 0x11
+#define DCFE_CLOCK_CONTROL__DCFE_TEST_CLK_SEL_MASK 0x1f000000
+#define DCFE_CLOCK_CONTROL__DCFE_TEST_CLK_SEL__SHIFT 0x18
+#define DCFE_CLOCK_CONTROL__DCFE_CLOCK_ENABLE_MASK 0x80000000
+#define DCFE_CLOCK_CONTROL__DCFE_CLOCK_ENABLE__SHIFT 0x1f
+#define DCFE_SOFT_RESET__DCP_PIXPIPE_SOFT_RESET_MASK 0x1
+#define DCFE_SOFT_RESET__DCP_PIXPIPE_SOFT_RESET__SHIFT 0x0
+#define DCFE_SOFT_RESET__DCP_REQ_SOFT_RESET_MASK 0x2
+#define DCFE_SOFT_RESET__DCP_REQ_SOFT_RESET__SHIFT 0x1
+#define DCFE_SOFT_RESET__SCL_ALU_SOFT_RESET_MASK 0x4
+#define DCFE_SOFT_RESET__SCL_ALU_SOFT_RESET__SHIFT 0x2
+#define DCFE_SOFT_RESET__SCL_SOFT_RESET_MASK 0x8
+#define DCFE_SOFT_RESET__SCL_SOFT_RESET__SHIFT 0x3
+#define DCFE_SOFT_RESET__CRTC_SOFT_RESET_MASK 0x10
+#define DCFE_SOFT_RESET__CRTC_SOFT_RESET__SHIFT 0x4
+#define DCFE_SOFT_RESET__PSCL_SOFT_RESET_MASK 0x20
+#define DCFE_SOFT_RESET__PSCL_SOFT_RESET__SHIFT 0x5
+#define DCFE_DBG_CONFIG__DCFE_DBG_EN_MASK 0x1
+#define DCFE_DBG_CONFIG__DCFE_DBG_EN__SHIFT 0x0
+#define DCFE_DBG_CONFIG__DCFE_DBG_SEL_MASK 0xf0
+#define DCFE_DBG_CONFIG__DCFE_DBG_SEL__SHIFT 0x4
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_FORCE_MASK 0x3
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_DIS_MASK 0x4
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_FORCE_MASK 0x18
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_DIS_MASK 0x20
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_DIS__SHIFT 0x5
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_FORCE_MASK 0xc0
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_FORCE__SHIFT 0x6
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_DIS_MASK 0x100
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_DIS__SHIFT 0x8
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_FORCE_MASK 0x600
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_FORCE__SHIFT 0x9
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_DIS_MASK 0x800
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_DIS__SHIFT 0xb
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_FORCE_MASK 0x3000
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_FORCE__SHIFT 0xc
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_DIS_MASK 0x4000
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_DIS__SHIFT 0xe
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_FORCE_MASK 0x18000
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_FORCE__SHIFT 0xf
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_DIS_MASK 0x20000
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_DIS__SHIFT 0x11
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_FORCE_MASK 0xc0000
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_FORCE__SHIFT 0x12
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_DIS_MASK 0x100000
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_DIS__SHIFT 0x14
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_FORCE_MASK 0x600000
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_FORCE__SHIFT 0x15
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_DIS_MASK 0x800000
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_DIS__SHIFT 0x17
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_FORCE_MASK 0x3000000
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_FORCE__SHIFT 0x18
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_DIS_MASK 0x4000000
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_DIS__SHIFT 0x1a
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_FORCE_MASK 0x18000000
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_FORCE__SHIFT 0x1b
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_DIS_MASK 0x20000000
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_DIS__SHIFT 0x1d
+#define DCFE_MEM_PWR_CTRL2__DCP_LUT_MEM_PWR_MODE_SEL_MASK 0x3
+#define DCFE_MEM_PWR_CTRL2__DCP_LUT_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DCFE_MEM_PWR_CTRL2__DCP_REGAMMA_MEM_PWR_MODE_SEL_MASK 0xc
+#define DCFE_MEM_PWR_CTRL2__DCP_REGAMMA_MEM_PWR_MODE_SEL__SHIFT 0x2
+#define DCFE_MEM_PWR_CTRL2__SCL_COEFF_MEM_PWR_MODE_SEL_MASK 0x30
+#define DCFE_MEM_PWR_CTRL2__SCL_COEFF_MEM_PWR_MODE_SEL__SHIFT 0x4
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR_MEM_PWR_MODE_SEL_MASK 0xc0
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR_MEM_PWR_MODE_SEL__SHIFT 0x6
+#define DCFE_MEM_PWR_CTRL2__LB_ALPHA_MEM_PWR_MODE_SEL_MASK 0x300
+#define DCFE_MEM_PWR_CTRL2__LB_ALPHA_MEM_PWR_MODE_SEL__SHIFT 0x8
+#define DCFE_MEM_PWR_CTRL2__LB_MEM_PWR_MODE_SEL_MASK 0xc00
+#define DCFE_MEM_PWR_CTRL2__LB_MEM_PWR_MODE_SEL__SHIFT 0xa
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_MODE_SEL_MASK 0x3000
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_MODE_SEL__SHIFT 0xc
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_MODE_SEL_MASK 0xc000
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_MODE_SEL__SHIFT 0xe
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_FORCE_MASK 0x30000
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_FORCE__SHIFT 0x10
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_DIS_MASK 0x40000
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_DIS__SHIFT 0x12
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_FORCE_MASK 0x600000
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_FORCE__SHIFT 0x15
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_DIS_MASK 0x800000
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_DIS__SHIFT 0x17
+#define DCFE_MEM_PWR_STATUS__DCP_LUT_MEM_PWR_STATE_MASK 0x3
+#define DCFE_MEM_PWR_STATUS__DCP_LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DCFE_MEM_PWR_STATUS__DCP_REGAMMA_MEM_PWR_STATE_MASK 0xc
+#define DCFE_MEM_PWR_STATUS__DCP_REGAMMA_MEM_PWR_STATE__SHIFT 0x2
+#define DCFE_MEM_PWR_STATUS__SCL_COEFF_MEM_PWR_STATE_MASK 0x30
+#define DCFE_MEM_PWR_STATUS__SCL_COEFF_MEM_PWR_STATE__SHIFT 0x4
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR_MEM_PWR_STATE_MASK 0xc0
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR_MEM_PWR_STATE__SHIFT 0x6
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR2_MEM_PWR_STATE_MASK 0x300
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR2_MEM_PWR_STATE__SHIFT 0x8
+#define DCFE_MEM_PWR_STATUS__LB0_ALPHA_MEM_PWR_STATE_MASK 0xc00
+#define DCFE_MEM_PWR_STATUS__LB0_ALPHA_MEM_PWR_STATE__SHIFT 0xa
+#define DCFE_MEM_PWR_STATUS__LB1_ALPHA_MEM_PWR_STATE_MASK 0x3000
+#define DCFE_MEM_PWR_STATUS__LB1_ALPHA_MEM_PWR_STATE__SHIFT 0xc
+#define DCFE_MEM_PWR_STATUS__LB2_ALPHA_MEM_PWR_STATE_MASK 0xc000
+#define DCFE_MEM_PWR_STATUS__LB2_ALPHA_MEM_PWR_STATE__SHIFT 0xe
+#define DCFE_MEM_PWR_STATUS__LB0_MEM_PWR_STATE_MASK 0x30000
+#define DCFE_MEM_PWR_STATUS__LB0_MEM_PWR_STATE__SHIFT 0x10
+#define DCFE_MEM_PWR_STATUS__LB1_MEM_PWR_STATE_MASK 0xc0000
+#define DCFE_MEM_PWR_STATUS__LB1_MEM_PWR_STATE__SHIFT 0x12
+#define DCFE_MEM_PWR_STATUS__LB2_MEM_PWR_STATE_MASK 0x300000
+#define DCFE_MEM_PWR_STATUS__LB2_MEM_PWR_STATE__SHIFT 0x14
+#define DCFE_MEM_PWR_STATUS__BLND_MEM_PWR_STATE_MASK 0xc00000
+#define DCFE_MEM_PWR_STATUS__BLND_MEM_PWR_STATE__SHIFT 0x16
+#define DCFE_MISC__DCFE_DPG_ALLOW_SR_ECO_EN_MASK 0x1
+#define DCFE_MISC__DCFE_DPG_ALLOW_SR_ECO_EN__SHIFT 0x0
+#define DCFE_FLUSH__FLUSH_OCCURED_MASK 0x1
+#define DCFE_FLUSH__FLUSH_OCCURED__SHIFT 0x0
+#define DCFE_FLUSH__CLEAR_FLUSH_OCCURED_MASK 0x2
+#define DCFE_FLUSH__CLEAR_FLUSH_OCCURED__SHIFT 0x1
+#define DCFE_FLUSH__FLUSH_DEEP_MASK 0x4
+#define DCFE_FLUSH__FLUSH_DEEP__SHIFT 0x2
+#define DCFE_FLUSH__CLEAR_FLUSH_DEEP_MASK 0x8
+#define DCFE_FLUSH__CLEAR_FLUSH_DEEP__SHIFT 0x3
+#define DCFE_FLUSH__ALL_MC_REQ_RET_MASK 0x10
+#define DCFE_FLUSH__ALL_MC_REQ_RET__SHIFT 0x4
+#define DCFEV_CLOCK_CONTROL__DISPCLK_R_DCFEV_GATE_DISABLE_MASK 0x8
+#define DCFEV_CLOCK_CONTROL__DISPCLK_R_DCFEV_GATE_DISABLE__SHIFT 0x3
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_UNP_GATE_DISABLE_MASK 0x80
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_UNP_GATE_DISABLE__SHIFT 0x7
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_SCLV_GATE_DISABLE_MASK 0x200
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_SCLV_GATE_DISABLE__SHIFT 0x9
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_COL_MAN_GATE_DISABLE_MASK 0x800
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_COL_MAN_GATE_DISABLE__SHIFT 0xb
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_PSCLV_GATE_DISABLE_MASK 0x2000
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_PSCLV_GATE_DISABLE__SHIFT 0xd
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_CRTC_GATE_DISABLE_MASK 0x8000
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_CRTC_GATE_DISABLE__SHIFT 0xf
+#define DCFEV_CLOCK_CONTROL__DCFEV_TEST_CLK_SEL_MASK 0x1f000000
+#define DCFEV_CLOCK_CONTROL__DCFEV_TEST_CLK_SEL__SHIFT 0x18
+#define DCFEV_CLOCK_CONTROL__DCFEV_CLOCK_ENABLE_MASK 0x80000000
+#define DCFEV_CLOCK_CONTROL__DCFEV_CLOCK_ENABLE__SHIFT 0x1f
+#define DCFEV_SOFT_RESET__UNP_PIXPIPE_SOFT_RESET_MASK 0x1
+#define DCFEV_SOFT_RESET__UNP_PIXPIPE_SOFT_RESET__SHIFT 0x0
+#define DCFEV_SOFT_RESET__UNP_REQ_SOFT_RESET_MASK 0x2
+#define DCFEV_SOFT_RESET__UNP_REQ_SOFT_RESET__SHIFT 0x1
+#define DCFEV_SOFT_RESET__SCLV_ALU_SOFT_RESET_MASK 0x4
+#define DCFEV_SOFT_RESET__SCLV_ALU_SOFT_RESET__SHIFT 0x2
+#define DCFEV_SOFT_RESET__SCLV_SOFT_RESET_MASK 0x8
+#define DCFEV_SOFT_RESET__SCLV_SOFT_RESET__SHIFT 0x3
+#define DCFEV_SOFT_RESET__CRTC_SOFT_RESET_MASK 0x10
+#define DCFEV_SOFT_RESET__CRTC_SOFT_RESET__SHIFT 0x4
+#define DCFEV_SOFT_RESET__PSCLV_SOFT_RESET_MASK 0x20
+#define DCFEV_SOFT_RESET__PSCLV_SOFT_RESET__SHIFT 0x5
+#define DCFEV_SOFT_RESET__COL_MAN_SOFT_RESET_MASK 0x40
+#define DCFEV_SOFT_RESET__COL_MAN_SOFT_RESET__SHIFT 0x6
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SCLK_G_DMIFTRK_GATE_DIS_MASK 0x8
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SCLK_G_DMIFTRK_GATE_DIS__SHIFT 0x3
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVL_GATE_DIS_MASK 0x10
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVL_GATE_DIS__SHIFT 0x4
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVC_GATE_DIS_MASK 0x20
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVC_GATE_DIS__SHIFT 0x5
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SOFT_RESET_MASK 0x40
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SOFT_RESET__SHIFT 0x6
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_TEST_CLK_SEL_MASK 0x1f000000
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_TEST_CLK_SEL__SHIFT 0x18
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_BUFFER_MODE_MASK 0x80000000
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_BUFFER_MODE__SHIFT 0x1f
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_EN_MASK 0x1
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_EN__SHIFT 0x0
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_SEL_MASK 0xf0
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_SEL__SHIFT 0x4
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_SEL_MASK 0x3
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_SEL__SHIFT 0x0
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_0_FORCE_MASK 0x4
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_0_FORCE__SHIFT 0x2
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_1_FORCE_MASK 0x8
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_1_FORCE__SHIFT 0x3
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_2_FORCE_MASK 0x10
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_2_FORCE__SHIFT 0x4
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_3_FORCE_MASK 0x20
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_3_FORCE__SHIFT 0x5
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_4_FORCE_MASK 0x40
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_4_FORCE__SHIFT 0x6
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_0_FORCE_MASK 0x80
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_0_FORCE__SHIFT 0x7
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_1_FORCE_MASK 0x100
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_1_FORCE__SHIFT 0x8
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_2_FORCE_MASK 0x200
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_2_FORCE__SHIFT 0x9
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_3_FORCE_MASK 0x400
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_3_FORCE__SHIFT 0xa
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_4_FORCE_MASK 0x800
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_4_FORCE__SHIFT 0xb
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_0_STATE_MASK 0x3
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_0_STATE__SHIFT 0x0
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_1_STATE_MASK 0xc
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_1_STATE__SHIFT 0x2
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_2_STATE_MASK 0x30
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_2_STATE__SHIFT 0x4
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_3_STATE_MASK 0xc0
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_3_STATE__SHIFT 0x6
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_4_STATE_MASK 0x300
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_4_STATE__SHIFT 0x8
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_0_STATE_MASK 0xc00
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_0_STATE__SHIFT 0xa
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_1_STATE_MASK 0x3000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_1_STATE__SHIFT 0xc
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_2_STATE_MASK 0xc000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_2_STATE__SHIFT 0xe
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_3_STATE_MASK 0x30000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_3_STATE__SHIFT 0x10
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_4_STATE_MASK 0xc0000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_4_STATE__SHIFT 0x12
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_FORCE_MASK 0x3
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_FORCE__SHIFT 0x0
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_DIS_MASK 0x4
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_DIS__SHIFT 0x2
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE_MASK 0x18
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_DIS_MASK 0x20
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_DIS__SHIFT 0x5
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_FORCE_MASK 0xc0
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_FORCE__SHIFT 0x6
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_DIS_MASK 0x100
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_DIS__SHIFT 0x8
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_FORCE_MASK 0x600
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_FORCE__SHIFT 0x9
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_DIS_MASK 0x800
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_DIS__SHIFT 0xb
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_FORCE_MASK 0x3000
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_FORCE__SHIFT 0xc
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_DIS_MASK 0x4000
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_DIS__SHIFT 0xe
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_FORCE_MASK 0x18000
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_FORCE__SHIFT 0xf
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_DIS_MASK 0x20000
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_DIS__SHIFT 0x11
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_GAMMA_CORR_MEM_PWR_MODE_SEL_MASK 0x3
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_GAMMA_CORR_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_INPUT_GAMMA_MEM_PWR_MODE_SEL_MASK 0xc
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_INPUT_GAMMA_MEM_PWR_MODE_SEL__SHIFT 0x2
+#define DCFEV_MEM_PWR_CTRL2__SCLV_COEFF_MEM_PWR_MODE_SEL_MASK 0x30
+#define DCFEV_MEM_PWR_CTRL2__SCLV_COEFF_MEM_PWR_MODE_SEL__SHIFT 0x4
+#define DCFEV_MEM_PWR_CTRL2__LBV_MEM_PWR_MODE_SEL_MASK 0xc0
+#define DCFEV_MEM_PWR_CTRL2__LBV_MEM_PWR_MODE_SEL__SHIFT 0x6
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_GAMMA_CORR_MEM_PWR_STATE_MASK 0x3
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_GAMMA_CORR_MEM_PWR_STATE__SHIFT 0x0
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_INPUT_GAMMA_MEM_PWR_STATE_MASK 0xc
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_INPUT_GAMMA_MEM_PWR_STATE__SHIFT 0x2
+#define DCFEV_MEM_PWR_STATUS__SCLV_COEFF_MEM_PWR_STATE_MASK 0x30
+#define DCFEV_MEM_PWR_STATUS__SCLV_COEFF_MEM_PWR_STATE__SHIFT 0x4
+#define DCFEV_MEM_PWR_STATUS__LBV0_MEM_PWR_STATE_MASK 0xc0
+#define DCFEV_MEM_PWR_STATUS__LBV0_MEM_PWR_STATE__SHIFT 0x6
+#define DCFEV_MEM_PWR_STATUS__LBV1_MEM_PWR_STATE_MASK 0x300
+#define DCFEV_MEM_PWR_STATUS__LBV1_MEM_PWR_STATE__SHIFT 0x8
+#define DCFEV_MEM_PWR_STATUS__LBV2_MEM_PWR_STATE_MASK 0xc00
+#define DCFEV_MEM_PWR_STATUS__LBV2_MEM_PWR_STATE__SHIFT 0xa
+#define DCFEV_MEM_PWR_STATUS__LBV3_MEM_PWR_STATE_MASK 0x3000
+#define DCFEV_MEM_PWR_STATUS__LBV3_MEM_PWR_STATE__SHIFT 0xc
+#define DCFEV_L_FLUSH__FLUSH_OCCURED_MASK 0x1
+#define DCFEV_L_FLUSH__FLUSH_OCCURED__SHIFT 0x0
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_OCCURED_MASK 0x2
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_OCCURED__SHIFT 0x1
+#define DCFEV_L_FLUSH__FLUSH_DEEP_MASK 0x4
+#define DCFEV_L_FLUSH__FLUSH_DEEP__SHIFT 0x2
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_DEEP_MASK 0x8
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_DEEP__SHIFT 0x3
+#define DCFEV_L_FLUSH__ALL_MC_REQ_RET_MASK 0x10
+#define DCFEV_L_FLUSH__ALL_MC_REQ_RET__SHIFT 0x4
+#define DCFEV_C_FLUSH__FLUSH_OCCURED_MASK 0x1
+#define DCFEV_C_FLUSH__FLUSH_OCCURED__SHIFT 0x0
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_OCCURED_MASK 0x2
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_OCCURED__SHIFT 0x1
+#define DCFEV_C_FLUSH__FLUSH_DEEP_MASK 0x4
+#define DCFEV_C_FLUSH__FLUSH_DEEP__SHIFT 0x2
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_DEEP_MASK 0x8
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_DEEP__SHIFT 0x3
+#define DCFEV_C_FLUSH__ALL_MC_REQ_RET_MASK 0x10
+#define DCFEV_C_FLUSH__ALL_MC_REQ_RET__SHIFT 0x4
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_BUS_SEL_MASK 0xf
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_BUS_SEL__SHIFT 0x0
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LUMA_VS_CHROMA_MASK 0x10
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LUMA_VS_CHROMA__SHIFT 0x4
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LOWER_UPPER_MASK 0x20
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LOWER_UPPER__SHIFT 0x5
+#define DCFEV_MISC__DCFEV_DPG_ALLOW_SR_ECO_EN_MASK 0x1
+#define DCFEV_MISC__DCFEV_DPG_ALLOW_SR_ECO_EN__SHIFT 0x0
+#define DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x1
+#define DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x2
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x10
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x100
+#define DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0xff000
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x1
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x100
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x10000
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x100000
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x1000000
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x1fff
+#define DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x3ff0000
+#define DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000
+#define DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0xff
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0xff000
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x1000000
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0xff
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0xff00000
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define DCO_SCRATCH0__DCO_SCRATCH0_MASK 0xffffffff
+#define DCO_SCRATCH0__DCO_SCRATCH0__SHIFT 0x0
+#define DCO_SCRATCH1__DCO_SCRATCH1_MASK 0xffffffff
+#define DCO_SCRATCH1__DCO_SCRATCH1__SHIFT 0x0
+#define DCO_SCRATCH2__DCO_SCRATCH2_MASK 0xffffffff
+#define DCO_SCRATCH2__DCO_SCRATCH2__SHIFT 0x0
+#define DCO_SCRATCH3__DCO_SCRATCH3_MASK 0xffffffff
+#define DCO_SCRATCH3__DCO_SCRATCH3__SHIFT 0x0
+#define DCO_SCRATCH4__DCO_SCRATCH4_MASK 0xffffffff
+#define DCO_SCRATCH4__DCO_SCRATCH4__SHIFT 0x0
+#define DCO_SCRATCH5__DCO_SCRATCH5_MASK 0xffffffff
+#define DCO_SCRATCH5__DCO_SCRATCH5__SHIFT 0x0
+#define DCO_SCRATCH6__DCO_SCRATCH6_MASK 0xffffffff
+#define DCO_SCRATCH6__DCO_SCRATCH6__SHIFT 0x0
+#define DCO_SCRATCH7__DCO_SCRATCH7_MASK 0xffffffff
+#define DCO_SCRATCH7__DCO_SCRATCH7__SHIFT 0x0
+#define DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT_MASK 0x7
+#define DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT__SHIFT 0x0
+#define DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT_MASK 0x70
+#define DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS__D1BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS__D1BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE__D2BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE__D2BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D1_VLINE2_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D1_VLINE2_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE2_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE2_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D3_VLINE2_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D3_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE2__D3BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__D3BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D4_VLINE2_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D4_VLINE2_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D5_VLINE2_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D5_VLINE2_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D6_VLINE2_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D6_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE3__D4BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE3__D4BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE3__BUFMGR_IHIF_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__BUFMGR_IHIF_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_HOST_CONFLICT_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_HOST_CONFLICT_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_DATA_OVERFLOW_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_DATA_OVERFLOW_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE4__D5BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE4__D5BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE5__D6BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE5__D6BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT0_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT0__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT1_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT1__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT2_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT2__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB0_IHIF_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB0_IHIF_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB1_IHIF_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB1_IHIF_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER0_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER1_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER2_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER3_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER3_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER4_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER4_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER5_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER5_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER6_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER6_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER7_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER0_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER1_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER2_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER3_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER4_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER5_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER6_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER7_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER0_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER1_INTERRUPT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER2_INTERRUPT_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER3_INTERRUPT_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER0_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER1_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER2_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER3_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER3_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER4_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER4_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER5_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER5_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER6_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER6_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER7_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER0_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER1_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER2_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER3_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER4_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER5_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER6_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER7_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER4_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER5_INTERRUPT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER6_INTERRUPT_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER7_INTERRUPT_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER0_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER1_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER2_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER3_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER3_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER4_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER4_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER5_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER5_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER6_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER6_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER7_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER0_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER1_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER2_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER3_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER4_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER5_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER6_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER7_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WB_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WB_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER2_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER2_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER3_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER3_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER4_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER4_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER6_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER6_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER7_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER7_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER_OFF_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER_OFF_INTERRUPT__SHIFT 0x14
+#define DCO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x1
+#define DCO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DCO_MEM_PWR_STATUS__MVP_MEM_PWR_STATE_MASK 0x4
+#define DCO_MEM_PWR_STATUS__MVP_MEM_PWR_STATE__SHIFT 0x2
+#define DCO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x8
+#define DCO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DCO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x10
+#define DCO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DCO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x20
+#define DCO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DCO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x40
+#define DCO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DCO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x80
+#define DCO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DCO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x100
+#define DCO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DCO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x200
+#define DCO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DCO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE_MASK 0xc00
+#define DCO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE__SHIFT 0xa
+#define DCO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE_MASK 0x3000
+#define DCO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE__SHIFT 0xc
+#define DCO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE_MASK 0xc000
+#define DCO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE__SHIFT 0xe
+#define DCO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE_MASK 0x30000
+#define DCO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE__SHIFT 0x10
+#define DCO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE_MASK 0xc0000
+#define DCO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE__SHIFT 0x12
+#define DCO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE_MASK 0x300000
+#define DCO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE__SHIFT 0x14
+#define DCO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE_MASK 0xc00000
+#define DCO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE__SHIFT 0x16
+#define DCO_MEM_PWR_STATUS1__DPLPA_MEM_PWR_STATE_MASK 0x1
+#define DCO_MEM_PWR_STATUS1__DPLPA_MEM_PWR_STATE__SHIFT 0x0
+#define DCO_MEM_PWR_STATUS1__DPLPB_MEM_PWR_STATE_MASK 0x2
+#define DCO_MEM_PWR_STATUS1__DPLPB_MEM_PWR_STATE__SHIFT 0x1
+#define DCO_MEM_PWR_STATUS1__HDMILP0_MEM_PWR_STATE_MASK 0xc00
+#define DCO_MEM_PWR_STATUS1__HDMILP0_MEM_PWR_STATE__SHIFT 0xa
+#define DCO_MEM_PWR_STATUS1__HDMILP1_MEM_PWR_STATE_MASK 0x3000
+#define DCO_MEM_PWR_STATUS1__HDMILP1_MEM_PWR_STATE__SHIFT 0xc
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x1
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x2
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DCO_MEM_PWR_CTRL__MVP_LIGHT_SLEEP_DIS_MASK 0x8
+#define DCO_MEM_PWR_CTRL__MVP_LIGHT_SLEEP_DIS__SHIFT 0x3
+#define DCO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x10
+#define DCO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DCO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x20
+#define DCO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DCO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x40
+#define DCO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DCO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x80
+#define DCO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DCO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x100
+#define DCO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DCO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x200
+#define DCO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DCO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x400
+#define DCO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE_MASK 0x1800
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE__SHIFT 0xb
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS_MASK 0x2000
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS__SHIFT 0xd
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE_MASK 0xc000
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE__SHIFT 0xe
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS_MASK 0x10000
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS__SHIFT 0x10
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE_MASK 0x60000
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE__SHIFT 0x11
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS_MASK 0x80000
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS__SHIFT 0x13
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE_MASK 0x300000
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE__SHIFT 0x14
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS_MASK 0x400000
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS__SHIFT 0x16
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE_MASK 0x1800000
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE__SHIFT 0x17
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS_MASK 0x2000000
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS__SHIFT 0x19
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE_MASK 0xc000000
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE__SHIFT 0x1a
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS_MASK 0x10000000
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS__SHIFT 0x1c
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE_MASK 0x60000000
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE__SHIFT 0x1d
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS_MASK 0x80000000
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS__SHIFT 0x1f
+#define DCO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL_MASK 0x3
+#define DCO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DCO_MEM_PWR_CTRL2__DPLPA_LIGHT_SLEEP_DIS_MASK 0x4
+#define DCO_MEM_PWR_CTRL2__DPLPA_LIGHT_SLEEP_DIS__SHIFT 0x2
+#define DCO_MEM_PWR_CTRL2__DPLPB_LIGHT_SLEEP_DIS_MASK 0x8
+#define DCO_MEM_PWR_CTRL2__DPLPB_LIGHT_SLEEP_DIS__SHIFT 0x3
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_FORCE_MASK 0x30000
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_FORCE__SHIFT 0x10
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_DIS_MASK 0x40000
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_DIS__SHIFT 0x12
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_FORCE_MASK 0x180000
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_FORCE__SHIFT 0x13
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_DIS_MASK 0x200000
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_DIS__SHIFT 0x15
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_DIS_MASK 0x100
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_DIS_MASK 0x100
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_DIS_MASK 0x100
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_DIS_MASK 0x100
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_DIS_MASK 0x100
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_DIS_MASK 0x100
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_STATE__SHIFT 0xc
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS_MASK 0x20
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS__SHIFT 0x5
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS_MASK 0x40
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS__SHIFT 0x6
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS_MASK 0x80
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS__SHIFT 0x7
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x100
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x8
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS_MASK 0x200
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS__SHIFT 0x9
+#define DCO_CLK_CNTL__REFCLK_R_DCO_GATE_DIS_MASK 0x400
+#define DCO_CLK_CNTL__REFCLK_R_DCO_GATE_DIS__SHIFT 0xa
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS_MASK 0x10000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS__SHIFT 0x10
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS_MASK 0x20000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS__SHIFT 0x11
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS_MASK 0x40000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS__SHIFT 0x12
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS_MASK 0x80000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS__SHIFT 0x13
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS_MASK 0x100000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS__SHIFT 0x14
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS_MASK 0x200000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS__SHIFT 0x15
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPA_GATE_DIS_MASK 0x400000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPA_GATE_DIS__SHIFT 0x16
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPB_GATE_DIS_MASK 0x800000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPB_GATE_DIS__SHIFT 0x17
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x1000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x18
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x2000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x19
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x4000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x1a
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x8000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x1b
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x1c
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x1d
+#define DCO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS_MASK 0x40000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS__SHIFT 0x1e
+#define DCO_CLK_CNTL2__DCO_TEST_CLK_SEL_MASK 0x7f
+#define DCO_CLK_CNTL2__DCO_TEST_CLK_SEL__SHIFT 0x0
+#define DCO_CLK_CNTL2__SCLK_G_AFMTA_GATE_DIS_MASK 0x80
+#define DCO_CLK_CNTL2__SCLK_G_AFMTA_GATE_DIS__SHIFT 0x7
+#define DCO_CLK_CNTL2__SCLK_G_AFMTB_GATE_DIS_MASK 0x100
+#define DCO_CLK_CNTL2__SCLK_G_AFMTB_GATE_DIS__SHIFT 0x8
+#define DCO_CLK_CNTL2__SCLK_G_AFMTC_GATE_DIS_MASK 0x200
+#define DCO_CLK_CNTL2__SCLK_G_AFMTC_GATE_DIS__SHIFT 0x9
+#define DCO_CLK_CNTL2__SCLK_G_AFMTD_GATE_DIS_MASK 0x400
+#define DCO_CLK_CNTL2__SCLK_G_AFMTD_GATE_DIS__SHIFT 0xa
+#define DCO_CLK_CNTL2__SCLK_G_AFMTE_GATE_DIS_MASK 0x800
+#define DCO_CLK_CNTL2__SCLK_G_AFMTE_GATE_DIS__SHIFT 0xb
+#define DCO_CLK_CNTL2__SCLK_G_AFMTF_GATE_DIS_MASK 0x1000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTF_GATE_DIS__SHIFT 0xc
+#define DCO_CLK_CNTL2__SCLK_G_AFMTG_GATE_DIS_MASK 0x2000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTG_GATE_DIS__SHIFT 0xd
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPA_GATE_DIS_MASK 0x8000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPA_GATE_DIS__SHIFT 0xf
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPB_GATE_DIS_MASK 0x10000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPB_GATE_DIS__SHIFT 0x10
+#define DCO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS_MASK 0x20000
+#define DCO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS__SHIFT 0x11
+#define DCO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS_MASK 0x40000
+#define DCO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS__SHIFT 0x12
+#define DCO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS_MASK 0x80000
+#define DCO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS__SHIFT 0x13
+#define DCO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS_MASK 0x100000
+#define DCO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS__SHIFT 0x14
+#define DCO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS_MASK 0x200000
+#define DCO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS__SHIFT 0x15
+#define DCO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS_MASK 0x400000
+#define DCO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS__SHIFT 0x16
+#define DCO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS_MASK 0x800000
+#define DCO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS__SHIFT 0x17
+#define DCO_CLK_CNTL2__SYMCLKLPA_FE_G_AFMT_GATE_DIS_MASK 0x2000000
+#define DCO_CLK_CNTL2__SYMCLKLPA_FE_G_AFMT_GATE_DIS__SHIFT 0x19
+#define DCO_CLK_CNTL2__SYMCLKLPB_FE_G_AFMT_GATE_DIS_MASK 0x4000000
+#define DCO_CLK_CNTL2__SYMCLKLPB_FE_G_AFMT_GATE_DIS__SHIFT 0x1a
+#define DCO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS_MASK 0x1
+#define DCO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS__SHIFT 0x0
+#define DCO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS_MASK 0x2
+#define DCO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS__SHIFT 0x1
+#define DCO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS_MASK 0x4
+#define DCO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS__SHIFT 0x2
+#define DCO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS_MASK 0x8
+#define DCO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS__SHIFT 0x3
+#define DCO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS_MASK 0x10
+#define DCO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS__SHIFT 0x4
+#define DCO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS_MASK 0x20
+#define DCO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS__SHIFT 0x5
+#define DCO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS_MASK 0x40
+#define DCO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS__SHIFT 0x6
+#define DCO_CLK_CNTL3__SYMCLKLPA_FE_G_TMDS_GATE_DIS_MASK 0x100
+#define DCO_CLK_CNTL3__SYMCLKLPA_FE_G_TMDS_GATE_DIS__SHIFT 0x8
+#define DCO_CLK_CNTL3__SYMCLKLPB_FE_G_TMDS_GATE_DIS_MASK 0x200
+#define DCO_CLK_CNTL3__SYMCLKLPB_FE_G_TMDS_GATE_DIS__SHIFT 0x9
+#define DCO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS_MASK 0x400
+#define DCO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS__SHIFT 0xa
+#define DCO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS_MASK 0x800
+#define DCO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS__SHIFT 0xb
+#define DCO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS_MASK 0x1000
+#define DCO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS__SHIFT 0xc
+#define DCO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS_MASK 0x2000
+#define DCO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS__SHIFT 0xd
+#define DCO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS_MASK 0x4000
+#define DCO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS__SHIFT 0xe
+#define DCO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS_MASK 0x8000
+#define DCO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS__SHIFT 0xf
+#define DCO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS_MASK 0x10000
+#define DCO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS__SHIFT 0x10
+#define DCO_CLK_CNTL3__SYMCLKLPA_G_TMDS_GATE_DIS_MASK 0x40000
+#define DCO_CLK_CNTL3__SYMCLKLPA_G_TMDS_GATE_DIS__SHIFT 0x12
+#define DCO_CLK_CNTL3__SYMCLKLPB_G_TMDS_GATE_DIS_MASK 0x80000
+#define DCO_CLK_CNTL3__SYMCLKLPB_G_TMDS_GATE_DIS__SHIFT 0x13
+#define DPDBG_CNTL__DPDBG_ENABLE_MASK 0x1
+#define DPDBG_CNTL__DPDBG_ENABLE__SHIFT 0x0
+#define DPDBG_CNTL__DPDBG_INPUT_ENABLE_MASK 0x2
+#define DPDBG_CNTL__DPDBG_INPUT_ENABLE__SHIFT 0x1
+#define DPDBG_CNTL__DPDBG_SYMCLK_ON_MASK 0x10
+#define DPDBG_CNTL__DPDBG_SYMCLK_ON__SHIFT 0x4
+#define DPDBG_CNTL__DPDBG_ERROR_DETECTION_MODE_MASK 0x100
+#define DPDBG_CNTL__DPDBG_ERROR_DETECTION_MODE__SHIFT 0x8
+#define DPDBG_CNTL__DPDBG_LINE_LENGTH_MASK 0xffff0000
+#define DPDBG_CNTL__DPDBG_LINE_LENGTH__SHIFT 0x10
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_MASK_MASK 0x1
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_MASK__SHIFT 0x0
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_TYPE_MASK 0x2
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_TYPE__SHIFT 0x1
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_ACK_MASK 0x100
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_ACK__SHIFT 0x8
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_OCCURRED_MASK 0x10000
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_OCCURRED__SHIFT 0x10
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_STATUS_MASK 0x1000000
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_STATUS__SHIFT 0x18
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x1
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x100
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DCO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x1
+#define DCO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x0
+#define DCO_SOFT_RESET__I2S0_SPDIF0_SOFT_RESET_MASK 0x10
+#define DCO_SOFT_RESET__I2S0_SPDIF0_SOFT_RESET__SHIFT 0x4
+#define DCO_SOFT_RESET__I2S1_SOFT_RESET_MASK 0x20
+#define DCO_SOFT_RESET__I2S1_SOFT_RESET__SHIFT 0x5
+#define DCO_SOFT_RESET__SPDIF1_SOFT_RESET_MASK 0x40
+#define DCO_SOFT_RESET__SPDIF1_SOFT_RESET__SHIFT 0x6
+#define DCO_SOFT_RESET__DB_CLK_SOFT_RESET_MASK 0x1000
+#define DCO_SOFT_RESET__DB_CLK_SOFT_RESET__SHIFT 0xc
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET_MASK 0x10000
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET__SHIFT 0x10
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET_MASK 0x20000
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET__SHIFT 0x11
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET_MASK 0x40000
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET__SHIFT 0x12
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET_MASK 0x80000
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET__SHIFT 0x13
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET_MASK 0x100000
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET__SHIFT 0x14
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET_MASK 0x200000
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET__SHIFT 0x15
+#define DCO_SOFT_RESET__MVP_SOFT_RESET_MASK 0x1000000
+#define DCO_SOFT_RESET__MVP_SOFT_RESET__SHIFT 0x18
+#define DCO_SOFT_RESET__ABM_SOFT_RESET_MASK 0x2000000
+#define DCO_SOFT_RESET__ABM_SOFT_RESET__SHIFT 0x19
+#define DCO_SOFT_RESET__DVO_SOFT_RESET_MASK 0x8000000
+#define DCO_SOFT_RESET__DVO_SOFT_RESET__SHIFT 0x1b
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET_MASK 0x1
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET__SHIFT 0x0
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET_MASK 0x2
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET__SHIFT 0x1
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET_MASK 0x10
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET__SHIFT 0x4
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET_MASK 0x20
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET__SHIFT 0x5
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET_MASK 0x100
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET__SHIFT 0x8
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET_MASK 0x200
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET__SHIFT 0x9
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET_MASK 0x1000
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET__SHIFT 0xc
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET_MASK 0x2000
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET__SHIFT 0xd
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET_MASK 0x10000
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET__SHIFT 0x10
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET_MASK 0x20000
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET__SHIFT 0x11
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET_MASK 0x100000
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET__SHIFT 0x14
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET_MASK 0x200000
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET__SHIFT 0x15
+#define DIG_SOFT_RESET__DIGG_FE_SOFT_RESET_MASK 0x1000000
+#define DIG_SOFT_RESET__DIGG_FE_SOFT_RESET__SHIFT 0x18
+#define DIG_SOFT_RESET__DIGG_BE_SOFT_RESET_MASK 0x2000000
+#define DIG_SOFT_RESET__DIGG_BE_SOFT_RESET__SHIFT 0x19
+#define DIG_SOFT_RESET__DPDBG_SOFT_RESET_MASK 0x80000000
+#define DIG_SOFT_RESET__DPDBG_SOFT_RESET__SHIFT 0x1f
+#define DIG_SOFT_RESET_2__DIGLPA_FE_SOFT_RESET_MASK 0x1
+#define DIG_SOFT_RESET_2__DIGLPA_FE_SOFT_RESET__SHIFT 0x0
+#define DIG_SOFT_RESET_2__DIGLPA_BE_SOFT_RESET_MASK 0x2
+#define DIG_SOFT_RESET_2__DIGLPA_BE_SOFT_RESET__SHIFT 0x1
+#define DIG_SOFT_RESET_2__DIGLPB_FE_SOFT_RESET_MASK 0x10
+#define DIG_SOFT_RESET_2__DIGLPB_FE_SOFT_RESET__SHIFT 0x4
+#define DIG_SOFT_RESET_2__DIGLPB_BE_SOFT_RESET_MASK 0x20
+#define DIG_SOFT_RESET_2__DIGLPB_BE_SOFT_RESET__SHIFT 0x5
+#define DCO_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL_MASK 0x7
+#define DCO_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL__SHIFT 0x0
+#define DCO_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL_MASK 0x70000
+#define DCO_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL__SHIFT 0x10
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x1
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x10
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x100
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x1000
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0xfff0000
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_STATUS_MASK 0x1
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_STATUS__SHIFT 0x0
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_MESSAGE_MASK 0xfffffffe
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DCO_PSP_INTERRUPT_CLEAR__DCO_PSP_INTERRUPT_CLEAR_MASK 0x1
+#define DCO_PSP_INTERRUPT_CLEAR__DCO_PSP_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_STATUS_MASK 0x1
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_STATUS__SHIFT 0x0
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_MESSAGE_MASK 0xfffffffe
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DCO_GENERIC_INTERRUPT_CLEAR__DCO_GENERIC_INTERRUPT_CLEAR_MASK 0x1
+#define DCO_GENERIC_INTERRUPT_CLEAR__DCO_GENERIC_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_INDEX_MASK 0xff
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCO_TEST_DEBUG_DATA__DCO_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCO_TEST_DEBUG_DATA__DCO_TEST_DEBUG_DATA__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x1
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x2
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x4
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x8
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x700
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x300000
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL_MASK 0x80000000
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL__SHIFT 0x1f
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x3
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x10
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x100
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x1000
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x100000
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x200000
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x1000000
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x2000000
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x20
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x40
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x100
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x200
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x400
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x1000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0xc
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x2000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0xd
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x4000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0xe
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x10000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x20000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x11
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x40000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x12
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x100000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x14
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x200000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x15
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x400000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x16
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x1000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x18
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x2000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x4000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x1a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x8000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x1b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x1c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x1d
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x3
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x10
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x20
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x40
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x80
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x100
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x1000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x2000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x4000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x8000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x40000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x8
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x20000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x8
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x20000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x8
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x20000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x8
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x20000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x8
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x20000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE_MASK 0x8
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG_MASK 0x20000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x40
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x40
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x40
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x40
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x40
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE_MASK 0x40
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x1
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x100
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x1000
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x2000
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x100
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x1000
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x2000
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x1
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x100
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x1000
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x2000
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x1
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x100
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x1000
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x2000
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x1
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0xff00
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x3ff0000
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS_MASK 0x3
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE_MASK 0x8
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ_MASK 0x10000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG_MASK 0x20000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG__SHIFT 0x11
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD_MASK 0x3
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE_MASK 0x40
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE__SHIFT 0x6
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0xffff
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x0
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0xf00000
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x14
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x1c
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED_MASK 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED__SHIFT 0x0
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT_MASK 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT__SHIFT 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK_MASK 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK__SHIFT 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK_MASK 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK__SHIFT 0x3
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED_MASK 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED__SHIFT 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT_MASK 0x20
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT__SHIFT 0x5
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK_MASK 0x40
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK__SHIFT 0x6
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK_MASK 0x80
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK__SHIFT 0x7
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED_MASK 0x100
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT_MASK 0x200
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT__SHIFT 0x9
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK_MASK 0x400
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK__SHIFT 0xa
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK_MASK 0x800
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK__SHIFT 0xb
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED_MASK 0x1000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED__SHIFT 0xc
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT_MASK 0x2000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT__SHIFT 0xd
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK_MASK 0x4000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK__SHIFT 0xe
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK_MASK 0x8000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK__SHIFT 0xf
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED_MASK 0x10000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED__SHIFT 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT_MASK 0x20000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT__SHIFT 0x11
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK_MASK 0x40000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK__SHIFT 0x12
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK_MASK 0x80000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK__SHIFT 0x13
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED_MASK 0x100000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED__SHIFT 0x14
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT_MASK 0x200000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT__SHIFT 0x15
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK_MASK 0x400000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK__SHIFT 0x16
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK_MASK 0x800000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK__SHIFT 0x17
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED_MASK 0x1000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED__SHIFT 0x18
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT_MASK 0x2000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT__SHIFT 0x19
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK_MASK 0x4000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK__SHIFT 0x1a
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK_MASK 0x8000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK__SHIFT 0x1b
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE_MASK 0x40000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE__SHIFT 0x1e
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x80000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0x1f
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO_MASK 0x1
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO__SHIFT 0x0
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET_MASK 0x2
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET__SHIFT 0x1
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET_MASK 0x4
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET__SHIFT 0x2
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE_MASK 0x8
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE__SHIFT 0x3
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL_MASK 0x80000000
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL__SHIFT 0x1f
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT_MASK 0x1
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT__SHIFT 0x0
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK_MASK 0x2
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK__SHIFT 0x1
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK_MASK 0x4
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK__SHIFT 0x2
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_OCCURRED_MASK 0x100
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT_MASK 0x200
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT__SHIFT 0x9
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_ACK_MASK 0x400
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_ACK__SHIFT 0xa
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_MASK_MASK 0x800
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_MASK__SHIFT 0xb
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x1000
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0xc
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS_MASK 0xf
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS__SHIFT 0x0
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE_MASK 0x10
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE__SHIFT 0x4
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED_MASK 0x20
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED__SHIFT 0x5
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT_MASK 0x40
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT__SHIFT 0x6
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK_MASK 0x200
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK__SHIFT 0x9
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK_MASK 0x400
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK__SHIFT 0xa
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD_MASK 0x3
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD__SHIFT 0x0
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define GENERIC_I2C_SPEED__GENERIC_I2C_START_STOP_TIMING_CNTL_MASK 0x300
+#define GENERIC_I2C_SPEED__GENERIC_I2C_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE_MASK 0xffff0000
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE__SHIFT 0x10
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN_MASK 0x1
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN__SHIFT 0x0
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL_MASK 0x2
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL__SHIFT 0x1
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN_MASK 0x80
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN__SHIFT 0x7
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY_MASK 0xff00
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY__SHIFT 0x8
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT_MASK 0xff000000
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT__SHIFT 0x18
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW_MASK 0x1
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW__SHIFT 0x0
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK_MASK 0x100
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK__SHIFT 0x8
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ_MASK 0x200
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ__SHIFT 0x9
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START_MASK 0x1000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START__SHIFT 0xc
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_MASK 0x2000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP__SHIFT 0xd
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT_MASK 0xf0000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT__SHIFT 0x10
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW_MASK 0x1
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW__SHIFT 0x0
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_MASK 0xff00
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA__SHIFT 0x8
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_MASK 0xf0000
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX__SHIFT 0x10
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE_MASK 0x80000000
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL_MASK 0x7f
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL__SHIFT 0x0
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL_MASK 0x7f00
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL__SHIFT 0x8
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT_MASK 0x1
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT__SHIFT 0x0
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT_MASK 0x2
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT__SHIFT 0x1
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN_MASK 0x4
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN__SHIFT 0x2
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT_MASK 0x10
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT__SHIFT 0x4
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT_MASK 0x20
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT__SHIFT 0x5
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN_MASK 0x40
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN__SHIFT 0x6
+#define BLNDV_CONTROL__BLND_GLOBAL_GAIN_MASK 0xff
+#define BLNDV_CONTROL__BLND_GLOBAL_GAIN__SHIFT 0x0
+#define BLNDV_CONTROL__BLND_MODE_MASK 0x300
+#define BLNDV_CONTROL__BLND_MODE__SHIFT 0x8
+#define BLNDV_CONTROL__BLND_STEREO_TYPE_MASK 0xc00
+#define BLNDV_CONTROL__BLND_STEREO_TYPE__SHIFT 0xa
+#define BLNDV_CONTROL__BLND_STEREO_POLARITY_MASK 0x1000
+#define BLNDV_CONTROL__BLND_STEREO_POLARITY__SHIFT 0xc
+#define BLNDV_CONTROL__BLND_FEEDTHROUGH_EN_MASK 0x2000
+#define BLNDV_CONTROL__BLND_FEEDTHROUGH_EN__SHIFT 0xd
+#define BLNDV_CONTROL__BLND_ALPHA_MODE_MASK 0x30000
+#define BLNDV_CONTROL__BLND_ALPHA_MODE__SHIFT 0x10
+#define BLNDV_CONTROL__BLND_ACTIVE_OVERLAP_ONLY_MASK 0x40000
+#define BLNDV_CONTROL__BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x12
+#define BLNDV_CONTROL__BLND_MULTIPLIED_MODE_MASK 0x100000
+#define BLNDV_CONTROL__BLND_MULTIPLIED_MODE__SHIFT 0x14
+#define BLNDV_CONTROL__BLND_GLOBAL_ALPHA_MASK 0xff000000
+#define BLNDV_CONTROL__BLND_GLOBAL_ALPHA__SHIFT 0x18
+#define BLNDV_SM_CONTROL2__SM_MODE_MASK 0x7
+#define BLNDV_SM_CONTROL2__SM_MODE__SHIFT 0x0
+#define BLNDV_SM_CONTROL2__SM_FRAME_ALTERNATE_MASK 0x10
+#define BLNDV_SM_CONTROL2__SM_FRAME_ALTERNATE__SHIFT 0x4
+#define BLNDV_SM_CONTROL2__SM_FIELD_ALTERNATE_MASK 0x20
+#define BLNDV_SM_CONTROL2__SM_FIELD_ALTERNATE__SHIFT 0x5
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL_MASK 0x300
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL_MASK 0x30000
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define BLNDV_SM_CONTROL2__SM_CURRENT_FRAME_POL_MASK 0x1000000
+#define BLNDV_SM_CONTROL2__SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define BLNDV_CONTROL2__PTI_ENABLE_MASK 0x1
+#define BLNDV_CONTROL2__PTI_ENABLE__SHIFT 0x0
+#define BLNDV_CONTROL2__PTI_NEW_PIXEL_GAP_MASK 0x30
+#define BLNDV_CONTROL2__PTI_NEW_PIXEL_GAP__SHIFT 0x4
+#define BLNDV_CONTROL2__BLND_NEW_PIXEL_MODE_MASK 0x40
+#define BLNDV_CONTROL2__BLND_NEW_PIXEL_MODE__SHIFT 0x6
+#define BLNDV_CONTROL2__BLND_SUPERAA_DEGAMMA_EN_MASK 0x80
+#define BLNDV_CONTROL2__BLND_SUPERAA_DEGAMMA_EN__SHIFT 0x7
+#define BLNDV_CONTROL2__BLND_SUPERAA_REGAMMA_EN_MASK 0x100
+#define BLNDV_CONTROL2__BLND_SUPERAA_REGAMMA_EN__SHIFT 0x8
+#define BLNDV_UPDATE__BLND_UPDATE_PENDING_MASK 0x1
+#define BLNDV_UPDATE__BLND_UPDATE_PENDING__SHIFT 0x0
+#define BLNDV_UPDATE__BLND_UPDATE_TAKEN_MASK 0x100
+#define BLNDV_UPDATE__BLND_UPDATE_TAKEN__SHIFT 0x8
+#define BLNDV_UPDATE__BLND_UPDATE_LOCK_MASK 0x10000
+#define BLNDV_UPDATE__BLND_UPDATE_LOCK__SHIFT 0x10
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED_MASK 0x1
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED__SHIFT 0x0
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK_MASK 0x100
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK__SHIFT 0x8
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK_MASK 0x1000
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK__SHIFT 0xc
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX_MASK 0x30000
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX__SHIFT 0x10
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK_MASK 0x1
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK__SHIFT 0x0
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_MASK 0x2
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK__SHIFT 0x1
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK_MASK 0x10000
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK__SHIFT 0x10
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK_MASK 0x1000000
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK__SHIFT 0x18
+#define BLNDV_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK_MASK 0x10000000
+#define BLNDV_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK__SHIFT 0x1c
+#define BLNDV_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK_MASK 0x20000000
+#define BLNDV_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK__SHIFT 0x1d
+#define BLNDV_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE_MASK 0x80000000
+#define BLNDV_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE__SHIFT 0x1f
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING_MASK 0x1
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING__SHIFT 0x0
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING_MASK 0x2
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING__SHIFT 0x1
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING_MASK 0x4
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING__SHIFT 0x2
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING_MASK 0x8
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING__SHIFT 0x3
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING_MASK 0x40
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING__SHIFT 0x6
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING_MASK 0x80
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING__SHIFT 0x7
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING_MASK 0x100
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING__SHIFT 0x8
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING_MASK 0x200
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING__SHIFT 0x9
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING_MASK 0x400
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING__SHIFT 0xa
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING_MASK 0x800
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING__SHIFT 0xb
+#define BLNDV_DEBUG__BLND_CNV_MUX_SELECT_MASK 0x1
+#define BLNDV_DEBUG__BLND_CNV_MUX_SELECT__SHIFT 0x0
+#define BLNDV_DEBUG__BLND_DEBUG_MASK 0xfffffffe
+#define BLNDV_DEBUG__BLND_DEBUG__SHIFT 0x1
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX_MASK 0xff
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX__SHIFT 0x0
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define BLNDV_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA_MASK 0xffffffff
+#define BLNDV_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA__SHIFT 0x0
+#define CRTCV_H_TOTAL__CRTC_H_TOTAL_MASK 0x3fff
+#define CRTCV_H_TOTAL__CRTC_H_TOTAL__SHIFT 0x0
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_START_MASK 0x3fff
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_START__SHIFT 0x0
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_END_MASK 0x3fff0000
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_END__SHIFT 0x10
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_START_MASK 0x3fff
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_START__SHIFT 0x0
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_END_MASK 0x3fff0000
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_END__SHIFT 0x10
+#define CRTCV_V_TOTAL__CRTC_V_TOTAL_MASK 0x3fff
+#define CRTCV_V_TOTAL__CRTC_V_TOTAL__SHIFT 0x0
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_START_MASK 0x3fff
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_START__SHIFT 0x0
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_END_MASK 0x3fff0000
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_END__SHIFT 0x10
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_START_MASK 0x3fff
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_START__SHIFT 0x0
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_END_MASK 0x3fff0000
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_END__SHIFT 0x10
+#define CRTCV_CONTROL__CRTC_MASTER_EN_MASK 0x1
+#define CRTCV_CONTROL__CRTC_MASTER_EN__SHIFT 0x0
+#define CRTCV_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x10
+#define CRTCV_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x4
+#define CRTCV_CONTROL__CRTC_DISABLE_POINT_CNTL_MASK 0x300
+#define CRTCV_CONTROL__CRTC_DISABLE_POINT_CNTL__SHIFT 0x8
+#define CRTCV_CONTROL__CRTC_START_POINT_CNTL_MASK 0x1000
+#define CRTCV_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0xc
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_CNTL_MASK 0x2000
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_POLARITY_MASK 0x4000
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define CRTCV_CONTROL__CRTC_CURRENT_MASTER_EN_STATE_MASK 0x10000
+#define CRTCV_CONTROL__CRTC_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define CRTCV_CONTROL__CRTC_HBLANK_EARLY_CONTROL_MASK 0x700000
+#define CRTCV_CONTROL__CRTC_HBLANK_EARLY_CONTROL__SHIFT 0x14
+#define CRTCV_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE_MASK 0x1000000
+#define CRTCV_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define CRTCV_CONTROL__CRTC_SOF_PULL_EN_MASK 0x20000000
+#define CRTCV_CONTROL__CRTC_SOF_PULL_EN__SHIFT 0x1d
+#define CRTCV_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000
+#define CRTCV_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define CRTCV_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000
+#define CRTCV_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define CRTCV_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY_MASK 0x1
+#define CRTCV_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY__SHIFT 0x0
+#define CRTCV_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY_MASK 0x2
+#define CRTCV_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY__SHIFT 0x1
+#define CRTCV_START_LINE_CONTROL__CRTC_PREFETCH_EN_MASK 0x4
+#define CRTCV_START_LINE_CONTROL__CRTC_PREFETCH_EN__SHIFT 0x2
+#define CRTCV_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN_MASK 0x100
+#define CRTCV_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN__SHIFT 0x8
+#define CRTCV_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION_MASK 0xff000
+#define CRTCV_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION__SHIFT 0xc
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE_MASK 0x3ff
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE__SHIFT 0x0
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN_MASK 0xffc00
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN__SHIFT 0xa
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED_MASK 0x3ff00000
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED__SHIFT 0x14
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT_MASK 0x3
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT__SHIFT 0x0
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT_MASK 0x300
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT__SHIFT 0x8
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT_MASK 0x30000
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT__SHIFT 0x10
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB_MASK 0x3ff
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB__SHIFT 0x0
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y_MASK 0xffc00
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y__SHIFT 0xa
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR_MASK 0x3ff00000
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR__SHIFT 0x14
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT_MASK 0x3
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT_MASK 0x300
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT_MASK 0x30000
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define CRTCV_CRC_CNTL__CRTC_CRC_EN_MASK 0x1
+#define CRTCV_CRC_CNTL__CRTC_CRC_EN__SHIFT 0x0
+#define CRTCV_CRC_CNTL__CRTC_CRC_CONT_EN_MASK 0x10
+#define CRTCV_CRC_CNTL__CRTC_CRC_CONT_EN__SHIFT 0x4
+#define CRTCV_CRC_CNTL__CRTC_CRC_STEREO_MODE_MASK 0x300
+#define CRTCV_CRC_CNTL__CRTC_CRC_STEREO_MODE__SHIFT 0x8
+#define CRTCV_CRC_CNTL__CRTC_CRC_INTERLACE_MODE_MASK 0x3000
+#define CRTCV_CRC_CNTL__CRTC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define CRTCV_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x10000
+#define CRTCV_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x10
+#define CRTCV_CRC_CNTL__CRTC_CRC0_SELECT_MASK 0x700000
+#define CRTCV_CRC_CNTL__CRTC_CRC0_SELECT__SHIFT 0x14
+#define CRTCV_CRC_CNTL__CRTC_CRC1_SELECT_MASK 0x7000000
+#define CRTCV_CRC_CNTL__CRTC_CRC1_SELECT__SHIFT 0x18
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define CRTCV_CRC0_DATA_RG__CRC0_R_CR_MASK 0xffff
+#define CRTCV_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define CRTCV_CRC0_DATA_RG__CRC0_G_Y_MASK 0xffff0000
+#define CRTCV_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define CRTCV_CRC0_DATA_B__CRC0_B_CB_MASK 0xffff
+#define CRTCV_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define CRTCV_CRC1_DATA_RG__CRC1_R_CR_MASK 0xffff
+#define CRTCV_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define CRTCV_CRC1_DATA_RG__CRC1_G_Y_MASK 0xffff0000
+#define CRTCV_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define CRTCV_CRC1_DATA_B__CRC1_B_CB_MASK 0xffff
+#define CRTCV_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX_MASK 0xff
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CRTCV_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define CRTCV_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA__SHIFT 0x0
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP_MASK 0x300
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP__SHIFT 0x8
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID_MASK 0xf000
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID__SHIFT 0xc
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV_MASK 0x10000
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV__SHIFT 0x10
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE_MASK 0xf
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE__SHIFT 0x0
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT_MASK 0x70
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT__SHIFT 0x4
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH_MASK 0x300
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH__SHIFT 0x8
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT_MASK 0xc00
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT__SHIFT 0xa
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT_MASK 0x3000
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT__SHIFT 0xc
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS_MASK 0x300000
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS__SHIFT 0x14
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE_MASK 0x7
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE__SHIFT 0x0
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE_MASK 0x700000
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE__SHIFT 0x14
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG_MASK 0xf8000000
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG__SHIFT 0x1b
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT_MASK 0x100
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT__SHIFT 0x8
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK_MASK 0x200
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK__SHIFT 0x9
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK_MASK 0x400
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK__SHIFT 0xa
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT_MASK 0x10000
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT__SHIFT 0x10
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK_MASK 0x20000
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK__SHIFT 0x11
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK_MASK 0x40000
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK__SHIFT 0x12
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_STAT_MASK 0x100000
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_STAT__SHIFT 0x14
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_MASK_MASK 0x200000
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_MASK__SHIFT 0x15
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_ACK_MASK 0x400000
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_ACK__SHIFT 0x16
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY_MASK 0xf
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY__SHIFT 0x0
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY_MASK 0xff0
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS_MASK 0x8000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS__SHIFT 0xf
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS_MASK 0x10000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS__SHIFT 0x10
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_0_MASK 0x20000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_0__SHIFT 0x11
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_1_MASK 0x40000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_1__SHIFT 0x12
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_2_MASK 0x80000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_2__SHIFT 0x13
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_3_MASK 0x100000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_3__SHIFT 0x14
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_4_MASK 0x200000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_4__SHIFT 0x15
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_5_MASK 0x400000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_5__SHIFT 0x16
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS_MASK 0x800000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS__SHIFT 0x17
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS_MASK 0x1000000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS__SHIFT 0x18
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS_MASK 0x2000000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS__SHIFT 0x19
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_CORE_IDLE_STATE_MASK 0x3
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_CORE_IDLE_STATE__SHIFT 0x0
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_IDLE_STATE_MASK 0xc
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_IDLE_STATE__SHIFT 0x2
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_STATE_MASK 0x180000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_STATE__SHIFT 0x13
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_TRANS_MASK 0x200000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_TRANS__SHIFT 0x15
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_STATE_MASK 0xc00000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_STATE__SHIFT 0x16
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_TRANS_MASK 0x2000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_TRANS__SHIFT 0x19
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_STATE_MASK 0xc000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_STATE__SHIFT 0x1a
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_TRANS_MASK 0x10000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_TRANS__SHIFT 0x1c
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_STATE_MASK 0x60000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_STATE__SHIFT 0x1d
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_TRANS_MASK 0x80000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_TRANS__SHIFT 0x1f
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS_MASK 0xf
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS__SHIFT 0x0
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR_MASK 0x100
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR__SHIFT 0x8
+#define XDMA_PERF_MEAS_STATUS__XDMA_PERF_MEAS_STATUS_MASK 0xff
+#define XDMA_PERF_MEAS_STATUS__XDMA_PERF_MEAS_STATUS__SHIFT 0x0
+#define XDMA_IF_STATUS__XDMA_MC_PCIEWR_BUSY_MASK 0x1
+#define XDMA_IF_STATUS__XDMA_MC_PCIEWR_BUSY__SHIFT 0x0
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX_MASK 0xff
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX__SHIFT 0x0
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA_MASK 0xffffffff
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA__SHIFT 0x0
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY_MASK 0x7
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY__SHIFT 0x0
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS_MASK 0x8
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS__SHIFT 0x3
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY_MASK 0xffff8000
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY__SHIFT 0xf
+#define XDMA_PG_CONTROL__XDMA_PG_CONTROL_MASK 0xffffffff
+#define XDMA_PG_CONTROL__XDMA_PG_CONTROL__SHIFT 0x0
+#define XDMA_PG_WDATA__XDMA_PG_WDATA_MASK 0xffffffff
+#define XDMA_PG_WDATA__XDMA_PG_WDATA__SHIFT 0x0
+#define XDMA_PG_STATUS__XDMA_SERDES_RDATA_MASK 0xffffff
+#define XDMA_PG_STATUS__XDMA_SERDES_RDATA__SHIFT 0x0
+#define XDMA_PG_STATUS__XDMA_PGFSM_READ_READY_MASK 0x1000000
+#define XDMA_PG_STATUS__XDMA_PGFSM_READ_READY__SHIFT 0x18
+#define XDMA_PG_STATUS__XDMA_SERDES_BUSY_MASK 0x2000000
+#define XDMA_PG_STATUS__XDMA_SERDES_BUSY__SHIFT 0x19
+#define XDMA_PG_STATUS__XDMA_SERDES_SMU_POWER_STATUS_MASK 0x4000000
+#define XDMA_PG_STATUS__XDMA_SERDES_SMU_POWER_STATUS__SHIFT 0x1a
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_INDEX_MASK 0xff
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_INDEX__SHIFT 0x0
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_SEL_MASK 0x200
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_SEL__SHIFT 0x9
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_OUT_EN_MASK 0x400
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_OUT_EN__SHIFT 0xa
+#define XDMA_AON_TEST_DEBUG_DATA__XDMA_AON_TEST_DEBUG_DATA_MASK 0xffffffff
+#define XDMA_AON_TEST_DEBUG_DATA__XDMA_AON_TEST_DEBUG_DATA__SHIFT 0x0
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ALPHA_POSITION_MASK 0x3000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ALPHA_POSITION__SHIFT 0xc
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY_MASK 0x4000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY__SHIFT 0xe
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE_MASK 0x10000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE__SHIFT 0x10
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE_MASK 0x40000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE__SHIFT 0x12
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET_MASK 0x100000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET__SHIFT 0x14
+#define XDMA_MSTR_CNTL__XDMA_MSTR_BIF_STALL_EN_MASK 0x200000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_BIF_STALL_EN__SHIFT 0x15
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT_MASK 0x3fff
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT__SHIFT 0x0
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT_MASK 0xfff0000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT__SHIFT 0x10
+#define XDMA_MSTR_STATUS__XDMA_MSTR_STATUS_SELECT_MASK 0x70000000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_STATUS_SELECT__SHIFT 0x1c
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP_MASK 0x300
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP__SHIFT 0x8
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID_MASK 0xf000
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID__SHIFT 0xc
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV_MASK 0x10000
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV__SHIFT 0x10
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_MASK 0xffffffff
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__SHIFT 0x0
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH_MASK 0xff
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__SHIFT 0x0
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH_MASK 0x3fff
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH__SHIFT 0x0
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_CLIENT_STALL_MASK 0x1
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_CLIENT_STALL__SHIFT 0x0
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL_MASK 0xf00
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_STALL_DELAY_MASK 0xf000
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_STALL_DELAY__SHIFT 0xc
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL_MASK 0x1
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL__SHIFT 0x0
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT_MASK 0xf0
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT__SHIFT 0x4
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL_MASK 0xf00
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY_MASK 0xf000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY__SHIFT 0xc
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER_MASK 0xffff0000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER__SHIFT 0x10
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG_MASK 0x3ff
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG__SHIFT 0x0
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_MASK 0x3000
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK__SHIFT 0xc
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR_MASK 0x10000
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR__SHIFT 0x10
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG_MASK 0x3ff
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG__SHIFT 0x0
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_MASK 0x3000
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK__SHIFT 0xc
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR_MASK 0x10000
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR__SHIFT 0x10
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_SEL_MASK 0x7
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_SEL__SHIFT 0x0
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_V_COUNT_MASK 0x3fff00
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_V_COUNT__SHIFT 0x8
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_LINES_MASK 0xff
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_LINES__SHIFT 0x0
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_READ_REQUEST_MASK 0x100
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_READ_REQUEST__SHIFT 0x8
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FRAME_MODE_MASK 0x200
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FRAME_MODE__SHIFT 0x9
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_SOFT_RESET_MASK 0x400
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_SOFT_RESET__SHIFT 0xa
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_INVALIDATE_MASK 0x800
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_INVALIDATE__SHIFT 0xb
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_CHANNEL_ID_MASK 0x7000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_CHANNEL_ID__SHIFT 0xc
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_FLIP_MODE_MASK 0x8000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_FLIP_MODE__SHIFT 0xf
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_MIN_MASK 0xff0000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_MIN__SHIFT 0x10
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_ACTIVE_MASK 0x1000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_ACTIVE__SHIFT 0x18
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLUSHING_MASK 0x2000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLUSHING__SHIFT 0x19
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLIP_PENDING_MASK 0x4000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLIP_PENDING__SHIFT 0x1a
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_VSYNC_GSL_ENABLE_MASK 0x8000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_VSYNC_GSL_ENABLE__SHIFT 0x1b
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_SUPERAA_ENABLE_MASK 0x10000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_SUPERAA_ENABLE__SHIFT 0x1c
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_HSYNC_GSL_GROUP_MASK 0x60000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_HSYNC_GSL_GROUP__SHIFT 0x1d
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_GSL_GROUP_MASTER_MASK 0x80000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_GSL_GROUP_MASTER__SHIFT 0x1f
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE_MASK 0x3fff
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE__SHIFT 0x0
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH_MASK 0x3fff0000
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH__SHIFT 0x10
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_WIDTH_MASK 0x3fff
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_WIDTH__SHIFT 0x0
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_HEIGHT_MASK 0x3fff0000
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_HEIGHT__SHIFT 0x10
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT_MASK 0x3fff
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT__SHIFT 0x0
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT_MASK 0x3fff0000
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT__SHIFT 0x10
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE_MASK 0xffffffff
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE__SHIFT 0x0
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH_MASK 0xff
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__SHIFT 0x0
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS_MASK 0xffffffff
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS__SHIFT 0x0
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH_MASK 0xff
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x0
+#define XDMA_MSTR_CACHE_BASE_ADDR__XDMA_MSTR_CACHE_BASE_ADDR_MASK 0xffffffff
+#define XDMA_MSTR_CACHE_BASE_ADDR__XDMA_MSTR_CACHE_BASE_ADDR__SHIFT 0x0
+#define XDMA_MSTR_CACHE_BASE_ADDR_HIGH__XDMA_MSTR_CACHE_BASE_ADDR_HIGH_MASK 0xff
+#define XDMA_MSTR_CACHE_BASE_ADDR_HIGH__XDMA_MSTR_CACHE_BASE_ADDR_HIGH__SHIFT 0x0
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_PITCH_MASK 0x3fff
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_PITCH__SHIFT 0x0
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_STATE_MASK 0x60000000
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_STATE__SHIFT 0x1d
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_TRANS_MASK 0x80000000
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_TRANS__SHIFT 0x1f
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_X_MASK 0x3fff
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_X__SHIFT 0x0
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_Y_MASK 0x3fff0000
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_Y__SHIFT 0x10
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_DATA_MASK 0xffffff
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_DATA__SHIFT 0x0
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX_MASK 0x7000000
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX__SHIFT 0x18
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX_MODE_MASK 0xc0000000
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX_MODE__SHIFT 0x1e
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_MEAS_ITER_MASK 0xfff
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_MEAS_ITER__SHIFT 0x0
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_SEGID_SEL_MASK 0x1f000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_SEGID_SEL__SHIFT 0xc
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_COUNTER_RST_MASK 0x20000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_COUNTER_RST__SHIFT 0x11
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_MEAS_ITER_MASK 0x7ff80000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_MEAS_ITER__SHIFT 0x13
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_COUNTER_RST_MASK 0x80000000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_COUNTER_RST__SHIFT 0x1f
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LINES_MASK 0x1
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LINES__SHIFT 0x0
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY_MASK 0x200
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY__SHIFT 0x9
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE_MASK 0x400
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE__SHIFT 0xa
+#define XDMA_SLV_CNTL__XDMA_SLV_ALPHA_POSITION_MASK 0x3000
+#define XDMA_SLV_CNTL__XDMA_SLV_ALPHA_POSITION__SHIFT 0xc
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE_MASK 0x10000
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE__SHIFT 0x10
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN_MASK 0x80000
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN__SHIFT 0x13
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET_MASK 0x100000
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET__SHIFT 0x14
+#define XDMA_SLV_CNTL__XDMA_SLV_REQ_MAXED_OUT_MASK 0x1000000
+#define XDMA_SLV_CNTL__XDMA_SLV_REQ_MAXED_OUT__SHIFT 0x18
+#define XDMA_SLV_CNTL__XDMA_SLV_WB_BURST_RESET_MASK 0x2000000
+#define XDMA_SLV_CNTL__XDMA_SLV_WB_BURST_RESET__SHIFT 0x19
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP_MASK 0x300
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP__SHIFT 0x8
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID_MASK 0xf000
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID__SHIFT 0xc
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV_MASK 0x10000
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV__SHIFT 0x10
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH_MASK 0x3fff
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH__SHIFT 0x0
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH_MASK 0x3fff0000
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH__SHIFT 0x10
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL_MASK 0x1
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL__SHIFT 0x0
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT_MASK 0xf0
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT__SHIFT 0x4
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL_MASK 0xf00
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY_MASK 0xf000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY__SHIFT 0xc
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER_MASK 0xffff0000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER__SHIFT 0x10
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_MASK 0x1
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL__SHIFT 0x0
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL_MASK 0xf00
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY_MASK 0xf000
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY__SHIFT 0xc
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE_MASK 0x1ff
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE__SHIFT 0x0
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD_MASK 0xffff0000
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD__SHIFT 0x10
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN_MASK 0xffff
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN__SHIFT 0x0
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX_MASK 0xffff0000
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX__SHIFT 0x10
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC_MASK 0xfffff
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC__SHIFT 0x0
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT_MASK 0xfff00000
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT__SHIFT 0x14
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG_MASK 0x3ff
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG__SHIFT 0x0
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_MASK 0x3000
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK__SHIFT 0xc
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR_MASK 0x10000
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR__SHIFT 0x10
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG_MASK 0xffff
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG__SHIFT 0x0
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_MASK 0x30000
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK__SHIFT 0x10
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR_MASK 0x80000000
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR__SHIFT 0x1f
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_FREE_ENTRIES_MASK 0x3ff
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_FREE_ENTRIES__SHIFT 0x0
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_BUF_SIZE_MASK 0x3ff000
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_BUF_SIZE__SHIFT 0xc
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_STATE_MASK 0xc00000
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_STATE__SHIFT 0x16
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_TRANS_MASK 0x1000000
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_TRANS__SHIFT 0x18
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER_MASK 0xffff
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER__SHIFT 0x0
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING_MASK 0x1
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING__SHIFT 0x0
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_WEIGHT_MASK 0x1ff
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_WEIGHT__SHIFT 0x0
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_STOP_TRANSFER_MASK 0x10000
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_STOP_TRANSFER__SHIFT 0x10
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_SOFT_RESET_MASK 0x20000
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_SOFT_RESET__SHIFT 0x11
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_ACTIVE_MASK 0x1000000
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_ACTIVE__SHIFT 0x18
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS_MASK 0xffffffff
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS__SHIFT 0x0
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH_MASK 0xff
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE0__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE0__tx_rdy__SHIFT 0x8
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE1__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE1__tx_rdy__SHIFT 0x8
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE2__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE2__tx_rdy__SHIFT 0x8
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE3__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE3__tx_rdy__SHIFT 0x8
+#define MARGIN_DEEMPH_LANE0__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE0__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE0__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE0__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE0__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE0__tx_margin_en__SHIFT 0x5
+#define MARGIN_DEEMPH_LANE1__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE1__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE1__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE1__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE1__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE1__tx_margin_en__SHIFT 0x5
+#define MARGIN_DEEMPH_LANE2__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE2__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE2__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE2__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE2__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE2__tx_margin_en__SHIFT 0x5
+#define MARGIN_DEEMPH_LANE3__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE3__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE3__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE3__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE3__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE3__tx_margin_en__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_binary_ron_code_offset__SHIFT 0x16
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_binary_ron_code_offset__SHIFT 0x16
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_binary_ron_code_offset__SHIFT 0x16
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_binary_ron_code_offset__SHIFT 0x16
+#define TX_DISP_RFU0_LANE0__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE0__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU0_LANE1__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE1__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU0_LANE2__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE2__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU0_LANE3__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE3__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU1_LANE0__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE0__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU1_LANE1__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE1__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU1_LANE2__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE2__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU1_LANE3__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE3__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU2_LANE0__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE0__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU2_LANE1__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE1__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU2_LANE2__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE2__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU2_LANE3__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE3__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU3_LANE0__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE0__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU3_LANE1__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE1__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU3_LANE2__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE2__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU3_LANE3__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE3__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU4_LANE0__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE0__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU4_LANE1__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE1__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU4_LANE2__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE2__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU4_LANE3__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE3__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU5_LANE0__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE0__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU5_LANE1__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE1__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU5_LANE2__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE2__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU5_LANE3__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE3__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU6_LANE0__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE0__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU6_LANE1__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE1__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU6_LANE2__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE2__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU6_LANE3__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE3__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU7_LANE0__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE0__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU7_LANE1__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE1__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU7_LANE2__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE2__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU7_LANE3__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE3__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU8_LANE0__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE0__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU8_LANE1__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE1__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU8_LANE2__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE2__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU8_LANE3__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE3__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU9_LANE0__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE0__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU9_LANE1__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE1__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU9_LANE2__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE2__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU9_LANE3__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE3__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU10_LANE0__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE0__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU10_LANE1__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE1__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU10_LANE2__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE2__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU10_LANE3__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE3__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU11_LANE0__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE0__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU11_LANE1__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE1__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU11_LANE2__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE2__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU11_LANE3__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE3__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU12_LANE0__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE0__rfu_value12__SHIFT 0x0
+#define TX_DISP_RFU12_LANE1__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE1__rfu_value12__SHIFT 0x0
+#define TX_DISP_RFU12_LANE2__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE2__rfu_value12__SHIFT 0x0
+#define TX_DISP_RFU12_LANE3__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE3__rfu_value12__SHIFT 0x0
+#define COMMON_MAR_DEEMPH_NOM__tx_margin_nom_MASK 0xff
+#define COMMON_MAR_DEEMPH_NOM__tx_margin_nom__SHIFT 0x0
+#define COMMON_MAR_DEEMPH_NOM__deemph_gen1_nom_MASK 0xff00
+#define COMMON_MAR_DEEMPH_NOM__deemph_gen1_nom__SHIFT 0x8
+#define COMMON_MAR_DEEMPH_NOM__deemph35_gen2_nom_MASK 0xff0000
+#define COMMON_MAR_DEEMPH_NOM__deemph35_gen2_nom__SHIFT 0x10
+#define COMMON_MAR_DEEMPH_NOM__deemph60_gen2_nom_MASK 0xff000000
+#define COMMON_MAR_DEEMPH_NOM__deemph60_gen2_nom__SHIFT 0x18
+#define COMMON_LANE_PWRMGMT__pgdelay_MASK 0xf
+#define COMMON_LANE_PWRMGMT__pgdelay__SHIFT 0x0
+#define COMMON_LANE_PWRMGMT__pgmask_MASK 0x3f0
+#define COMMON_LANE_PWRMGMT__pgmask__SHIFT 0x4
+#define COMMON_LANE_PWRMGMT__vprot_en_MASK 0x800
+#define COMMON_LANE_PWRMGMT__vprot_en__SHIFT 0xb
+#define COMMON_TXCNTRL__rdptr_rst_val_gen3_MASK 0x1f
+#define COMMON_TXCNTRL__rdptr_rst_val_gen3__SHIFT 0x0
+#define COMMON_TXCNTRL__clkgate_dis_MASK 0x20
+#define COMMON_TXCNTRL__clkgate_dis__SHIFT 0x5
+#define COMMON_TXCNTRL__slew_rate_ctl_gen1_MASK 0x1c0
+#define COMMON_TXCNTRL__slew_rate_ctl_gen1__SHIFT 0x6
+#define COMMON_TXCNTRL__slew_rate_ctl_gen2_MASK 0xe00
+#define COMMON_TXCNTRL__slew_rate_ctl_gen2__SHIFT 0x9
+#define COMMON_TXCNTRL__slew_rate_ctl_gen3_MASK 0x7000
+#define COMMON_TXCNTRL__slew_rate_ctl_gen3__SHIFT 0xc
+#define COMMON_TXCNTRL__dual_dvi_mstr_en_MASK 0x8000
+#define COMMON_TXCNTRL__dual_dvi_mstr_en__SHIFT 0xf
+#define COMMON_TXCNTRL__dual_dvi_en_MASK 0x10000
+#define COMMON_TXCNTRL__dual_dvi_en__SHIFT 0x10
+#define COMMON_TMDP__tmdp_spare_MASK 0xffffffff
+#define COMMON_TMDP__tmdp_spare__SHIFT 0x0
+#define COMMON_LANE_RESETS__lane_0_reset_l_MASK 0x1
+#define COMMON_LANE_RESETS__lane_0_reset_l__SHIFT 0x0
+#define COMMON_LANE_RESETS__lane_1_reset_l_MASK 0x2
+#define COMMON_LANE_RESETS__lane_1_reset_l__SHIFT 0x1
+#define COMMON_LANE_RESETS__lane_2_reset_l_MASK 0x4
+#define COMMON_LANE_RESETS__lane_2_reset_l__SHIFT 0x2
+#define COMMON_LANE_RESETS__lane_3_reset_l_MASK 0x8
+#define COMMON_LANE_RESETS__lane_3_reset_l__SHIFT 0x3
+#define COMMON_LANE_RESETS__lane_4_reset_l_MASK 0x10
+#define COMMON_LANE_RESETS__lane_4_reset_l__SHIFT 0x4
+#define COMMON_LANE_RESETS__lane_5_reset_l_MASK 0x20
+#define COMMON_LANE_RESETS__lane_5_reset_l__SHIFT 0x5
+#define COMMON_LANE_RESETS__lane_6_reset_l_MASK 0x40
+#define COMMON_LANE_RESETS__lane_6_reset_l__SHIFT 0x6
+#define COMMON_LANE_RESETS__lane_7_reset_l_MASK 0x80
+#define COMMON_LANE_RESETS__lane_7_reset_l__SHIFT 0x7
+#define COMMON_ZCALCODE_CTRL__zcalcode_override_MASK 0x1
+#define COMMON_ZCALCODE_CTRL__zcalcode_override__SHIFT 0x0
+#define COMMON_ZCALCODE_CTRL__tx_binary_code_override_val_MASK 0x3e
+#define COMMON_ZCALCODE_CTRL__tx_binary_code_override_val__SHIFT 0x1
+#define COMMON_ZCALCODE_CTRL__tx_driver_fifty_ohms_MASK 0x200000
+#define COMMON_ZCALCODE_CTRL__tx_driver_fifty_ohms__SHIFT 0x15
+#define COMMON_DISP_RFU1__rfu_value1_MASK 0xffffffff
+#define COMMON_DISP_RFU1__rfu_value1__SHIFT 0x0
+#define COMMON_DISP_RFU2__rfu_value2_MASK 0xffffffff
+#define COMMON_DISP_RFU2__rfu_value2__SHIFT 0x0
+#define COMMON_DISP_RFU3__rfu_value3_MASK 0xffffffff
+#define COMMON_DISP_RFU3__rfu_value3__SHIFT 0x0
+#define COMMON_DISP_RFU4__rfu_value4_MASK 0xffffffff
+#define COMMON_DISP_RFU4__rfu_value4__SHIFT 0x0
+#define COMMON_DISP_RFU5__rfu_value5_MASK 0xffffffff
+#define COMMON_DISP_RFU5__rfu_value5__SHIFT 0x0
+#define COMMON_DISP_RFU6__rfu_value6_MASK 0xffffffff
+#define COMMON_DISP_RFU6__rfu_value6__SHIFT 0x0
+#define COMMON_DISP_RFU7__rfu_value7_MASK 0xffffffff
+#define COMMON_DISP_RFU7__rfu_value7__SHIFT 0x0
+#define FREQ_CTRL0__fcw0_frac_MASK 0xffff
+#define FREQ_CTRL0__fcw0_frac__SHIFT 0x0
+#define FREQ_CTRL0__fcw0_int_MASK 0x1ff0000
+#define FREQ_CTRL0__fcw0_int__SHIFT 0x10
+#define FREQ_CTRL1__fcw1_frac_MASK 0xffff
+#define FREQ_CTRL1__fcw1_frac__SHIFT 0x0
+#define FREQ_CTRL1__fcw1_int_MASK 0x1ff0000
+#define FREQ_CTRL1__fcw1_int__SHIFT 0x10
+#define FREQ_CTRL2__fcw_denom_MASK 0xffff
+#define FREQ_CTRL2__fcw_denom__SHIFT 0x0
+#define FREQ_CTRL2__fcw_slew_frac_MASK 0xffff0000
+#define FREQ_CTRL2__fcw_slew_frac__SHIFT 0x10
+#define FREQ_CTRL3__refclk_div_MASK 0x3
+#define FREQ_CTRL3__refclk_div__SHIFT 0x0
+#define FREQ_CTRL3__vco_pre_div_MASK 0x18
+#define FREQ_CTRL3__vco_pre_div__SHIFT 0x3
+#define FREQ_CTRL3__fracn_en_MASK 0x40
+#define FREQ_CTRL3__fracn_en__SHIFT 0x6
+#define FREQ_CTRL3__ssc_en_MASK 0x100
+#define FREQ_CTRL3__ssc_en__SHIFT 0x8
+#define FREQ_CTRL3__fcw_sel_MASK 0x400
+#define FREQ_CTRL3__fcw_sel__SHIFT 0xa
+#define FREQ_CTRL3__freq_jump_en_MASK 0x1000
+#define FREQ_CTRL3__freq_jump_en__SHIFT 0xc
+#define FREQ_CTRL3__tdc_resolution_MASK 0xff0000
+#define FREQ_CTRL3__tdc_resolution__SHIFT 0x10
+#define FREQ_CTRL3__dpll_cfg_1_MASK 0xff000000
+#define FREQ_CTRL3__dpll_cfg_1__SHIFT 0x18
+#define BW_CTRL_COARSE__gi_coarse_mant_MASK 0x3
+#define BW_CTRL_COARSE__gi_coarse_mant__SHIFT 0x0
+#define BW_CTRL_COARSE__gi_coarse_exp_MASK 0x3c
+#define BW_CTRL_COARSE__gi_coarse_exp__SHIFT 0x2
+#define BW_CTRL_COARSE__gp_coarse_mant_MASK 0x780
+#define BW_CTRL_COARSE__gp_coarse_mant__SHIFT 0x7
+#define BW_CTRL_COARSE__gp_coarse_exp_MASK 0xf000
+#define BW_CTRL_COARSE__gp_coarse_exp__SHIFT 0xc
+#define BW_CTRL_COARSE__nctl_coarse_res_MASK 0x7e0000
+#define BW_CTRL_COARSE__nctl_coarse_res__SHIFT 0x11
+#define BW_CTRL_COARSE__nctl_coarse_frac_res_MASK 0x3000000
+#define BW_CTRL_COARSE__nctl_coarse_frac_res__SHIFT 0x18
+#define BW_CTRL_FINE__dpll_cfg_3_MASK 0x3ff
+#define BW_CTRL_FINE__dpll_cfg_3__SHIFT 0x0
+#define CAL_CTRL__bypass_freq_lock_MASK 0x1
+#define CAL_CTRL__bypass_freq_lock__SHIFT 0x0
+#define CAL_CTRL__tdc_cal_en_MASK 0x2
+#define CAL_CTRL__tdc_cal_en__SHIFT 0x1
+#define CAL_CTRL__tdc_cal_ctrl_MASK 0x1f8
+#define CAL_CTRL__tdc_cal_ctrl__SHIFT 0x3
+#define CAL_CTRL__meas_win_sel_MASK 0x600
+#define CAL_CTRL__meas_win_sel__SHIFT 0x9
+#define CAL_CTRL__kdco_cal_dis_MASK 0x800
+#define CAL_CTRL__kdco_cal_dis__SHIFT 0xb
+#define CAL_CTRL__kdco_ratio_MASK 0x1fe000
+#define CAL_CTRL__kdco_ratio__SHIFT 0xd
+#define CAL_CTRL__kdco_incr_cal_dis_MASK 0x400000
+#define CAL_CTRL__kdco_incr_cal_dis__SHIFT 0x16
+#define CAL_CTRL__nctl_adj_dis_MASK 0x800000
+#define CAL_CTRL__nctl_adj_dis__SHIFT 0x17
+#define CAL_CTRL__refclk_rate_MASK 0xff000000
+#define CAL_CTRL__refclk_rate__SHIFT 0x18
+#define LOOP_CTRL__fbdiv_mask_en_MASK 0x1
+#define LOOP_CTRL__fbdiv_mask_en__SHIFT 0x0
+#define LOOP_CTRL__fb_slip_dis_MASK 0x4
+#define LOOP_CTRL__fb_slip_dis__SHIFT 0x2
+#define LOOP_CTRL__clk_tdc_sel_MASK 0x30
+#define LOOP_CTRL__clk_tdc_sel__SHIFT 0x4
+#define LOOP_CTRL__clk_nctl_sel_MASK 0x180
+#define LOOP_CTRL__clk_nctl_sel__SHIFT 0x7
+#define LOOP_CTRL__sig_del_patt_sel_MASK 0x400
+#define LOOP_CTRL__sig_del_patt_sel__SHIFT 0xa
+#define LOOP_CTRL__nctl_sig_del_dis_MASK 0x1000
+#define LOOP_CTRL__nctl_sig_del_dis__SHIFT 0xc
+#define LOOP_CTRL__fbclk_track_refclk_MASK 0x4000
+#define LOOP_CTRL__fbclk_track_refclk__SHIFT 0xe
+#define LOOP_CTRL__prbs_en_MASK 0x10000
+#define LOOP_CTRL__prbs_en__SHIFT 0x10
+#define LOOP_CTRL__tdc_clk_gate_en_MASK 0x40000
+#define LOOP_CTRL__tdc_clk_gate_en__SHIFT 0x12
+#define LOOP_CTRL__phase_offset_MASK 0x7f00000
+#define LOOP_CTRL__phase_offset__SHIFT 0x14
+#define VREG_CFG__bleeder_ac_MASK 0x1
+#define VREG_CFG__bleeder_ac__SHIFT 0x0
+#define VREG_CFG__bleeder_en_MASK 0x2
+#define VREG_CFG__bleeder_en__SHIFT 0x1
+#define VREG_CFG__is_1p2_MASK 0x4
+#define VREG_CFG__is_1p2__SHIFT 0x2
+#define VREG_CFG__reg_obs_sel_MASK 0x18
+#define VREG_CFG__reg_obs_sel__SHIFT 0x3
+#define VREG_CFG__reg_on_mode_MASK 0x60
+#define VREG_CFG__reg_on_mode__SHIFT 0x5
+#define VREG_CFG__rlad_tap_sel_MASK 0x780
+#define VREG_CFG__rlad_tap_sel__SHIFT 0x7
+#define VREG_CFG__reg_off_hi_MASK 0x800
+#define VREG_CFG__reg_off_hi__SHIFT 0xb
+#define VREG_CFG__reg_off_lo_MASK 0x1000
+#define VREG_CFG__reg_off_lo__SHIFT 0xc
+#define VREG_CFG__scale_driver_MASK 0x6000
+#define VREG_CFG__scale_driver__SHIFT 0xd
+#define VREG_CFG__sel_bump_MASK 0x8000
+#define VREG_CFG__sel_bump__SHIFT 0xf
+#define VREG_CFG__sel_rladder_x_MASK 0x10000
+#define VREG_CFG__sel_rladder_x__SHIFT 0x10
+#define VREG_CFG__short_rc_filt_x_MASK 0x20000
+#define VREG_CFG__short_rc_filt_x__SHIFT 0x11
+#define VREG_CFG__vref_pwr_on_MASK 0x40000
+#define VREG_CFG__vref_pwr_on__SHIFT 0x12
+#define VREG_CFG__dpll_cfg_2_MASK 0xff00000
+#define VREG_CFG__dpll_cfg_2__SHIFT 0x14
+#define OBSERVE0__lock_det_tdc_steps_MASK 0x1f
+#define OBSERVE0__lock_det_tdc_steps__SHIFT 0x0
+#define OBSERVE0__clear_sticky_lock_MASK 0x40
+#define OBSERVE0__clear_sticky_lock__SHIFT 0x6
+#define OBSERVE0__lock_det_dis_MASK 0x100
+#define OBSERVE0__lock_det_dis__SHIFT 0x8
+#define OBSERVE0__dco_cfg_MASK 0x3fc00
+#define OBSERVE0__dco_cfg__SHIFT 0xa
+#define OBSERVE0__anaobs_sel_MASK 0xe00000
+#define OBSERVE0__anaobs_sel__SHIFT 0x15
+#define OBSERVE1__digobs_sel_MASK 0xf
+#define OBSERVE1__digobs_sel__SHIFT 0x0
+#define OBSERVE1__digobs_trig_sel_MASK 0x1e0
+#define OBSERVE1__digobs_trig_sel__SHIFT 0x5
+#define OBSERVE1__digobs_div_MASK 0xc00
+#define OBSERVE1__digobs_div__SHIFT 0xa
+#define OBSERVE1__digobs_trig_div_MASK 0x6000
+#define OBSERVE1__digobs_trig_div__SHIFT 0xd
+#define OBSERVE1__lock_timer_MASK 0x3fff0000
+#define OBSERVE1__lock_timer__SHIFT 0x10
+#define DFT_OUT__dft_data_MASK 0xffffffff
+#define DFT_OUT__dft_data__SHIFT 0x0
+#define PLL_WRAP_CNTRL1__wrap_cfg_sel_clk_MASK 0x3
+#define PLL_WRAP_CNTRL1__wrap_cfg_sel_clk__SHIFT 0x0
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_freq_programming_ovveride_MASK 0x1
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_freq_programming_ovveride__SHIFT 0x0
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state_ovrride_MASK 0x2
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state_ovrride__SHIFT 0x1
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state_MASK 0xc
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state__SHIFT 0x2
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pdiv_val_MASK 0xe0
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pdiv_val__SHIFT 0x5
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pixdiv_val_MASK 0x100
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pixdiv_val__SHIFT 0x8
+#define PLL_WRAP_CNTRL__wrap_cfg_cml_cmos_sel_MASK 0x400
+#define PLL_WRAP_CNTRL__wrap_cfg_cml_cmos_sel__SHIFT 0xa
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_rdy_MASK 0x2000
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_rdy__SHIFT 0xd
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_update_MASK 0x4000
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_update__SHIFT 0xe
+#define PLL_WRAP_CNTRL__wrap_cfg_ref_values_chg_MASK 0x8000
+#define PLL_WRAP_CNTRL__wrap_cfg_ref_values_chg__SHIFT 0xf
+#define PLL_WRAP_CNTRL__wrap_cfg_clk_gate_w_rdy_MASK 0x10000
+#define PLL_WRAP_CNTRL__wrap_cfg_clk_gate_w_rdy__SHIFT 0x10
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_dsm_sel_MASK 0xe0000
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_dsm_sel__SHIFT 0x11
+#define PPLL_VREG_CFG__pw_pc_bleeder_ac_MASK 0x1
+#define PPLL_VREG_CFG__pw_pc_bleeder_ac__SHIFT 0x0
+#define PPLL_VREG_CFG__pw_pc_bleeder_en_MASK 0x2
+#define PPLL_VREG_CFG__pw_pc_bleeder_en__SHIFT 0x1
+#define PPLL_VREG_CFG__pw_pc_is_1p2_MASK 0x4
+#define PPLL_VREG_CFG__pw_pc_is_1p2__SHIFT 0x2
+#define PPLL_VREG_CFG__pw_pc_reg_obs_sel_MASK 0x18
+#define PPLL_VREG_CFG__pw_pc_reg_obs_sel__SHIFT 0x3
+#define PPLL_VREG_CFG__pw_pc_reg_on_mode_MASK 0x60
+#define PPLL_VREG_CFG__pw_pc_reg_on_mode__SHIFT 0x5
+#define PPLL_VREG_CFG__pw_pc_rlad_tap_sel_MASK 0x780
+#define PPLL_VREG_CFG__pw_pc_rlad_tap_sel__SHIFT 0x7
+#define PPLL_VREG_CFG__pw_pc_reg_off_hi_MASK 0x800
+#define PPLL_VREG_CFG__pw_pc_reg_off_hi__SHIFT 0xb
+#define PPLL_VREG_CFG__pw_pc_reg_off_lo_MASK 0x1000
+#define PPLL_VREG_CFG__pw_pc_reg_off_lo__SHIFT 0xc
+#define PPLL_VREG_CFG__pw_pc_scale_driver_MASK 0x6000
+#define PPLL_VREG_CFG__pw_pc_scale_driver__SHIFT 0xd
+#define PPLL_VREG_CFG__pw_pc_sel_bump_MASK 0x8000
+#define PPLL_VREG_CFG__pw_pc_sel_bump__SHIFT 0xf
+#define PPLL_VREG_CFG__pw_pc_sel_rladder_x_MASK 0x10000
+#define PPLL_VREG_CFG__pw_pc_sel_rladder_x__SHIFT 0x10
+#define PPLL_VREG_CFG__pw_pc_short_rc_filt_x_MASK 0x20000
+#define PPLL_VREG_CFG__pw_pc_short_rc_filt_x__SHIFT 0x11
+#define PPLL_VREG_CFG__pw_pc_vref_pwr_on_MASK 0x40000
+#define PPLL_VREG_CFG__pw_pc_vref_pwr_on__SHIFT 0x12
+#define PPLL_VREG_CFG__pw_pc_dpll_cfg_2_MASK 0xff00000
+#define PPLL_VREG_CFG__pw_pc_dpll_cfg_2__SHIFT 0x14
+#define PPLL_MODE_CNTL__pw_pc_refclk_gate_dis_MASK 0x1
+#define PPLL_MODE_CNTL__pw_pc_refclk_gate_dis__SHIFT 0x0
+#define PPLL_MODE_CNTL__pw_pc_multi_phase_en_MASK 0xf00
+#define PPLL_MODE_CNTL__pw_pc_multi_phase_en__SHIFT 0x8
+#define PPLL_MODE_CNTL__reg_tmg_pwr_state_MASK 0x30000
+#define PPLL_MODE_CNTL__reg_tmg_pwr_state__SHIFT 0x10
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_frac_MASK 0xffff
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_frac__SHIFT 0x0
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_int_MASK 0x1ff0000
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_int__SHIFT 0x10
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_frac_MASK 0xffff
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_frac__SHIFT 0x0
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_int_MASK 0x1ff0000
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_int__SHIFT 0x10
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_denom_MASK 0xffff
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_denom__SHIFT 0x0
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_slew_frac_MASK 0xffff0000
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_slew_frac__SHIFT 0x10
+#define PPLL_FREQ_CTRL3__reg_tmg_refclk_div_MASK 0x3
+#define PPLL_FREQ_CTRL3__reg_tmg_refclk_div__SHIFT 0x0
+#define PPLL_FREQ_CTRL3__reg_tmg_vco_pre_div_MASK 0x18
+#define PPLL_FREQ_CTRL3__reg_tmg_vco_pre_div__SHIFT 0x3
+#define PPLL_FREQ_CTRL3__reg_tmg_fracn_en_MASK 0x40
+#define PPLL_FREQ_CTRL3__reg_tmg_fracn_en__SHIFT 0x6
+#define PPLL_FREQ_CTRL3__reg_tmg_ssc_en_MASK 0x100
+#define PPLL_FREQ_CTRL3__reg_tmg_ssc_en__SHIFT 0x8
+#define PPLL_FREQ_CTRL3__reg_tmg_fcw_sel_MASK 0x400
+#define PPLL_FREQ_CTRL3__reg_tmg_fcw_sel__SHIFT 0xa
+#define PPLL_FREQ_CTRL3__reg_tmg_freq_jump_en_MASK 0x1000
+#define PPLL_FREQ_CTRL3__reg_tmg_freq_jump_en__SHIFT 0xc
+#define PPLL_FREQ_CTRL3__reg_tmg_tdc_resol_MASK 0xff0000
+#define PPLL_FREQ_CTRL3__reg_tmg_tdc_resol__SHIFT 0x10
+#define PPLL_FREQ_CTRL3__pw_pc_dpll_cfg_1_MASK 0xff000000
+#define PPLL_FREQ_CTRL3__pw_pc_dpll_cfg_1__SHIFT 0x18
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_mant_MASK 0x3
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_mant__SHIFT 0x0
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_exp_MASK 0x3c
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_exp__SHIFT 0x2
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_mant_MASK 0x780
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_mant__SHIFT 0x7
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_exp_MASK 0xf000
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_exp__SHIFT 0xc
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_res_MASK 0x7e0000
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_res__SHIFT 0x11
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_frac_res_MASK 0x3000000
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_frac_res__SHIFT 0x18
+#define PPLL_BW_CTRL_FINE__pw_pc_dpll_cfg_3_MASK 0x3ff
+#define PPLL_BW_CTRL_FINE__pw_pc_dpll_cfg_3__SHIFT 0x0
+#define PPLL_CAL_CTRL__pw_pc_bypass_freq_lock_MASK 0x1
+#define PPLL_CAL_CTRL__pw_pc_bypass_freq_lock__SHIFT 0x0
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_en_MASK 0x2
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_en__SHIFT 0x1
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_ctrl_MASK 0x1f8
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_ctrl__SHIFT 0x3
+#define PPLL_CAL_CTRL__pw_pc_meas_win_sel_MASK 0x600
+#define PPLL_CAL_CTRL__pw_pc_meas_win_sel__SHIFT 0x9
+#define PPLL_CAL_CTRL__pw_pc_kdco_cal_dis_MASK 0x800
+#define PPLL_CAL_CTRL__pw_pc_kdco_cal_dis__SHIFT 0xb
+#define PPLL_CAL_CTRL__pw_pc_kdco_ratio_MASK 0x1fe000
+#define PPLL_CAL_CTRL__pw_pc_kdco_ratio__SHIFT 0xd
+#define PPLL_CAL_CTRL__pw_pc_kdco_incr_cal_dis_MASK 0x400000
+#define PPLL_CAL_CTRL__pw_pc_kdco_incr_cal_dis__SHIFT 0x16
+#define PPLL_CAL_CTRL__pw_pc_nctl_adj_dis_MASK 0x800000
+#define PPLL_CAL_CTRL__pw_pc_nctl_adj_dis__SHIFT 0x17
+#define PPLL_CAL_CTRL__pw_pc_refclk_rate_MASK 0xff000000
+#define PPLL_CAL_CTRL__pw_pc_refclk_rate__SHIFT 0x18
+#define PPLL_LOOP_CTRL__pw_pc_fbdiv_mask_en_MASK 0x1
+#define PPLL_LOOP_CTRL__pw_pc_fbdiv_mask_en__SHIFT 0x0
+#define PPLL_LOOP_CTRL__pw_pc_fb_slip_dis_MASK 0x4
+#define PPLL_LOOP_CTRL__pw_pc_fb_slip_dis__SHIFT 0x2
+#define PPLL_LOOP_CTRL__pw_pc_clk_tdc_sel_MASK 0x30
+#define PPLL_LOOP_CTRL__pw_pc_clk_tdc_sel__SHIFT 0x4
+#define PPLL_LOOP_CTRL__pw_pc_clk_nctl_sel_MASK 0x180
+#define PPLL_LOOP_CTRL__pw_pc_clk_nctl_sel__SHIFT 0x7
+#define PPLL_LOOP_CTRL__pw_pc_sig_del_patt_sel_MASK 0x400
+#define PPLL_LOOP_CTRL__pw_pc_sig_del_patt_sel__SHIFT 0xa
+#define PPLL_LOOP_CTRL__pw_pc_nctl_sig_del_dis_MASK 0x1000
+#define PPLL_LOOP_CTRL__pw_pc_nctl_sig_del_dis__SHIFT 0xc
+#define PPLL_LOOP_CTRL__pw_pc_fbclk_track_refclk_MASK 0x4000
+#define PPLL_LOOP_CTRL__pw_pc_fbclk_track_refclk__SHIFT 0xe
+#define PPLL_LOOP_CTRL__pw_pc_prbs_en_MASK 0x10000
+#define PPLL_LOOP_CTRL__pw_pc_prbs_en__SHIFT 0x10
+#define PPLL_LOOP_CTRL__pw_pc_tdc_clk_gate_en_MASK 0x40000
+#define PPLL_LOOP_CTRL__pw_pc_tdc_clk_gate_en__SHIFT 0x12
+#define PPLL_LOOP_CTRL__pw_pc_phase_offset_MASK 0x7f00000
+#define PPLL_LOOP_CTRL__pw_pc_phase_offset__SHIFT 0x14
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_en_MASK 0x1
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_en__SHIFT 0x0
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_en_MASK 0x2
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_en__SHIFT 0x1
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_en_MASK 0x4
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_en__SHIFT 0x2
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_en_MASK 0x8
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_en__SHIFT 0x3
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_sel_MASK 0x100
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_sel__SHIFT 0x8
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_sel_MASK 0x200
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_sel__SHIFT 0x9
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_sel_MASK 0x400
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_sel__SHIFT 0xa
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_sel_MASK 0x800
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_sel__SHIFT 0xb
+#define PPLL_REFCLK_CNTL__regs_pw_refdivsrc_MASK 0xc000
+#define PPLL_REFCLK_CNTL__regs_pw_refdivsrc__SHIFT 0xe
+#define PPLL_REFCLK_CNTL__regs_pw_ref2core_sel_MASK 0x10000
+#define PPLL_REFCLK_CNTL__regs_pw_ref2core_sel__SHIFT 0x10
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pre_pdivsel_MASK 0x100
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pre_pdivsel__SHIFT 0x8
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pdivsel_MASK 0x200
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pdivsel__SHIFT 0x9
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pre_pdivsel_MASK 0x400
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pre_pdivsel__SHIFT 0xa
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pdivsel_MASK 0x800
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pdivsel__SHIFT 0xb
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_en_MASK 0x1000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_en__SHIFT 0xc
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pre_pdivsel_MASK 0x2000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pre_pdivsel__SHIFT 0xd
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pdivsel_MASK 0x4000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pdivsel__SHIFT 0xe
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_obs_sel_MASK 0x8000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_obs_sel__SHIFT 0xf
+#define PPLL_CLKOUT_CNTL__regs_pw_refclk_sel_MASK 0x30000
+#define PPLL_CLKOUT_CNTL__regs_pw_refclk_sel__SHIFT 0x10
+#define PPLL_CLKOUT_CNTL__regs_cc_resetb_MASK 0x100000
+#define PPLL_CLKOUT_CNTL__regs_cc_resetb__SHIFT 0x14
+#define PPLL_DFT_CNTL__regs_pw_obs_en_MASK 0x1
+#define PPLL_DFT_CNTL__regs_pw_obs_en__SHIFT 0x0
+#define PPLL_DFT_CNTL__regs_pw_obs_div_sel_1_MASK 0x6
+#define PPLL_DFT_CNTL__regs_pw_obs_div_sel_1__SHIFT 0x1
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_1_MASK 0xf0
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_1__SHIFT 0x4
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_2_MASK 0xf00
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_2__SHIFT 0x8
+#define PPLL_DFT_CNTL__regs_pw_obs_sel_MASK 0x3000
+#define PPLL_DFT_CNTL__regs_pw_obs_sel__SHIFT 0xc
+#define PPLL_ANALOG_CNTL__regs_pw_spare_MASK 0xff
+#define PPLL_ANALOG_CNTL__regs_pw_spare__SHIFT 0x0
+#define PPLL_POSTDIV__reg_tmg_postdiv_MASK 0xf00
+#define PPLL_POSTDIV__reg_tmg_postdiv__SHIFT 0x8
+#define PPLL_POSTDIV__reg_tmg_pixclk_pdiv2_MASK 0x1000
+#define PPLL_POSTDIV__reg_tmg_pixclk_pdiv2__SHIFT 0xc
+#define PPLL_DEBUG0__pw_pc_phase_jump_trig_MASK 0x2
+#define PPLL_DEBUG0__pw_pc_phase_jump_trig__SHIFT 0x1
+#define PPLL_DEBUG0__pw_pc_fine_tdc_dis_MASK 0x4
+#define PPLL_DEBUG0__pw_pc_fine_tdc_dis__SHIFT 0x2
+#define PPLL_DEBUG0__pw_pc_coarse_tdc_dis_MASK 0x8
+#define PPLL_DEBUG0__pw_pc_coarse_tdc_dis__SHIFT 0x3
+#define PPLL_DEBUG0__pw_pc_alt_nctl_en_MASK 0x10
+#define PPLL_DEBUG0__pw_pc_alt_nctl_en__SHIFT 0x4
+#define PPLL_DEBUG0__pw_pc_alt_nctl_MASK 0x1ffffe0
+#define PPLL_DEBUG0__pw_pc_alt_nctl__SHIFT 0x5
+#define PPLL_DEBUG0__pw_pc_nctl_coarse_step_dis_MASK 0x2000000
+#define PPLL_DEBUG0__pw_pc_nctl_coarse_step_dis__SHIFT 0x19
+#define PPLL_DEBUG0__pw_pc_trig_coarse_step_MASK 0x4000000
+#define PPLL_DEBUG0__pw_pc_trig_coarse_step__SHIFT 0x1a
+#define PPLL_DEBUG0__pw_pc_dft_sel_MASK 0x38000000
+#define PPLL_DEBUG0__pw_pc_dft_sel__SHIFT 0x1b
+#define PPLL_DEBUG0__pw_pc_dft_capture_MASK 0x40000000
+#define PPLL_DEBUG0__pw_pc_dft_capture__SHIFT 0x1e
+#define PPLL_OBSERVE0__pw_pc_lock_det_tdc_steps_MASK 0x1f
+#define PPLL_OBSERVE0__pw_pc_lock_det_tdc_steps__SHIFT 0x0
+#define PPLL_OBSERVE0__pw_pc_clear_sticky_lock_MASK 0x40
+#define PPLL_OBSERVE0__pw_pc_clear_sticky_lock__SHIFT 0x6
+#define PPLL_OBSERVE0__pw_pc_lock_det_dis_MASK 0x100
+#define PPLL_OBSERVE0__pw_pc_lock_det_dis__SHIFT 0x8
+#define PPLL_OBSERVE0__pw_pc_dco_cfg_MASK 0x3fc00
+#define PPLL_OBSERVE0__pw_pc_dco_cfg__SHIFT 0xa
+#define PPLL_OBSERVE0__pw_pc_anaobs_sel_MASK 0xe00000
+#define PPLL_OBSERVE0__pw_pc_anaobs_sel__SHIFT 0x15
+#define PPLL_OBSERVE1__pw_pc_digobs_sel_MASK 0xf
+#define PPLL_OBSERVE1__pw_pc_digobs_sel__SHIFT 0x0
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_sel_MASK 0x1e0
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_sel__SHIFT 0x5
+#define PPLL_OBSERVE1__pw_pc_digobs_div_MASK 0xc00
+#define PPLL_OBSERVE1__pw_pc_digobs_div__SHIFT 0xa
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_div_MASK 0x3000
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_div__SHIFT 0xc
+#define PPLL_OBSERVE1__reg_tmg_lock_timer_MASK 0x3fff0000
+#define PPLL_OBSERVE1__reg_tmg_lock_timer__SHIFT 0x10
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_LOCK_MASK 0x4
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_LOCK__SHIFT 0x2
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_POINT_MASK 0x8
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_POINT__SHIFT 0x3
+#define PPLL_UPDATE_CNTL__tmg_reg_UPDATE_PENDING_MASK 0x100
+#define PPLL_UPDATE_CNTL__tmg_reg_UPDATE_PENDING__SHIFT 0x8
+#define PPLL_UPDATE_CNTL__pc_pw_pll_rdy_MASK 0x200
+#define PPLL_UPDATE_CNTL__pc_pw_pll_rdy__SHIFT 0x9
+#define PPLL_UPDATE_CNTL__TieLow1_MASK 0x10000
+#define PPLL_UPDATE_CNTL__TieLow1__SHIFT 0x10
+#define PPLL_OBSERVE0_OUT__disppll_core_obsout_MASK 0xffffffff
+#define PPLL_OBSERVE0_OUT__disppll_core_obsout__SHIFT 0x0
+#define PPLL_STATUS_DEBUG1__dbg_pll_rdy_MASK 0x1
+#define PPLL_STATUS_DEBUG1__dbg_pll_rdy__SHIFT 0x0
+#define PPLL_STATUS_DEBUG1__core_disppll_pwr_ok_vddp_MASK 0x2
+#define PPLL_STATUS_DEBUG1__core_disppll_pwr_ok_vddp__SHIFT 0x1
+#define PPLL_STATUS_DEBUG1__core_disppll_rcu_dc_resetb_vddp_MASK 0x4
+#define PPLL_STATUS_DEBUG1__core_disppll_rcu_dc_resetb_vddp__SHIFT 0x2
+#define PPLL_DEBUG_MUX_CNTL__DEBUG_BUS_MUX_SEL_MASK 0x1f
+#define PPLL_DEBUG_MUX_CNTL__DEBUG_BUS_MUX_SEL__SHIFT 0x0
+#define PPLL_DIV_UPDATE_DEBUG__TieLow2_MASK 0x1
+#define PPLL_DIV_UPDATE_DEBUG__TieLow2__SHIFT 0x0
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_FB_DIV_CHANGED_MASK 0x2
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_FB_DIV_CHANGED__SHIFT 0x1
+#define PPLL_DIV_UPDATE_DEBUG__dbg_UPDATE_PENDING_MASK 0x4
+#define PPLL_DIV_UPDATE_DEBUG__dbg_UPDATE_PENDING__SHIFT 0x2
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_CURRENT_STATE_MASK 0x18
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_CURRENT_STATE__SHIFT 0x3
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ENABLE_MASK 0x20
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ENABLE__SHIFT 0x5
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_REQ_MASK 0x40
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_REQ__SHIFT 0x6
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ACK_MASK 0x80
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ACK__SHIFT 0x7
+#define PPLL_STATUS_DEBUG0__obsout_MASK 0xffffffff
+#define PPLL_STATUS_DEBUG0__obsout__SHIFT 0x0
+#define COMP_EN_CTL__comp_en_MASK 0x1
+#define COMP_EN_CTL__comp_en__SHIFT 0x0
+#define COMP_EN_CTL__comp_en_override_MASK 0x4
+#define COMP_EN_CTL__comp_en_override__SHIFT 0x2
+#define COMP_EN_CTL__comp_done_MASK 0x10
+#define COMP_EN_CTL__comp_done__SHIFT 0x4
+#define COMP_EN_CTL__zcal_code_override_MASK 0x40
+#define COMP_EN_CTL__zcal_code_override__SHIFT 0x6
+#define COMP_EN_CTL__zcal_cal_rtt_MASK 0x80
+#define COMP_EN_CTL__zcal_cal_rtt__SHIFT 0x7
+#define COMP_EN_CTL__zcal_base_en_MASK 0x100
+#define COMP_EN_CTL__zcal_base_en__SHIFT 0x8
+#define COMP_EN_CTL__zcal_ht_rtt_sel_MASK 0x200
+#define COMP_EN_CTL__zcal_ht_rtt_sel__SHIFT 0x9
+#define COMP_EN_CTL__zcal_code_MASK 0x7c00
+#define COMP_EN_CTL__zcal_code__SHIFT 0xa
+#define COMP_EN_CTL__zcal_ron_cal_mode_MASK 0x10000
+#define COMP_EN_CTL__zcal_ron_cal_mode__SHIFT 0x10
+#define COMP_EN_CTL__zcal_ana_dbg_sel_MASK 0x60000
+#define COMP_EN_CTL__zcal_ana_dbg_sel__SHIFT 0x11
+#define COMP_EN_CTL__cfg_cml_cmos_sel_MASK 0x80000
+#define COMP_EN_CTL__cfg_cml_cmos_sel__SHIFT 0x13
+#define COMP_EN_CTL__dsm_sel_MASK 0xf00000
+#define COMP_EN_CTL__dsm_sel__SHIFT 0x14
+#define DPCSTX_PHY_CNTL__DPCS_PHY_RESET_MASK 0x1
+#define DPCSTX_PHY_CNTL__DPCS_PHY_RESET__SHIFT 0x0
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x1
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x2
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x4
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x8
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX0_EN_MASK 0x10
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX1_EN_MASK 0x20
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX2_EN_MASK 0x40
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX3_EN_MASK 0x80
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define DPCSTX_TX_CNTL__DPCS_TX_RESYNC_MASK 0x1
+#define DPCSTX_TX_CNTL__DPCS_TX_RESYNC__SHIFT 0x0
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_EN_MASK 0x2
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_EN__SHIFT 0x1
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE_OVERRIDE_EN_MASK 0x4
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE_OVERRIDE_EN__SHIFT 0x2
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE_MASK 0xf0
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE__SHIFT 0x4
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_DELAY_MASK 0x700
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_DELAY__SHIFT 0x8
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x1000
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x2000
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x4000
+#define DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x10000
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x20000
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_WR_START_DELAY_MASK 0xf00000
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_WR_START_DELAY__SHIFT 0x14
+#define DPCSTX_TX_CNTL__DPCS_TX_DVI_LINK_MODE_MASK 0x3000000
+#define DPCSTX_TX_CNTL__DPCS_TX_DVI_LINK_MODE__SHIFT 0x18
+#define DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000
+#define DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0xf
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0xff00
+#define DPCSTX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x1
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x2
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x10
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX0_FIFO_ERROR_MASK 0x1
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX0_FIFO_ERROR__SHIFT 0x0
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX1_FIFO_ERROR_MASK 0x2
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX1_FIFO_ERROR__SHIFT 0x1
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX2_FIFO_ERROR_MASK 0x4
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX2_FIFO_ERROR__SHIFT 0x2
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX3_FIFO_ERROR_MASK 0x8
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX3_FIFO_ERROR__SHIFT 0x3
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_ERROR_CLR_MASK 0x100
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_ERROR_CLR__SHIFT 0x8
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_FIFO_ERROR_MASK_MASK 0x1000
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0xc
+#define DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x3ffff
+#define DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xffffffff
+#define DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x3ffff
+#define DPCSTX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0
+#define DPCSTX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xffffffff
+#define DPCSTX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x1
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x6
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x38
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x3
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CLOCK_SEL_MASK 0x700
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CLOCK_SEL__SHIFT 0x8
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x3800
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x4000
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x10000
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0xe0000
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x11
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xff000000
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA__SHIFT 0x0
+
+#endif /* DCE_11_2_SH_MASK_H */
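
Every register field in the sh_mask header above is described by a <REG>__<field>_MASK / <REG>__<field>__SHIFT pair, and a driver reads or updates a field by masking and shifting the 32-bit register value (the amdgpu driver carries REG_GET_FIELD/REG_SET_FIELD helpers built on the same pattern). A minimal, self-contained C sketch, not part of the patch, using hypothetical GET_FIELD/SET_FIELD macros against the FREQ_CTRL3 definitions above:

#include <stdint.h>

/* Hypothetical helpers; they rely only on the naming convention above. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Assumes the DCE 11.2 sh_mask definitions above are in scope. */
static uint32_t freq_ctrl3_set_fracn(uint32_t regval, int enable)
{
	/* Update only the fracn_en bit, leaving the rest of the register intact. */
	return SET_FIELD(regval, FREQ_CTRL3, fracn_en, enable ? 1 : 0);
}

static uint32_t freq_ctrl3_get_refclk_div(uint32_t regval)
{
	/* Extract the refclk_div field from a raw register read. */
	return GET_FIELD(regval, FREQ_CTRL3, refclk_div);
}
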
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
index a9b6923..ebaf67b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
@@ -1391,6 +1391,8 @@
 #define mmRLC_CGTT_MGCG_OVERRIDE                                                0xec48
 #define mmRLC_CGCG_CGLS_CTRL                                                    0xec49
 #define mmRLC_CGCG_RAMP_CTRL                                                    0xec4a
+#define mmRLC_CGCG_CGLS_CTRL_3D                                                 0xec9d
+#define mmRLC_CGCG_RAMP_CTRL_3D                                                 0xec9e
 #define mmRLC_DYN_PG_STATUS                                                     0xec4b
 #define mmRLC_DYN_PG_REQUEST                                                    0xec4c
 #define mmRLC_PG_DELAY                                                          0xec4d
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index b2d4aaf..6f6fb34 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,5 +111,6 @@
 #define mmUVD_MIF_RECON1_ADDR_CONFIG                                            0x39c5
 #define ixUVD_MIF_SCLR_ADDR_CONFIG                                              0x4
 #define mmUVD_JPEG_ADDR_CONFIG                                                  0x3a1f
+#define mmUVD_GP_SCRATCH4                                                       0x3d38
 
 #endif /* UVD_6_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index eaf451e..32f3e34 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -79,9 +79,23 @@
 #define ATOM_PPLL0            2
 #define ATOM_PPLL3            3
 
+#define ATOM_PHY_PLL0         4
+#define ATOM_PHY_PLL1         5
+
 #define ATOM_EXT_PLL1         8
+#define ATOM_GCK_DFS          8
 #define ATOM_EXT_PLL2         9
+#define ATOM_FCH_CLK          9
 #define ATOM_EXT_CLOCK        10
+#define ATOM_DP_DTO           11
+
+#define ATOM_COMBOPHY_PLL0    20
+#define ATOM_COMBOPHY_PLL1    21
+#define ATOM_COMBOPHY_PLL2    22
+#define ATOM_COMBOPHY_PLL3    23
+#define ATOM_COMBOPHY_PLL4    24
+#define ATOM_COMBOPHY_PLL5    25
+
 #define ATOM_PPLL_INVALID     0xFF
 
 #define ENCODER_REFCLK_SRC_P1PLL       0
@@ -224,6 +238,31 @@
   UCHAR  ucReserved;
 }ATOM_ROM_HEADER;
 
+
+typedef struct _ATOM_ROM_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  UCHAR  uaFirmWareSignature[4];    //Signature to distinguish between Atombios and non-atombios,
+                                    //atombios should init it as "ATOM", don't change the position
+  USHORT usBiosRuntimeSegmentAddress;
+  USHORT usProtectedModeInfoOffset;
+  USHORT usConfigFilenameOffset;
+  USHORT usCRC_BlockOffset;
+  USHORT usBIOS_BootupMessageOffset;
+  USHORT usInt10Offset;
+  USHORT usPciBusDevInitCode;
+  USHORT usIoBaseAddress;
+  USHORT usSubsystemVendorID;
+  USHORT usSubsystemID;
+  USHORT usPCI_InfoOffset;
+  USHORT usMasterCommandTableOffset;//Offset for SW to get all command table offsets, Don't change the position
+  USHORT usMasterDataTableOffset;   //Offset for SW to get all data table offsets, Don't change the position
+  UCHAR  ucExtendedFunctionCode;
+  UCHAR  ucReserved;
+  ULONG  ulPSPDirTableOffset;
+}ATOM_ROM_HEADER_V2_1;
+
+
 //==============================Command Table Portion====================================
 
 
@@ -272,12 +311,12 @@
   USHORT GetSCLKOverMCLKRatio;                   //Atomic Table,  only used by Bios
   USHORT SetCRTC_Timing;                         //Atomic Table,  directly used by various SW components,latest version 1.1
   USHORT SetCRTC_OverScan;                       //Atomic Table,  used by various SW components,latest version 1.1
-  USHORT SetCRTC_Replication;                    //Atomic Table,  used only by Bios
+  USHORT GetSMUClockInfo;                         //Atomic Table,  used only by Bios
   USHORT SelectCRTC_Source;                      //Atomic Table,  directly used by various SW components,latest version 1.1
   USHORT EnableGraphSurfaces;                    //Atomic Table,  used only by Bios
   USHORT UpdateCRTC_DoubleBufferRegisters;       //Atomic Table,  used only by Bios
   USHORT LUT_AutoFill;                           //Atomic Table,  only used by Bios
-  USHORT EnableHW_IconCursor;                    //Atomic Table,  only used by Bios
+  USHORT SetDCEClock;                            //Atomic Table,  start from DCE11.1, shared by driver and VBIOS, change DISPCLK and DPREFCLK
   USHORT GetMemoryClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1
   USHORT GetEngineClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1
   USHORT SetCRTC_UsingDTDTiming;                 //Atomic Table,  directly used by various SW components,latest version 1.1
@@ -292,7 +331,7 @@
   USHORT PowerConnectorDetection;                //Atomic Table,  directly used by various SW components,latest version 1.1
   USHORT MC_Synchronization;                     //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
   USHORT ComputeMemoryEnginePLL;                 //Atomic Table,  indirectly used by various SW components,called from SetMemory/EngineClock
-  USHORT MemoryRefreshConversion;                //Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock
+  USHORT Gfx_Init;                               //Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock
   USHORT VRAM_GetCurrentInfoBlock;               //Atomic Table,  used only by Bios
   USHORT DynamicMemorySettings;                  //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
   USHORT MemoryTraining;                         //Atomic Table,  used only by Bios
@@ -333,6 +372,10 @@
 #define LCD1OutputControl                        HW_Misc_Operation
 #define TV1OutputControl                         Gfx_Harvesting
 #define TVEncoderControl                         SMC_Init
+#define EnableHW_IconCursor                      SetDCEClock
+#define SetCRTC_Replication                      GetSMUClockInfo
+
+#define MemoryRefreshConversion                  Gfx_Init
 
 typedef struct _ATOM_MASTER_COMMAND_TABLE
 {
@@ -425,6 +468,9 @@
 #define b3FIRST_TIME_CHANGE_CLOCK                 0x08       //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
 #define b3SKIP_SW_PROGRAM_PLL                     0x10       //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
 #define b3DRAM_SELF_REFRESH_EXIT                  0x20       //Applicable to DRAM self refresh exit only. when set, it means it will go to program DRAM self refresh exit path
+#define b3SRIOV_INIT_BOOT                         0x40       //Used by HV GPU driver only, to load uCode. for ASIC_InitTable SCLK parameter only
+#define b3SRIOV_LOAD_UCODE                        0x40       //Used by HV GPU driver only, to load uCode. for ASIC_InitTable SCLK parameter only
+#define b3SRIOV_SKIP_ASIC_INIT                    0x02       //Used by HV GPU driver only, skip ASIC_Init for primary adapter boot. for ASIC_InitTable SCLK parameter only
 
 typedef struct _ATOM_COMPUTE_CLOCK_FREQ
 {
@@ -518,6 +564,33 @@
 //ucPllCntlFlag
 #define SPLL_CNTL_FLAG_VCO_MODE_MASK            0x03
 
+typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7
+{
+  ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+  ULONG   ulReserved[5];
+}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7;
+
+//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
+#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK            0x0f
+#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK           0x00
+#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK                     0x01
+
+typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4  ulClock;         //Output Parameter: ucPostDiv=DFS divider
+  USHORT  usSclk_fcw_frac;                  //fractional divider of fcw = usSclk_fcw_frac/65536
+  USHORT  usSclk_fcw_int;                   //integer divider of fcw
+  UCHAR   ucSclkPostDiv;                    //PLL post divider = 2^ucSclkPostDiv
+  UCHAR   ucSclkVcoMode;                    //0: 4G~8Ghz, 1:3G~6Ghz,3: 2G~4Ghz, 2:Reserved
+  UCHAR   ucSclkPllRange;                   //GreenTable SCLK PLL range entry index ( 0~7 )
+  UCHAR   ucSscEnable;
+  USHORT  usSsc_fcw1_frac;                  //fcw1_frac when SSC enable
+  USHORT  usSsc_fcw1_int;                   //fcw1_int when SSC enable
+  USHORT  usReserved;
+  USHORT  usPcc_fcw_int;
+  USHORT  usSsc_fcw_slew_frac;              //fcw_slew_frac when SSC enable
+  USHORT  usPcc_fcw_slew_frac;
+}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7;
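+
+The comments on COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 spell out the fixed-point encoding: the effective feedback word is usSclk_fcw_int + usSclk_fcw_frac/65536, and the PLL post divider is 2^ucSclkPostDiv. A tiny illustration with made-up numbers (not part of the patch):
+
+#include <stdio.h>
+
+int main(void)
+{
+	/* Hypothetical values; the real ones are returned by the VBIOS call. */
+	unsigned short usSclk_fcw_int  = 100;
+	unsigned short usSclk_fcw_frac = 32768;   /* 32768/65536 = 0.5 */
+	unsigned char  ucSclkPostDiv   = 1;
+
+	double fcw = usSclk_fcw_int + usSclk_fcw_frac / 65536.0;  /* 100.5 */
+	unsigned int post_div = 1u << ucSclkPostDiv;              /* 2 */
+
+	printf("fcw = %.4f, post divider = %u\n", fcw, post_div);
+	return 0;
+}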
 
 // ucInputFlag
 #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
@@ -557,12 +630,16 @@
   ULONG ulReserved;
 }COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
 
+//Input parameter of DynamicMemorySettingsTable
+//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
 typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
 {
   ATOM_COMPUTE_CLOCK_FREQ ulClock;
   ULONG ulReserved[2];
 }DYNAMICE_MEMORY_SETTINGS_PARAMETER;
 
+//Input parameter of DynamicMemorySettingsTable
+//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag == COMPUTE_ENGINE_PLL_PARAM
 typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
 {
   ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -570,6 +647,29 @@
   ULONG ulReserved;
 }DYNAMICE_ENGINE_SETTINGS_PARAMETER;
 
+//Input parameter of DynamicMemorySettingsTable ver2.1 and above
+//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag == ADJUST_MC_SETTING_PARAM
+typedef struct _DYNAMICE_MC_DPM_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  UCHAR ucMclkDPMState;
+  UCHAR ucReserved[3];
+  ULONG ulReserved;
+}DYNAMICE_MC_DPM_SETTINGS_PARAMETER;
+
+//ucMclkDPMState
+#define DYNAMIC_MC_DPM_SETTING_LOW_DPM_STATE       0
+#define DYNAMIC_MC_DPM_SETTING_MEDIUM_DPM_STATE    1
+#define DYNAMIC_MC_DPM_SETTING_HIGH_DPM_STATE      2
+
+typedef union _DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1
+{
+  DYNAMICE_MEMORY_SETTINGS_PARAMETER asMCReg;
+  DYNAMICE_ENGINE_SETTINGS_PARAMETER asMCArbReg;
+  DYNAMICE_MC_DPM_SETTINGS_PARAMETER asDPMMCReg;
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1;
+
+
 /****************************************************************************/
 // Structures used by SetEngineClockTable
 /****************************************************************************/
@@ -584,6 +684,13 @@
   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
 }SET_ENGINE_CLOCK_PS_ALLOCATION;
 
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION_V1_2
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+  COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7 sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION_V1_2;
+
+
 /****************************************************************************/
 // Structures used by SetMemoryClockTable
 /****************************************************************************/
@@ -827,6 +934,12 @@
 #define ATOM_ENCODER_CMD_SETUP                        0x0f
 #define ATOM_ENCODER_CMD_SETUP_PANEL_MODE            0x10
 
+// New Command for DIGxEncoderControlTable v1.5
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4    0x14
+#define ATOM_ENCODER_CMD_STREAM_SETUP                 0x0F      //renamed from ATOM_ENCODER_CMD_SETUP
+#define ATOM_ENCODER_CMD_LINK_SETUP                   0x11      //internal use, called by other Command Table
+#define ATOM_ENCODER_CMD_ENCODER_BLANK                0x12      //internal use, called by other Command Table
+
 // ucStatus
 #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
 #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
@@ -955,6 +1068,69 @@
 #define DP_PANEL_MODE_INTERNAL_DP2_MODE                  0x01
 #define DP_PANEL_MODE_INTERNAL_DP1_MODE                  0x11
 
+
+typedef struct _ENCODER_STREAM_SETUP_PARAMETERS_V5
+{
+  UCHAR ucDigId;           // 0~6 map to DIG0~DIG6
+  UCHAR ucAction;          // =  ATOM_ENCODER_CMD_STREAM_SETUP
+  UCHAR ucDigMode;         // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
+  UCHAR ucLaneNum;         // Lane number
+  ULONG ulPixelClock;      // Pixel Clock in 10Khz
+  UCHAR ucBitPerColor;
+  UCHAR ucLinkRateIn270Mhz;//= DP link rate/270Mhz, =6: 1.62G  = 10: 2.7G, =20: 5.4Ghz, =30: 8.1Ghz etc
+  UCHAR ucReserved[2];
+}ENCODER_STREAM_SETUP_PARAMETERS_V5;
+
+typedef struct _ENCODER_LINK_SETUP_PARAMETERS_V5
+{
+  UCHAR ucDigId;           // 0~6 map to DIG0~DIG6
+  UCHAR ucAction;          // =  ATOM_ENCODER_CMD_LINK_SETUP
+  UCHAR ucDigMode;         // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
+  UCHAR ucLaneNum;         // Lane number
+  ULONG ulSymClock;        // Symbol Clock in 10Khz
+  UCHAR ucHPDSel;
+  UCHAR ucDigEncoderSel;   // DIG stream( front-end ) selection, bit0 means DIG0 FE is enable,
+  UCHAR ucReserved[2];
+}ENCODER_LINK_SETUP_PARAMETERS_V5;
+
+typedef struct _DP_PANEL_MODE_SETUP_PARAMETERS_V5
+{
+  UCHAR ucDigId;           // 0~6 map to DIG0~DIG6
+  UCHAR ucAction;          // = ATOM_ENCODER_CMD_DPLINK_SETUP
+  UCHAR ucPanelMode;       // =0:     external DP
+                           // =0x1:   internal DP2
+                           // =0x11:  internal DP1 NutMeg/Travis DP Translator
+  UCHAR ucReserved;
+  ULONG ulReserved[2];
+}DP_PANEL_MODE_SETUP_PARAMETERS_V5;
+
+typedef struct _ENCODER_GENERIC_CMD_PARAMETERS_V5
+{
+  UCHAR ucDigId;           // 0~6 map to DIG0~DIG6
+  UCHAR ucAction;          // = rest of generic encoder command which does not carry any parameters
+  UCHAR ucReserved[2];
+  ULONG ulReserved[2];
+}ENCODER_GENERIC_CMD_PARAMETERS_V5;
+
+//ucDigId
+#define ATOM_ENCODER_CONFIG_V5_DIG0_ENCODER                 0x00
+#define ATOM_ENCODER_CONFIG_V5_DIG1_ENCODER                 0x01
+#define ATOM_ENCODER_CONFIG_V5_DIG2_ENCODER                 0x02
+#define ATOM_ENCODER_CONFIG_V5_DIG3_ENCODER                 0x03
+#define ATOM_ENCODER_CONFIG_V5_DIG4_ENCODER                 0x04
+#define ATOM_ENCODER_CONFIG_V5_DIG5_ENCODER                 0x05
+#define ATOM_ENCODER_CONFIG_V5_DIG6_ENCODER                 0x06
+
+
+typedef union _DIG_ENCODER_CONTROL_PARAMETERS_V5
+{
+  ENCODER_GENERIC_CMD_PARAMETERS_V5  asCmdParam;
+  ENCODER_STREAM_SETUP_PARAMETERS_V5 asStreamParam;
+  ENCODER_LINK_SETUP_PARAMETERS_V5  asLinkParam;
+  DP_PANEL_MODE_SETUP_PARAMETERS_V5 asDPPanelModeParam;
+}DIG_ENCODER_CONTROL_PARAMETERS_V5;
+
+
 /****************************************************************************/
 // Structures used by UNIPHYTransmitterControlTable
 //                    LVTMATransmitterControlTable
@@ -1371,6 +1547,49 @@
 
 #define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5            DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
 
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6
+{
+  UCHAR ucPhyId;           // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
+  UCHAR ucAction;          // define as ATOM_TRANSMITER_ACTION_xxx
+  union
+  {
+    UCHAR ucDigMode;       // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
+    UCHAR ucDPLaneSet;     // DP voltage swing and pre-emphasis value defined in DPCD DP_LANE_SET, "DP_LANE_SET__xDB_y_zV"
+  };
+  UCHAR ucLaneNum;         // Lane number
+  ULONG ulSymClock;        // Symbol Clock in 10Khz
+  UCHAR ucHPDSel;          // =1: HPD1, =2: HPD2, .... =6: HPD6, =0: HPD is not assigned
+  UCHAR ucDigEncoderSel;   // DIG stream( front-end ) selection, bit0 means DIG0 FE is enable,
+  UCHAR ucConnObjId;       // Connector Object Id defined in ObjectId.h
+  UCHAR ucReserved;
+  ULONG ulReserved;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6;
+
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V6__DIGA_SEL                       0x01
+#define ATOM_TRANMSITTER_V6__DIGB_SEL                       0x02
+#define ATOM_TRANMSITTER_V6__DIGC_SEL                       0x04
+#define ATOM_TRANMSITTER_V6__DIGD_SEL                       0x08
+#define ATOM_TRANMSITTER_V6__DIGE_SEL                       0x10
+#define ATOM_TRANMSITTER_V6__DIGF_SEL                       0x20
+#define ATOM_TRANMSITTER_V6__DIGG_SEL                       0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V6_DP                      0
+#define ATOM_TRANSMITTER_DIGMODE_V6_DVI                     2
+#define ATOM_TRANSMITTER_DIGMODE_V6_HDMI                    3
+#define ATOM_TRANSMITTER_DIGMODE_V6_DP_MST                  5
+
+//ucHPDSel
+#define ATOM_TRANSMITTER_V6_NO_HPD_SEL                      0x00
+#define ATOM_TRANSMITTER_V6_HPD1_SEL                        0x01
+#define ATOM_TRANSMITTER_V6_HPD2_SEL                        0x02
+#define ATOM_TRANSMITTER_V6_HPD3_SEL                        0x03
+#define ATOM_TRANSMITTER_V6_HPD4_SEL                        0x04
+#define ATOM_TRANSMITTER_V6_HPD5_SEL                        0x05
+#define ATOM_TRANSMITTER_V6_HPD6_SEL                        0x06
+
 
 /****************************************************************************/
 // Structures used by ExternalEncoderControlTable V1.3
@@ -1784,6 +2003,101 @@
   PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
 }GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
 
+typedef struct _PIXEL_CLOCK_PARAMETERS_V7
+{
+    ULONG  ulPixelClock;               // target the pixel clock to drive the CRTC timing in unit of 100Hz.
+
+    UCHAR  ucPpll;                     // ATOM_PHY_PLL0/ATOM_PHY_PLL1/ATOM_PPLL0
+    UCHAR  ucTransmitterID;            // ASIC encoder id defined in objectId.h,
+                                       // indicate which graphic encoder will be used.
+    UCHAR  ucEncoderMode;              // Encoder mode:
+    UCHAR  ucMiscInfo;                 // bit[0]= Force program PLL for pixclk
+                                       // bit[1]= Force program PHY PLL only ( internally used by VBIOS only in DP case which PHYPLL is programmed for SYMCLK, not Pixclk )
+                                       // bit[5:4]= RefClock source for PPLL.
+                                       //          =0: XTLAIN( default mode )
+                                       //          =1: pcie
+                                       //          =2: GENLK
+    UCHAR  ucCRTC;                     // ATOM_CRTC1~6, indicate the CRTC controller to
+    UCHAR  ucDeepColorRatio;           // HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:36bpp
+    UCHAR  ucReserved[2];
+    ULONG  ulReserved;
+}PIXEL_CLOCK_PARAMETERS_V7;
+
+//ucMiscInfo
+#define PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL         0x01
+#define PIXEL_CLOCK_V7_MISC_PROG_PHYPLL             0x02
+#define PIXEL_CLOCK_V7_MISC_YUV420_MODE             0x04
+#define PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN         0x08
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC             0x30
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN      0x00
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_PCIE        0x10
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK       0x20
+
+//ucDeepColorRatio
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS          0x00      //00 - DCCG_DEEP_COLOR_DTO_DISABLE: Disable Deep Color DTO
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4          0x01      //01 - DCCG_DEEP_COLOR_DTO_5_4_RATIO: Set Deep Color DTO to 5:4
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2          0x02      //02 - DCCG_DEEP_COLOR_DTO_3_2_RATIO: Set Deep Color DTO to 3:2
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1          0x03      //03 - DCCG_DEEP_COLOR_DTO_2_1_RATIO: Set Deep Color DTO to 2:1
+
+// SetDCEClockTable input parameter for DCE11.1
+typedef struct _SET_DCE_CLOCK_PARAMETERS_V1_1
+{
+  ULONG  ulDISPClkFreq;       // target DISPCLK frequency in unit of 10kHz, return real DISPCLK frequency. when ucFlag[1]=1, in unit of 100Hz.
+  UCHAR  ucFlag;              // bit0=1: DPREFCLK bypass DFS bit0=0: DPREFCLK not bypass DFS
+  UCHAR  ucCrtc;              // use when enable DCCG pixel clock ucFlag[1]=1
+  UCHAR  ucPpllId;            // use when enable DCCG pixel clock ucFlag[1]=1
+  UCHAR  ucDeepColorRatio;    // use when enable DCCG pixel clock ucFlag[1]=1
+}SET_DCE_CLOCK_PARAMETERS_V1_1;
+
+
+typedef struct _SET_DCE_CLOCK_PS_ALLOCATION_V1_1
+{
+  SET_DCE_CLOCK_PARAMETERS_V1_1 asParam;
+  ULONG ulReserved[2];
+}SET_DCE_CLOCK_PS_ALLOCATION_V1_1;
+
+//SET_DCE_CLOCK_PARAMETERS_V1_1.ucFlag
+#define SET_DCE_CLOCK_FLAG_GEN_DPREFCLK            0x01
+#define SET_DCE_CLOCK_FLAG_DPREFCLK_BYPASS         0x01
+#define SET_DCE_CLOCK_FLAG_ENABLE_PIXCLK           0x02
+
+// SetDCEClockTable input parameter for DCE11.2( POLARIS10 and POLARIS11 ) and above
+typedef struct _SET_DCE_CLOCK_PARAMETERS_V2_1
+{
+  ULONG  ulDCEClkFreq;                               // target DCE frequency in unit of 10KHZ, return real DISPCLK/DPREFCLK frequency.
+  UCHAR  ucDCEClkType;                               // =0: DISPCLK  =1: DPREFCLK  =2: PIXCLK
+  UCHAR  ucDCEClkSrc;                                // ATOM_PLL0 or ATOM_GCK_DFS or ATOM_FCH_CLK or ATOM_COMBOPHY_PLLx
+  UCHAR  ucDCEClkFlag;                               // Bit [1:0] = PPLL ref clock source ( when ucDCEClkSrc= ATOM_PPLL0 )
+  UCHAR  ucCRTC;                                     // ucDisp Pipe Id, ATOM_CRTC0/1/2/..., use only when ucDCEClkType = PIXCLK
+}SET_DCE_CLOCK_PARAMETERS_V2_1;
+
+//ucDCEClkType
+#define DCE_CLOCK_TYPE_DISPCLK                        0
+#define DCE_CLOCK_TYPE_DPREFCLK                       1
+#define DCE_CLOCK_TYPE_PIXELCLK                       2        // used by VBIOS internally, called by SetPixelClockTable
+
+//ucDCEClkFlag when ucDCEClkType == DPREFCLK
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_MASK            0x03
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA        0x00
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK           0x01
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE            0x02
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN          0x03
+
+//ucDCEClkFlag when ucDCEClkType == PIXCLK
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_MASK      0x03
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_DIS       0x00      //00 - DCCG_DEEP_COLOR_DTO_DISABLE: Disable Deep Color DTO
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_5_4       0x01      //01 - DCCG_DEEP_COLOR_DTO_5_4_RATIO: Set Deep Color DTO to 5:4
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_3_2       0x02      //02 - DCCG_DEEP_COLOR_DTO_3_2_RATIO: Set Deep Color DTO to 3:2
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_2_1       0x03      //03 - DCCG_DEEP_COLOR_DTO_2_1_RATIO: Set Deep Color DTO to 2:1
+#define DCE_CLOCK_FLAG_PIXCLK_YUV420_MODE             0x04
+
+typedef struct _SET_DCE_CLOCK_PS_ALLOCATION_V2_1
+{
+  SET_DCE_CLOCK_PARAMETERS_V2_1 asParam;
+  ULONG ulReserved[2];
+}SET_DCE_CLOCK_PS_ALLOCATION_V2_1;
+
+
 
 /****************************************************************************/
 // Structures used by AdjustDisplayPllTable
@@ -2300,6 +2614,11 @@
 #define VOLTAGE_TYPE_VDDCI                   4
 #define VOLTAGE_TYPE_VDDGFX                  5
 #define VOLTAGE_TYPE_PCC                     6
+#define VOLTAGE_TYPE_MVPP                    7
+#define VOLTAGE_TYPE_LEDDPM                  8
+#define VOLTAGE_TYPE_PCC_MVDD                9
+#define VOLTAGE_TYPE_PCIE_VDDC               10
+#define VOLTAGE_TYPE_PCIE_VDDR               11
 
 #define VOLTAGE_TYPE_GENERIC_I2C_1           0x11
 #define VOLTAGE_TYPE_GENERIC_I2C_2           0x12
@@ -2396,6 +2715,39 @@
   USHORT   usTDP_Power;                                  // TDP_Current in unit  of 0.1W
 }GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
 
+
+// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+  ULONG    ulSCLKFreq;                  // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
+  ULONG    ulReserved[3];
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3;
+
+// New Added from CI Hawaii for EVV feature
+typedef struct  _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3
+{
+  ULONG    ulVoltageLevel;                               // real voltage level in unit of 0.01mv
+  ULONG    ulReserved[4];
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3;
+
+
+/****************************************************************************/
+// Structures used by GetSMUClockInfo
+/****************************************************************************/
+typedef struct  _GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1
+{
+  ULONG ulDfsPllOutputFreq:24;
+  ULONG ucDfsDivider:8;
+}GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1;
+
+typedef struct  _GET_SMU_CLOCK_INFO_OUTPUT_PARAMETER_V2_1
+{
+  ULONG ulDfsOutputFreq;
+}GET_SMU_CLOCK_INFO_OUTPUT_PARAMETER_V2_1;
+
 /****************************************************************************/
 // Structures used by TVEncoderControlTable
 /****************************************************************************/
@@ -2429,13 +2781,13 @@
   USHORT        PaletteData;              // Only used by BIOS
   USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info
   USHORT        DIGTransmitterInfo;       // Internal used by VBIOS only version 3.1
-  USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1
+  USHORT        SMU_Info;                 // Shared by various SW components,latest version 1.1
   USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
   USHORT        GPIO_I2C_Info;            // Shared by various SW components,latest version 1.2 will be used from R600
   USHORT        VRAM_UsageByFirmware;     // Shared by various SW components,latest version 1.3 will be used from R600
   USHORT        GPIO_Pin_LUT;             // Shared by various SW components,latest version 1.1
   USHORT        VESA_ToInternalModeLUT;   // Only used by Bios
-  USHORT        ComponentVideoInfo;       // Shared by various SW components,latest version 2.1 will be used from R600
+  USHORT        GFX_Info;                 // Shared by various SW components,latest version 2.1 will be used from R600
   USHORT        PowerPlayInfo;            // Shared by various SW components,latest version 2.1,new design from R600
   USHORT        GPUVirtualizationInfo;    // Will be obsolete from R600
   USHORT        SaveRestoreInfo;          // Only used by Bios
@@ -2455,7 +2807,7 @@
   USHORT        ASIC_ProfilingInfo;       // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
   USHORT        VoltageObjectInfo;        // Shared by various SW components, latest version 1.1
   USHORT        PowerSourceInfo;          // Shared by various SW components, latest versoin 1.1
-  USHORT	      ServiceInfo;
+  USHORT        ServiceInfo;
 }ATOM_MASTER_LIST_OF_DATA_TABLES;
 
 typedef struct _ATOM_MASTER_DATA_TABLE
@@ -2469,6 +2821,8 @@
 #define DAC_Info                 PaletteData
 #define TMDS_Info                DIGTransmitterInfo
 #define CompassionateData        GPUVirtualizationInfo
+#define AnalogTV_Info            SMU_Info
+#define ComponentVideoInfo       GFX_Info
 
 /****************************************************************************/
 // Structure used in MultimediaCapabilityInfoTable
@@ -4278,10 +4632,15 @@
 #define MAX_NUMBER_OF_EXT_DISPLAY_PATH    7
 
 //usCaps
-#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE               0x01
-#define  EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN             0x02
-#define  EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204          0x04
-#define  EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT     0x08
+#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE               0x0001
+#define  EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN             0x0002
+#define  EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK              0x007C
+#define  EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204          (0x01 << 2 )     //PI redriver chip
+#define  EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT     (0x02 << 2 )     //TI retimer chip
+#define  EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175        (0x03 << 2 )     //Parade DP->HDMI converter chip
+
+
+
 
 typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
 {
@@ -4325,10 +4684,10 @@
 #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
 #define ATOM_ENCODER_CAP_RECORD_TYPE                   20
 #define ATOM_BRACKET_LAYOUT_RECORD_TYPE                21
-
+#define ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD_TYPE     22
 
 //Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER                  ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD_TYPE
 
 typedef struct  _ATOM_I2C_RECORD
 {
@@ -4458,10 +4817,12 @@
   UCHAR                       ucPadding[2];
 }ATOM_ENCODER_DVO_CF_RECORD;
 
-// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
-#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder
+// Bit maps for ATOM_ENCODER_CAP_RECORD.usEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder; it is retired in NI. The real meaning from SI is MST_EN
+#define ATOM_ENCODER_CAP_RECORD_MST_EN                0x01         // from SI, this bit means DP MST is enabled or not.
 #define ATOM_ENCODER_CAP_RECORD_HBR2_EN               0x02         // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
 #define ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN          0x04         // HDMI2.0 6Gbps enable or not.
+#define ATOM_ENCODER_CAP_RECORD_HBR3_EN               0x08         // DP1.3 HBR3 is supported by board.
 
 typedef struct  _ATOM_ENCODER_CAP_RECORD
 {
@@ -4482,6 +4843,31 @@
   };
 }ATOM_ENCODER_CAP_RECORD;
 
+// Used after SI
+typedef struct  _ATOM_ENCODER_CAP_RECORD_V2
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:12;        // Bit4-15 may be defined for other capability in future
+      USHORT                  usHBR3En:1;           // bit3 is for DP1.3 HBR3 enable
+      USHORT                  usHDMI6GEn:1;         // Bit2 is for HDMI6Gbps enable, this bit is used starting from CZ (APU) / Ellesmere (dGPU)
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usMSTEn:1;            // Bit0 is for DP1.2 MST enable
+#else
+      USHORT                  usMSTEn:1;            // Bit0 is for DP1.2 MST enable
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usHDMI6GEn:1;         // Bit2 is for HDMI6Gbps enable, this bit is used starting from CZ (APU) / Ellesmere (dGPU)
+      USHORT                  usHBR3En:1;           // bit3 is for DP1.3 HBR3 enable
+      USHORT                  usReserved:12;        // Bit4-15 may be defined for other capability in future
+#endif
+    };
+  };
+}ATOM_ENCODER_CAP_RECORD_V2;
+
+
 // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
@@ -4554,6 +4940,16 @@
   USHORT                      usReserved;
 }ATOM_CONNECTOR_REMOTE_CAP_RECORD;
 
+
+typedef struct  _ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  // override TMDS capability on this connector when it operates in TMDS mode.  usMaxTmdsClkRate = max TMDS Clock in MHz/2.5
+  UCHAR                       ucMaxTmdsClkRateIn2_5Mhz;
+  UCHAR                       ucReserved;
+} ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD;
+
+
 typedef struct  _ATOM_CONNECTOR_LAYOUT_INFO
 {
    USHORT usConnectorObjectId;
@@ -4657,12 +5053,12 @@
 #define VOLTAGE_CONTROL_ID_UP1801             0x0C
 #define VOLTAGE_CONTROL_ID_ST6788A            0x0D
 #define VOLTAGE_CONTROL_ID_CHLIR3564SVI2      0x0E
-#define VOLTAGE_CONTROL_ID_AD527x      	      0x0F
-#define VOLTAGE_CONTROL_ID_NCP81022    	      0x10
-#define VOLTAGE_CONTROL_ID_LTC2635			  0x11
-#define VOLTAGE_CONTROL_ID_NCP4208	          0x12
+#define VOLTAGE_CONTROL_ID_AD527x             0x0F
+#define VOLTAGE_CONTROL_ID_NCP81022           0x10
+#define VOLTAGE_CONTROL_ID_LTC2635            0x11
+#define VOLTAGE_CONTROL_ID_NCP4208            0x12
 #define VOLTAGE_CONTROL_ID_IR35xx             0x13
-#define VOLTAGE_CONTROL_ID_RT9403	          0x14
+#define VOLTAGE_CONTROL_ID_RT9403             0x14
 
 #define VOLTAGE_CONTROL_ID_GENERIC_I2C        0x40
 
@@ -4784,11 +5180,38 @@
    ULONG    ulReserved;
 }ATOM_SVID2_VOLTAGE_OBJECT_V3;
 
+
+
+typedef struct  _ATOM_MERGED_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_MERGED_POWER
+   UCHAR    ucMergedVType;                   // VDDC/VDDCI/....
+   UCHAR    ucReserved[3];
+}ATOM_MERGED_VOLTAGE_OBJECT_V3;
+
+
+typedef struct _ATOM_EVV_DPM_INFO
+{
+  ULONG ulDPMSclk;            // DPM state SCLK
+  USHORT usVAdjOffset;        // Adjust Voltage offset in unit of mv
+  UCHAR ucDPMTblVIndex;       // Voltage Index in SMC_DPM_Table structure VddcTable/VddGfxTable
+  UCHAR ucDPMState;           // DPMState0~7
+} ATOM_EVV_DPM_INFO;
+
+// ucVoltageMode = VOLTAGE_OBJ_EVV
+typedef struct  _ATOM_EVV_VOLTAGE_OBJECT_V3
+{
+  ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_EVV
+  ATOM_EVV_DPM_INFO asEvvDpmList[8];
+}ATOM_EVV_VOLTAGE_OBJECT_V3;
+
+
 typedef union _ATOM_VOLTAGE_OBJECT_V3{
   ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
   ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
   ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
   ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
+  ATOM_EVV_VOLTAGE_OBJECT_V3 asEvvObj;
 }ATOM_VOLTAGE_OBJECT_V3;
 
 typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V3_1
@@ -4963,7 +5386,11 @@
   ULONG  ulLkgEncodeMax;
   ULONG  ulLkgEncodeMin;
   ULONG  ulEfuseLogisticAlpha;
+
+  union{
   USHORT usPowerDpm0;
+  USHORT usParamNegFlag;          //bit0=1: indicates ulRoBeta is negative, bit1=1: indicates Kv_m max is positive
+  };
   USHORT usPowerDpm1;
   USHORT usPowerDpm2;
   USHORT usPowerDpm3;
@@ -5067,6 +5494,86 @@
   ULONG ulReserved[8];            // Reserved for future ASIC
 }ATOM_ASIC_PROFILING_INFO_V3_4;
 
+// for  Polaris10/Polaris11 speed EVV algorithm
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_5
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulMaxVddc;               //Maximum voltage for all parts, in unit of 0.01mV
+  ULONG  ulMinVddc;               //Minimum voltage for all parts, in unit of 0.01mV
+  USHORT usLkgEuseIndex;          //Efuse Lkg_FT address ( BYTE address )
+  UCHAR  ucLkgEfuseBitLSB;        //Efuse Lkg_FT bit shift in 32bit DWORD
+  UCHAR  ucLkgEfuseLength;        //Efuse Lkg_FT length
+  ULONG  ulLkgEncodeLn_MaxDivMin; //value of ln(Max_Lkg_Ft/Min_Lkg_Ft ) in unit of 0.00001 ( unit=100000 )
+  ULONG  ulLkgEncodeMax;          //Maximum Lkg_Ft measured value ( or efuse decode value ), in unit of 0.00001 ( unit=100000 )
+  ULONG  ulLkgEncodeMin;          //Minimum Lkg_Ft measured value ( or efuse decode value ), in unit of 0.00001 ( unit=100000 )
+  EFUSE_LINEAR_FUNC_PARAM sRoFuse;//Efuse RO info: DWORD address, bit shift, length, max/min measure value. in unit of 1.
+  ULONG  ulEvvDefaultVddc;        //def="EVV_DEFAULT_VDDC" descr="return default VDDC(v) when Efuse not cut" unit="100000"/>
+  ULONG  ulEvvNoCalcVddc;         //def="EVV_NOCALC_VDDC" descr="return VDDC(v) when Calculation is bad" unit="100000"/>
+  ULONG  ulSpeed_Model;           //def="EVV_SPEED_MODEL" descr="0 = Greek model, 1 = multivariate model" unit="1"/>
+  ULONG  ulSM_A0;                 //def="EVV_SM_A0" descr="Leakage coeff(Multivariate Mode)." unit="100000"/>
+  ULONG  ulSM_A1;                 //def="EVV_SM_A1" descr="Leakage/SCLK coeff(Multivariate Mode)." unit="1000000"/>
+  ULONG  ulSM_A2;                 //def="EVV_SM_A2" descr="Alpha( Greek Mode ) or VDDC/SCLK coeff(Multivariate Mode)." unit="100000"/>
+  ULONG  ulSM_A3;                 //def="EVV_SM_A3" descr="Beta( Greek Mode ) or SCLK coeff(Multivariate Mode)." unit="100000"/>
+  ULONG  ulSM_A4;                 //def="EVV_SM_A4" descr="VDDC^2/SCLK coeff(Multivariate Mode)." unit="100000"/>
+  ULONG  ulSM_A5;                 //def="EVV_SM_A5" descr="VDDC^2 coeff(Multivariate Mode)." unit="100000"/>
+  ULONG  ulSM_A6;                 //def="EVV_SM_A6" descr="Gamma( Greek Mode ) or VDDC coeff(Multivariate Mode)." unit="100000"/>
+  ULONG  ulSM_A7;                 //def="EVV_SM_A7" descr="Epsilon( Greek Mode ) or constant(Multivariate Mode)." unit="100000"/>
+  UCHAR  ucSM_A0_sign;            //def="EVV_SM_A0_SIGN" descr="=0 SM_A0 is positive. =1: SM_A0 is negative" unit="1"/>
+  UCHAR  ucSM_A1_sign;            //def="EVV_SM_A1_SIGN" descr="=0 SM_A1 is positive. =1: SM_A1 is negative" unit="1"/>
+  UCHAR  ucSM_A2_sign;            //def="EVV_SM_A2_SIGN" descr="=0 SM_A2 is positive. =1: SM_A2 is negative" unit="1"/>
+  UCHAR  ucSM_A3_sign;            //def="EVV_SM_A3_SIGN" descr="=0 SM_A3 is positive. =1: SM_A3 is negative" unit="1"/>
+  UCHAR  ucSM_A4_sign;            //def="EVV_SM_A4_SIGN" descr="=0 SM_A4 is positive. =1: SM_A4 is negative" unit="1"/>
+  UCHAR  ucSM_A5_sign;            //def="EVV_SM_A5_SIGN" descr="=0 SM_A5 is positive. =1: SM_A5 is negative" unit="1"/>
+  UCHAR  ucSM_A6_sign;            //def="EVV_SM_A6_SIGN" descr="=0 SM_A6 is positive. =1: SM_A6 is negative" unit="1"/>
+  UCHAR  ucSM_A7_sign;            //def="EVV_SM_A7_SIGN" descr="=0 SM_A7 is positive. =1: SM_A7 is negative" unit="1"/>
+  ULONG  ulMargin_RO_a;           //def="EVV_MARGIN_RO_A" descr="A Term to represent RO equation in Ax2+Bx+C, unit=1"
+  ULONG  ulMargin_RO_b;           //def="EVV_MARGIN_RO_B" descr="B Term to represent RO equation in Ax2+Bx+C, unit=1"
+  ULONG  ulMargin_RO_c;           //def="EVV_MARGIN_RO_C" descr="C Term to represent RO equation in Ax2+Bx+C, unit=1"
+  ULONG  ulMargin_fixed;          //def="EVV_MARGIN_FIXED" descr="Fixed MHz to add to SCLK margin, unit=1" unit="1"/>
+  ULONG  ulMargin_Fmax_mean;      //def="EVV_MARGIN_FMAX_MEAN" descr="Percentage to add for Fmax mean margin unit=10000" unit="10000"/>
+  ULONG  ulMargin_plat_mean;      //def="EVV_MARGIN_PLAT_MEAN" descr="Percentage to add for platform mean margin unit=10000" unit="10000"/>
+  ULONG  ulMargin_Fmax_sigma;     //def="EVV_MARGIN_FMAX_SIGMA" descr="Percentage to add for Fmax sigma margin unit=10000" unit="10000"/>
+  ULONG  ulMargin_plat_sigma;     //def="EVV_MARGIN_PLAT_SIGMA" descr="Percentage to add for platform sigma margin unit=10000" unit="10000"/>
+  ULONG  ulMargin_DC_sigma;       //def="EVV_MARGIN_DC_SIGMA" descr="Regulator DC tolerance margin (mV) unit=100" unit="100"/>
+  ULONG  ulReserved[12];
+}ATOM_ASIC_PROFILING_INFO_V3_5;
+
+
+typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
+  ULONG  ulMaxSclkFreq;
+  UCHAR  ucVco_setting;      // 1: 3-6GHz, 3: 2-4GHz
+  UCHAR  ucPostdiv;          // divide by 2^n
+  USHORT ucFcw_pcc;
+  USHORT ucFcw_trans_upper;
+  USHORT ucRcw_trans_lower;
+}ATOM_SCLK_FCW_RANGE_ENTRY_V1;
+
+
+// SMU_InfoTable for  Polaris10/Polaris11
+typedef struct  _ATOM_SMU_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  UCHAR ucSclkEntryNum;            // for potential future extension, indicates the number of ATOM_SCLK_FCW_RANGE_ENTRY_V1
+  UCHAR ucReserved[3];
+  ATOM_SCLK_FCW_RANGE_ENTRY_V1     asSclkFcwRangeEntry[8];
+}ATOM_SMU_INFO_V2_1;
+
+
+// GFX_InfoTable for Polaris10/Polaris11
+typedef struct  _ATOM_GFX_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER asHeader;
+  UCHAR GfxIpMinVer;
+  UCHAR GfxIpMajVer;
+  UCHAR max_shader_engines;
+  UCHAR max_tile_pipes;
+  UCHAR max_cu_per_sh;
+  UCHAR max_sh_per_se;
+  UCHAR max_backends_per_se;
+  UCHAR max_texture_channel_caches;
+}ATOM_GFX_INFO_V2_1;
+
+
 typedef struct _ATOM_POWER_SOURCE_OBJECT
 {
    UCHAR  ucPwrSrcId;                                   // Power source
@@ -5765,14 +6272,6 @@
 
 **********************************************************************************************************************/
 
-// this Table is used for Kaveri/Kabini APU
-typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
-{
-  ATOM_INTEGRATED_SYSTEM_INFO_V1_8    sIntegratedSysInfo;       // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
-  ULONG                               ulPowerplayTable[128];    // Update comments here to link new powerplay table definition structure
-}ATOM_FUSION_SYSTEM_INFO_V2;
-
-
 typedef struct _ATOM_I2C_REG_INFO
 {
   UCHAR ucI2cRegIndex;
@@ -5859,7 +6358,50 @@
 #define EDP_VS_VARIABLE_PREM_MODE           5
 
 
-// this IntegrateSystemInfoTable is used for Carrizo
+// ulGPUCapInfo
+#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT                         0x08
+#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS                               0x10
+//ulGPUCapInfo[16]=1 indicates SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB registers during resume, from ML
+#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE                         0x00010000
+//ulGPUCapInfo[18]=1 indicates the IOMMU is not available
+#define SYS_INFO_V1_9_GPUCAPINFO_IOMMU_DISABLE                                    0x00040000
+//ulGPUCapInfo[19]=1 indicates the MARC Aperture is opened.
+#define SYS_INFO_V1_9_GPUCAPINFO_MARC_APERTURE_ENABLE                             0x00080000
+
+
+typedef struct _DPHY_TIMING_PARA
+{
+    UCHAR  ucProfileID;       // SENSOR_PROFILES
+    ULONG  ucPara;
+} DPHY_TIMING_PARA;
+
+typedef struct _DPHY_ELEC_PARA
+{
+    USHORT  usPara[3];
+} DPHY_ELEC_PARA;
+
+typedef struct _CAMERA_MODULE_INFO
+{
+    UCHAR    ucID;                    // 0: Rear, 1: Front right of user, 2: Front left of user
+    UCHAR    strModuleName[8];
+    DPHY_TIMING_PARA asTimingPara[6]; // Exact number is under estimation and confirmation from sensor vendor
+} CAMERA_MODULE_INFO;
+
+typedef struct _FLASHLIGHT_INFO
+{
+    UCHAR    ucID;         // 0: Rear, 1: Front
+    UCHAR    strName[8];
+} FLASHLIGHT_INFO;
+
+typedef struct _CAMERA_DATA
+{
+    ULONG                   ulVersionCode;
+    CAMERA_MODULE_INFO      asCameraInfo[3];     // Assuming 3 camera sensors max
+    FLASHLIGHT_INFO         asFlashInfo;      // Assuming 1 flashlight max
+    DPHY_ELEC_PARA          asDphyElecPara;
+    ULONG                   ulCrcVal;         // CRC
+}CAMERA_DATA;
+
 typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
@@ -5883,7 +6425,7 @@
   USHORT usPanelRefreshRateRange;
   UCHAR  ucMemoryType;
   UCHAR  ucUMAChannelNumber;
-  UCHAR  strVBIOSMsg[40];
+  ULONG  ulMsgReserved[10];
   ATOM_TDP_CONFIG  asTdpConfig;
   ULONG  ulReserved[7];
   ATOM_CLK_VOLT_CAPABILITY_V2   sDispClkVoltageMapping[8];
@@ -5925,8 +6467,27 @@
   UCHAR  ucEDPv1_4VSMode;
   UCHAR  ucReserved2;
   ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+  CAMERA_DATA asCameraInfo;
+  ULONG  ulReserved8[29];
 }ATOM_INTEGRATED_SYSTEM_INFO_V1_10;
 
+
+// this Table is used for Kaveri/Kabini APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_8    sIntegratedSysInfo;       // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
+  ULONG                               ulPowerplayTable[128];    // Update comments here to link new powerplay table definition structure
+}ATOM_FUSION_SYSTEM_INFO_V2;
+
+
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V3
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_10   sIntegratedSysInfo;           // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_10 definition
+  ULONG                               ulPowerplayTable[192];        // Reserve 768 bytes space for PowerPlayInfoTable
+}ATOM_FUSION_SYSTEM_INFO_V3;
+
+#define FUSION_V3_OFFSET_FROM_TOP_OF_FB 0x800
+
 /**************************************************************************/
 // This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
 //Memory SS Info Table
@@ -6193,12 +6754,12 @@
 #define ATOM_S3_DFP1_ACTIVE             0x00000008L
 #define ATOM_S3_CRT2_ACTIVE             0x00000010L
 #define ATOM_S3_LCD2_ACTIVE             0x00000020L
-#define ATOM_S3_DFP6_ACTIVE                     0x00000040L
+#define ATOM_S3_DFP6_ACTIVE             0x00000040L
 #define ATOM_S3_DFP2_ACTIVE             0x00000080L
 #define ATOM_S3_CV_ACTIVE               0x00000100L
-#define ATOM_S3_DFP3_ACTIVE                     0x00000200L
-#define ATOM_S3_DFP4_ACTIVE                     0x00000400L
-#define ATOM_S3_DFP5_ACTIVE                     0x00000800L
+#define ATOM_S3_DFP3_ACTIVE             0x00000200L
+#define ATOM_S3_DFP4_ACTIVE             0x00000400L
+#define ATOM_S3_DFP5_ACTIVE             0x00000800L
 
 
 #define ATOM_S3_DEVICE_ACTIVE_MASK      0x00000FFFL
@@ -6215,9 +6776,9 @@
 #define ATOM_S3_DFP6_CRTC_ACTIVE        0x00400000L
 #define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
 #define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
-#define ATOM_S3_DFP3_CRTC_ACTIVE            0x02000000L
-#define ATOM_S3_DFP4_CRTC_ACTIVE            0x04000000L
-#define ATOM_S3_DFP5_CRTC_ACTIVE            0x08000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE        0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE        0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE        0x08000000L
 
 
 #define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
@@ -6238,9 +6799,9 @@
 #define ATOM_S3_DFP6_ACTIVEb0           0x40
 #define ATOM_S3_DFP2_ACTIVEb0           0x80
 #define ATOM_S3_CV_ACTIVEb1             0x01
-#define ATOM_S3_DFP3_ACTIVEb1                  0x02
-#define ATOM_S3_DFP4_ACTIVEb1                  0x04
-#define ATOM_S3_DFP5_ACTIVEb1                  0x08
+#define ATOM_S3_DFP3_ACTIVEb1           0x02
+#define ATOM_S3_DFP4_ACTIVEb1           0x04
+#define ATOM_S3_DFP5_ACTIVEb1           0x08
 
 
 #define ATOM_S3_ACTIVE_CRTC1w0          0xFFF
@@ -6254,9 +6815,9 @@
 #define ATOM_S3_DFP6_CRTC_ACTIVEb2      0x40
 #define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
 #define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
-#define ATOM_S3_DFP3_CRTC_ACTIVEb3         0x02
-#define ATOM_S3_DFP4_CRTC_ACTIVEb3         0x04
-#define ATOM_S3_DFP5_CRTC_ACTIVEb3         0x08
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3      0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3      0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3      0x08
 
 
 #define ATOM_S3_ACTIVE_CRTC2w1          0xFFF
@@ -6878,15 +7439,18 @@
 #define _32Mx16             0x32
 #define _32Mx32             0x33
 #define _32Mx128            0x35
-#define _64Mx32             0x43
 #define _64Mx8              0x41
 #define _64Mx16             0x42
+#define _64Mx32             0x43
+#define _64Mx128            0x45
 #define _128Mx8             0x51
 #define _128Mx16            0x52
 #define _128Mx32            0x53
 #define _256Mx8             0x61
 #define _256Mx16            0x62
+#define _256Mx32            0x63
 #define _512Mx8             0x71
+#define _512Mx16            0x72
 
 
 #define SAMSUNG             0x1
@@ -7407,6 +7971,17 @@
 }ATOM_MEMORY_TRAINING_INFO;
 
 
+typedef struct _ATOM_MEMORY_TRAINING_INFO_V3_1
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   ULONG                      ulMCUcodeVersion;
+   USHORT                     usMCIOInitLen;         //len of ATOM_REG_INIT_SETTING array
+   USHORT                     usMCUcodeLen;          //len of ATOM_MC_UCODE_DATA array
+   USHORT                     usMCIORegInitOffset;   //offset of ATOM_REG_INIT_SETTING array
+   USHORT                     usMCUcodeOffset;       //offset of MC uCode ULONG array.
+}ATOM_MEMORY_TRAINING_INFO_V3_1;
+
+
 typedef struct SW_I2C_CNTL_DATA_PARAMETERS
 {
   UCHAR    ucControl;
@@ -7623,7 +8198,7 @@
 {
    USHORT usTransmitterObjId;
    USHORT usSupportDevice;
-  UCHAR  ucTransmitterCmdTblId;
+   UCHAR  ucTransmitterCmdTblId;
    UCHAR  ucConfig;
    UCHAR  ucEncoderID;                //available 1st encoder ( default )
    UCHAR  ucOptionEncoderID;    //available 2nd encoder ( optional )
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index ab84d49..7464daf 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -26,6 +26,8 @@
 
 #include "amd_shared.h"
 
+struct cgs_device;
+
 /**
  * enum cgs_gpu_mem_type - GPU memory types
  */
@@ -92,6 +94,7 @@
  */
 enum cgs_ucode_id {
 	CGS_UCODE_ID_SMU = 0,
+	CGS_UCODE_ID_SMU_SK,
 	CGS_UCODE_ID_SDMA0,
 	CGS_UCODE_ID_SDMA1,
 	CGS_UCODE_ID_CP_CE,
@@ -111,6 +114,7 @@
 	CGS_SYSTEM_INFO_PCIE_MLW,
 	CGS_SYSTEM_INFO_CG_FLAGS,
 	CGS_SYSTEM_INFO_PG_FLAGS,
+	CGS_SYSTEM_INFO_GFX_CU_INFO,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
@@ -223,7 +227,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+typedef int (*cgs_gpu_mem_info_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
 				  uint64_t *mc_start, uint64_t *mc_size,
 				  uint64_t *mem_size);
 
@@ -239,7 +243,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+typedef int (*cgs_gmap_kmem_t)(struct cgs_device *cgs_device, void *kmem, uint64_t size,
 			       uint64_t min_offset, uint64_t max_offset,
 			       cgs_handle_t *kmem_handle, uint64_t *mcaddr);
 
@@ -250,7 +254,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+typedef int (*cgs_gunmap_kmem_t)(struct cgs_device *cgs_device, cgs_handle_t kmem_handle);
 
 /**
  * cgs_alloc_gpu_mem() - Allocate GPU memory
@@ -279,7 +283,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
 				   uint64_t size, uint64_t align,
 				   uint64_t min_offset, uint64_t max_offset,
 				   cgs_handle_t *handle);
@@ -291,7 +295,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+typedef int (*cgs_free_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
 
 /**
  * cgs_gmap_gpu_mem() - GPU-map GPU memory
@@ -303,7 +307,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+typedef int (*cgs_gmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
 				  uint64_t *mcaddr);
 
 /**
@@ -315,7 +319,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+typedef int (*cgs_gunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
 
 /**
  * cgs_kmap_gpu_mem() - Kernel-map GPU memory
@@ -326,7 +330,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+typedef int (*cgs_kmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
 				  void **map);
 
 /**
@@ -336,7 +340,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+typedef int (*cgs_kunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
 
 /**
  * cgs_read_register() - Read an MMIO register
@@ -345,7 +349,7 @@
  *
  * Return:  register value
  */
-typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+typedef uint32_t (*cgs_read_register_t)(struct cgs_device *cgs_device, unsigned offset);
 
 /**
  * cgs_write_register() - Write an MMIO register
@@ -353,7 +357,7 @@
  * @offset:	register offset
  * @value:	register value
  */
-typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+typedef void (*cgs_write_register_t)(struct cgs_device *cgs_device, unsigned offset,
 				     uint32_t value);
 
 /**
@@ -363,7 +367,7 @@
  *
  * Return:  register value
  */
-typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
 					    unsigned index);
 
 /**
@@ -372,7 +376,7 @@
  * @offset:	register offset
  * @value:	register value
  */
-typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
 					 unsigned index, uint32_t value);
 
 /**
@@ -382,7 +386,7 @@
  *
  * Return:  Value read
  */
-typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+typedef uint8_t (*cgs_read_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr);
 
 /**
  * cgs_read_pci_config_word() - Read word from PCI configuration space
@@ -391,7 +395,7 @@
  *
  * Return:  Value read
  */
-typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+typedef uint16_t (*cgs_read_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr);
 
 /**
  * cgs_read_pci_config_dword() - Read dword from PCI configuration space
@@ -400,7 +404,7 @@
  *
  * Return:  Value read
  */
-typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+typedef uint32_t (*cgs_read_pci_config_dword_t)(struct cgs_device *cgs_device,
 						unsigned addr);
 
 /**
@@ -409,7 +413,7 @@
  * @addr:	address
  * @value:	value to write
  */
-typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+typedef void (*cgs_write_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr,
 					    uint8_t value);
 
 /**
@@ -418,7 +422,7 @@
  * @addr:	address, must be word-aligned
  * @value:	value to write
  */
-typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+typedef void (*cgs_write_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr,
 					    uint16_t value);
 
 /**
@@ -427,7 +431,7 @@
  * @addr:	address, must be dword-aligned
  * @value:	value to write
  */
-typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+typedef void (*cgs_write_pci_config_dword_t)(struct cgs_device *cgs_device, unsigned addr,
 					     uint32_t value);
 
 
@@ -441,7 +445,7 @@
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
+typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device,
 				      enum cgs_resource_type resource_type,
 				      uint64_t size,
 				      uint64_t offset,
@@ -458,7 +462,7 @@
  * Return: Pointer to start of the table, or NULL on failure
  */
 typedef const void *(*cgs_atom_get_data_table_t)(
-	void *cgs_device, unsigned table,
+	struct cgs_device *cgs_device, unsigned table,
 	uint16_t *size, uint8_t *frev, uint8_t *crev);
 
 /**
@@ -470,7 +474,7 @@
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table,
 					     uint8_t *frev, uint8_t *crev);
 
 /**
@@ -481,7 +485,7 @@
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
 					 unsigned table, void *args);
 
 /**
@@ -491,7 +495,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+typedef int (*cgs_create_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t *request);
 
 /**
  * cgs_destroy_pm_request() - Destroy a power management request
@@ -500,7 +504,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+typedef int (*cgs_destroy_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request);
 
 /**
  * cgs_set_pm_request() - Activate or deactivate a PM request
@@ -516,7 +520,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+typedef int (*cgs_set_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request,
 				    int active);
 
 /**
@@ -528,7 +532,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+typedef int (*cgs_pm_request_clock_t)(struct cgs_device *cgs_device, cgs_handle_t request,
 				      enum cgs_clock clock, unsigned freq);
 
 /**
@@ -540,7 +544,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+typedef int (*cgs_pm_request_engine_t)(struct cgs_device *cgs_device, cgs_handle_t request,
 				       enum cgs_engine engine, int powered);
 
 /**
@@ -551,7 +555,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+typedef int (*cgs_pm_query_clock_limits_t)(struct cgs_device *cgs_device,
 					   enum cgs_clock clock,
 					   struct cgs_clock_limits *limits);
 
@@ -563,7 +567,7 @@
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+typedef int (*cgs_set_camera_voltages_t)(struct cgs_device *cgs_device, uint32_t mask,
 					 const uint32_t *voltages);
 /**
  * cgs_get_firmware_info - Get the firmware information from core driver
@@ -573,25 +577,28 @@
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_get_firmware_info)(void *cgs_device,
+typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
 				     enum cgs_ucode_id type,
 				     struct cgs_firmware_info *info);
 
-typedef int(*cgs_set_powergating_state)(void *cgs_device,
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+					 enum cgs_ucode_id type);
+
+typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_powergating_state state);
 
-typedef int(*cgs_set_clockgating_state)(void *cgs_device,
+typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_clockgating_state state);
 
 typedef int(*cgs_get_active_displays_info)(
-					void *cgs_device,
+					struct cgs_device *cgs_device,
 					struct cgs_display_info *info);
 
-typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
 
-typedef int (*cgs_call_acpi_method)(void *cgs_device,
+typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
 					uint32_t acpi_method,
 					uint32_t acpi_function,
 					void *pinput, void *poutput,
@@ -599,7 +606,7 @@
 					uint32_t input_size,
 					uint32_t output_size);
 
-typedef int (*cgs_query_system_info)(void *cgs_device,
+typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
 				struct cgs_system_info *sys_info);
 
 struct cgs_ops {
@@ -641,6 +648,7 @@
 	cgs_set_camera_voltages_t set_camera_voltages;
 	/* Firmware Info */
 	cgs_get_firmware_info get_firmware_info;
+	cgs_rel_firmware rel_firmware;
 	/* cg pg interface*/
 	cgs_set_powergating_state set_powergating_state;
 	cgs_set_clockgating_state set_clockgating_state;
@@ -734,6 +742,8 @@
 	CGS_CALL(set_camera_voltages,dev,mask,voltages)
 #define cgs_get_firmware_info(dev, type, info)	\
 	CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type)	\
+	CGS_CALL(rel_firmware, dev, type)
 #define cgs_set_powergating_state(dev, block_type, state)	\
 	CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)	\
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 3b47ae3..ca4f600 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -66,7 +66,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_id,
 				    unsigned num_types,
 				    cgs_irq_source_set_func_t set,
 				    cgs_irq_handler_func_t handler,
@@ -83,7 +83,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
 
 /**
  * cgs_irq_put() - Indicate IRQ source is no longer needed
@@ -98,7 +98,7 @@
  *
  * Return:  0 on success, -errno otherwise
  */
-typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
 	/* IRQ handling */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9d22900..e629f8a 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -37,6 +37,12 @@
 			return -EINVAL;					\
 	} while (0)
 
+#define PP_CHECK_HW(hwmgr)						\
+	do {								\
+		if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL)	\
+			return -EINVAL;					\
+	} while (0)
+
 static int pp_early_init(void *handle)
 {
 	return 0;
@@ -54,22 +60,29 @@
 	pp_handle = (struct pp_instance *)handle;
 	hwmgr = pp_handle->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->pptable_func == NULL ||
-	    hwmgr->hwmgr_func == NULL ||
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->pptable_func == NULL ||
 	    hwmgr->pptable_func->pptable_init == NULL ||
 	    hwmgr->hwmgr_func->backend_init == NULL)
 		return -EINVAL;
 
 	ret = hwmgr->pptable_func->pptable_init(hwmgr);
-
-	if (ret == 0)
-		ret = hwmgr->hwmgr_func->backend_init(hwmgr);
-
 	if (ret)
-		printk("amdgpu: powerplay initialization failed\n");
-	else
-		printk("amdgpu: powerplay initialized\n");
+		goto err;
 
+	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
+	if (ret)
+		goto err1;
+
+	pr_info("amdgpu: powerplay initialized\n");
+
+	return 0;
+err1:
+	if (hwmgr->pptable_func->pptable_fini)
+		hwmgr->pptable_func->pptable_fini(hwmgr);
+err:
+	pr_err("amdgpu: powerplay initialization failed\n");
 	return ret;
 }
 
@@ -85,10 +98,14 @@
 	pp_handle = (struct pp_instance *)handle;
 	hwmgr = pp_handle->hwmgr;
 
-	if (hwmgr != NULL || hwmgr->hwmgr_func != NULL ||
-	    hwmgr->hwmgr_func->backend_fini != NULL)
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->backend_fini != NULL)
 		ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
 
+	if (hwmgr->pptable_func->pptable_fini)
+		hwmgr->pptable_func->pptable_fini(hwmgr);
+
 	return ret;
 }
 
@@ -172,21 +189,117 @@
 	return 0;
 }
 
-static void pp_print_status(void *handle)
-{
-
-}
 
 static int pp_set_clockgating_state(void *handle,
 				    enum amd_clockgating_state state)
 {
+	struct pp_hwmgr  *hwmgr;
+	uint32_t msg_id, pp_state;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
+
+	if (state == AMD_CG_STATE_UNGATE)
+		pp_state = 0;
+	else
+		pp_state = PP_STATE_CG | PP_STATE_LS;
+
+	/* Enable/disable GFX blocks clock gating through SMU */
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_CG,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_3D,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_RLC,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_CP,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+			PP_BLOCK_GFX_MG,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+
+	/* Enable/disable System blocks clock gating through SMU */
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_BIF,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_BIF,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_MC,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_ROM,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_DRM,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_HDP,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+			PP_BLOCK_SYS_SDMA,
+			PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+			pp_state);
+	hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+
 	return 0;
 }
 
 static int pp_set_powergating_state(void *handle,
 				    enum amd_powergating_state state)
 {
-	return 0;
+	struct pp_hwmgr  *hwmgr;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
+
+	/* Enable/disable GFX per cu powergating through SMU */
+	return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
+			state == AMD_PG_STATE_GATE ? true : false);
 }
 
 static int pp_suspend(void *handle)
@@ -236,6 +349,7 @@
 }
 
 const struct amd_ip_funcs pp_ip_funcs = {
+	.name = "powerplay",
 	.early_init = pp_early_init,
 	.late_init = NULL,
 	.sw_init = pp_sw_init,
@@ -247,7 +361,6 @@
 	.is_idle = pp_is_idle,
 	.wait_for_idle = pp_wait_for_idle,
 	.soft_reset = pp_sw_reset,
-	.print_status = pp_print_status,
 	.set_clockgating_state = pp_set_clockgating_state,
 	.set_powergating_state = pp_set_powergating_state,
 };
@@ -275,9 +388,12 @@
 
 	hwmgr = pp_handle->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	    hwmgr->hwmgr_func->force_dpm_level == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
 
@@ -309,9 +425,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	    hwmgr->hwmgr_func->get_sclk == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_sclk == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
 }
@@ -325,9 +444,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	    hwmgr->hwmgr_func->get_mclk == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_mclk == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 }
@@ -341,9 +463,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	    hwmgr->hwmgr_func->powergate_vce == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
 }
@@ -357,9 +482,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	    hwmgr->hwmgr_func->powergate_uvd == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
 }
@@ -455,10 +583,14 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	  hwmgr->hwmgr_func->print_current_perforce_level == NULL)
+	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
 		return;
 
+	if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return;
+	}
+
 	hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
 }
 
@@ -471,9 +603,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	  hwmgr->hwmgr_func->set_fan_control_mode == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
 }
@@ -487,9 +622,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	  hwmgr->hwmgr_func->get_fan_control_mode == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
 }
@@ -503,9 +641,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	  hwmgr->hwmgr_func->set_fan_speed_percent == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
 }
@@ -519,9 +660,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	  hwmgr->hwmgr_func->get_fan_speed_percent == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
 }
@@ -535,9 +679,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-	  hwmgr->hwmgr_func->get_temperature == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_temperature == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->get_temperature(hwmgr);
 }
@@ -591,9 +738,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-		hwmgr->hwmgr_func->get_pp_table == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_pp_table == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
 }
@@ -607,15 +757,18 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-		hwmgr->hwmgr_func->set_pp_table == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
+
+	if (hwmgr->hwmgr_func->set_pp_table == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 
 	return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
 }
 
 static int pp_dpm_force_clock_level(void *handle,
-		enum pp_clock_type type, int level)
+		enum pp_clock_type type, uint32_t mask)
 {
 	struct pp_hwmgr *hwmgr;
 
@@ -624,11 +777,14 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-			hwmgr->hwmgr_func->force_clock_level == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
 
-	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
+	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
+
+	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 }
 
 static int pp_dpm_print_clock_levels(void *handle,
@@ -641,10 +797,12 @@
 
 	hwmgr = ((struct pp_instance *)handle)->hwmgr;
 
-	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
-			hwmgr->hwmgr_func->print_clock_levels == NULL)
-		return -EINVAL;
+	PP_CHECK_HW(hwmgr);
 
+	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
+		printk(KERN_INFO "%s was not implemented.\n", __func__);
+		return 0;
+	}
 	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 56856a2..d6635cc 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -24,7 +24,7 @@
 #include "eventactionchains.h"
 #include "eventsubchains.h"
 
-static const pem_event_action *initialize_event[] = {
+static const pem_event_action * const initialize_event[] = {
 	block_adjust_power_state_tasks,
 	power_budget_tasks,
 	system_config_tasks,
@@ -45,7 +45,7 @@
 	initialize_event
 };
 
-static const pem_event_action *uninitialize_event[] = {
+static const pem_event_action * const uninitialize_event[] = {
 	ungate_all_display_phys_tasks,
 	uninitialize_display_phy_access_tasks,
 	disable_gfx_voltage_island_power_gating_tasks,
@@ -64,7 +64,7 @@
 	uninitialize_event
 };
 
-static const pem_event_action *power_source_change_event_pp_enabled[] = {
+static const pem_event_action * const power_source_change_event_pp_enabled[] = {
 	set_power_source_tasks,
 	set_power_saving_state_tasks,
 	adjust_power_state_tasks,
@@ -79,7 +79,7 @@
 	power_source_change_event_pp_enabled
 };
 
-static const pem_event_action *power_source_change_event_pp_disabled[] = {
+static const pem_event_action * const power_source_change_event_pp_disabled[] = {
 	set_power_source_tasks,
 	set_nbmcu_state_tasks,
 	NULL
@@ -90,7 +90,7 @@
 	power_source_change_event_pp_disabled
 };
 
-static const pem_event_action *power_source_change_event_hardware_dc[] = {
+static const pem_event_action * const power_source_change_event_hardware_dc[] = {
 	set_power_source_tasks,
 	set_power_saving_state_tasks,
 	adjust_power_state_tasks,
@@ -106,7 +106,7 @@
 	power_source_change_event_hardware_dc
 };
 
-static const pem_event_action *suspend_event[] = {
+static const pem_event_action * const suspend_event[] = {
 	reset_display_phy_access_tasks,
 	unregister_interrupt_tasks,
 	disable_gfx_voltage_island_power_gating_tasks,
@@ -130,7 +130,7 @@
 	suspend_event
 };
 
-static const pem_event_action *resume_event[] = {
+static const pem_event_action * const resume_event[] = {
 	unblock_hw_access_tasks,
 	resume_connected_standby_tasks,
 	notify_smu_resume_tasks,
@@ -164,7 +164,7 @@
 	resume_event
 };
 
-static const pem_event_action *complete_init_event[] = {
+static const pem_event_action * const complete_init_event[] = {
 	unblock_adjust_power_state_tasks,
 	adjust_power_state_tasks,
 	enable_gfx_clock_gating_tasks,
@@ -178,7 +178,7 @@
 	complete_init_event
 };
 
-static const pem_event_action *enable_gfx_clock_gating_event[] = {
+static const pem_event_action * const enable_gfx_clock_gating_event[] = {
 	enable_gfx_clock_gating_tasks,
 	NULL
 };
@@ -188,7 +188,7 @@
 	enable_gfx_clock_gating_event
 };
 
-static const pem_event_action *disable_gfx_clock_gating_event[] = {
+static const pem_event_action * const disable_gfx_clock_gating_event[] = {
 	disable_gfx_clock_gating_tasks,
 	NULL
 };
@@ -198,7 +198,7 @@
 	disable_gfx_clock_gating_event
 };
 
-static const pem_event_action *enable_cgpg_event[] = {
+static const pem_event_action * const enable_cgpg_event[] = {
 	enable_cgpg_tasks,
 	NULL
 };
@@ -208,7 +208,7 @@
 	enable_cgpg_event
 };
 
-static const pem_event_action *disable_cgpg_event[] = {
+static const pem_event_action * const disable_cgpg_event[] = {
 	disable_cgpg_tasks,
 	NULL
 };
@@ -221,7 +221,7 @@
 
 /* Enable user _2d performance and activate */
 
-static const pem_event_action *enable_user_state_event[] = {
+static const pem_event_action * const enable_user_state_event[] = {
 	create_new_user_performance_state_tasks,
 	adjust_power_state_tasks,
 	NULL
@@ -232,7 +232,7 @@
 	enable_user_state_event
 };
 
-static const pem_event_action *enable_user_2d_performance_event[] = {
+static const pem_event_action * const enable_user_2d_performance_event[] = {
 	enable_user_2d_performance_tasks,
 	add_user_2d_performance_state_tasks,
 	set_performance_state_tasks,
@@ -247,7 +247,7 @@
 };
 
 
-static const pem_event_action *disable_user_2d_performance_event[] = {
+static const pem_event_action * const disable_user_2d_performance_event[] = {
 	disable_user_2d_performance_tasks,
 	delete_user_2d_performance_state_tasks,
 	NULL
@@ -259,7 +259,7 @@
 };
 
 
-static const pem_event_action *display_config_change_event[] = {
+static const pem_event_action * const display_config_change_event[] = {
 	/* countDisplayConfigurationChangeEventTasks, */
 	unblock_adjust_power_state_tasks,
 	set_cpu_power_state,
@@ -278,7 +278,7 @@
 	display_config_change_event
 };
 
-static const pem_event_action *readjust_power_state_event[] = {
+static const pem_event_action * const readjust_power_state_event[] = {
 	adjust_power_state_tasks,
 	NULL
 };
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
index 1e2ad56..cd1ca07 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
@@ -62,7 +62,7 @@
 
 int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
 {
-	const pem_event_action **paction_chain;
+	const pem_event_action * const *paction_chain;
 	const pem_event_action *psub_chain;
 	int tmp_result = 0;
 	int result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 46410e3..fb88e4e 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -58,9 +58,6 @@
 	pem_unregister_interrupts(eventmgr);
 
 	pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
-	if (eventmgr != NULL)
-		kfree(eventmgr);
 }
 
 int eventmgr_init(struct pp_instance *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index b664e34..f7ce4cb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -8,7 +8,9 @@
 	       tonga_processpptables.o ppatomctrl.o \
                tonga_hwmgr.o pppcielanes.o  tonga_thermal.o\
                fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
-               fiji_clockpowergating.o fiji_thermal.o
+               fiji_clockpowergating.o fiji_thermal.o \
+	       polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
+	       polaris10_clockpowergating.o
 
 AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ff08ce4..436fc16 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -237,7 +237,7 @@
 }
 
 
-static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
+static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
 	/*we don't need an exit table here, because there is only D3 cold on Kv*/
 	{ phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
 	{ phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
@@ -245,7 +245,7 @@
 	{ NULL, NULL }
 };
 
-struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
+const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	cz_enable_clock_power_gatings_list
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
index bbbc057..1954cea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
@@ -28,8 +28,7 @@
 #include "pp_asicblocks.h"
 
 extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern struct phm_master_table_header cz_phm_disable_clock_power_gatings_master;
+extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
 extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
 extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
 extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 5682490..1f14c47 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -915,7 +915,7 @@
 	return 0;
 }
 
-static struct phm_master_table_item cz_set_power_state_list[] = {
+static const struct phm_master_table_item cz_set_power_state_list[] = {
 	{NULL, cz_tf_update_sclk_limit},
 	{NULL, cz_tf_set_deep_sleep_sclk_threshold},
 	{NULL, cz_tf_set_watermark_threshold},
@@ -925,13 +925,13 @@
 	{NULL, NULL}
 };
 
-static struct phm_master_table_header cz_set_power_state_master = {
+static const struct phm_master_table_header cz_set_power_state_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	cz_set_power_state_list
 };
 
-static struct phm_master_table_item cz_setup_asic_list[] = {
+static const struct phm_master_table_item cz_setup_asic_list[] = {
 	{NULL, cz_tf_reset_active_process_mask},
 	{NULL, cz_tf_upload_pptable_to_smu},
 	{NULL, cz_tf_init_sclk_limit},
@@ -943,7 +943,7 @@
 	{NULL, NULL}
 };
 
-static struct phm_master_table_header cz_setup_asic_master = {
+static const struct phm_master_table_header cz_setup_asic_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	cz_setup_asic_list
@@ -984,14 +984,14 @@
 	return 0;
 }
 
-static struct phm_master_table_item cz_power_down_asic_list[] = {
+static const struct phm_master_table_item cz_power_down_asic_list[] = {
 	{NULL, cz_tf_power_up_display_clock_sys_pll},
 	{NULL, cz_tf_clear_nb_dpm_flag},
 	{NULL, cz_tf_reset_cc6_data},
 	{NULL, NULL}
 };
 
-static struct phm_master_table_header cz_power_down_asic_master = {
+static const struct phm_master_table_header cz_power_down_asic_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	cz_power_down_asic_list
@@ -1095,19 +1095,19 @@
 	return 0;
 }
 
-static struct phm_master_table_item cz_disable_dpm_list[] = {
+static const struct phm_master_table_item cz_disable_dpm_list[] = {
 	{ NULL, cz_tf_check_for_dpm_enabled},
 	{NULL, NULL},
 };
 
 
-static struct phm_master_table_header cz_disable_dpm_master = {
+static const struct phm_master_table_header cz_disable_dpm_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	cz_disable_dpm_list
 };
 
-static struct phm_master_table_item cz_enable_dpm_list[] = {
+static const struct phm_master_table_item cz_enable_dpm_list[] = {
 	{ NULL, cz_tf_check_for_dpm_disabled },
 	{ NULL, cz_tf_program_voting_clients },
 	{ NULL, cz_tf_start_dpm},
@@ -1117,7 +1117,7 @@
 	{NULL, NULL},
 };
 
-static struct phm_master_table_header cz_enable_dpm_master = {
+static const struct phm_master_table_header cz_enable_dpm_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	cz_enable_dpm_list
@@ -1729,7 +1729,7 @@
 }
 
 static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, int level)
+		enum pp_clock_type type, uint32_t mask)
 {
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 		return -EINVAL;
@@ -1738,10 +1738,10 @@
 	case PP_SCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 				PPSMC_MSG_SetSclkSoftMin,
-				(1 << level));
+				mask);
 		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 				PPSMC_MSG_SetSclkSoftMax,
-				(1 << level));
+				mask);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
index e68edf0..e1b649b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
@@ -47,10 +47,17 @@
 
 	data->uvd_power_gated = bgate;
 
-	if (bgate)
+	if (bgate) {
+		cgs_set_clockgating_state(hwmgr->device,
+					  AMD_IP_BLOCK_TYPE_UVD,
+					  AMD_CG_STATE_GATE);
 		fiji_update_uvd_dpm(hwmgr, true);
-	else
+	} else {
 		fiji_update_uvd_dpm(hwmgr, false);
+		cgs_set_clockgating_state(hwmgr->device,
+					  AMD_IP_BLOCK_TYPE_UVD,
+					  AMD_PG_STATE_UNGATE);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 89f31bc..586f732 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -95,23 +95,23 @@
 /* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
  * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
  */
-uint16_t fiji_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0},
-                                                {600, 1050, 6, 1} };
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
+{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
 
 /* [FF, SS] type, [] 4 voltage ranges, and
  * [Floor Freq, Boundary Freq, VID min , VID max]
  */
-uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
 { { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
   { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
 
 /* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
  * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
  */
-uint8_t fiji_clock_stretch_amount_conversion[2][6] = { {0, 1, 3, 2, 4, 5},
-                                                  {0, 2, 4, 5, 6, 5} };
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
+{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
 
-const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
+static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
 
 struct fiji_power_state *cast_phw_fiji_power_state(
 				  struct pp_hw_power_state *hw_ps)
@@ -465,14 +465,14 @@
 			table_info->vdd_dep_on_mclk;
 
 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing. 	\
+		"VDD dependency on SCLK table is missing.	\
 		This table is mandatory", return -EINVAL);
 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table has to have is missing. 	\
+		"VDD dependency on SCLK table has to have at least 1 entry.	\
 		This table is mandatory", return -EINVAL);
 
 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing. 	\
+		"VDD dependency on MCLK table is missing.	\
 		This table is mandatory", return -EINVAL);
 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
 		"VDD dependency on MCLK table has to have is missing.	 \
@@ -579,6 +579,18 @@
 	return 0;
 }
 
+static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+	if (data->soft_pp_table) {
+		kfree(data->soft_pp_table);
+		data->soft_pp_table = NULL;
+	}
+
+	return phm_hwmgr_backend_fini(hwmgr);
+}
+
 static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -734,7 +746,7 @@
 			data->pcie_lane_cap = (uint32_t)sys_info.value;
 	} else {
 		/* Ignore return value in here, we are cleaning up a mess. */
-		tonga_hwmgr_backend_fini(hwmgr);
+		fiji_hwmgr_backend_fini(hwmgr);
 	}
 
 	return 0;
@@ -1818,7 +1830,7 @@
 
 	PP_ASSERT_WITH_CODE(false,
 			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-			return vddci_table->entries[i].value);
+			return vddci_table->entries[i-1].value);
 }
 
 static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
@@ -1885,6 +1897,23 @@
 
 	return 0;
 }
+
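+/* Pick the largest deep sleep divider for which the divided engine clock still
+ * satisfies the larger of clock_insr and FIJI_MINIMUM_ENGINE_CLOCK.
+ */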
+static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
+		uint32_t clock_insr)
+{
+	uint8_t i;
+	uint32_t temp;
+	uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
+
+	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
+	for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+		temp = clock >> i;
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return i;
+}
 /**
 * Populates single SMC SCLK structure using the provided engine clock
 *
@@ -1928,17 +1957,13 @@
 
 	threshold = clock * data->fast_watermark_threshold / 100;
 
-	/*
-	* TODO: get minimum clocks from dal configaration
-	* PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
-	*/
-	/* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
 
-	/* get level->DeepSleepDivId
-	if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-	{
-	level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
-	} */
+	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
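+	/* Derive the deep sleep divider from the minimum engine clock required
+	 * while the display is in self refresh.
+	 */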
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
+								hwmgr->display_config.min_core_set_clock_in_sr);
+
 
 	/* Default to slow, highest DPM level will be
 	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
@@ -3364,7 +3389,7 @@
 				DPM_EVENT_SRC, src);
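+		/* THERMAL_PROTECTION_DIS disables thermal protection, so it must be
+		 * written with the inverse of the ThermalController capability.
+		 */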
 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
 				THERMAL_PROTECTION_DIS,
-				phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 						PHM_PlatformCaps_ThermalController));
 	} else
 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
@@ -3548,46 +3573,11 @@
 	return 0;
 }
 
-static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_clock_voltage_dependency_table *table =
-				table_info->vddc_dep_on_dal_pwrl;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
-	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
-	uint32_t req_vddc = 0, req_volt, i;
-
-	if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
-			dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
-		return;
-
-	for (i= 0; i < table->count; i++) {
-		if (dal_power_level == table->entries[i].clk) {
-			req_vddc = table->entries[i].v;
-			break;
-		}
-	}
-
-	vddc_table = table_info->vdd_dep_on_sclk;
-	for (i= 0; i < vddc_table->count; i++) {
-		if (req_vddc <= vddc_table->entries[i].vddc) {
-			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
-					<< VDDC_SHIFT;
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_VddC_Request, req_volt);
-			return;
-		}
-	}
-	printk(KERN_ERR "DAL requested level can not"
-			" found a available voltage in VDDC DPM Table \n");
-}
-
 static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 
-	fiji_apply_dal_min_voltage_request(hwmgr);
+	phm_apply_dal_min_voltage_request(hwmgr);
 
 	if (!data->sclk_dpm_key_disabled) {
 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -4066,7 +4056,6 @@
 	struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
 	uint32_t mclk = fiji_ps->performance_levels
 			[fiji_ps->performance_level_count - 1].memory_clock;
-	struct PP_Clocks min_clocks = {0};
 	uint32_t i;
 	struct cgs_display_info info = {0};
 
@@ -4080,10 +4069,8 @@
 	if (i >= sclk_table->count)
 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
 	else {
-	/* TODO: Check SCLK in DAL's minimum clocks
-	 * in case DeepSleep divider update is required.
-	 */
-		if(data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR)
+		if(data->display_timing.min_clock_in_sr !=
+			hwmgr->display_config.min_core_set_clock_in_sr)
 			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
 	}
 
@@ -4327,7 +4314,7 @@
 
 	if (data->need_update_smu7_dpm_table &
 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
-		result = fiji_populate_all_memory_levels(hwmgr);
+		result = fiji_populate_all_graphic_levels(hwmgr);
 		PP_ASSERT_WITH_CODE((0 == result),
 				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
 				return result);
@@ -5086,24 +5073,40 @@
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 
-	*table = (char *)&data->smc_state_table;
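+	/* Return a private copy of the soft powerplay table instead of pointing
+	 * callers at the live SMC state table.
+	 */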
+	if (!data->soft_pp_table) {
+		data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+					      hwmgr->soft_pp_table_size,
+					      GFP_KERNEL);
+		if (!data->soft_pp_table)
+			return -ENOMEM;
+	}
 
-	return sizeof(struct SMU73_Discrete_DpmTable);
+	*table = (char *)data->soft_pp_table;
+
+	return hwmgr->soft_pp_table_size;
 }
 
 static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 
-	void *table = (void *)&data->smc_state_table;
+	if (!data->soft_pp_table) {
+		data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+		if (!data->soft_pp_table)
+			return -ENOMEM;
+	}
 
-	memcpy(table, buf, size);
+	memcpy(data->soft_pp_table, buf, size);
+
+	hwmgr->soft_pp_table = data->soft_pp_table;
+
+	/* TODO: re-init powerplay to implement modified pptable */
 
 	return 0;
 }
 
 static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, int level)
+		enum pp_clock_type type, uint32_t mask)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 
@@ -5115,20 +5118,30 @@
 		if (!data->sclk_dpm_key_disabled)
 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					(1 << level));
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
 		break;
+
 	case PP_MCLK:
 		if (!data->mclk_dpm_key_disabled)
 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					(1 << level));
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
 		break;
+
 	case PP_PCIE:
+	{
+		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+		uint32_t level = 0;
+
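+		/* PCIe DPM forces a single level, so use the index of the highest
+		 * bit set in the requested mask.
+		 */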
+		while (tmp >>= 1)
+			level++;
+
 		if (!data->pcie_dpm_key_disabled)
 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_PCIeDPM_ForceLevel,
-					(1 << level));
+					level);
 		break;
+	}
 	default:
 		break;
 	}
@@ -5252,19 +5265,19 @@
 
 	if (data->display_timing.num_existing_displays != info.display_count)
 		is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
-	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
-		if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
 			is_update_required = true;
-*/
+	}
+
 	return is_update_required;
 }
 
 
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 	.backend_init = &fiji_hwmgr_backend_init,
-	.backend_fini = &tonga_hwmgr_backend_fini,
+	.backend_fini = &fiji_hwmgr_backend_fini,
 	.asic_setup = &fiji_setup_asic_task,
 	.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
 	.force_dpm_level = &fiji_dpm_force_dpm_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index a16f7cd..170edf5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -263,7 +263,7 @@
 	bool                           enable_tdc_limit_feature;
 	bool                           enable_pkg_pwr_tracking_feature;
 	bool                           disable_uvd_power_tune_feature;
-	struct fiji_pt_defaults       *power_tune_defaults;
+	const struct fiji_pt_defaults  *power_tune_defaults;
 	struct SMU73_Discrete_PmFuses  power_tune_table;
 	uint32_t                       dte_tj_offset;
 	uint32_t                       fast_watermark_threshold;
@@ -302,6 +302,9 @@
 	bool                           pg_acp_init;
 	bool                           frtc_enabled;
 	bool                           frtc_status_changed;
+
+	/* soft pptable for re-uploading into smu */
+	void *soft_pp_table;
 };
 
 /* To convert to Q8.8 format for firmware */
@@ -338,7 +341,6 @@
 #define FIJI_UNUSED_GPIO_PIN       0x7F
 
 extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
 extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
 extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
 extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index 6efcb2b..db23a40 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -32,7 +32,7 @@
 #define VOLTAGE_SCALE  4
 #define POWERTUNE_DEFAULT_SET_MAX    1
 
-struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
 		/*sviLoadLIneEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
 		{1,               0xF,             0xFD,
 		/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
@@ -143,7 +143,7 @@
 int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct fiji_pt_defaults *defaults = data->power_tune_defaults;
+	const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
 	SMU73_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -222,7 +222,7 @@
 static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
 {
     struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-    struct fiji_pt_defaults *defaults = data->power_tune_defaults;
+    const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
 
     data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
     data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
@@ -238,7 +238,7 @@
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct  fiji_pt_defaults *defaults = data->power_tune_defaults;
+	const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
 
 	/* TDC number of fraction bits are changed from 8 to 7
 	 * for Fiji as requested by SMC team
@@ -256,7 +256,7 @@
 static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
 {
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-	struct  fiji_pt_defaults *defaults = data->power_tune_defaults;
+	const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
 	uint32_t temp;
 
 	if (fiji_read_smc_sram_dword(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
index e76a7de..92976b6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
@@ -221,8 +221,8 @@
 	if (duty100 == 0)
 		return -EINVAL;
 
-	tmp64 = (uint64_t)speed * 100;
-	do_div(tmp64, duty100);
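+	/* speed is a percentage; scale it against the full duty cycle (duty100) */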
+	tmp64 = (uint64_t)speed * duty100;
+	do_div(tmp64, 100);
 	duty = (uint32_t)tmp64;
 
 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -615,7 +615,7 @@
 	return fiji_thermal_disable_alert(hwmgr);
 }
 
-static struct phm_master_table_item
+static const struct phm_master_table_item
 fiji_thermal_start_thermal_controller_master_list[] = {
 	{NULL, tf_fiji_thermal_initialize},
 	{NULL, tf_fiji_thermal_set_temperature_range},
@@ -630,14 +630,14 @@
 	{NULL, NULL}
 };
 
-static struct phm_master_table_header
+static const struct phm_master_table_header
 fiji_thermal_start_thermal_controller_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	fiji_thermal_start_thermal_controller_master_list
 };
 
-static struct phm_master_table_item
+static const struct phm_master_table_item
 fiji_thermal_set_temperature_range_master_list[] = {
 	{NULL, tf_fiji_thermal_disable_alert},
 	{NULL, tf_fiji_thermal_set_temperature_range},
@@ -645,7 +645,7 @@
 	{NULL, NULL}
 };
 
-struct phm_master_table_header
+static const struct phm_master_table_header
 fiji_thermal_set_temperature_range_master = {
 	0,
 	PHM_MasterTableFlag_None,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 72cfecc..7a705ce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -84,7 +84,7 @@
 }
 
 int phm_construct_table(struct pp_hwmgr *hwmgr,
-			struct phm_master_table_header *master_table,
+			const struct phm_master_table_header *master_table,
 			struct phm_runtime_table_header *rt_table)
 {
 	uint32_t function_count = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 5fb98aa..20f20e0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,10 +30,14 @@
 #include "pppcielanes.h"
 #include "pp_debug.h"
 #include "ppatomctrl.h"
+#include "ppsmc.h"
+
+#define VOLTAGE_SCALE               4
 
 extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
 
 int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -67,6 +71,10 @@
 		case CHIP_FIJI:
 			fiji_hwmgr_init(hwmgr);
 			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+			polaris10_hwmgr_init(hwmgr);
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -85,6 +93,13 @@
 	if (hwmgr == NULL || hwmgr->ps == NULL)
 		return -EINVAL;
 
+	/* free the backend data and the constructed runtime-table function lists */
+	kfree(hwmgr->backend);
+
+	kfree(hwmgr->start_thermal_controller.function_list);
+
+	kfree(hwmgr->set_temperature_range.function_list);
+
 	kfree(hwmgr->ps);
 	kfree(hwmgr);
 	return 0;
@@ -454,7 +469,7 @@
 
 	PP_ASSERT_WITH_CODE(false,
 			"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-			return vddci_table->entries[i].value);
+			return vddci_table->entries[i-1].value);
 }
 
 int phm_find_boot_level(void *table,
@@ -561,3 +576,38 @@
 
 	return level;
 }
+
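+/* Look up the VDDC required by the current DAL power level and request it
+ * from the SMC as the minimum VDDC.
+ */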
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_clock_voltage_dependency_table *table =
+				table_info->vddc_dep_on_dal_pwrl;
+	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+	uint32_t req_vddc = 0, req_volt, i;
+
+	if (!table || table->count <= 0
+		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+		return;
+
+	for (i = 0; i < table->count; i++) {
+		if (dal_power_level == table->entries[i].clk) {
+			req_vddc = table->entries[i].v;
+			break;
+		}
+	}
+
+	vddc_table = table_info->vdd_dep_on_sclk;
+	for (i = 0; i < vddc_table->count; i++) {
+		if (req_vddc <= vddc_table->entries[i].vddc) {
+			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_VddC_Request, req_volt);
+			return;
+		}
+	}
+	printk(KERN_ERR "DAL requested level could not be"
+			" matched to an available voltage in VDDC DPM Table\n");
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
index c9e6c2d..347fef1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
@@ -92,6 +92,8 @@
 struct phm_ppt_v1_pcie_record {
 	uint8_t gen_speed;
 	uint8_t lane_width;
+	uint16_t usreserved;
+	uint32_t pcie_sclk;
 };
 typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
new file mode 100644
index 0000000..8f142a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "polaris10_clockpowergating.h"
+
+int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cf_want_uvd_power_gating(hwmgr))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_UVDPowerOFF);
+	return 0;
+}
+
+int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cf_want_uvd_power_gating(hwmgr)) {
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				  PHM_PlatformCaps_UVDDynamicPowerGating)) {
+			return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_UVDPowerON, 1);
+		} else {
+			return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_UVDPowerON, 0);
+		}
+	}
+
+	return 0;
+}
+
+int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cf_want_vce_power_gating(hwmgr))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_VCEPowerOFF);
+	return 0;
+}
+
+int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cf_want_vce_power_gating(hwmgr))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_VCEPowerON);
+	return 0;
+}
+
+int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SamuPowerGating))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SAMPowerOFF);
+	return 0;
+}
+
+int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SamuPowerGating))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SAMPowerON);
+	return 0;
+}
+
+int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	data->uvd_power_gated = false;
+	data->vce_power_gated = false;
+	data->samu_power_gated = false;
+
+	polaris10_phm_powerup_uvd(hwmgr);
+	polaris10_phm_powerup_vce(hwmgr);
+	polaris10_phm_powerup_samu(hwmgr);
+
+	return 0;
+}
+
+int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (data->uvd_power_gated == bgate)
+		return 0;
+
+	data->uvd_power_gated = bgate;
+
+	if (bgate) {
+		polaris10_update_uvd_dpm(hwmgr, true);
+		polaris10_phm_powerdown_uvd(hwmgr);
+	} else {
+		polaris10_phm_powerup_uvd(hwmgr);
+		polaris10_update_uvd_dpm(hwmgr, false);
+	}
+
+	return 0;
+}
+
+int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (data->vce_power_gated == bgate)
+		return 0;
+
+	data->vce_power_gated = bgate;
+
+	if (bgate)
+		polaris10_phm_powerdown_vce(hwmgr);
+	else
+		polaris10_phm_powerup_vce(hwmgr);
+
+	return 0;
+}
+
+int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (data->samu_power_gated == bgate)
+		return 0;
+
+	data->samu_power_gated = bgate;
+
+	if (bgate) {
+		polaris10_update_samu_dpm(hwmgr, true);
+		polaris10_phm_powerdown_samu(hwmgr);
+	} else {
+		polaris10_phm_powerup_samu(hwmgr);
+		polaris10_update_samu_dpm(hwmgr, false);
+	}
+
+	return 0;
+}
+
+int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+					const uint32_t *msg_id)
+{
+	PPSMC_Msg msg;
+	uint32_t value;
+
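+	/* msg_id encodes the IP group, the block within that group and the
+	 * requested clockgating/light-sleep state; translate it into the
+	 * matching SMC clock gating feature message and mask.
+	 */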
+	switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
+	case PP_GROUP_GFX:
+		switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
+		case PP_BLOCK_GFX_CG:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_GFX_CGCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
+					? PPSMC_MSG_EnableClockGatingFeature
+					: PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_GFX_CGLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_GFX_3D:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_GFX_3DCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+
+			if  (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_GFX_3DLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_GFX_RLC:
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_GFX_RLC_LS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_GFX_CP:
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_GFX_CP_LS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_GFX_MG:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)	?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
+						CG_GFX_OTHERS_MGCG_MASK);
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		default:
+			return -1;
+		}
+		break;
+
+	case PP_GROUP_SYS:
+		switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
+		case PP_BLOCK_SYS_BIF:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_BIF_MGCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			if  (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_BIF_MGLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_SYS_MC:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)	?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_MC_MGCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_MC_MGLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_SYS_DRM:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_DRM_MGCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_DRM_MGLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_SYS_HDP:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_HDP_MGCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_HDP_MGLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_SYS_SDMA:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)	?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_SDMA_MGCG_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+
+			if (PP_STATE_SUPPORT_LS & *msg_id) {
+				msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_SDMA_MGLS_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		case PP_BLOCK_SYS_ROM:
+			if (PP_STATE_SUPPORT_CG & *msg_id) {
+				msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+						PPSMC_MSG_EnableClockGatingFeature :
+						PPSMC_MSG_DisableClockGatingFeature;
+				value = CG_SYS_ROM_MASK;
+
+				if (smum_send_msg_to_smc_with_parameter(
+						hwmgr->smumgr, msg, value))
+					return -1;
+			}
+			break;
+
+		default:
+			return -1;
+
+		}
+		break;
+
+	default:
+		return -1;
+
+	}
+
+	return 0;
+}
+
+/* This function is for Polaris11 only for now.
+ * Powerplay will only control the static per CU Power Gating.
+ * Dynamic per CU Power Gating will be done in gfx.
+ */
+int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+{
+	struct cgs_system_info sys_info = {0};
+	uint32_t active_cus;
+	int result;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
+
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+	if (result)
+		return -EINVAL;
+	else
+		active_cus = sys_info.value;
+
+	if (enable)
+		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
+	else
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_GFX_CU_PG_DISABLE);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
new file mode 100644
index 0000000..88d68cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
+#define _POLARIS10_CLOCK_POWER_GATING_H_
+
+#include "polaris10_hwmgr.h"
+#include "pp_asicblocks.h"
+
+int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+					const uint32_t *msg_id);
+int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
new file mode 100644
index 0000000..f78ffd9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef POLARIS10_DYN_DEFAULTS_H
+#define POLARIS10_DYN_DEFAULTS_H
+
+
+enum Polaris10dpm_TrendDetection {
+	Polaris10Adpm_TrendDetection_AUTO,
+	Polaris10Adpm_TrendDetection_UP,
+	Polaris10Adpm_TrendDetection_DOWN
+};
+typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
+
+/*  We need to fill in the default values */
+
+
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0              0x3FFFC102
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1              0x000400
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2              0xC00080
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3              0xC00200
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4              0xC01680
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5              0xC00033
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6              0xC00033
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7              0x3FFFC000
+
+
+#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT            0x200
+#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT        0
+#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT            0x00C8
+#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT        0x200
+#define PPPOLARIS10_REFERENCEDIVIDER_DFLT                  4
+
+#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT             1687
+
+#define PPPOLARIS10_CGULVPARAMETER_DFLT                    0x00040035
+#define PPPOLARIS10_CGULVCONTROL_DFLT                      0x00007450
+#define PPPOLARIS10_TARGETACTIVITY_DFLT                     50
+#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT                10
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
new file mode 100644
index 0000000..aa6be03
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -0,0 +1,4961 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <asm/div64.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "hwmgr.h"
+#include "polaris10_hwmgr.h"
+#include "polaris10_powertune.h"
+#include "polaris10_dyn_defaults.h"
+#include "polaris10_smumgr.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "tonga_pptable.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "tonga_processpptables.h"
+#include "cgs_common.h"
+#include "smu74.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "polaris10_thermal.h"
+#include "polaris10_clockpowergating.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0       0x05
+#define MC_CG_SEQ_DRAMCONF_S1       0x06
+#define MC_CG_SEQ_YCLK_SUSPEND      0x04
+#define MC_CG_SEQ_YCLK_RESUME       0x0a
+
+
+#define SMC_RAM_END 0x40000
+
+#define SMC_CG_IND_START            0xc0030000
+#define SMC_CG_IND_END              0xc0040000
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+
+#define VDDC_VDDCI_DELTA            200
+
+#define MEM_FREQ_LOW_LATENCY        25000
+#define MEM_FREQ_HIGH_LATENCY       80000
+
+#define MEM_LATENCY_HIGH            45
+#define MEM_LATENCY_LOW             35
+#define MEM_LATENCY_ERR             0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
+
+
+static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
+{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/*  [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
+static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
+{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+  { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/*  [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
+static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
+{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+	DPM_EVENT_SRC_ANALOG = 0,
+	DPM_EVENT_SRC_EXTERNAL = 1,
+	DPM_EVENT_SRC_DIGITAL = 2,
+	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct polaris10_power_state *cast_phw_polaris10_power_state(
+				  struct pp_hw_power_state *hw_ps)
+{
+	PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (struct polaris10_power_state *)hw_ps;
+}
+
+const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
+				 const struct pp_hw_power_state *hw_ps)
+{
+	PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
+				"Invalid Powerstate Type!",
+				 return NULL);
+
+	return (const struct polaris10_power_state *)hw_ps;
+}
+
+static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+{
+	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+	return 0;
+}
+
+uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speedCntl = 0;
+
+	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+			ixPCIE_LC_SPEED_CNTL);
+	return((uint16_t)PHM_GET_FIELD(speedCntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+	uint32_t link_width;
+
+	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+	PP_ASSERT_WITH_CODE((7 >= link_width),
+			"Invalid PCIe lane width!", return 0);
+
+	return decode_pcie_lane_width(link_width);
+}
+
+/**
+* Enable voltage control
+*
+* @param    pHwMgr  the address of the powerplay hardware manager.
+* @return   always PP_Result_OK
+*/
+int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(
+		(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
+		"Failed to enable voltage DPM during DPM Start Function!",
+		return 1;
+	);
+
+	return 0;
+}
+
+/**
+* Checks if we want to support voltage control
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+*/
+static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+	const struct polaris10_hwmgr *data =
+			(const struct polaris10_hwmgr *)(hwmgr->backend);
+
+	return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+	/* enable voltage control */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+	return 0;
+}
+
+/**
+* Create Voltage Tables.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	int result;
+
+	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+				&(data->mvdd_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve MVDD table.",
+				return result);
+	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+				table_info->vdd_dep_on_mclk);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 MVDD table from dependency table.",
+				return result;);
+	}
+
+	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		result = atomctrl_get_voltage_table_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+				&(data->vddci_voltage_table));
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve VDDCI table.",
+				return result);
+	} else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+				table_info->vdd_dep_on_mclk);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 VDDCI table from dependency table.",
+				return result);
+	}
+
+	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+				table_info->vddc_lookup_table);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to retrieve SVI2 VDDC table from lookup table.",
+				return result);
+	}
+
+	PP_ASSERT_WITH_CODE(
+			(data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
+			"Too many voltage values for VDDC. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
+								&(data->vddc_voltage_table)));
+
+	PP_ASSERT_WITH_CODE(
+			(data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
+			"Too many voltage values for VDDCI. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
+					&(data->vddci_voltage_table)));
+
+	PP_ASSERT_WITH_CODE(
+			(data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
+			"Too many voltage values for MVDD. Trimming to fit state table.",
+			phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
+							   &(data->mvdd_voltage_table)));
+
+	return 0;
+}
+
+/**
+* Programs static screen detection parameters
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int polaris10_program_static_screen_threshold_parameters(
+							struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	/* Set static screen threshold unit */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+			data->static_screen_threshold_unit);
+	/* Set static screen threshold */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+			data->static_screen_threshold);
+
+	return 0;
+}
+
+/**
+* Setup display gap for glitch free memory clock switching.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+	uint32_t display_gap =
+			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+					ixCG_DISPLAY_GAP_CNTL);
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+			DISP_GAP, DISPLAY_GAP_IGNORE);
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	/* Clear reset for voting clients before enabling DPM */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+	return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always  0
+*/
+static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, DpmTable),
+			&tmp, data->sram_end);
+
+	if (0 == result)
+		data->dpm_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, SoftRegisters),
+			&tmp, data->sram_end);
+
+	if (!result) {
+		data->soft_regs_start = tmp;
+		smu_data->soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, mcRegisterTable),
+			&tmp, data->sram_end);
+
+	if (!result)
+		data->mc_reg_table_start = tmp;
+
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, FanTable),
+			&tmp, data->sram_end);
+
+	if (!result)
+		data->fan_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+			&tmp, data->sram_end);
+
+	if (!result)
+		data->arb_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU74_Firmware_Header, Version),
+			&tmp, data->sram_end);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (0 != result);
+
+	return error ? -1 : 0;
+}
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+		uint32_t arb_src, uint32_t arb_dest)
+{
+	uint32_t mc_arb_dram_timing;
+	uint32_t mc_arb_dram_timing2;
+	uint32_t burst_time;
+	uint32_t mc_cg_config;
+
+	switch (arb_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+	mc_cg_config |= 0x0000000F;
+	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+	return 0;
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+* This function is to be called from the SetPowerState table.
+*/
+static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+	return polaris10_copy_and_switch_arb_sets(hwmgr,
+			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint32_t i, max_entry;
+
+	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+			data->use_pcie_power_saving_levels), "No pcie performance levels!",
+			return -EINVAL);
+
+	if (data->use_pcie_performance_levels &&
+			!data->use_pcie_power_saving_levels) {
+		data->pcie_gen_power_saving = data->pcie_gen_performance;
+		data->pcie_lane_power_saving = data->pcie_lane_performance;
+	} else if (!data->use_pcie_performance_levels &&
+			data->use_pcie_power_saving_levels) {
+		data->pcie_gen_performance = data->pcie_gen_power_saving;
+		data->pcie_lane_performance = data->pcie_lane_power_saving;
+	}
+
+	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+					SMU74_MAX_LEVELS_LINK,
+					MAX_REGULAR_DPM_NUMBER);
+
+	if (pcie_table != NULL) {
+		/* max_entry is used to make sure we reserve one PCIE level
+		 * for boot level (fix for A+A PSPP issue).
+		 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
+		 * then ignore the last entry. */
+		max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+				SMU74_MAX_LEVELS_LINK : pcie_table->count;
+		for (i = 1; i < max_entry; i++) {
+			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+					get_pcie_gen_support(data->pcie_gen_cap,
+							pcie_table->entries[i].gen_speed),
+					get_pcie_lane_support(data->pcie_lane_cap,
+							pcie_table->entries[i].lane_width));
+		}
+		data->dpm_table.pcie_speed_table.count = max_entry - 1;
+
+		/* Setup BIF_SCLK levels */
+		for (i = 0; i < max_entry; i++)
+			data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+	} else {
+		/* Hardcode Pcie Table */
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Min_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Min_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+				get_pcie_gen_support(data->pcie_gen_cap,
+						PP_Max_PCIEGen),
+				get_pcie_lane_support(data->pcie_lane_cap,
+						PP_Max_PCIELane));
+
+		data->dpm_table.pcie_speed_table.count = 6;
+	}
+	/* Populate last level for boot PCIE level, but do not increment count. */
+	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+			data->dpm_table.pcie_speed_table.count,
+			get_pcie_gen_support(data->pcie_gen_cap,
+					PP_Min_PCIEGen),
+			get_pcie_lane_support(data->pcie_lane_cap,
+					PP_Max_PCIELane));
+
+	return 0;
+}
+
+/*
+ * This function initializes all DPM state tables
+ * for SMU7 based on the dependency table.
+ * The dynamic state patching function will then trim these
+ * state tables to the allowed range based
+ * on the power policy or external client requests,
+ * such as UVD request, etc.
+ */
+int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i;
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
+			table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+			table_info->vdd_dep_on_mclk;
+
+	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+			"SCLK dependency table is missing. This table is mandatory",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+			"SCLK dependency table has to have is missing."
+			"This table is mandatory",
+			return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+			"MCLK dependency table is missing. This table is mandatory",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+			"MCLK dependency table has to have is missing."
+			"This table is mandatory",
+			return -EINVAL);
+
+	/* clear the state table to reset everything to default */
+	phm_reset_single_dpm_table(
+			&data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
+	phm_reset_single_dpm_table(
+			&data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
+
+
+	/* Initialize Sclk DPM table based on allow Sclk values */
+	data->dpm_table.sclk_table.count = 0;
+	for (i = 0; i < dep_sclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+						dep_sclk_table->entries[i].clk) {
+
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+					dep_sclk_table->entries[i].clk;
+
+			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+					(i == 0) ? true : false;
+			data->dpm_table.sclk_table.count++;
+		}
+	}
+
+	/* Initialize Mclk DPM table based on allow Mclk values */
+	data->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < dep_mclk_table->count; i++) {
+		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+				[data->dpm_table.mclk_table.count - 1].value !=
+						dep_mclk_table->entries[i].clk) {
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+							dep_mclk_table->entries[i].clk;
+			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+							(i == 0) ? true : false;
+			data->dpm_table.mclk_table.count++;
+		}
+	}
+
+	/* setup PCIE gen speed levels */
+	polaris10_setup_default_pcie_table(hwmgr);
+
+	/* save a copy of the default DPM table */
+	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+			sizeof(struct polaris10_dpm_table));
+
+	return 0;
+}
+
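+/* Convert a VDDC value to an SVI2 VID code.  Assuming vddc is supplied in mV
+ * and VOLTAGE_SCALE is 4, this computes (1550 - mV) / 6.25: VID 0 corresponds
+ * to 1.55 V and each step subtracts 6.25 mV.
+ */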
+uint8_t convert_to_vid(uint16_t vddc)
+{
+	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param    *hwmgr The address of the hardware manager.
+ * @param    *table The SMC DPM table structure to be populated.
+ * @return   0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU74_Discrete_DpmTable *table)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t count, level;
+
+	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		count = data->mvdd_voltage_table.count;
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; level++) {
+			table->SmioTable2.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+			table->SmioTable2.Pattern[level].Smio =
+				(uint8_t) level;
+			table->Smio[level] |=
+				data->mvdd_voltage_table.entries[level].smio_low;
+		}
+		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+		table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+	}
+
+	return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+					struct SMU74_Discrete_DpmTable *table)
+{
+	uint32_t count, level;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	count = data->vddci_voltage_table.count;
+
+	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; ++level) {
+			table->SmioTable1.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+		}
+	}
+
+	table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+	return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    table  the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	uint32_t count;
+	uint8_t index;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+			table_info->vddc_lookup_table;
+	/* the table is already swapped, so in order to use the values from it,
+	 * we need to swap it back.
+	 * We are populating vddc CAC data to BapmVddc table
+	 * in split and merged mode
+	 */
+	for (count = 0; count < lookup_table->count; count++) {
+		index = phm_get_voltage_index(lookup_table,
+				data->vddc_voltage_table.entries[count].value);
+		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+	}
+
+	return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always  0
+*/
+
+int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	polaris10_populate_smc_vddci_table(hwmgr, table);
+	polaris10_populate_smc_mvdd_table(hwmgr, table);
+	polaris10_populate_cac_table(hwmgr, table);
+
+	return 0;
+}
+
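+/* Fill the ULV (ultra low voltage) state: the VDDC offset comes from the
+ * PPTable and is also expressed as a VID delta for the SMC.
+ */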
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_Ulv *state)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+	state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
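+/* Fill the SMC PCIE link levels from the PCIE speed DPM table.  The entry at
+ * index pcie_speed_table.count is the reserved boot level, hence the <= in
+ * the loop below.
+ */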
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+	int i;
+
+	/* Index (dpm_table->pcie_speed_table.count)
+	 * is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+				dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+	}
+
+	data->smc_state_table.LinkLevelCount =
+			(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
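+/* Return the reference clock used for the FCW calculations: TCLK when the
+ * clock pin is muxed to TCLK, otherwise the display ref_clock (divided by 4
+ * when XTALIN_DIVIDE is set).
+ */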
+static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reference_clock, tmp;
+	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info;
+
+	info.mode_info = &mode_info;
+
+	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+	if (tmp)
+		return TCLK;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	reference_clock = mode_info.ref_clock;
+
+	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+	if (0 != tmp)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param    hwmgr  the address of the hardware manager
+* @param    clock  the engine clock to use to populate the structure
+* @param    sclk   the SMC SCLK structure to be populated
+*/
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+	struct pp_atomctrl_clock_dividers_ai dividers;
+
+	uint32_t ref_clock;
+	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+	uint8_t i;
+	int result;
+	uint64_t temp;
+
+	sclk_setting->SclkFrequency = clock;
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
+	if (result == 0) {
+		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+		sclk_setting->PllRange = dividers.ucSclkPllRange;
+		sclk_setting->Sclk_slew_rate = 0x400;
+		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+		sclk_setting->Pcc_down_slew_rate = 0xffff;
+		sclk_setting->SSc_En = dividers.ucSscEnable;
+		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+		return result;
+	}
+
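+	/* The VBIOS divider lookup failed, so derive the settings manually:
+	 * the FCW is the post-divided target clock over the reference clock
+	 * in 16.16 fixed point, and the PCC and SS targets are hardcoded to
+	 * -10% and -2% of the requested clock.
+	 */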
+	ref_clock = polaris10_get_xclk(hwmgr);
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+		if (clock > data->range_table[i].trans_lower_frequency
+		&& clock <= data->range_table[i].trans_upper_frequency) {
+			sclk_setting->PllRange = i;
+			break;
+		}
+	}
+
+	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+	temp <<= 0x10;
+	do_div(temp, ref_clock);
+	sclk_setting->Fcw_frac = temp & 0xffff;
+
+	pcc_target_percent = 10; /*  Hardcode 10% for now. */
+	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+	ss_target_percent = 2; /*  Hardcode 2% for now. */
+	sclk_setting->SSc_En = 0;
+	if (ss_target_percent) {
+		sclk_setting->SSc_En = 1;
+		ss_target_freq = clock - (clock * ss_target_percent / 100);
+		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+		temp <<= 0x10;
+		do_div(temp, ref_clock);
+		sclk_setting->Fcw1_frac = temp & 0xffff;
+	}
+
+	return 0;
+}
+
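+/* Look up the voltage for a given clock in a clock/voltage dependency table.
+ * The result packs VDDC at VDDC_SHIFT and VDDCI at VDDCI_SHIFT, both scaled
+ * by VOLTAGE_SCALE; MVDD is returned separately through *mvdd.  If the clock
+ * exceeds the table, the last entry is used.
+ */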
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+	uint32_t i;
+	uint16_t vddci;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	*voltage = *mvdd = 0;
+
+	/* clock - voltage dependency table is empty */
+	if (dep_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < dep_table->count; i++) {
+		/* find the first sclk that is >= the requested clock */
+		if (dep_table->entries[i].clk >= clock) {
+			*voltage |= (dep_table->entries[i].vddc *
+					VOLTAGE_SCALE) << VDDC_SHIFT;
+			if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
+				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else if (dep_table->entries[i].vddci)
+				*voltage |= (dep_table->entries[i].vddci *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else {
+				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+						(dep_table->entries[i].vddc -
+								(uint16_t)data->vddc_vddci_delta));
+				*voltage |= (vddci * VOLTAGE_SCALE) <<	VDDCI_SHIFT;
+			}
+
+			if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
+					VOLTAGE_SCALE;
+			else if (dep_table->entries[i].mvdd)
+				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
+					VOLTAGE_SCALE;
+
+			*voltage |= 1 << PHASES_SHIFT;
+			return 0;
+		}
+	}
+
+	/* sclk is bigger than the max sclk in the dependency table */
+	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+	if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i-1].vddci) {
+		vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i - 1].vddc -
+						(uint16_t)data->vddc_vddci_delta));
+		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+	}
+
+	if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+	return 0;
+}
+
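+/* Fallback SCLK FCW range table, used when the VBIOS does not provide one.
+ * Each row is assumed to be { vco_setting, postdiv, fcw_pcc, fcw_trans_upper,
+ * fcw_trans_lower }, matching how the entries are consumed in
+ * polaris10_get_sclk_range_table() below.
+ */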
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
+{ {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
+  {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+  {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
+  {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
+  {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
+  {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
+  {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
+  {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t i, ref_clk;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
+	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+	ref_clk = polaris10_get_xclk(hwmgr);
+
+	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+		for (i = 0; i < NUM_SCLK_RANGE; i++) {
+			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+		}
+		return;
+	}
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+
+		data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+		data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+	}
+}
+
+/**
+* Populates single SMC SCLK structure using the provided engine clock
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    clock the engine clock to use to populate the structure
+* @param    sclk        the SMC SCLK structure to be populated
+*/
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, uint16_t sclk_al_threshold,
+		struct SMU74_Discrete_GraphicsLevel *level)
+{
+	int result, i, temp;
+	/* PP_Clocks minClocks; */
+	uint32_t mvdd;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMU_SclkSetting curr_sclk_setting = { 0 };
+
+	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+	/* populate graphics levels */
+	result = polaris10_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk, clock,
+			&level->MinVoltage, &mvdd);
+
+	PP_ASSERT_WITH_CODE((0 == result),
+			"can not find VDDC voltage value for "
+			"VDDC engine clock dependency table",
+			return result);
+	level->ActivityLevel = sclk_al_threshold;
+
+	level->CcPwrDynRm = 0;
+	level->CcPwrDynRm1 = 0;
+	level->EnabledForActivity = 0;
+	level->EnabledForThrottle = 1;
+	level->UpHyst = 10;
+	level->DownHyst = 0;
+	level->VoltageDownHyst = 0;
+	level->PowerThrottle = 0;
+
+	/*
+	* TODO: get minimum clocks from dal configuration
+	* PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
+	*/
+	/* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
+
+	/* get level->DeepSleepDivId
+	if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
+	*/
+	PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
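+	/* Pick the largest deep-sleep divider (clock >> i) that still keeps
+	 * the divided clock at or above POLARIS10_MINIMUM_ENGINE_CLOCK.
+	 */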
+	for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+		temp = clock >> i;
+
+		if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
+			break;
+	}
+
+	level->DeepSleepDivId = i;
+
+	/* Default to slow, highest DPM level will be
+	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+	 */
+	if (data->update_up_hyst)
+		level->UpHyst = (uint8_t)data->up_hyst;
+	if (data->update_down_hyst)
+		level->DownHyst = (uint8_t)data->down_hyst;
+
+	level->SclkSetting = curr_sclk_setting;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+	return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+	int result = 0;
+	uint32_t array = data->dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+			SMU74_MAX_LEVELS_GRAPHICS;
+	struct SMU74_Discrete_GraphicsLevel *levels =
+			data->smc_state_table.GraphicsLevel;
+	uint32_t i, max_entry;
+	uint8_t hightest_pcie_level_enabled = 0,
+		lowest_pcie_level_enabled = 0,
+		mid_pcie_level_enabled = 0,
+		count = 0;
+
+	polaris10_get_sclk_range_table(hwmgr);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+		result = polaris10_populate_single_graphic_level(hwmgr,
+				dpm_table->sclk_table.dpm_levels[i].value,
+				(uint16_t)data->activity_target[i],
+				&(data->smc_state_table.GraphicsLevel[i]));
+		if (result)
+			return result;
+
+		/* Make sure only DPM levels 0-1 have the Deep Sleep Div ID populated. */
+		if (i > 1)
+			levels[i].DeepSleepDivId = 0;
+	}
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SPLLShutdownSupport))
+		data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+	data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+	data->smc_state_table.GraphicsDpmLevelCount =
+			(uint8_t)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+	if (pcie_table != NULL) {
+		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+				"There must be 1 or more PCIE levels defined in PPTable.",
+				return -EINVAL);
+		max_entry = pcie_entry_cnt - 1;
+		for (i = 0; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel =
+					(uint8_t) ((i < max_entry) ? i : max_entry);
+	} else {
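+		/* No PCIE table in the PPTable: derive the highest, lowest and
+		 * mid enabled PCIE DPM levels from pcie_dpm_enable_mask, then
+		 * pin SCLK level 0 to the lowest, level 1 to the mid and all
+		 * higher levels to the highest enabled PCIE level.
+		 */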
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (hightest_pcie_level_enabled + 1))) != 0))
+			hightest_pcie_level_enabled++;
+
+		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << lowest_pcie_level_enabled)) == 0))
+			lowest_pcie_level_enabled++;
+
+		while ((count < hightest_pcie_level_enabled) &&
+				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+			count++;
+
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+				hightest_pcie_level_enabled ?
+						(lowest_pcie_level_enabled + 1 + count) :
+						hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to hightest_pcie_level_enabled */
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled */
+		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled */
+		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+	}
+	/* the level count is sent to the SMC once at SMC table init and never changes */
+	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, data->sram_end);
+
+	return result;
+}
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int result = 0;
+	struct cgs_display_info info = {0, 0, NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (table_info->vdd_dep_on_mclk) {
+		result = polaris10_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk, clock,
+				&mem_level->MinVoltage, &mem_level->MinMvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find MinVddc voltage value from memory "
+				"VDDC voltage dependency table", return result);
+	}
+
+	mem_level->MclkFrequency = clock;
+	mem_level->StutterEnable = 0;
+	mem_level->EnabledForThrottle = 1;
+	mem_level->EnabledForActivity = 0;
+	mem_level->UpHyst = 0;
+	mem_level->DownHyst = 100;
+	mem_level->VoltageDownHyst = 0;
+	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+	mem_level->StutterEnable = false;
+
+	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	data->display_timing.num_existing_displays = info.display_count;
+
+	if ((data->mclk_stutter_mode_threshold) &&
+		(clock <= data->mclk_stutter_mode_threshold) &&
+		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+				STUTTER_ENABLE) & 0x1))
+		mem_level->StutterEnable = true;
+
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+	}
+	return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param    hwmgr      the address of the hardware manager
+*/
+static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+	int result;
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t array = data->dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+	uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+			SMU74_MAX_LEVELS_MEMORY;
+	struct SMU74_Discrete_MemoryLevel *levels =
+			data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+				"can not populate memory level as memory clock is zero",
+				return -EINVAL);
+		result = polaris10_populate_single_memory_level(hwmgr,
+				dpm_table->mclk_table.dpm_levels[i].value,
+				&levels[i]);
+		if (i == dpm_table->mclk_table.count - 1) {
+			levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+			levels[i].EnabledForActivity = 1;
+		}
+		if (result)
+			return result;
+	}
+
+	/* In order to prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in a higher
+	 * state by default, so that we are not affected by the up threshold
+	 * or MCLK DPM latency.
+	 */
+	levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+	data->smc_state_table.MemoryDpmLevelCount =
+			(uint8_t)dpm_table->mclk_table.count;
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+	/* the level count is sent to the SMC once at SMC table init and never changes */
+	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, data->sram_end);
+
+	return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param    hwmgr      the address of the hardware manager
+* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
+* @param    voltage     the SMC VOLTAGE structure to be populated
+*/
+int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+		uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i = 0;
+
+	if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first MVDD entry whose clock is >= the request */
+		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+				"MVDD Voltage is outside the supported range.",
+				return -EINVAL);
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
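+/* Populate the ACPI (idle) SCLK and MCLK levels: DPM level 0 values when the
+ * respective DPM is enabled, otherwise the VBIOS boot-up values.
+ */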
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	int result = 0;
+	uint32_t sclk_frequency;
+	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMIO_Pattern vol_level;
+	uint32_t mvdd;
+	uint16_t us_mvdd;
+
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (!data->sclk_dpm_key_disabled) {
+		/* Get MinVoltage and Frequency from DPM0,
+		 * already converted to SMC_UL */
+		sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+		result = polaris10_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_sclk,
+				sclk_frequency,
+				&table->ACPILevel.MinVoltage, &mvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Cannot find ACPI VDDC voltage value "
+				"in Clock Dependency Table", );
+	} else {
+		sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+		table->ACPILevel.MinVoltage =
+				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+	}
+
+	result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
+	PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+	table->ACPILevel.DeepSleepDivId = 0;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+	if (!data->mclk_dpm_key_disabled) {
+		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+		table->MemoryACPILevel.MclkFrequency =
+				data->dpm_table.mclk_table.dpm_levels[0].value;
+		result = polaris10_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk,
+				table->MemoryACPILevel.MclkFrequency,
+				&table->MemoryACPILevel.MinVoltage, &mvdd);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Cannot find ACPI VDDCI voltage value "
+				"in Clock Dependency Table",
+				);
+	} else {
+		table->MemoryACPILevel.MclkFrequency =
+				data->vbios_boot_state.mclk_bootup_value;
+		table->MemoryACPILevel.MinVoltage =
+				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+	}
+
+	us_mvdd = 0;
+	if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled))
+		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+	else {
+		if (!polaris10_populate_mvdd_value(hwmgr,
+				data->dpm_table.mclk_table.dpm_levels[0].value,
+				&vol_level))
+			us_mvdd = vol_level.Voltage;
+	}
+
+	if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	table->MemoryACPILevel.StutterEnable = false;
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	table->MemoryACPILevel.ActivityLevel =
+			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+	return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	table->VceLevelCount = (uint8_t)(mm_table->count);
+	table->VceBootLevel = 0;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage = 0;
+		table->VceLevel[count].MinVoltage |=
+				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->VceLevel[count].MinVoltage |=
+				((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/*retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for VCE engine clock",
+				return result);
+
+		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+		SMU74_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	table->SamuBootLevel = 0;
+	table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].MinVoltage = 0;
+		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for samu clock", return result);
+
+		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+	}
+	return result;
+}
+
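+/* Ask the VBIOS to program the DRAM timings for the given engine/memory clock
+ * pair, then capture the resulting MC_ARB_DRAM_TIMING/TIMING2 and burst time
+ * registers into one SMC ARB table entry.
+ */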
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+		int32_t eng_clock, int32_t mem_clock,
+		SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+	uint32_t dram_timing;
+	uint32_t dram_timing2;
+	uint32_t burst_time;
+	int result;
+
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+			eng_clock, mem_clock);
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error calling VBIOS to set DRAM_TIMING.", return result);
+
+	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+	arb_regs->McArbBurstTime   = (uint8_t)burst_time;
+
+	return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+	uint32_t i, j;
+	int result = 0;
+
+	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+			result = polaris10_populate_memory_timing_parameters(hwmgr,
+					data->dpm_table.sclk_table.dpm_levels[i].value,
+					data->dpm_table.mclk_table.dpm_levels[j].value,
+					&arb_regs.entries[i][j]);
+			if (result == 0)
+				result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
+			if (result != 0)
+				return result;
+		}
+	}
+
+	result = polaris10_copy_bytes_to_smc(
+			hwmgr->smumgr,
+			data->arb_table_start,
+			(uint8_t *)&arb_regs,
+			sizeof(SMU74_Discrete_MCArbDramTimingTable),
+			data->sram_end);
+	return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	table->UvdLevelCount = (uint8_t)(mm_table->count);
+	table->UvdBootLevel = 0;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].MinVoltage = 0;
+		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+		table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+				data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value for VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].VclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Vclk clock", return result);
+
+		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Dclk clock", return result);
+
+		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+	}
+	return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table */
+	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+			data->vbios_boot_state.sclk_bootup_value,
+			(uint32_t *)&(table->GraphicsBootLevel));
+
+	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+			data->vbios_boot_state.mclk_bootup_value,
+			(uint32_t *)&(table->MemoryBootLevel));
+
+	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+			VOLTAGE_SCALE;
+
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+	return 0;
+}
+
+
+static int polaris10_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint8_t count, level;
+
+	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+				data->vbios_boot_state.sclk_bootup_value) {
+			data->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+				data->vbios_boot_state.mclk_bootup_value) {
+			data->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+
+	return 0;
+}
+
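+/* Program the clock stretcher (CKS) data: read RO from the efuse to classify
+ * the part as SS or FF, compute a per-SCLK-level voltage offset, and fill in
+ * the CKS lookup and DDT tables accordingly.
+ */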
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+			volt_with_cks, value;
+	uint16_t clock_freq_u16;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+			volt_offset = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+
+	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	/* Read SMU_EFUSE, calculate RO, and determine whether the part is SS
+	 * or FF; if RO >= 1660MHz, the part is FF.
+	 */
+	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (146 * 4));
+	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (148 * 4));
+	efuse &= 0xFF000000;
+	efuse = efuse >> 24;
+	efuse2 &= 0xF;
+
+	if (efuse2 == 1)
+		ro = (2300 - 1350) * efuse / 255 + 1350;
+	else
+		ro = (2500 - 1000) * efuse / 255 + 1000;
+
+	if (ro >= 1660)
+		type = 0;
+	else
+		type = 1;
+
+	/* Populate Stretch amount */
+	data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	for (i = 0; i < sclk_table->count; i++) {
+		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+				sclk_table->entries[i].cks_enable << i;
+		volt_without_cks = (uint32_t)((14041 *
+			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+		volt_with_cks = (uint32_t)((13946 *
+			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+		if (volt_without_cks >= volt_with_cks)
+			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+	}
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			STRETCH_ENABLE, 0x0);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			masterReset, 0x1);
+	/* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+			masterReset, 0x0);
+
+	/* Populate CKS Lookup Table */
+	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+		stretch_amount2 = 0;
+	else if (stretch_amount == 3 || stretch_amount == 4)
+		stretch_amount2 = 1;
+	else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ClockStretcher);
+		PP_ASSERT_WITH_CODE(false,
+				"Stretch Amount in PPTable not supported\n",
+				return -EINVAL);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixPWR_CKS_CNTL);
+	value &= 0xFFC2FF87;
+	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+			polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
+	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+			polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
+	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
+			GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
+	if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
+	&& polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
+		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+		value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+		value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+		value |= (polaris10_clock_stretch_amount_conversion
+				[polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
+				 [stretch_amount]) << 3;
+	}
+	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
+	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
+	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+			polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+			(polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixPWR_CKS_CNTL, value);
+
+	/* Populate DDT Lookup Table */
+	for (i = 0; i < 4; i++) {
+		/* Assign the minimum and maximum VID stored
+		 * in the last row of Clock Stretcher Voltage Table.
+		 */
+		data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
+				(uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
+		data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
+				(uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
+		/* Loop through each SCLK and check the frequency
+		 * to see if it lies within the frequency for clock stretcher.
+		 */
+		for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
+			cks_setting = 0;
+			clock_freq = PP_SMC_TO_HOST_UL(
+					data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
+			/* Check the allowed frequency against sclk level[j].
+			 * The sclk's endianness has already been converted and it
+			 * is in 10 kHz units, as opposed to the DDT table, which
+			 * is in MHz.
+			 */
+			if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
+				cks_setting |= 0x2;
+				if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
+					cks_setting |= 0x1;
+			}
+			data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
+							|= cks_setting << (j * 2);
+		}
+		CONVERT_FROM_HOST_TO_SMC_US(
+			data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
+	}
+
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+	value &= 0xFFFFFFFE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+	return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param    hwmgr   the address of the hardware manager
+* @param    table   the SMC DPM table structure to be populated
+* @return   always 0
+*/
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+		struct SMU74_Discrete_DpmTable *table)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint16_t config;
+
+	config = VR_MERGED_WITH_VDDC;
+	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+	/* Set Vddc Voltage Controller */
+	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		config = VR_SVI2_PLANE_1;
+		table->VRConfig |= config;
+	} else {
+		PP_ASSERT_WITH_CODE(false,
+				"VDDC should be on SVI2 control in merged mode!",
+				);
+	}
+	/* Set Vddci Voltage Controller */
+	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		config = VR_SVI2_PLANE_2;  /* only in merged mode */
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		config = VR_SMIO_PATTERN_1;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	}
+	/* Set Mvdd Voltage Controller */
+	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		config = VR_SVI2_PLANE_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	} else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		config = VR_SMIO_PATTERN_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	}
+
+	return 0;
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+	const struct polaris10_ulv_parm *ulv = &(data->ulv);
+	uint8_t i;
+	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+	pp_atomctrl_clock_dividers_vi dividers;
+
+	result = polaris10_setup_default_dpm_tables(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to setup default DPM tables!", return result);
+
+	if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
+		polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+	table->SystemFlags = 0;
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (data->is_memory_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
+		result = polaris10_populate_ulv_state(hwmgr, table);
+		PP_ASSERT_WITH_CODE(0 == result,
+				"Failed to initialize ULV state!", return result);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
+	}
+
+	result = polaris10_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Link Level!", return result);
+
+	result = polaris10_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Graphics Level!", return result);
+
+	result = polaris10_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Memory Level!", return result);
+
+	result = polaris10_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize ACPI Level!", return result);
+
+	result = polaris10_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize VCE Level!", return result);
+
+	result = polaris10_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize SAMU Level!", return result);
+
+	/* Since only the initial state is completely set up at this point
+	 * (the other states are just copies of the boot state) we only
+	 * need to populate the  ARB settings for the initial state.
+	 */
+	result = polaris10_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to Write ARB settings for the initial state.", return result);
+
+	result = polaris10_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize UVD Level!", return result);
+
+	result = polaris10_populate_smc_boot_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Boot Level!", return result);
+
+	result = polaris10_populate_smc_initailial_state(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to initialize Boot State!", return result);
+
+	result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to populate BAPM Parameters!", return result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ClockStretcher)) {
+		result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+		PP_ASSERT_WITH_CODE(0 == result,
+				"Failed to populate Clock Stretcher Data Table!",
+				return result);
+	}
+	table->CurrSclkPllRange = 0xff;
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
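+	/* Temperature limits are programmed in the SMC's Q8.8 fixed-point format (POLARIS10_Q88_FORMAT_CONVERSION_UNIT). */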
+	table->TemperatureLimitHigh =
+			table_info->cac_dtp_table->usTargetOperatingTemp *
+			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
+	table->TemperatureLimitLow  =
+			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+			POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
+	table->MemoryVoltageChangeEnable = 1;
+	table->MemoryInterval = 1;
+	table->VoltageResponseTime = 0;
+	table->PhaseResponseTime = 0;
+	table->MemoryThermThrottleEnable = 1;
+	table->PCIeBootLinkLevel = 0;
+	table->PCIeGenInterval = 1;
+	table->VRConfig = 0;
+
+	result = polaris10_populate_vr_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to populate VRConfig setting!", return result);
+
+	table->ThermGpio = 17;
+	table->SclkStepSize = 0x4000;
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+	} else {
+		table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+			&gpio_pin)) {
+		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	} else {
+		table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	}
+
+	/* Thermal Output GPIO */
+	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+			&gpio_pin)) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ThermalOutGPIO);
+
+		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity read GPIOPAD_A with assigned Gpio pin
+		 * since VBIOS will program this register to set 'inactive state',
+		 * driver can then determine 'active state' from this and
+		 * program SMU with correct polarity
+		 */
+		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+		/* if required, combine VRHot/PCC with thermal out GPIO */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+		&& phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+	} else {
+		table->ThermOutGpio = 17;
+		table->ThermOutPolarity = 1;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+	}
+
+	/* Populate BIF_SCLK levels into SMC DPM table */
+	for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
+		PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+		if (i == 0)
+			table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+		else
+			table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+	}
+
+	for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
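+	/* Convert the host-endian fields below to the byte order the SMC expects before uploading. */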
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+	result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
+			data->dpm_table_start +
+			offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+			(uint8_t *)&(table->SystemFlags),
+			sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+			data->sram_end);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"Failed to upload dpm data to SMC memory!", return result);
+
+	return 0;
+}
+
+/**
+* Initialize the ARB DRAM timing table's index field.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success, otherwise an SMC SRAM access error.
+*/
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+	const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t tmp;
+	int result;
+
+	/* This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table, only
+	 * individual fields in it.
+	 * In reality this field should not be in that structure
+	 * but in a soft register.
+	 */
+	result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			data->arb_table_start, &tmp, data->sram_end);
+
+	if (result)
+		return result;
+
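+	/* Keep the low 24 bits and set the 'current' set index (top byte) to ARB frequency set F1. */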
+	tmp &= 0x00FFFFFF;
+	tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+	return polaris10_write_smc_sram_dword(hwmgr->smumgr,
+			data->arb_table_start, tmp, data->sram_end);
+}
+
+static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_RegulatorHot))
+		return smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_EnableVRHotGPIOInterrupt);
+
+	return 0;
+}
+
+static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+			SCLK_PWRMGT_OFF, 0);
+	return 0;
+}
+
+static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+	if (ulv->ulv_supported)
+		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+
+	return 0;
+}
+
+static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep)) {
+		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to enable Master Deep Sleep switch failed!",
+					return -1);
+	} else {
+		if (smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to disable Master Deep Sleep switch failed!",
+					return -1);
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	/* enable SCLK dpm */
+	if (!data->sclk_dpm_key_disabled)
+		PP_ASSERT_WITH_CODE(
+		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+		"Failed to enable SCLK DPM during DPM Start Function!",
+		return -1);
+
+	/* enable MCLK dpm */
+	if (0 == data->mclk_dpm_key_disabled) {
+
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_MCLKDPM_Enable)),
+				"Failed to enable MCLK DPM during DPM Start Function!",
+				return -1);
+
+		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
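+		/* Two-step LCAC programming sequence for MC0, MC1 and CPL, with a short delay between the steps. */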
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+		udelay(10);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+	}
+
+	return 0;
+}
+
+static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	/*enable general power management */
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+			GLOBAL_PWRMGT_EN, 1);
+
+	/* enable sclk deep sleep */
+
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+			DYNAMIC_PM_EN, 1);
+
+	/* prepare for PCIE DPM */
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			data->soft_regs_start + offsetof(SMU74_SoftRegisters,
+					VoltageChangeTimeout), 0x1000);
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+			SWRST_COMMAND_1, RESETLC, 0x0);
+/*
+	PP_ASSERT_WITH_CODE(
+			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+					PPSMC_MSG_Voltage_Cntl_Enable)),
+			"Failed to enable voltage DPM during DPM Start Function!",
+			return -1);
+*/
+
+	if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
+		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
+		return -1;
+	}
+
+	/* enable PCIE dpm */
+	if (0 == data->pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_Enable)),
+				"Failed to enable pcie DPM during DPM Start Function!",
+				return -1);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_Falcon_QuickTransition)) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_EnableACDCGPIOInterrupt)),
+				"Failed to enable AC DC GPIO Interrupt!",
+				);
+	}
+
+	return 0;
+}
+
+static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+	bool protection;
+	enum DPM_EVENT_SRC src;
+
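+	/* Map the enabled auto-throttle sources to an SMC DPM event source and decide whether thermal protection is needed. */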
+	switch (sources) {
+	default:
+		printk(KERN_ERR "Unknown throttling event sources.\n");
+		/* fall through */
+	case 0:
+		protection = false;
+		/* src is unused */
+		break;
+	case (1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External):
+		protection = true;
+		src = DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case (1 << PHM_AutoThrottleSource_External) |
+			(1 << PHM_AutoThrottleSource_Thermal):
+		protection = true;
+		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+		break;
+	}
+	/* Order matters - don't enable thermal protection for the wrong source. */
+	if (protection) {
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+				DPM_EVENT_SRC, src);
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS,
+				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ThermalController));
+	} else
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+				THERMAL_PROTECTION_DIS, 1);
+}
+
+static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+		PHM_AutoThrottleSource source)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
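+	/* Only reprogram the DPM event sources when this source was not already active. */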
+	if (!(data->active_auto_throttle_sources & (1 << source))) {
+		data->active_auto_throttle_sources |= 1 << source;
+		polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+	}
+	return 0;
+}
+
+static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+	return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	data->pcie_performance_request = true;
+
+	return 0;
+}
+
+int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+	PP_ASSERT_WITH_CODE(tmp_result == 0,
+			"DPM is already running right now, no need to enable DPM!",
+			return 0);
+
+	if (polaris10_voltage_control(hwmgr)) {
+		tmp_result = polaris10_enable_voltage_control(hwmgr);
+		PP_ASSERT_WITH_CODE(tmp_result == 0,
+				"Failed to enable voltage control!",
+				result = tmp_result);
+
+		tmp_result = polaris10_construct_voltage_tables(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to construct voltage tables!",
+				result = tmp_result);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ThermalController))
+		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+	tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program static screen threshold parameters!",
+			result = tmp_result);
+
+	tmp_result = polaris10_enable_display_gap(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable display gap!", result = tmp_result);
+
+	tmp_result = polaris10_program_voting_clients(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program voting clients!", result = tmp_result);
+
+	tmp_result = polaris10_process_firmware_header(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to process firmware header!", result = tmp_result);
+
+	tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize switch from ArbF0 to F1!",
+			result = tmp_result);
+
+	tmp_result = polaris10_init_smc_table(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize SMC table!", result = tmp_result);
+
+	tmp_result = polaris10_init_arb_table_index(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to initialize ARB table index!", result = tmp_result);
+
+	tmp_result = polaris10_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to populate PM fuses!", result = tmp_result);
+
+	tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+	tmp_result = polaris10_enable_sclk_control(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable SCLK control!", result = tmp_result);
+
+	tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable voltage control!", result = tmp_result);
+
+	tmp_result = polaris10_enable_ulv(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable ULV!", result = tmp_result);
+
+	tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable deep sleep master switch!", result = tmp_result);
+
+	tmp_result = polaris10_start_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to start DPM!", result = tmp_result);
+
+	tmp_result = polaris10_enable_smc_cac(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable SMC CAC!", result = tmp_result);
+
+	tmp_result = polaris10_enable_power_containment(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable power containment!", result = tmp_result);
+
+	tmp_result = polaris10_power_control_set_level(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to power control set level!", result = tmp_result);
+
+	tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable thermal auto throttle!", result = tmp_result);
+
+	tmp_result = polaris10_pcie_performance_request(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"pcie performance request failed!", result = tmp_result);
+
+	return result;
+}
+
+int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+
+	return 0;
+}
+
+int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
+{
+
+	return 0;
+}
+
+int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (data->soft_pp_table) {
+		kfree(data->soft_pp_table);
+		data->soft_pp_table = NULL;
+	}
+
+	return phm_hwmgr_backend_fini(hwmgr);
+}
+
+int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+		PHM_PlatformCaps_DynamicPatchPowerState);
+
+	if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_EnableMVDDControl);
+
+	if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ControlVDDCI);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			 PHM_PlatformCaps_TablelessHardwareInterface);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DynamicPowerManagement);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UnTabledHardwareInterface);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_TablelessHardwareInterface);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SMC);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_NonABMSupportInPPLib);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_DynamicUVDState);
+
+	/* Power tune caps: assume disabled */
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_SQRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_DBRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_TDRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_TCPRamping);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_PowerContainment);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+							PHM_PlatformCaps_CAC);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_RegulatorHot);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_AutomaticDCTransition);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_ODFuzzyFanControlSupport);
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+						PHM_PlatformCaps_FanSpeedInTableIsRPM);
+	if (hwmgr->chip_id == CHIP_POLARIS11)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SPLLShutdownSupport);
+	return 0;
+}
+
+static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	polaris10_initialize_power_tune_defaults(hwmgr);
+
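+	/* Seed the PCIe gen/lane ranges with inverted extremes so the per-state
+	 * scan in polaris10_get_pp_table_entry() can widen them with simple
+	 * min/max comparisons.
+	 */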
+	data->pcie_gen_performance.max = PP_PCIEGen1;
+	data->pcie_gen_performance.min = PP_PCIEGen3;
+	data->pcie_gen_power_saving.max = PP_PCIEGen1;
+	data->pcie_gen_power_saving.min = PP_PCIEGen3;
+	data->pcie_lane_performance.max = 0;
+	data->pcie_lane_performance.min = 16;
+	data->pcie_lane_power_saving.max = 0;
+	data->pcie_lane_power_saving.min = 16;
+}
+
+/**
+* Get Leakage VDDC based on leakage ID.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   always 0
+*/
+static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint16_t vv_id;
+	uint16_t vddc = 0;
+	uint16_t i, j;
+	uint32_t sclk = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+	int result;
+
+	for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
+		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+		if (!phm_get_sclk_for_voltage_evv(hwmgr,
+				table_info->vddc_lookup_table, vv_id, &sclk)) {
+			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_ClockStretcher)) {
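+				/* If clock stretching is supported but disabled for this
+				 * sclk entry, nudge sclk upward before the EVV lookup.
+				 */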
+				for (j = 1; j < sclk_table->count; j++) {
+					if (sclk_table->entries[j].clk == sclk &&
+							sclk_table->entries[j].cks_enable == 0) {
+						sclk += 5000;
+						break;
+					}
+				}
+			}
+
+			PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+							VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
+						"Error retrieving EVV voltage value!",
+						continue);
+
+			/* need to make sure vddc is less than 2V, or else it could burn the ASIC. */
+			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+					"Invalid VDDC value", result = -EINVAL;);
+
+			/* the voltage should not be zero nor equal to leakage ID */
+			if (vddc != 0 && vddc != vv_id) {
+				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
+				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
+				data->vddc_leakage.count++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param     hwmgr  the address of the powerplay hardware manager.
+ * @param     voltage  pointer to the voltage value to patch
+ * @param     leakage_table  pointer to the leakage voltage table
+ */
+static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+		uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
+{
+	uint32_t index;
+
+	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
+	for (index = 0; index < leakage_table->count; index++) {
+		/* if this voltage matches a leakage voltage ID */
+		/* patch with actual leakage voltage */
+		if (leakage_table->leakage_id[index] == *voltage) {
+			*voltage = leakage_table->actual_voltage[index];
+			break;
+		}
+	}
+
+	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+		printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
+}
+
+/**
+* Patch voltage lookup table by EVV leakages.
+*
+* @param     hwmgr  the address of the powerplay hardware manager.
+* @param     lookup_table  pointer to the voltage lookup table
+* @param     leakage_table  pointer to the leakage voltage table
+* @return     always 0
+*/
+static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
+		phm_ppt_v1_voltage_lookup_table *lookup_table,
+		struct polaris10_leakage_voltage *leakage_table)
+{
+	uint32_t i;
+
+	for (i = 0; i < lookup_table->count; i++)
+		polaris10_patch_with_vdd_leakage(hwmgr,
+				&lookup_table->entries[i].us_vdd, leakage_table);
+
+	return 0;
+}
+
+static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
+		struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
+		uint16_t *vddc)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
+	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+			table_info->max_clock_voltage_on_dc.vddc;
+	return 0;
+}
+
+static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
+		struct pp_hwmgr *hwmgr)
+{
+	uint8_t entryId;
+	uint8_t voltageId;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+			table_info->vdd_dep_on_mclk;
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+
+	for (entryId = 0; entryId < sclk_table->count; ++entryId) {
+		voltageId = sclk_table->entries[entryId].vddInd;
+		sclk_table->entries[entryId].vddc =
+				table_info->vddc_lookup_table->entries[voltageId].us_vdd;
+	}
+
+	for (entryId = 0; entryId < mclk_table->count; ++entryId) {
+		voltageId = mclk_table->entries[entryId].vddInd;
+		mclk_table->entries[entryId].vddc =
+			table_info->vddc_lookup_table->entries[voltageId].us_vdd;
+	}
+
+	for (entryId = 0; entryId < mm_table->count; ++entryId) {
+		voltageId = mm_table->entries[entryId].vddcInd;
+		mm_table->entries[entryId].vddc =
+			table_info->vddc_lookup_table->entries[voltageId].us_vdd;
+	}
+
+	return 0;
+
+}
+
+static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+	/* Need to determine if we need calculated voltage. */
+	return 0;
+}
+
+static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
+{
+	/* Need to determine if we need calculated voltage from mm table. */
+	return 0;
+}
+
+static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+	uint32_t table_size, i, j;
+	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+	table_size = lookup_table->count;
+
+	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+		"Lookup table is empty", return -EINVAL);
+
+	/* Sorting voltages */
+	for (i = 0; i < table_size - 1; i++) {
+		for (j = i + 1; j > 0; j--) {
+			if (lookup_table->entries[j].us_vdd <
+					lookup_table->entries[j - 1].us_vdd) {
+				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+				lookup_table->entries[j - 1] = lookup_table->entries[j];
+				lookup_table->entries[j] = tmp_voltage_lookup_record;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	int tmp_result;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
+			table_info->vddc_lookup_table, &(data->vddc_leakage));
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
+	if (tmp_result)
+		result = tmp_result;
+
+	tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+	if (tmp_result)
+		result = tmp_result;
+
+	return result;
+}
+
+static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+						table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+						table_info->vdd_dep_on_mclk;
+
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+		"VDD dependency on SCLK table is missing.	\
+		This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+		"VDD dependency on SCLK table is empty. \
+		This table is mandatory", return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+		"VDD dependency on MCLK table is missing.	\
+		This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+		"VDD dependency on MCLK table is empty. \
+		This table is mandatory", return -EINVAL);
+
+	table_info->max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+	table_info->max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+	table_info->max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+	table_info->max_clock_voltage_on_ac.vddci =
+		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
+
+	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
+
+	return 0;
+}
+
+int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+	uint32_t temp_reg;
+	int result;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	data->dll_default_on = false;
+	data->sram_end = SMC_RAM_END;
+	data->mclk_dpm0_activity_target = 0xa;
+	data->disable_dpm_mask = 0xFF;
+	data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
+	data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
+	data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+	data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+
+	data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
+	data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
+	data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
+	data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
+	data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
+	data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
+	data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
+	data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
+
+	data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
+
+	data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
+
+	/* need to set voltage control types before EVV patching */
+	data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+	data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+	data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+
+	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+		data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_EnableMVDDControl)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+			data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
+		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+			data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ControlVDDCI)) {
+		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+			data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
+		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+			data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
+	}
+
+	polaris10_set_features_platform_caps(hwmgr);
+
+	polaris10_init_dpm_defaults(hwmgr);
+
+	/* Get leakage voltage based on leakage ID. */
+	result = polaris10_get_evv_voltages(hwmgr);
+
+	if (result) {
+		printk(KERN_ERR "Get EVV voltages failed. Abort driver loading!\n");
+		return -1;
+	}
+
+	polaris10_complete_dependency_tables(hwmgr);
+	polaris10_set_private_data_based_on_pptable(hwmgr);
+
+	/* Initialize Dynamic State Adjustment Rule Settings */
+	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+	if (0 == result) {
+		struct cgs_system_info sys_info = {0};
+
+		data->is_tlu_enabled = 0;
+
+		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+							POLARIS10_MAX_HARDWARE_POWERLEVELS;
+		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+		if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+			temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
+			switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+			case 0:
+				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+				break;
+			case 1:
+				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+				break;
+			case 2:
+				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+				break;
+			case 3:
+				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+				break;
+			case 4:
+				temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+				break;
+			default:
+				PP_ASSERT_WITH_CODE(0,
+				"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
+				);
+				break;
+			}
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
+		}
+
+		if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
+			hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
+			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
+				(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
+				(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
+				(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
+
+			table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
+									(table_info->cac_dtp_table->usDefaultTargetOperatingTemp -50) : 0;
+
+			table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+			table_info->cac_dtp_table->usOperatingTempStep = 1;
+			table_info->cac_dtp_table->usOperatingTempHyst = 1;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
+				       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+				       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
+
+			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
+				       table_info->cac_dtp_table->usOperatingTempMinLimit;
+
+			hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
+				       table_info->cac_dtp_table->usOperatingTempMaxLimit;
+
+			hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
+				       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+
+			hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
+				       table_info->cac_dtp_table->usOperatingTempStep;
+
+			hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
+				       table_info->cac_dtp_table->usTargetOperatingTemp;
+		}
+
+		sys_info.size = sizeof(struct cgs_system_info);
+		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+		result = cgs_query_system_info(hwmgr->device, &sys_info);
+		if (result)
+			data->pcie_gen_cap = 0x30007;
+		else
+			data->pcie_gen_cap = (uint32_t)sys_info.value;
+		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+			data->pcie_spc_cap = 20;
+		sys_info.size = sizeof(struct cgs_system_info);
+		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+		result = cgs_query_system_info(hwmgr->device, &sys_info);
+		if (result)
+			data->pcie_lane_cap = 0x2f0000;
+		else
+			data->pcie_lane_cap = (uint32_t)sys_info.value;
+
+		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+		/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+		hwmgr->platform_descriptor.clockStep.engineClock = 500;
+		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+	} else {
+		/* Ignore return value in here, we are cleaning up a mess. */
+		polaris10_hwmgr_backend_fini(hwmgr);
+	}
+
+	return 0;
+}
+
+static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t level, tmp;
+
+	if (!data->pcie_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
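+			/* Find the index of the highest set bit, i.e. the highest enabled DPM level. */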
+			level = 0;
+			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (level)
+				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_PCIeDPM_ForceLevel, level);
+		}
+	}
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (level)
+				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_SCLKDPM_SetEnabledMask,
+						(1 << level));
+		}
+	}
+
+	if (!data->mclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			level = 0;
+			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				level++;
+
+			if (level)
+				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+						PPSMC_MSG_MCLKDPM_SetEnabledMask,
+						(1 << level));
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	phm_apply_dal_min_voltage_request(hwmgr);
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+	}
+
+	if (!data->mclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+	}
+
+	return 0;
+}
+
+static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (!polaris10_is_dpm_running(hwmgr))
+		return -EINVAL;
+
+	if (!data->pcie_dpm_key_disabled) {
+		smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_PCIeDPM_UnForceLevel);
+	}
+
+	return polaris10_upload_dpm_level_enable_mask(hwmgr);
+}
+
+static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data =
+			(struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t level;
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
+							    (1 << level));
+		}
+	}
+
+	if (!data->mclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
+							    (1 << level));
+		}
+	}
+
+	if (!data->pcie_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+							    PPSMC_MSG_PCIeDPM_ForceLevel,
+							    (level));
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
+				enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = polaris10_force_dpm_highest(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = polaris10_force_dpm_lowest(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		ret = polaris10_unforce_dpm_levels(hwmgr);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
+
+	hwmgr->dpm_level = level;
+
+	return ret;
+}
+
+static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+	return sizeof(struct polaris10_power_state);
+}
+
+
+static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+				struct pp_power_state *request_ps,
+			const struct pp_power_state *current_ps)
+{
+
+	struct polaris10_power_state *polaris10_ps =
+				cast_phw_polaris10_power_state(&request_ps->hardware);
+	uint32_t sclk;
+	uint32_t mclk;
+	struct PP_Clocks minimum_clocks = {0};
+	bool disable_mclk_switching;
+	bool disable_mclk_switching_for_frame_lock;
+	struct cgs_display_info info = {0};
+	const struct phm_clock_and_voltage_limits *max_limits;
+	uint32_t i;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int32_t count;
+	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+	data->battery_state = (PP_StateUILabel_Battery ==
+			request_ps->classification.ui_label);
+
+	PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
+				 "VI should always have 2 performance levels",
+				);
+
+	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+	/* Cap clock DPM tables at DC MAX if it is in DC. */
+	if (PP_PowerSource_DC == hwmgr->power_source) {
+		for (i = 0; i < polaris10_ps->performance_level_count; i++) {
+			if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
+				polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
+			if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
+				polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
+		}
+	}
+
+	polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
+	polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
+
+	/* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState)) {
+		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
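+		/* The stable P-state targets 75% of the maximum AC sclk, snapped
+		 * down to the nearest entry in the sclk/vdd dependency table.
+		 */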
+		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
+		for (count = table_info->vdd_dep_on_sclk->count - 1;
+				count >= 0; count--) {
+			if (stable_pstate_sclk >=
+					table_info->vdd_dep_on_sclk->entries[count].clk) {
+				stable_pstate_sclk =
+						table_info->vdd_dep_on_sclk->entries[count].clk;
+				break;
+			}
+		}
+
+		if (count < 0)
+			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+
+		stable_pstate_mclk = max_limits->mclk;
+
+		minimum_clocks.engineClock = stable_pstate_sclk;
+		minimum_clocks.memoryClock = stable_pstate_mclk;
+	}
+
+	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+	polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
+				hwmgr->platform_descriptor.overdriveLimit.engineClock),
+				"Overdrive sclk exceeds limit",
+				hwmgr->gfx_arbiter.sclk_over_drive =
+						hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+			polaris10_ps->performance_levels[1].engine_clock =
+					hwmgr->gfx_arbiter.sclk_over_drive;
+	}
+
+	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+				"Overdrive mclk exceeds limit",
+				hwmgr->gfx_arbiter.mclk_over_drive =
+						hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+			polaris10_ps->performance_levels[1].memory_clock =
+					hwmgr->gfx_arbiter.mclk_over_drive;
+	}
+
+	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+				    hwmgr->platform_descriptor.platformCaps,
+				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+	disable_mclk_switching = (1 < info.display_count) ||
+				    disable_mclk_switching_for_frame_lock;
+
+	sclk = polaris10_ps->performance_levels[0].engine_clock;
+	mclk = polaris10_ps->performance_levels[0].memory_clock;
+
+	if (disable_mclk_switching)
+		mclk = polaris10_ps->performance_levels
+		[polaris10_ps->performance_level_count - 1].memory_clock;
+
+	if (sclk < minimum_clocks.engineClock)
+		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
+				max_limits->sclk : minimum_clocks.engineClock;
+
+	if (mclk < minimum_clocks.memoryClock)
+		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
+				max_limits->mclk : minimum_clocks.memoryClock;
+
+	polaris10_ps->performance_levels[0].engine_clock = sclk;
+	polaris10_ps->performance_levels[0].memory_clock = mclk;
+
+	polaris10_ps->performance_levels[1].engine_clock =
+		(polaris10_ps->performance_levels[1].engine_clock >=
+				polaris10_ps->performance_levels[0].engine_clock) ?
+						polaris10_ps->performance_levels[1].engine_clock :
+						polaris10_ps->performance_levels[0].engine_clock;
+
+	if (disable_mclk_switching) {
+		if (mclk < polaris10_ps->performance_levels[1].memory_clock)
+			mclk = polaris10_ps->performance_levels[1].memory_clock;
+
+		polaris10_ps->performance_levels[0].memory_clock = mclk;
+		polaris10_ps->performance_levels[1].memory_clock = mclk;
+	} else {
+		if (polaris10_ps->performance_levels[1].memory_clock <
+				polaris10_ps->performance_levels[0].memory_clock)
+			polaris10_ps->performance_levels[1].memory_clock =
+					polaris10_ps->performance_levels[0].memory_clock;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState)) {
+		for (i = 0; i < polaris10_ps->performance_level_count; i++) {
+			polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+			polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+			polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+			polaris10_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+		}
+	}
+	return 0;
+}
+
+
+static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct polaris10_power_state  *polaris10_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+	if (low)
+		return polaris10_ps->performance_levels[0].memory_clock;
+	else
+		return polaris10_ps->performance_levels
+				[polaris10_ps->performance_level_count-1].memory_clock;
+}
+
+static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+	struct pp_power_state  *ps;
+	struct polaris10_power_state  *polaris10_ps;
+
+	if (hwmgr == NULL)
+		return -EINVAL;
+
+	ps = hwmgr->request_ps;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+	if (low)
+		return polaris10_ps->performance_levels[0].engine_clock;
+	else
+		return polaris10_ps->performance_levels
+				[polaris10_ps->performance_level_count-1].engine_clock;
+}
+
+static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+					struct pp_hw_power_state *hw_ps)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
+	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+	uint16_t size;
+	uint8_t frev, crev;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+	/* First retrieve the Boot clocks and VDDC from the firmware info table.
+	 * We assume here that fw_info is unchanged if this call fails.
+	 */
+	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+			hwmgr->device, index,
+			&size, &frev, &crev);
+	if (!fw_info)
+		/* During a test, there is no firmware info table. */
+		return 0;
+
+	/* Patch the state. */
+	data->vbios_boot_state.sclk_bootup_value =
+			le32_to_cpu(fw_info->ulDefaultEngineClock);
+	data->vbios_boot_state.mclk_bootup_value =
+			le32_to_cpu(fw_info->ulDefaultMemoryClock);
+	data->vbios_boot_state.mvdd_bootup_value =
+			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+	data->vbios_boot_state.vddc_bootup_value =
+			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+	data->vbios_boot_state.vddci_bootup_value =
+			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+	data->vbios_boot_state.pcie_gen_bootup_value =
+			phm_get_current_pcie_speed(hwmgr);
+
+	data->vbios_boot_state.pcie_lane_bootup_value =
+			(uint16_t)phm_get_current_pcie_lane_number(hwmgr);
+
+	/* set boot power state */
+	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+	return 0;
+}
+
+static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
+		void *state, struct pp_power_state *power_state,
+		void *pp_table, uint32_t classification_flag)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_power_state  *polaris10_power_state =
+			(struct polaris10_power_state *)(&(power_state->hardware));
+	struct polaris10_performance_level *performance_level;
+	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
+	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
+			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
+	ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
+			(ATOM_Tonga_SCLK_Dependency_Table *)
+			(((unsigned long)powerplay_table) +
+				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+			(ATOM_Tonga_MCLK_Dependency_Table *)
+			(((unsigned long)powerplay_table) +
+				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+
+	/* The following fields are not initialized here: id, orderedList, allStatesList. */
+	power_state->classification.ui_label =
+			(le16_to_cpu(state_entry->usClassification) &
+			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+	power_state->classification.flags = classification_flag;
+	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
+
+	power_state->classification.temporary_state = false;
+	power_state->classification.to_be_deleted = false;
+
+	power_state->validation.disallowOnDC =
+			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+					ATOM_Tonga_DISALLOW_ON_DC));
+
+	power_state->pcie.lanes = 0;
+
+	power_state->display.disableFrameModulation = false;
+	power_state->display.limitRefreshrate = false;
+	power_state->display.enableVariBright =
+			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+					ATOM_Tonga_ENABLE_VARIBRIGHT));
+
+	power_state->validation.supportedPowerLevels = 0;
+	power_state->uvd_clocks.VCLK = 0;
+	power_state->uvd_clocks.DCLK = 0;
+	power_state->temperatures.min = 0;
+	power_state->temperatures.max = 0;
+
+	performance_level = &(polaris10_power_state->performance_levels
+			[polaris10_power_state->performance_level_count++]);
+
+	PP_ASSERT_WITH_CODE(
+			(polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
+			"Performance levels exceed SMC limit!",
+			return -1);
+
+	PP_ASSERT_WITH_CODE(
+			(polaris10_power_state->performance_level_count <=
+					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+			"Performance levels exceed Driver limit!",
+			return -1);
+
+	/* Performance levels are arranged from low to high. */
+	performance_level->memory_clock = mclk_dep_table->entries
+			[state_entry->ucMemoryClockIndexLow].ulMclk;
+	performance_level->engine_clock = sclk_dep_table->entries
+			[state_entry->ucEngineClockIndexLow].ulSclk;
+	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+			state_entry->ucPCIEGenLow);
+	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+			state_entry->ucPCIELaneLow);
+
+	performance_level = &(polaris10_power_state->performance_levels
+			[polaris10_power_state->performance_level_count++]);
+	performance_level->memory_clock = mclk_dep_table->entries
+			[state_entry->ucMemoryClockIndexHigh].ulMclk;
+	performance_level->engine_clock = sclk_dep_table->entries
+			[state_entry->ucEngineClockIndexHigh].ulSclk;
+	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+			state_entry->ucPCIEGenHigh);
+	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+			state_entry->ucPCIELaneHigh);
+
+	return 0;
+}
+
+static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+		unsigned long entry_index, struct pp_power_state *state)
+{
+	int result;
+	struct polaris10_power_state *ps;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+			table_info->vdd_dep_on_mclk;
+
+	state->hardware.magic = PHM_VIslands_Magic;
+
+	ps = (struct polaris10_power_state *)(&state->hardware);
+
+	result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
+			polaris10_get_pp_table_entry_callback_func);
+
+	/* This is the earliest point at which we have both the dependency tables
+	 * and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
+	 * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK level,
+	 * check that it matches the VBIOS boot state.
+	 */
+	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+		if (dep_mclk_table->entries[0].clk !=
+				data->vbios_boot_state.mclk_bootup_value)
+			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level");
+		if (dep_mclk_table->entries[0].vddci !=
+				data->vbios_boot_state.vddci_bootup_value)
+			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level");
+	}
+
+	/* set DC compatible flag if this state supports DC */
+	if (!state->validation.disallowOnDC)
+		ps->dc_compatible = true;
+
+	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+	if (!result) {
+		uint32_t i;
+
+		switch (state->classification.ui_label) {
+		case PP_StateUILabel_Performance:
+			data->use_pcie_performance_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_performance.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_performance.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_performance.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_performance.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_performance.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_performance.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		case PP_StateUILabel_Battery:
+			data->use_pcie_power_saving_levels = true;
+
+			for (i = 0; i < ps->performance_level_count; i++) {
+				if (data->pcie_gen_power_saving.max <
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.max =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_gen_power_saving.min >
+						ps->performance_levels[i].pcie_gen)
+					data->pcie_gen_power_saving.min =
+							ps->performance_levels[i].pcie_gen;
+
+				if (data->pcie_lane_power_saving.max <
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.max =
+							ps->performance_levels[i].pcie_lane;
+
+				if (data->pcie_lane_power_saving.min >
+						ps->performance_levels[i].pcie_lane)
+					data->pcie_lane_power_saving.min =
+							ps->performance_levels[i].pcie_lane;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static void
+polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
+{
+	uint32_t sclk, mclk, activity_percent;
+	uint32_t offset;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+
+	sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+
+	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n",
+			mclk / 100, sclk / 100);
+
+	offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
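+	/* AverageGraphicsActivity has 8 fractional bits; add 0x80 to round
+	 * before shifting down to a whole percentage.
+	 */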
+	activity_percent += 0x80;
+	activity_percent >>= 8;
+
+	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
+}
+
+static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	const struct polaris10_power_state *polaris10_ps =
+			cast_const_phw_polaris10_power_state(states->pnew_state);
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	uint32_t sclk = polaris10_ps->performance_levels
+			[polaris10_ps->performance_level_count - 1].engine_clock;
+	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	uint32_t mclk = polaris10_ps->performance_levels
+			[polaris10_ps->performance_level_count - 1].memory_clock;
+	struct PP_Clocks min_clocks = {0};
+	uint32_t i;
+	struct cgs_display_info info = {0};
+
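+	/* Work out which DPM tables must be rebuilt and re-uploaded for the new state. */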
+	data->need_update_smu7_dpm_table = 0;
+
+	for (i = 0; i < sclk_table->count; i++) {
+		if (sclk == sclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= sclk_table->count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	else {
+		/* TODO: Check SCLK in DAL's minimum clocks
+		 * in case DeepSleep divider update is required.
+		 */
+		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
+			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
+				data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
+			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+	}
+
+	for (i = 0; i < mclk_table->count; i++) {
+		if (mclk == mclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= mclk_table->count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+	return 0;
+}
+
+static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
+		const struct polaris10_power_state *polaris10_ps)
+{
+	uint32_t i;
+	uint32_t sclk, max_sclk = 0;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+
+	for (i = 0; i < polaris10_ps->performance_level_count; i++) {
+		sclk = polaris10_ps->performance_levels[i].engine_clock;
+		if (max_sclk < sclk)
+			max_sclk = sclk;
+	}
+
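+	/* Each SCLK level is paired with the PCIe speed level of the same index;
+	 * clamp to the last PCIe entry if the SCLK table is longer.
+	 */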
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
+			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
+					dpm_table->pcie_speed_table.dpm_levels
+					[dpm_table->pcie_speed_table.count - 1].value :
+					dpm_table->pcie_speed_table.dpm_levels[i].value);
+	}
+
+	return 0;
+}
+
+static int polaris10_request_link_speed_change_before_state_change(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_power_state *polaris10_nps =
+			cast_const_phw_polaris10_power_state(states->pnew_state);
+	const struct polaris10_power_state *polaris10_cps =
+			cast_const_phw_polaris10_power_state(states->pcurrent_state);
+
+	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
+	uint16_t current_link_speed;
+
+	if (data->force_pcie_gen == PP_PCIEGenInvalid)
+		current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
+	else
+		current_link_speed = data->force_pcie_gen;
+
+	data->force_pcie_gen = PP_PCIEGenInvalid;
+	data->pspp_notify_required = false;
+
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+		case PP_PCIEGen3:
+			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+				break;
+			data->force_pcie_gen = PP_PCIEGen2;
+			if (current_link_speed == PP_PCIEGen2)
+				break;
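+			/* fall through - Gen3 request failed, try Gen2 */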
+		case PP_PCIEGen2:
+			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+				break;
+		default:
+			data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			data->pspp_notify_required = true;
+	}
+
+	return 0;
+}
+
+static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((0 == data->sclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+		PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+				"Trying to freeze SCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SCLKDPM_FreezeLevel),
+				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+				return -1);
+	}
+
+	if ((0 == data->mclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		 DPMTABLE_OD_UPDATE_MCLK)) {
+		PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+				"Trying to freeze MCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MCLKDPM_FreezeLevel),
+				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+				return -1);
+	}
+
+	return 0;
+}
+
+static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	int result = 0;
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	const struct polaris10_power_state *polaris10_ps =
+			cast_const_phw_polaris10_power_state(states->pnew_state);
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t sclk = polaris10_ps->performance_levels
+			[polaris10_ps->performance_level_count - 1].engine_clock;
+	uint32_t mclk = polaris10_ps->performance_levels
+			[polaris10_ps->performance_level_count - 1].memory_clock;
+	struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+
+	struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+	uint32_t dpm_count, clock_percent;
+	uint32_t i;
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+		dpm_table->sclk_table.dpm_levels
+		[dpm_table->sclk_table.count - 1].value = sclk;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+		/* Need to do calculation based on the golden DPM table
+		 * as the Heatmap GPU Clock axis is also based on the default values
+		 */
+			PP_ASSERT_WITH_CODE(
+				(golden_dpm_table->sclk_table.dpm_levels
+						[golden_dpm_table->sclk_table.count - 1].value != 0),
+				"Divide by 0!",
+				return -1);
+			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
+
+			for (i = dpm_count; i > 1; i--) {
+				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
+					clock_percent =
+					      ((sclk
+						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
+						) * 100)
+						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+					dpm_table->sclk_table.dpm_levels[i].value =
+							golden_dpm_table->sclk_table.dpm_levels[i].value +
+							(golden_dpm_table->sclk_table.dpm_levels[i].value *
+								clock_percent)/100;
+
+				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
+					clock_percent =
+						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
+						- sclk) * 100)
+						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+					dpm_table->sclk_table.dpm_levels[i].value =
+							golden_dpm_table->sclk_table.dpm_levels[i].value -
+							(golden_dpm_table->sclk_table.dpm_levels[i].value *
+									clock_percent) / 100;
+				} else
+					dpm_table->sclk_table.dpm_levels[i].value =
+							golden_dpm_table->sclk_table.dpm_levels[i].value;
+			}
+		}
+	}
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+		dpm_table->mclk_table.dpm_levels
+			[dpm_table->mclk_table.count - 1].value = mclk;
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+			PP_ASSERT_WITH_CODE(
+					(golden_dpm_table->mclk_table.dpm_levels
+						[golden_dpm_table->mclk_table.count-1].value != 0),
+					"Divide by 0!",
+					return -1);
+			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
+			for (i = dpm_count; i > 1; i--) {
+				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
+					clock_percent = ((mclk -
+					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
+					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+					dpm_table->mclk_table.dpm_levels[i].value =
+							golden_dpm_table->mclk_table.dpm_levels[i].value +
+							(golden_dpm_table->mclk_table.dpm_levels[i].value *
+							clock_percent) / 100;
+
+				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
+					clock_percent = (
+					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
+					* 100)
+					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+					dpm_table->mclk_table.dpm_levels[i].value =
+							golden_dpm_table->mclk_table.dpm_levels[i].value -
+							(golden_dpm_table->mclk_table.dpm_levels[i].value *
+									clock_percent) / 100;
+				} else
+					dpm_table->mclk_table.dpm_levels[i].value =
+							golden_dpm_table->mclk_table.dpm_levels[i].value;
+			}
+		}
+	}
+
+	if (data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+		result = polaris10_populate_all_graphic_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+				return result);
+	}
+
+	if (data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+		/*populate MCLK dpm table to SMU7 */
+		result = polaris10_populate_all_memory_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+				return result);
+	}
+
+	return result;
+}
+
+static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+			  struct polaris10_single_dpm_table *dpm_table,
+			uint32_t low_limit, uint32_t high_limit)
+{
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->count; i++) {
+		if ((dpm_table->dpm_levels[i].value < low_limit)
+		|| (dpm_table->dpm_levels[i].value > high_limit))
+			dpm_table->dpm_levels[i].enabled = false;
+		else
+			dpm_table->dpm_levels[i].enabled = true;
+	}
+
+	return 0;
+}
+
+static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
+		const struct polaris10_power_state *polaris10_ps)
+{
+	int result = 0;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t high_limit_count;
+
+	PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
+			"power state did not have any performance level",
+			return -1);
+
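+	/* With a single performance level, the low and high trim limits both use entry 0. */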
+	high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
+
+	polaris10_trim_single_dpm_states(hwmgr,
+			&(data->dpm_table.sclk_table),
+			polaris10_ps->performance_levels[0].engine_clock,
+			polaris10_ps->performance_levels[high_limit_count].engine_clock);
+
+	polaris10_trim_single_dpm_states(hwmgr,
+			&(data->dpm_table.mclk_table),
+			polaris10_ps->performance_levels[0].memory_clock,
+			polaris10_ps->performance_levels[high_limit_count].memory_clock);
+
+	return result;
+}
+
+static int polaris10_generate_dpm_level_enable_mask(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	int result;
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_power_state *polaris10_ps =
+			cast_const_phw_polaris10_power_state(states->pnew_state);
+
+	result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
+	if (result)
+		return result;
+
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+	return 0;
+}
+
+int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+			PPSMC_MSG_UVDDPM_Enable :
+			PPSMC_MSG_UVDDPM_Disable);
+}
+
+int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+			PPSMC_MSG_VCEDPM_Enable :
+			PPSMC_MSG_VCEDPM_Disable);
+}
+
+int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+			PPSMC_MSG_SAMUDPM_Enable :
+			PPSMC_MSG_SAMUDPM_Disable);
+}
+
+int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (!bgate) {
+		data->smc_state_table.UvdBootLevel = 0;
+		if (table_info->mm_dep_table->count > 0)
+			data->smc_state_table.UvdBootLevel =
+					(uint8_t) (table_info->mm_dep_table->count - 1);
+		mm_boot_level_offset = data->dpm_table_start +
+				offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
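+		/* Round the byte offset down to a dword-aligned SMC address
+		 * for the read-modify-write below.
+		 */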
+		mm_boot_level_offset /= 4;
+		mm_boot_level_offset *= 4;
+		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, mm_boot_level_offset);
+		mm_boot_level_value &= 0x00FFFFFF;
+		mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
+		cgs_write_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+		if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_UVDDPM) ||
+			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_StablePState))
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_UVDDPM_SetEnabledMask,
+					(uint32_t)(1 << data->smc_state_table.UvdBootLevel));
+	}
+
+	return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_power_state *polaris10_nps =
+			cast_const_phw_polaris10_power_state(states->pnew_state);
+	const struct polaris10_power_state *polaris10_cps =
+			cast_const_phw_polaris10_power_state(states->pcurrent_state);
+
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (polaris10_nps->vce_clks.evclk > 0 &&
+	(polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) {
+
+		data->smc_state_table.VceBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+
+		mm_boot_level_offset = data->dpm_table_start +
+				offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+		mm_boot_level_offset /= 4;
+		mm_boot_level_offset *= 4;
+		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, mm_boot_level_offset);
+		mm_boot_level_value &= 0xFF00FFFF;
+		mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
+		cgs_write_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_VCEDPM_SetEnabledMask,
+					(uint32_t)1 << data->smc_state_table.VceBootLevel);
+
+		polaris10_enable_disable_vce_dpm(hwmgr, true);
+	} else if (polaris10_nps->vce_clks.evclk == 0 &&
+			polaris10_cps != NULL &&
+			polaris10_cps->vce_clks.evclk > 0)
+		polaris10_enable_disable_vce_dpm(hwmgr, false);
+
+	return 0;
+}
+
+int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (!bgate) {
+		data->smc_state_table.SamuBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+		mm_boot_level_offset = data->dpm_table_start +
+				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+		mm_boot_level_offset /= 4;
+		mm_boot_level_offset *= 4;
+		mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, mm_boot_level_offset);
+		mm_boot_level_value &= 0xFFFFFF00;
+		mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
+		cgs_write_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_StablePState))
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SAMUDPM_SetEnabledMask,
+					(uint32_t)(1 << data->smc_state_table.SamuBootLevel));
+	}
+
+	return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
+}
+
+static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+		&& (hwmgr->gfx_arbiter.sclk_threshold !=
+				data->low_sclk_interrupt_threshold)) {
+		data->low_sclk_interrupt_threshold =
+				hwmgr->gfx_arbiter.sclk_threshold;
+		low_sclk_interrupt_threshold =
+				data->low_sclk_interrupt_threshold;
+
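+		/* Convert to the SMC's byte order before writing it to SMC SRAM. */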
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = polaris10_copy_bytes_to_smc(
+				hwmgr->smumgr,
+				data->dpm_table_start +
+				offsetof(SMU74_Discrete_DpmTable,
+					LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				data->sram_end);
+	}
+
+	return result;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+		return polaris10_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (0 == data->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((0 == data->sclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+
+		PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+				"Trying to Unfreeze SCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+			return -1);
+	}
+
+	if ((0 == data->mclk_dpm_key_disabled) &&
+		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+		PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+				"Trying to Unfreeze MCLK DPM when DPM is disabled",
+				);
+		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+		    return -1);
+	}
+
+	data->need_update_smu7_dpm_table = 0;
+
+	return 0;
+}
+
+static int polaris10_notify_link_speed_change_after_state_change(
+		struct pp_hwmgr *hwmgr, const void *input)
+{
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_power_state *polaris10_ps =
+			cast_const_phw_polaris10_power_state(states->pnew_state);
+	uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
+	uint8_t  request;
+
+	if (data->pspp_notify_required) {
+		if (target_link_speed == PP_PCIEGen3)
+			request = PCIE_PERF_REQ_GEN3;
+		else if (target_link_speed == PP_PCIEGen2)
+			request = PCIE_PERF_REQ_GEN2;
+		else
+			request = PCIE_PERF_REQ_GEN1;
+
+		if (request == PCIE_PERF_REQ_GEN1 &&
+				phm_get_current_pcie_speed(hwmgr) > 0)
+			return 0;
+
+		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+			if (PP_PCIEGen2 == target_link_speed)
+				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
+			else
+				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+	int tmp_result, result = 0;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to find DPM states clocks in DPM table!",
+			result = tmp_result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PCIEPerformanceRequest)) {
+		tmp_result =
+			polaris10_request_link_speed_change_before_state_change(hwmgr, input);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to request link speed change before state change!",
+				result = tmp_result);
+	}
+
+	tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+	tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to populate and upload SCLK MCLK DPM levels!",
+			result = tmp_result);
+
+	tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to generate DPM level enabled mask!",
+			result = tmp_result);
+
+	tmp_result = polaris10_update_vce_dpm(hwmgr, input);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to update VCE DPM!",
+			result = tmp_result);
+
+	tmp_result = polaris10_update_sclk_threshold(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to update SCLK threshold!",
+			result = tmp_result);
+
+	tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to program memory timing parameters!",
+			result = tmp_result);
+
+	tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to unfreeze SCLK MCLK DPM!",
+			result = tmp_result);
+
+	tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to upload DPM level enabled mask!",
+			result = tmp_result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PCIEPerformanceRequest)) {
+		tmp_result =
+			polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to notify link speed change after state change!",
+				result = tmp_result);
+	}
+	data->apply_optimized_settings = false;
+	return result;
+}
+
+static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+	hwmgr->thermal_controller.
+	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+	if (phm_is_hw_access_blocked(hwmgr))
+		return 0;
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+}
+
+int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
+}
+
+int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+	uint32_t num_active_displays = 0;
+	struct cgs_display_info info = {0};
+	info.mode_info = NULL;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	num_active_displays = info.display_count;
+
+	if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
+		polaris10_notify_smc_display_change(hwmgr, false);
+	else
+		polaris10_notify_smc_display_change(hwmgr, true);
+
+	return 0;
+}
+
+/**
+ * Programs the display gap
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t num_active_displays = 0;
+	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+	uint32_t display_gap2;
+	uint32_t pre_vbi_time_in_us;
+	uint32_t frame_time_in_us;
+	uint32_t ref_clock;
+	uint32_t refresh_rate = 0;
+	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info;
+
+	info.mode_info = &mode_info;
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	num_active_displays = info.display_count;
+
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+	ref_clock = mode_info.ref_clock;
+	refresh_rate = mode_info.refresh_rate;
+
+	if (0 == refresh_rate)
+		refresh_rate = 60;
+
+	frame_time_in_us = 1000000 / refresh_rate;
+
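+	/* Time from frame start until just before vblank, minus a 200 us margin,
+	 * expressed in ref_clock cycles (the /100 assumes ref_clock is in 10 kHz units).
+	 */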
+	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+	polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
+
+	return 0;
+}
+
+
+int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+	return polaris10_program_display_gap(hwmgr);
+}
+
+/**
+ * Set maximum target operating fan output RPM
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @param    us_max_fan_rpm  max operating fan RPM value.
+ * @return   The response that came from the SMC.
+ */
+static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+	hwmgr->thermal_controller.
+	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+	if (phm_is_hw_access_blocked(hwmgr))
+		return 0;
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+}
+
+int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+					const void *thermal_interrupt_info)
+{
+	return 0;
+}
+
+bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	bool is_update_required = false;
+	struct cgs_display_info info = {0, 0, NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+		if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
+			(min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
+				data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
+			is_update_required = true;
+*/
+	return is_update_required;
+}
+
+static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
+							   const struct polaris10_performance_level *pl2)
+{
+	return ((pl1->memory_clock == pl2->memory_clock) &&
+		  (pl1->engine_clock == pl2->engine_clock) &&
+		  (pl1->pcie_gen == pl2->pcie_gen) &&
+		  (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+	const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
+	const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
+	int i;
+
+	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
+		return -EINVAL;
+
+	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
+	if (psa->performance_level_count != psb->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < psa->performance_level_count; i++) {
+		if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+			/* If we have found even one performance level pair that is different the states are different. */
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+
+	return 0;
+}
+
+int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	uint32_t vbios_version;
+
+	/*  Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
+
+	phm_get_mc_microcode_version(hwmgr);
+	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
+	/* A value of zero means the full MC ucode has already been loaded by the VBIOS. */
+	if (vbios_version == 0) {
+		data->need_long_memory_training = false;
+		return 0;
+	}
+
+	data->need_long_memory_training = true;
+
+/*
+ *	PPMCME_FirmwareDescriptorEntry *pfd = NULL;
+	pfd = &tonga_mcmeFirmware;
+	if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
+		polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
+					pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
+					pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
+*/
+	return 0;
+}
+
+/**
+ * Read clock related registers.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
+						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
+						& CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
+						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
+						& CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+
+	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
+						CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
+						& CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
+
+	return 0;
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t temp;
+
+	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
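+	/* The memory-type field of MC_SEQ_MISC0 identifies GDDR5 parts. */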
+	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+			 MC_SEQ_MISC0_GDDR5_SHIFT));
+
+	return 0;
+}
+
+/**
+ * Enables ACPI (static) power management
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+	return 0;
+}
+
+/**
+ * Initialize PowerGating States for different engines
+ *
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @return   always 0
+ */
+static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	data->uvd_power_gated = false;
+	data->vce_power_gated = false;
+	data->samu_power_gated = false;
+
+	return 0;
+}
+
+static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	data->low_sclk_interrupt_threshold = 0;
+
+	return 0;
+}
+
+int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	polaris10_upload_mc_firmware(hwmgr);
+
+	tmp_result = polaris10_read_clock_registers(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to read clock registers!", result = tmp_result);
+
+	tmp_result = polaris10_get_memory_type(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to get memory type!", result = tmp_result);
+
+	tmp_result = polaris10_enable_acpi_power_management(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to enable ACPI power management!", result = tmp_result);
+
+	tmp_result = polaris10_init_power_gate_state(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to init power gate state!", result = tmp_result);
+
+	tmp_result = phm_get_mc_microcode_version(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to get MC microcode version!", result = tmp_result);
+
+	tmp_result = polaris10_init_sclk_threshold(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to init sclk threshold!", result = tmp_result);
+
+	return result;
+}
+
+static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (!data->soft_pp_table) {
+		data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+					      hwmgr->soft_pp_table_size,
+					      GFP_KERNEL);
+		if (!data->soft_pp_table)
+			return -ENOMEM;
+	}
+
+	*table = (char *)data->soft_pp_table;
+
+	return hwmgr->soft_pp_table_size;
+}
+
+static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (!data->soft_pp_table) {
+		data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+		if (!data->soft_pp_table)
+			return -ENOMEM;
+	}
+
+	memcpy(data->soft_pp_table, buf, size);
+
+	hwmgr->soft_pp_table = data->soft_pp_table;
+
+	/* TODO: re-init powerplay to implement modified pptable */
+
+	return 0;
+}
+
+static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	switch (type) {
+	case PP_SCLK:
+		if (!data->sclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+		break;
+	case PP_MCLK:
+		if (!data->mclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+		break;
+	case PP_PCIE:
+	{
+		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+		uint32_t level = 0;
+
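+		/* Force the highest enabled PCIe level: the index of the most
+		 * significant set bit in the masked enable mask.
+		 */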
+		while (tmp >>= 1)
+			level++;
+
+		if (!data->pcie_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_PCIeDPM_ForceLevel,
+					level);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	uint32_t speedCntl = 0;
+
+	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+			ixPCIE_LC_SPEED_CNTL);
+	return((uint16_t)PHM_GET_FIELD(speedCntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+	int i, now, size = 0;
+	uint32_t clock, pcie_speed;
+
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < sclk_table->count; i++) {
+			if (clock > sclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, sclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < mclk_table->count; i++) {
+			if (clock > mclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, mclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_PCIE:
+		pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
+		for (i = 0; i < pcie_table->count; i++) {
+			if (pcie_speed != pcie_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < pcie_table->count; i++)
+			size += sprintf(buf + size, "%d: %s %s\n", i,
+					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+					(i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
+static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl))
+			polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
+		polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
+	} else
+		/* restart auto-manage */
+		polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+
+	return 0;
+}
+
+static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr->fan_ctrl_is_in_default_mode)
+		return hwmgr->fan_ctrl_default_mode;
+	else
+		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
+	.backend_init = &polaris10_hwmgr_backend_init,
+	.backend_fini = &polaris10_hwmgr_backend_fini,
+	.asic_setup = &polaris10_setup_asic_task,
+	.dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
+	.apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
+	.force_dpm_level = &polaris10_force_dpm_level,
+	.power_state_set = polaris10_set_power_state_tasks,
+	.get_power_state_size = polaris10_get_power_state_size,
+	.get_mclk = polaris10_dpm_get_mclk,
+	.get_sclk = polaris10_dpm_get_sclk,
+	.patch_boot_state = polaris10_dpm_patch_boot_state,
+	.get_pp_table_entry = polaris10_get_pp_table_entry,
+	.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
+	.print_current_perforce_level = polaris10_print_current_perforce_level,
+	.powerdown_uvd = polaris10_phm_powerdown_uvd,
+	.powergate_uvd = polaris10_phm_powergate_uvd,
+	.powergate_vce = polaris10_phm_powergate_vce,
+	.disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
+	.update_clock_gatings = polaris10_phm_update_clock_gatings,
+	.notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
+	.display_config_changed = polaris10_display_configuration_changed_task,
+	.set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
+	.set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
+	.get_temperature = polaris10_thermal_get_temperature,
+	.stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
+	.get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
+	.get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
+	.set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
+	.reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
+	.get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
+	.set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
+	.uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
+	.register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
+	.check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
+	.check_states_equal = polaris10_check_states_equal,
+	.set_fan_control_mode = polaris10_set_fan_control_mode,
+	.get_fan_control_mode = polaris10_get_fan_control_mode,
+	.get_pp_table = polaris10_get_pp_table,
+	.set_pp_table = polaris10_set_pp_table,
+	.force_clock_level = polaris10_force_clock_level,
+	.print_clock_levels = polaris10_print_clock_levels,
+	.enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+};
+
+int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr  *data;
+
+	data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = data;
+	hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
+	hwmgr->pptable_func = &tonga_pptable_funcs;
+	pp_polaris10_thermal_initialize(hwmgr);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
new file mode 100644
index 0000000..beedf35
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef POLARIS10_HWMGR_H
+#define POLARIS10_HWMGR_H
+
+#include "hwmgr.h"
+#include "smu74.h"
+#include "smu74_discrete.h"
+#include "ppatomctrl.h"
+#include "polaris10_ppsmc.h"
+#include "polaris10_powertune.h"
+
+#define POLARIS10_MAX_HARDWARE_POWERLEVELS	2
+
+#define POLARIS10_VOLTAGE_CONTROL_NONE                   0x0
+#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO                0x1
+#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2               0x2
+#define POLARIS10_VOLTAGE_CONTROL_MERGED                 0x3
+
+#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
+#define DPMTABLE_UPDATE_SCLK        0x00000004
+#define DPMTABLE_UPDATE_MCLK        0x00000008
+
+struct polaris10_performance_level {
+	uint32_t  memory_clock;
+	uint32_t  engine_clock;
+	uint16_t  pcie_gen;
+	uint16_t  pcie_lane;
+};
+
+struct polaris10_uvd_clocks {
+	uint32_t  vclk;
+	uint32_t  dclk;
+};
+
+struct polaris10_vce_clocks {
+	uint32_t  evclk;
+	uint32_t  ecclk;
+};
+
+struct polaris10_power_state {
+	uint32_t                  magic;
+	struct polaris10_uvd_clocks    uvd_clks;
+	struct polaris10_vce_clocks    vce_clks;
+	uint32_t                  sam_clk;
+	uint16_t                  performance_level_count;
+	bool                      dc_compatible;
+	uint32_t                  sclk_threshold;
+	struct polaris10_performance_level  performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct polaris10_dpm_level {
+	bool	enabled;
+	uint32_t	value;
+	uint32_t	param1;
+};
+
+#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
+
+struct polaris10_single_dpm_table {
+	uint32_t		count;
+	struct polaris10_dpm_level	dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct polaris10_dpm_table {
+	struct polaris10_single_dpm_table  sclk_table;
+	struct polaris10_single_dpm_table  mclk_table;
+	struct polaris10_single_dpm_table  pcie_speed_table;
+	struct polaris10_single_dpm_table  vddc_table;
+	struct polaris10_single_dpm_table  vddci_table;
+	struct polaris10_single_dpm_table  mvdd_table;
+};
+
+struct polaris10_clock_registers {
+	uint32_t  vCG_SPLL_FUNC_CNTL;
+	uint32_t  vCG_SPLL_FUNC_CNTL_2;
+	uint32_t  vCG_SPLL_FUNC_CNTL_3;
+	uint32_t  vCG_SPLL_FUNC_CNTL_4;
+	uint32_t  vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t  vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t  vDLL_CNTL;
+	uint32_t  vMCLK_PWRMGT_CNTL;
+	uint32_t  vMPLL_AD_FUNC_CNTL;
+	uint32_t  vMPLL_DQ_FUNC_CNTL;
+	uint32_t  vMPLL_FUNC_CNTL;
+	uint32_t  vMPLL_FUNC_CNTL_1;
+	uint32_t  vMPLL_FUNC_CNTL_2;
+	uint32_t  vMPLL_SS1;
+	uint32_t  vMPLL_SS2;
+};
+
+#define DISABLE_MC_LOADMICROCODE   1
+#define DISABLE_MC_CFGPROGRAMMING  2
+
+struct polaris10_voltage_smio_registers {
+	uint32_t vS0_VID_LOWER_SMIO_CNTL;
+};
+
+#define POLARIS10_MAX_LEAKAGE_COUNT  8
+
+struct polaris10_leakage_voltage {
+	uint16_t  count;
+	uint16_t  leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
+	uint16_t  actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
+};
+
+struct polaris10_vbios_boot_state {
+	uint16_t    mvdd_bootup_value;
+	uint16_t    vddc_bootup_value;
+	uint16_t    vddci_bootup_value;
+	uint32_t    sclk_bootup_value;
+	uint32_t    mclk_bootup_value;
+	uint16_t    pcie_gen_bootup_value;
+	uint16_t    pcie_lane_bootup_value;
+};
+
+/* Ultra Low Voltage parameter structure */
+struct polaris10_ulv_parm {
+	bool                           ulv_supported;
+	uint32_t                       cg_ulv_parameter;
+	uint32_t                       ulv_volt_change_delay;
+	struct polaris10_performance_level  ulv_power_level;
+};
+
+struct polaris10_display_timing {
+	uint32_t  min_clock_in_sr;
+	uint32_t  num_existing_displays;
+};
+
+struct polaris10_dpmlevel_enable_mask {
+	uint32_t  uvd_dpm_enable_mask;
+	uint32_t  vce_dpm_enable_mask;
+	uint32_t  acp_dpm_enable_mask;
+	uint32_t  samu_dpm_enable_mask;
+	uint32_t  sclk_dpm_enable_mask;
+	uint32_t  mclk_dpm_enable_mask;
+	uint32_t  pcie_dpm_enable_mask;
+};
+
+struct polaris10_pcie_perf_range {
+	uint16_t  max;
+	uint16_t  min;
+};
+struct polaris10_range_table {
+	uint32_t trans_lower_frequency; /* in 10khz */
+	uint32_t trans_upper_frequency;
+};
+
+struct polaris10_hwmgr {
+	struct polaris10_dpm_table			dpm_table;
+	struct polaris10_dpm_table			golden_dpm_table;
+	SMU74_Discrete_DpmTable				smc_state_table;
+	struct SMU74_Discrete_Ulv            ulv_setting;
+
+	struct polaris10_range_table                range_table[NUM_SCLK_RANGE];
+	uint32_t						voting_rights_clients0;
+	uint32_t						voting_rights_clients1;
+	uint32_t						voting_rights_clients2;
+	uint32_t						voting_rights_clients3;
+	uint32_t						voting_rights_clients4;
+	uint32_t						voting_rights_clients5;
+	uint32_t						voting_rights_clients6;
+	uint32_t						voting_rights_clients7;
+	uint32_t						static_screen_threshold_unit;
+	uint32_t						static_screen_threshold;
+	uint32_t						voltage_control;
+	uint32_t						vddc_vddci_delta;
+
+	uint32_t						active_auto_throttle_sources;
+
+	struct polaris10_clock_registers            clock_registers;
+	struct polaris10_voltage_smio_registers      voltage_smio_registers;
+
+	bool                           is_memory_gddr5;
+	uint16_t                       acpi_vddc;
+	bool                           pspp_notify_required;
+	uint16_t                       force_pcie_gen;
+	uint16_t                       acpi_pcie_gen;
+	uint32_t                       pcie_gen_cap;
+	uint32_t                       pcie_lane_cap;
+	uint32_t                       pcie_spc_cap;
+	struct polaris10_leakage_voltage          vddc_leakage;
+	struct polaris10_leakage_voltage          Vddci_leakage;
+
+	uint32_t                             mvdd_control;
+	uint32_t                             vddc_mask_low;
+	uint32_t                             mvdd_mask_low;
+	uint16_t                            max_vddc_in_pptable;
+	uint16_t                            min_vddc_in_pptable;
+	uint16_t                            max_vddci_in_pptable;
+	uint16_t                            min_vddci_in_pptable;
+	uint32_t                             mclk_strobe_mode_threshold;
+	uint32_t                             mclk_stutter_mode_threshold;
+	uint32_t                             mclk_edc_enable_threshold;
+	uint32_t                             mclk_edcwr_enable_threshold;
+	bool                                is_uvd_enabled;
+	struct polaris10_vbios_boot_state        vbios_boot_state;
+
+	bool                           pcie_performance_request;
+	bool                           battery_state;
+	bool                           is_tlu_enabled;
+
+	/* ---- SMC SRAM Address of firmware header tables ---- */
+	uint32_t                             sram_end;
+	uint32_t                             dpm_table_start;
+	uint32_t                             soft_regs_start;
+	uint32_t                             mc_reg_table_start;
+	uint32_t                             fan_table_start;
+	uint32_t                             arb_table_start;
+
+	/* ---- Stuff originally coming from Evergreen ---- */
+	uint32_t                             vddci_control;
+	struct pp_atomctrl_voltage_table     vddc_voltage_table;
+	struct pp_atomctrl_voltage_table     vddci_voltage_table;
+	struct pp_atomctrl_voltage_table     mvdd_voltage_table;
+
+	uint32_t                             mgcg_cgtt_local2;
+	uint32_t                             mgcg_cgtt_local3;
+	uint32_t                             gpio_debug;
+	uint32_t                             mc_micro_code_feature;
+	uint32_t                             highest_mclk;
+	uint16_t                             acpi_vddci;
+	uint8_t                              mvdd_high_index;
+	uint8_t                              mvdd_low_index;
+	bool                                 dll_default_on;
+	bool                                 performance_request_registered;
+
+	/* ---- Low Power Features ---- */
+	struct polaris10_ulv_parm                 ulv;
+
+	/* ---- CAC Stuff ---- */
+	uint32_t                       cac_table_start;
+	bool                           cac_configuration_required;
+	bool                           driver_calculate_cac_leakage;
+	bool                           cac_enabled;
+
+	/* ---- DPM2 Parameters ---- */
+	uint32_t                       power_containment_features;
+	bool                           enable_dte_feature;
+	bool                           enable_tdc_limit_feature;
+	bool                           enable_pkg_pwr_tracking_feature;
+	bool                           disable_uvd_power_tune_feature;
+	const struct polaris10_pt_defaults       *power_tune_defaults;
+	struct SMU74_Discrete_PmFuses  power_tune_table;
+	uint32_t                       dte_tj_offset;
+	uint32_t                       fast_watermark_threshold;
+
+	/* ---- Phase Shedding ---- */
+	bool                           vddc_phase_shed_control;
+
+	/* ---- DI/DT ---- */
+	struct polaris10_display_timing        display_timing;
+	uint32_t                      bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+
+	/* ---- Thermal Temperature Setting ---- */
+	struct polaris10_dpmlevel_enable_mask     dpm_level_enable_mask;
+	uint32_t                                  need_update_smu7_dpm_table;
+	uint32_t                                  sclk_dpm_key_disabled;
+	uint32_t                                  mclk_dpm_key_disabled;
+	uint32_t                                  pcie_dpm_key_disabled;
+	uint32_t                                  min_engine_clocks;
+	struct polaris10_pcie_perf_range          pcie_gen_performance;
+	struct polaris10_pcie_perf_range          pcie_lane_performance;
+	struct polaris10_pcie_perf_range          pcie_gen_power_saving;
+	struct polaris10_pcie_perf_range          pcie_lane_power_saving;
+	bool                                      use_pcie_performance_levels;
+	bool                                      use_pcie_power_saving_levels;
+	uint32_t                                  activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+	uint32_t                                  mclk_activity_target;
+	uint32_t                                  mclk_dpm0_activity_target;
+	uint32_t                                  low_sclk_interrupt_threshold;
+	uint32_t                                  last_mclk_dpm_enable_mask;
+	bool                                      uvd_enabled;
+
+	/* ---- Power Gating States ---- */
+	bool                           uvd_power_gated;
+	bool                           vce_power_gated;
+	bool                           samu_power_gated;
+	bool                           need_long_memory_training;
+
+	/* Application power optimization parameters */
+	bool                               update_up_hyst;
+	bool                               update_down_hyst;
+	uint32_t                           down_hyst;
+	uint32_t                           up_hyst;
+	uint32_t disable_dpm_mask;
+	bool apply_optimized_settings;
+
+	/* soft pptable for re-uploading into smu */
+	void *soft_pp_table;
+};
+
+/* To convert to Q8.8 format for firmware */
+#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT             256
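+/* Q8.8 has 8 fractional bits, so 1.0 is encoded as 256;
+ * e.g. 1.5 becomes 1.5 * 256 = 384 (0x180).
+ */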
+
+enum Polaris10_I2CLineID {
+	Polaris10_I2CLineID_DDC1 = 0x90,
+	Polaris10_I2CLineID_DDC2 = 0x91,
+	Polaris10_I2CLineID_DDC3 = 0x92,
+	Polaris10_I2CLineID_DDC4 = 0x93,
+	Polaris10_I2CLineID_DDC5 = 0x94,
+	Polaris10_I2CLineID_DDC6 = 0x95,
+	Polaris10_I2CLineID_SCLSDA = 0x96,
+	Polaris10_I2CLineID_DDCVGA = 0x97
+};
+
+#define POLARIS10_I2C_DDC1DATA          0
+#define POLARIS10_I2C_DDC1CLK           1
+#define POLARIS10_I2C_DDC2DATA          2
+#define POLARIS10_I2C_DDC2CLK           3
+#define POLARIS10_I2C_DDC3DATA          4
+#define POLARIS10_I2C_DDC3CLK           5
+#define POLARIS10_I2C_SDA               40
+#define POLARIS10_I2C_SCL               41
+#define POLARIS10_I2C_DDC4DATA          65
+#define POLARIS10_I2C_DDC4CLK           66
+#define POLARIS10_I2C_DDC5DATA          0x48
+#define POLARIS10_I2C_DDC5CLK           0x49
+#define POLARIS10_I2C_DDC6DATA          0x4a
+#define POLARIS10_I2C_DDC6CLK           0x4b
+#define POLARIS10_I2C_DDCVGADATA        0x4c
+#define POLARIS10_I2C_DDCVGACLK         0x4d
+
+#define POLARIS10_UNUSED_GPIO_PIN       0x7F
+
+int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
new file mode 100644
index 0000000..ae96f14
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "hwmgr.h"
+#include "smumgr.h"
+#include "polaris10_hwmgr.h"
+#include "polaris10_powertune.h"
+#include "polaris10_smumgr.h"
+#include "smu74_discrete.h"
+#include "pp_debug.h"
+
+#define VOLTAGE_SCALE  4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct  phm_ppt_v1_information *table_info =
+			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		polaris10_hwmgr->power_tune_defaults =
+				&polaris10_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+	uint32_t tmp;
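+	/* The /100 treats raw_setting as a percentage; the result is a
+	 * 12-bit fixed-point fraction (4096 == 2^12) handed to the SMC as
+	 * FanGainEdge/FanGainHotspot.
+	 */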
+	tmp = raw_setting * 4096 / 100;
+	return (uint16_t)tmp;
+}
+
+int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+	SMU74_Discrete_DpmTable  *dpm_table = &(data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	struct pp_advance_fan_control_parameters *fan_table =
+			&hwmgr->thermal_controller.advanceFanControlParameters;
+	int i, j, k;
+	const uint16_t *pdef1;
+	const uint16_t *pdef2;
+
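+	/* x128 gives the TDP a 7-bit fractional part, the format the SMC
+	 * expects in the DPM table (messages use an 8-bit fraction instead,
+	 * see polaris10_power_control_set_level()).
+	 */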
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+	dpm_table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+				"Target Operating Temp is out of Range!",
+				);
+
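+	/* Temperature limits go to the SMC as 8.8 fixed point (x256). */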
+	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTargetOperatingTemp * 256);
+	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitHotspot * 256);
+	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainEdge));
+	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+	pdef1 = defaults->BAPMTI_R;
+	pdef2 = defaults->BAPMTI_RC;
+
+	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU74_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+				pdef1++;
+				pdef2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+
+	data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+	data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+	data->power_tune_table.SviLoadLineTrimVddC = 3;
+	data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+	data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+	data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+	return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+	uint32_t temp;
+
+	if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
+			fuse_table_offset +
+			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, data->sram_end)) {
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+				return -EINVAL);
+	} else {
+		data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+		data->power_tune_table.LPMLTemperatureMin =
+				(uint8_t)((temp >> 16) & 0xff);
+		data->power_tune_table.LPMLTemperatureMax =
+				(uint8_t)((temp >> 8) & 0xff);
+		data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+	}
+	return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+			hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+				hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd, lo_sidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
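+	/* Scale the CAC leakage values (stored x100) to an 8.8 fixed-point
+	 * fraction: integer divide by 100, then multiply by 256.
+	 */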
+	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+	data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+	return 0;
+}
+
+int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU74_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl, "
+					"LPMLTemperature Min and Max Failed!",
+					return -EINVAL);
+
+		if (0 != polaris10_populate_temperature_scaler(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate Fuzzy Fan Control parameters Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Min and Max Vid Failed!",
+					return -EINVAL);
+
+		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+					"Sidd Failed!", return -EINVAL);
+
+		if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+				(uint8_t *)&data->power_tune_table,
+				(sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_CAC)) {
+		int smc_result;
+		smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+				(uint16_t)(PPSMC_MSG_EnableCac));
+		PP_ASSERT_WITH_CODE((0 == smc_result),
+				"Failed to enable CAC in SMC.", result = -1);
+
+		data->cac_enabled = (0 == smc_result) ? true : false;
+	}
+	return result;
+}
+
+int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	if (data->power_containment_features &
+			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+		return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_PkgPwrSetLimit, n);
+	return 0;
+}
+
+static int polaris10_set_overdrive_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+{
+	return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
+			PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+}
+
+int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int smc_result;
+	int result = 0;
+
+	data->power_containment_features = 0;
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+
+		if (data->enable_tdc_limit_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_TDCLimitEnable));
+			PP_ASSERT_WITH_CODE((0 == smc_result),
+					"Failed to enable TDCLimit in SMC.", result = -1;);
+			if (0 == smc_result)
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_TDCLimit;
+		}
+
+		if (data->enable_pkg_pwr_tracking_feature) {
+			smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+			PP_ASSERT_WITH_CODE((0 == smc_result),
+					"Failed to enable PkgPwrTracking in SMC.", result = -1;);
+			if (0 == smc_result) {
+				struct phm_cac_tdp_table *cac_table =
+						table_info->cac_dtp_table;
+				uint32_t default_limit =
+					(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+
+				data->power_containment_features |=
+						POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+				if (polaris10_set_power_limit(hwmgr, default_limit))
+					printk(KERN_ERR "Failed to set Default Power Limit in SMC!\n");
+			}
+		}
+	}
+	return result;
+}
+
+int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+	int adjust_percent, target_tdp;
+	int result = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		/* adjustment percentage has already been validated */
+		adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+				hwmgr->platform_descriptor.TDPAdjustment :
+				(-1 * hwmgr->platform_descriptor.TDPAdjustment);
+		/* The SMC expects target_tdp as a 7-bit fraction in the DPM
+		 * table, but as an 8-bit fraction in messages.
+		 */
+		target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+		result = polaris10_set_overdrive_target_tdp(hwmgr, (uint32_t)target_tdp);
+	}
+
+	return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
new file mode 100644
index 0000000..68bc1cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef POLARIS10_POWERTUNE_H
+#define POLARIS10_POWERTUNE_H
+
+enum polaris10_pt_config_reg_type {
+	POLARIS10_CONFIGREG_MMR = 0,
+	POLARIS10_CONFIGREG_SMC_IND,
+	POLARIS10_CONFIGREG_DIDT_IND,
+	POLARIS10_CONFIGREG_CACHE,
+	POLARIS10_CONFIGREG_MAX
+};
+
+/* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE             0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
+
+struct polaris10_pt_config_reg {
+	uint32_t                           offset;
+	uint32_t                           mask;
+	uint32_t                           shift;
+	uint32_t                           value;
+	enum polaris10_pt_config_reg_type       type;
+};
+
+struct polaris10_pt_defaults {
+	uint8_t   SviLoadLineEn;
+	uint8_t   SviLoadLineVddC;
+	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t   TDC_MAWt;
+	uint8_t   TdcWaterfallCtl;
+	uint8_t   DTEAmbientTempBase;
+
+	uint32_t  DisplayCac;
+	uint32_t  BAPM_TEMP_GRADIENT;
+	uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+	uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
+void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
+int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
+int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
+int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
+
+#endif  /* POLARIS10_POWERTUNE_H */
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
new file mode 100644
index 0000000..aba167f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
@@ -0,0 +1,712 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <asm/div64.h>
+#include "polaris10_thermal.h"
+#include "polaris10_hwmgr.h"
+#include "polaris10_smumgr.h"
+#include "polaris10_ppsmc.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+		struct phm_fan_speed_info *fan_speed_info)
+{
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	fan_speed_info->supports_percent_read = true;
+	fan_speed_info->supports_percent_write = true;
+	fan_speed_info->min_percent = 0;
+	fan_speed_info->max_percent = 100;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+		hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+		fan_speed_info->supports_rpm_read = true;
+		fan_speed_info->supports_rpm_write = true;
+		fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+		fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+	} else {
+		fan_speed_info->min_rpm = 0;
+		fan_speed_info->max_rpm = 0;
+	}
+
+	return 0;
+}
+
+int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t *speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+	duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+	if (duty100 == 0)
+		return -EINVAL;
+
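+	/* Express the measured duty cycle as a percentage of the maximum duty. */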
+	tmp64 = (uint64_t)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (uint32_t)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+	uint32_t tach_period;
+	uint32_t crystal_clock_freq;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+			(hwmgr->thermal_controller.fanInfo.
+				ucTachometerPulsesPerRevolution == 0))
+		return 0;
+
+	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_TACH_STATUS, TACH_PERIOD);
+
+	if (tach_period == 0)
+		return -EINVAL;
+
+	crystal_clock_freq = tonga_get_xclk(hwmgr);
+
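+	/* tonga_get_xclk() returns the reference clock in 10 kHz units;
+	 * x10000 converts to Hz and x60 to ticks per minute, so dividing by
+	 * the tach period (in clock ticks) yields RPM.
+	 */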
+	*speed = 60 * crystal_clock_freq * 10000 / tach_period;
+
+	return 0;
+}
+
+/**
+* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    mode   the fan control mode: 0 default, 1 by percent, 5 by RPM
+* @exception Should always succeed.
+*/
+int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+
+	if (hwmgr->fan_ctrl_is_in_default_mode) {
+		hwmgr->fan_ctrl_default_mode =
+				PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,	CGS_IND_REG__SMC,
+						CG_FDO_CTRL2, FDO_PWM_MODE);
+		hwmgr->tmin =
+				PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+						CG_FDO_CTRL2, TMIN);
+		hwmgr->fan_ctrl_is_in_default_mode = false;
+	}
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, TMIN, 0);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, FDO_PWM_MODE, mode);
+
+	return 0;
+}
+
+/**
+* Reset Fan Speed Control to default mode.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @exception Should always succeed.
+*/
+int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr->fan_ctrl_is_in_default_mode) {
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_FDO_CTRL2, TMIN, hwmgr->tmin);
+		hwmgr->fan_ctrl_is_in_default_mode = true;
+	}
+
+	return 0;
+}
+
+int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
+		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_FanSpeedInTableIsRPM))
+			hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
+					hwmgr->thermal_controller.
+					advanceFanControlParameters.usMaxFanRPM);
+		else
+			hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
+					hwmgr->thermal_controller.
+					advanceFanControlParameters.usMaxFanPWM);
+
+	} else {
+		cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
+		result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+	}
+
+	if (!result && hwmgr->thermal_controller.
+			advanceFanControlParameters.ucTargetTemperature)
+		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanTemperatureTarget,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ucTargetTemperature);
+
+	return result;
+}
+
+
+int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+}
+
+/**
+* Set Fan Speed in percent.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the percentage value (0% - 100%) to be set.
+* @exception Fails if the 100% setting appears to be 0.
+*/
+int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t speed)
+{
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	if (speed > 100)
+		speed = 100;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl))
+		polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (uint64_t)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (uint32_t)tmp64;
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
+
+	return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reset Fan Speed to default.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @exception Always succeeds.
+*/
+int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+		return 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl)) {
+		result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+		if (!result)
+			result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
+	} else
+		result = polaris10_fan_ctrl_set_default_mode(hwmgr);
+
+	return result;
+}
+
+/**
+* Set Fan Speed in RPM.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    speed is the RPM value (min - max) to be set.
+* @exception Fails if the speed does not lie between min and max.
+*/
+int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+	uint32_t tach_period;
+	uint32_t crystal_clock_freq;
+
+	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+			(hwmgr->thermal_controller.fanInfo.
+			ucTachometerPulsesPerRevolution == 0) ||
+			(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+			(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+		return 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl))
+		polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+	crystal_clock_freq = tonga_get_xclk(hwmgr);
+
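+	/* Convert the requested RPM back into a tach period in reference
+	 * clock ticks (inverse of polaris10_fan_ctrl_get_fan_speed_rpm()).
+	 */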
+	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_TACH_STATUS, TACH_PERIOD, tach_period);
+
+	return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reads the current temperature from the thermal controller.
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	int temp;
+
+	temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_MULT_THERMAL_STATUS, CTF_TEMP);
+
+	/* Bit 9 means the reading is lower than the lowest usable value. */
+	if (temp & 0x200)
+		temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
+	else
+		temp = temp & 0x1ff;
+
+	temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return temp;
+}
+
+/**
+* Set the requested temperature range for high and low alert signals
+*
+* @param    hwmgr The address of the hardware manager.
+* @param    range Temperature range to be programmed for high and low alert signals
+* @exception PP_Result_BadInput if the input data is not valid.
+*/
+static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+		uint32_t low_temp, uint32_t high_temp)
+{
+	uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
+			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
+			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	if (low < low_temp)
+		low = low_temp;
+	if (high > high_temp)
+		high = high_temp;
+
+	if (low > high)
+		return -EINVAL;
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_INT, DIG_THERM_INTH,
+			(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_INT, DIG_THERM_INTL,
+			(low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_CTRL, DIG_THERM_DPM,
+			(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+
+	return 0;
+}
+
+/**
+* Programs thermal controller one-time setting registers
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				CG_TACH_CTRL, EDGE_PER_REV,
+				hwmgr->thermal_controller.fanInfo.
+				ucTachometerPulsesPerRevolution - 1);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
+
+	return 0;
+}
+
+/**
+* Enable thermal alerts on the thermal controller.
+*
+* @param    hwmgr The address of the hardware manager.
+*/
+static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+{
+	uint32_t alert;
+
+	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_INT, THERM_INT_MASK);
+	alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+	/* send message to SMU to enable internal thermal interrupts */
+	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
+}
+
+/**
+* Disable thermal alerts on the thermal controller.
+* @param    hwmgr The address of the hardware manager.
+*/
+static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+{
+	uint32_t alert;
+
+	alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_INT, THERM_INT_MASK);
+	alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+	/* send message to SMU to disable internal thermal interrupts */
+	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
+}
+
+/**
+* Uninitialize the thermal controller.
+* Currently just disables alerts.
+* @param    hwmgr The address of the hardware manager.
+*/
+int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+	int result = polaris10_thermal_disable_alert(hwmgr);
+
+	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+		polaris10_fan_ctrl_set_default_mode(hwmgr);
+
+	return result;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	uint32_t duty100;
+	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	uint16_t fdo_min, slope1, slope2;
+	uint32_t reference_clock;
+	int res;
+	uint64_t tmp64;
+
+	if (data->fan_table_start == 0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
+	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+			CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (duty100 == 0) {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+		return 0;
+	}
+
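+	/* usPWMMin is in 0.01% units; convert it to an absolute duty value
+	 * relative to duty100.
+	 */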
+	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+			usPWMMin * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (uint16_t)tmp64;
+
+	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
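+	/* Fan curve slopes in duty units per degree with 4 fractional bits
+	 * (x16); the +50 rounds the trailing integer division by 100.
+	 */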
+	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
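+	/* Trip-point temperatures are stored in 0.01 degC; the +50 rounds
+	 * the conversion to whole degrees for the SMC.
+	 */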
+	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMin) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMed) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+			thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(hwmgr->
+			thermal_controller.advanceFanControlParameters.ucTHyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = tonga_get_xclk(hwmgr);
+
+	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+			thermal_controller.advanceFanControlParameters.ulCycleDelay *
+			reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+			hwmgr->device, CGS_IND_REG__SMC,
+			CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+	res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
+			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+			data->sram_end);
+
+	if (!res && hwmgr->thermal_controller.
+			advanceFanControlParameters.ucMinimumPWMLimit)
+		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanMinPwm,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ucMinimumPWMLimit);
+
+	if (!res && hwmgr->thermal_controller.
+			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+		res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetFanSclkTarget,
+				hwmgr->thermal_controller.
+				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+	if (res)
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_MicrocodeFanControl);
+
+	return 0;
+}
+
+/**
+* Start the fan control on the SMC.
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	/* If the fan table setup has failed we could have disabled
+	 * PHM_PlatformCaps_MicrocodeFanControl even after this function
+	 * was included in the table.
+	 * Make sure that we still think controlling the fan is OK.
+	 */
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl)) {
+		polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
+		polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+	}
+
+	return 0;
+}
+
+/**
+* Set temperature range for high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from set temperature range routine
+*/
+int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+
+	if (range == NULL)
+		return -EINVAL;
+
+	return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
+}
+
+/**
+* Programs one-time setting registers
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from initialize thermal controller routine
+*/
+int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	return polaris10_thermal_initialize(hwmgr);
+}
+
+/**
+* Enable high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from enable alert routine
+*/
+int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	return polaris10_thermal_enable_alert(hwmgr);
+}
+
+/**
+* Disable high and low alerts
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    pInput the pointer to input data
+* @param    pOutput the pointer to output data
+* @param    pStorage the pointer to temporary storage
+* @param    Result the last failure code
+* @return   result from disable alert routine
+*/
+static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	return polaris10_thermal_disable_alert(hwmgr);
+}
+
+static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+		void *input, void *output, void *storage, int result)
+{
+	int ret;
+	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+		return 0;
+
+	ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+			0 : -1;
+
+	if (!ret)
+		/* If this param is not changed, this function could fire unnecessarily */
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+	return ret;
+}
+
+static const struct phm_master_table_item
+polaris10_thermal_start_thermal_controller_master_list[] = {
+	{NULL, tf_polaris10_thermal_initialize},
+	{NULL, tf_polaris10_thermal_set_temperature_range},
+	{NULL, tf_polaris10_thermal_enable_alert},
+	{NULL, tf_polaris10_thermal_avfs_enable},
+/* We should restrict performance levels to low before we halt the SMC.
+ * On the other hand we are still in boot state when we do this
+ * so it would be pointless.
+ * If this assumption changes we have to revisit this table.
+ */
+	{NULL, tf_polaris10_thermal_setup_fan_table},
+	{NULL, tf_polaris10_thermal_start_smc_fan_control},
+	{NULL, NULL}
+};
+
+static const struct phm_master_table_header
+polaris10_thermal_start_thermal_controller_master = {
+	0,
+	PHM_MasterTableFlag_None,
+	polaris10_thermal_start_thermal_controller_master_list
+};
+
+static const struct phm_master_table_item
+polaris10_thermal_set_temperature_range_master_list[] = {
+	{NULL, tf_polaris10_thermal_disable_alert},
+	{NULL, tf_polaris10_thermal_set_temperature_range},
+	{NULL, tf_polaris10_thermal_enable_alert},
+	{NULL, NULL}
+};
+
+static const struct phm_master_table_header
+polaris10_thermal_set_temperature_range_master = {
+	0,
+	PHM_MasterTableFlag_None,
+	polaris10_thermal_set_temperature_range_master_list
+};
+
+int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+		polaris10_fan_ctrl_set_default_mode(hwmgr);
+	return 0;
+}
+
+/**
+* Initializes the thermal controller related functions in the Hardware Manager structure.
+* @param    hwmgr The address of the hardware manager.
+* @exception Any error code from the low-level communication.
+*/
+int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	result = phm_construct_table(hwmgr,
+			&polaris10_thermal_set_temperature_range_master,
+			&(hwmgr->set_temperature_range));
+
+	if (!result) {
+		result = phm_construct_table(hwmgr,
+				&polaris10_thermal_start_thermal_controller_master,
+				&(hwmgr->start_thermal_controller));
+		if (result)
+			phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+	}
+
+	if (!result)
+		hwmgr->fan_ctrl_is_in_default_mode = true;
+	return result;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
new file mode 100644
index 0000000..62f8cbc
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _POLARIS10_THERMAL_H_
+#define _POLARIS10_THERMAL_H_
+
+#include "hwmgr.h"
+
+#define POLARIS10_THERMAL_HIGH_ALERT_MASK         0x1
+#define POLARIS10_THERMAL_LOW_ALERT_MASK          0x2
+
+#define POLARIS10_THERMAL_MINIMUM_TEMP_READING    -256
+#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING    255
+
+#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP      0
+#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP      255
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
+extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
+extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
+
+extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
+extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 7b2d500..58742e0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include <linux/errno.h>
 #include "linux/delay.h"
 #include "hwmgr.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 2a83a4a..da9f5f1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -373,6 +373,37 @@
 	return result;
 }
 
+int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+		uint32_t clock_value,
+		pp_atomctrl_clock_dividers_ai *dividers)
+{
+	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
+	int result;
+
+	pll_patameters.ulClock.ulClock = clock_value;
+	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
+
+	result = cgs_atom_exec_cmd_table
+		(hwmgr->device,
+		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
+		 &pll_patameters);
+
+	if (0 == result) {
+		dividers->usSclk_fcw_frac     = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
+		dividers->usSclk_fcw_int      = le16_to_cpu(pll_patameters.usSclk_fcw_int);
+		dividers->ucSclkPostDiv       = pll_patameters.ucSclkPostDiv;
+		dividers->ucSclkVcoMode       = pll_patameters.ucSclkVcoMode;
+		dividers->ucSclkPllRange      = pll_patameters.ucSclkPllRange;
+		dividers->ucSscEnable         = pll_patameters.ucSscEnable;
+		dividers->usSsc_fcw1_frac     = le16_to_cpu(pll_patameters.usSsc_fcw1_frac);
+		dividers->usSsc_fcw1_int      = le16_to_cpu(pll_patameters.usSsc_fcw1_int);
+		dividers->usPcc_fcw_int       = le16_to_cpu(pll_patameters.usPcc_fcw_int);
+		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_patameters.usSsc_fcw_slew_frac);
+		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_patameters.usPcc_fcw_slew_frac);
+	}
+	return result;
+}
+
 int atomctrl_get_dfs_pll_dividers_vi(
 		struct pp_hwmgr *hwmgr,
 		uint32_t clock_value,
@@ -618,7 +649,7 @@
 	if (!getASICProfilingInfo)
 		return -1;
 
-	if(getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
+	if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
 			(getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
 			getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
 		return -1;
@@ -891,18 +922,18 @@
 	 *-----------------------
 	 */
 
-	fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4,fSclk), fSM_A5));
+	fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
 	fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
 	fC_Term = fAdd(fMargin_RO_c,
 			fAdd(fMultiply(fSM_A0,fLkg_FT),
-			fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT,fSclk)),
+			fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
 			fAdd(fMultiply(fSM_A3, fSclk),
-			fSubtract(fSM_A7,fRO_fused)))));
+			fSubtract(fSM_A7, fRO_fused)))));
 
 	fVDDC_base = fSubtract(fRO_fused,
 			fSubtract(fMargin_RO_c,
 					fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
-	fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0,fSclk), fSM_A2));
+	fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
 
 	repeat = fSubtract(fVDDC_base,
 			fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
@@ -916,7 +947,7 @@
 			fSubtract(fRO_DC_margin,
 			fSubtract(fSM_A3,
 			fMultiply(fSM_A2, repeat))));
-	fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0,repeat), fSM_A1));
+	fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
 
 	fSigma_DC = fSubtract(fSclk, fDC_SCLK);
 
@@ -996,7 +1027,7 @@
 		fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
 
 		if (GreaterThan(fV_max, fV_NL) &&
-			(GreaterThan(fV_NL,fEVV_V) ||
+			(GreaterThan(fV_NL, fEVV_V) ||
 			Equal(fV_NL, fEVV_V))) {
 			fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
 
@@ -1010,10 +1041,10 @@
 }
 
 /** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table.
- * @param hwmgr               	input: pointer to hwManager
+ * @param hwmgr	input: pointer to hwManager
  * @param voltage_type            input: type of EVV voltage VDDC or VDDGFX
  * @param sclk                        input: in 10Khz unit. DPM state SCLK frequency
- *                                   		which is define in PPTable SCLK/VDDC dependence
+ *		which is define in PPTable SCLK/VDDC dependence
  *				table associated with this virtual_voltage_Id
  * @param virtual_voltage_Id      input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
  * @param voltage		       output: real voltage level in unit of mv
@@ -1205,3 +1236,69 @@
 
 	return result;
 }
+
+int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
+								uint8_t level)
+{
+	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
+	int result;
+
+	memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
+	memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+	memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
+
+	result = cgs_atom_exec_cmd_table
+		(hwmgr->device,
+		 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
+		 &memory_clock_parameters);
+
+	return result;
+}
+
+int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+				uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+{
+	int result;
+	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
+
+	get_voltage_info_param_space.ucVoltageType = voltage_type;
+	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+	get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
+	get_voltage_info_param_space.ulSCLKFreq = sclk;
+
+	result = cgs_atom_exec_cmd_table(hwmgr->device,
+			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+			&get_voltage_info_param_space);
+
+	if (0 != result)
+		return result;
+
+	*voltage = get_voltage_info_param_space.usVoltageLevel;
+
+	return result;
+}
+
+int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
+{
+	int i;
+	u8 frev, crev;
+	u16 size;
+
+	ATOM_SMU_INFO_V2_1 *psmu_info =
+		(ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device,
+			GetIndexIntoMasterTable(DATA, SMU_Info),
+			&size, &frev, &crev);
+
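+	/* Note: this assumes the SMU_Info table is present and that
+	 * ucSclkEntryNum does not exceed MAX_SCLK_RANGE (8).
+	 */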
+	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
+		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
+		table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
+		table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
+		table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
+		table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 627420b..d24ebb5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -101,6 +101,23 @@
 };
 typedef struct pp_atomctrl_clock_dividers_vi pp_atomctrl_clock_dividers_vi;
 
+struct pp_atomctrl_clock_dividers_ai {
+	u16 usSclk_fcw_frac;
+	u16  usSclk_fcw_int;
+	u8   ucSclkPostDiv;
+	u8   ucSclkVcoMode;
+	u8   ucSclkPllRange;
+	u8   ucSscEnable;
+	u16  usSsc_fcw1_frac;
+	u16  usSsc_fcw1_int;
+	u16  usReserved;
+	u16  usPcc_fcw_int;
+	u16  usSsc_fcw_slew_frac;
+	u16  usPcc_fcw_slew_frac;
+};
+typedef struct pp_atomctrl_clock_dividers_ai pp_atomctrl_clock_dividers_ai;
 union pp_atomctrl_s_mpll_fb_divider {
 	struct {
 		uint32_t cl_kf : 12;
@@ -204,6 +221,21 @@
 
 typedef struct pp_atomctrl_mc_register_address pp_atomctrl_mc_register_address;
 
+#define MAX_SCLK_RANGE 8
+
+struct pp_atom_ctrl_sclk_range_table_entry {
+	uint8_t  ucVco_setting;
+	uint8_t  ucPostdiv;
+	uint16_t usFcw_pcc;
+	uint16_t usFcw_trans_upper;
+	uint16_t usRcw_trans_lower;
+};
+
+struct pp_atom_ctrl_sclk_range_table {
+	struct pp_atom_ctrl_sclk_range_table_entry entry[MAX_SCLK_RANGE];
+};
+
 struct pp_atomctrl_mc_reg_table {
 	uint8_t                         last;                    /* number of registers */
 	uint8_t                         num_entries;             /* number of AC timing entries */
@@ -240,7 +272,11 @@
 		uint16_t end_index, uint32_t mask, uint32_t *efuse);
 extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 		uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
-
-
+extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers);
+extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
+								uint8_t level);
+extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+				uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index b10df32..009bd59 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -127,8 +127,8 @@
 	fInt solution = fPositiveOne; /*Starting off with baseline of 1 */
 	fInt error_term;
 
-	uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
-	uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
+	static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
+	static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
 
 	if (GreaterThan(fZERO, exponent)) {
 		exponent = fNegate(exponent);
@@ -162,8 +162,8 @@
 	fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */
 	fInt error_term;
 
-	uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
-	uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
+	static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
+	static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
 
 	while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) {
 		for (i = 0; i < 10; i++) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 0d5d837..d27e8c4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -51,6 +51,9 @@
 #include "bif/bif_5_0_d.h"
 #include "bif/bif_5_0_sh_mask.h"
 
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
 #include "cgs_linux.h"
 #include "eventmgr.h"
 #include "amd_pcie_helpers.h"
@@ -86,17 +89,17 @@
 typedef uint32_t PECI_RegistryValue;
 
 /* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-uint16_t PP_ClockStretcherLookupTable[2][4] = {
+static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
 	{600, 1050, 3, 0},
 	{600, 1050, 6, 1} };
 
 /* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
+static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
 	{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
 	{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
 
 /* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-uint8_t PP_ClockStretchAmountConversion[2][6] = {
+static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
 	{0, 1, 3, 2, 4, 5},
 	{0, 2, 4, 5, 6, 5} };
 
@@ -110,7 +113,7 @@
 };
 typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
 
-const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
+static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
 
 struct tonga_power_state *cast_phw_tonga_power_state(
 				  struct pp_hw_power_state *hw_ps)
@@ -429,19 +432,20 @@
 						}
 					}
 				}
-				PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk
-						(hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
-						 virtual_voltage_id, &vddgfx),
-						"Error retrieving EVV voltage value!", continue);
+				if (0 == atomctrl_get_voltage_evv_on_sclk
+				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
+				     virtual_voltage_id, &vddgfx)) {
+					/* need to make sure vddgfx is less than 2V, or else it could burn the ASIC. */
+					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
 
-				/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
-				PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
-
-				/* the voltage should not be zero nor equal to leakage ID */
-				if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
-					data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
-					data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
-					data->vddcgfx_leakage.count++;
+					/* the voltage should not be zero nor equal to leakage ID */
+					if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
+						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
+						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
+						data->vddcgfx_leakage.count++;
+					}
+				} else {
+					printk(KERN_ERR "[ powerplay ] Error retrieving EVV voltage value!\n");
 				}
 			}
 		} else {
@@ -449,20 +453,20 @@
 			if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
 						pptable_info->vddc_lookup_table,
 						virtual_voltage_id, &sclk)) {
-				PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk
-						(hwmgr, VOLTAGE_TYPE_VDDC, sclk,
-						 virtual_voltage_id, &vddc),
-						"Error retrieving EVV voltage value!", continue);
+				if (0 == atomctrl_get_voltage_evv_on_sclk
+				    (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
+				     virtual_voltage_id, &vddc)) {
+					/* need to make sure vddc is less than 2V, or else it could burn the ASIC. */
+					PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
 
-				/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-				if (vddc > 2000)
-					printk(KERN_ERR "[ powerplay ] Invalid VDDC value! \n");
-
-				/* the voltage should not be zero nor equal to leakage ID */
-				if (vddc != 0 && vddc != virtual_voltage_id) {
-					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
-					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
-					data->vddc_leakage.count++;
+					/* the voltage should not be zero nor equal to leakage ID */
+					if (vddc != 0 && vddc != virtual_voltage_id) {
+						data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
+						data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
+						data->vddc_leakage.count++;
+					}
+				} else {
+					printk(KERN_ERR "[ powerplay ] Error retrieving EVV voltage value!\n");
 				}
 			}
 		}
@@ -2037,14 +2041,11 @@
 	data->display_timing.num_existing_displays = info.display_count;
 
 	if ((data->mclk_stutter_mode_threshold != 0) &&
-			(memory_clock <= data->mclk_stutter_mode_threshold) &&
-			(data->is_uvd_enabled == 0)
-#if defined(LINUX)
-			&& (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
-			&& (data->display_timing.num_existing_displays <= 2)
-			&& (data->display_timing.num_existing_displays != 0)
-#endif
-	)
+	    (memory_clock <= data->mclk_stutter_mode_threshold) &&
+	    (data->is_uvd_enabled == 0)
+	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+	    && (data->display_timing.num_existing_displays <= 2)
+	    && (data->display_timing.num_existing_displays != 0))
 		memory_level->StutterEnable = 1;
 
 	/* decide strobe mode*/
@@ -2415,6 +2416,24 @@
 	return 0;
 }
 
+static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
+		uint32_t min_engine_clock_in_sr)
+{
+	uint32_t i, temp;
+	uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
+
+	PP_ASSERT_WITH_CODE((engine_clock >= min),
+			"Engine clock can't satisfy stutter requirement!", return 0);
+
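+	/* Pick the largest right shift that still keeps engine_clock at or above min. */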
+	for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
+		temp = engine_clock >> i;
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return (uint8_t)i;
+}
+
 /**
  * Populates single SMC SCLK structure using the provided engine clock
  *
@@ -2463,12 +2482,12 @@
 	*get the DAL clock. do it in funture.
 	PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
 	data->display_timing.min_clock_insr = minClocks.engineClockInSR;
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-	{
-		graphic_level->DeepSleepDivId = PhwTonga_GetSleepDividerIdFromClock(hwmgr, engine_clock, minClocks.engineClockInSR);
-	}
 */
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep))
+		graphic_level->DeepSleepDivId =
+				tonga_get_sleep_divider_id_from_clock(engine_clock,
+						data->display_timing.min_clock_insr);
 
 	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -2663,7 +2682,7 @@
 struct TONGA_DLL_SPEED_SETTING {
 	uint16_t            Min;                          /* Minimum Data Rate*/
 	uint16_t            Max;                          /* Maximum Data Rate*/
-	uint32_t 			dll_speed;                     /* The desired DLL_SPEED setting*/
+	uint32_t			dll_speed;                     /* The desired DLL_SPEED setting*/
 };
 
 static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
@@ -2828,27 +2847,6 @@
 		}
 	}
 
-	/* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
-	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
-		/* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
-		/* param1 is for corresponding std voltage */
-		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
-	}
-	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
-	if (NULL != allowed_vdd_mclk_table) {
-		/* Initialize Vddci DPM table based on allow Mclk values */
-		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-			data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
-			data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
-			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
-			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
-		}
-		data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
-		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
-	}
-
 	/* setup PCIE gen speed levels*/
 	tonga_setup_default_pcie_tables(hwmgr);
 
@@ -3296,14 +3294,14 @@
 		pptable_info->vdd_dep_on_mclk;
 
 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing. 	\
+		"VDD dependency on SCLK table is missing.	\
 		This table is mandatory", return -1);
 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table has to have is missing. 	\
+		"VDD dependency on SCLK table has to have at least 1 entry. \
 		This table is mandatory", return -1);
 
 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing. 	\
+		"VDD dependency on MCLK table is missing.	\
 		This table is mandatory", return -1);
 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
 		"VDD dependency on MCLK table has to have is missing.	 \
@@ -4424,17 +4422,14 @@
 
 int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-	if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
-		kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
-		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if (data->soft_pp_table) {
+		kfree(data->soft_pp_table);
+		data->soft_pp_table = NULL;
 	}
 
-	if (NULL != hwmgr->backend) {
-		kfree(hwmgr->backend);
-		hwmgr->backend = NULL;
-	}
-
-	return 0;
+	return phm_hwmgr_backend_fini(hwmgr);
 }
 
 /**
@@ -5315,7 +5310,7 @@
 		(data->need_update_smu7_dpm_table &
 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
 		PP_ASSERT_WITH_CODE(
-			true == tonga_is_dpm_running(hwmgr),
+			0 == tonga_is_dpm_running(hwmgr),
 			"Trying to freeze SCLK DPM when DPM is disabled",
 			);
 		PP_ASSERT_WITH_CODE(
@@ -5328,7 +5323,7 @@
 	if ((0 == data->mclk_dpm_key_disabled) &&
 		(data->need_update_smu7_dpm_table &
 		 DPMTABLE_OD_UPDATE_MCLK)) {
-		PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+		PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
 			"Trying to freeze MCLK DPM when DPM is disabled",
 			);
 		PP_ASSERT_WITH_CODE(
@@ -5429,7 +5424,7 @@
 	}
 
 	if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
-		result = tonga_populate_all_memory_levels(hwmgr);
+		result = tonga_populate_all_graphic_levels(hwmgr);
 		PP_ASSERT_WITH_CODE((0 == result),
 			"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
 			return result);
@@ -5631,7 +5626,7 @@
 		(data->need_update_smu7_dpm_table &
 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
 
-		PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+		PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
 			"Trying to Unfreeze SCLK DPM when DPM is disabled",
 			);
 		PP_ASSERT_WITH_CODE(
@@ -5645,7 +5640,7 @@
 		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
 
 		PP_ASSERT_WITH_CODE(
-				true == tonga_is_dpm_running(hwmgr),
+				0 == tonga_is_dpm_running(hwmgr),
 				"Trying to Unfreeze MCLK DPM when DPM is disabled",
 				);
 		PP_ASSERT_WITH_CODE(
@@ -5874,7 +5869,7 @@
 	if (!fw_info)
 		return 0;
 
-	reference_clock = le16_to_cpu(fw_info->usMinPixelClockPLL_Output);
+	reference_clock = le16_to_cpu(fw_info->usReferenceClock);
 
 	divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
 
@@ -6039,24 +6034,40 @@
 {
 	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
 
-	*table = (char *)&data->smc_state_table;
+	if (!data->soft_pp_table) {
+		data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+					      hwmgr->soft_pp_table_size,
+					      GFP_KERNEL);
+		if (!data->soft_pp_table)
+			return -ENOMEM;
+	}
 
-	return sizeof(struct SMU72_Discrete_DpmTable);
+	*table = (char *)data->soft_pp_table; /* return the cached table itself, not the address of the pointer */
+
+	return hwmgr->soft_pp_table_size;
 }
 
 static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
 {
 	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
 
-	void *table = (void *)&data->smc_state_table;
+	if (!data->soft_pp_table) {
+		data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+		if (!data->soft_pp_table)
+			return -ENOMEM;
+	}
 
-	memcpy(table, buf, size);
+	memcpy(data->soft_pp_table, buf, size);
+
+	hwmgr->soft_pp_table = data->soft_pp_table;
+
+	/* TODO: re-init powerplay to implement modified pptable */
 
 	return 0;
 }
 
 static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, int level)
+		enum pp_clock_type type, uint32_t mask)
 {
 	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
 
@@ -6068,20 +6079,28 @@
 		if (!data->sclk_dpm_key_disabled)
 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					(1 << level));
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
 		break;
 	case PP_MCLK:
 		if (!data->mclk_dpm_key_disabled)
 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_MCLKDPM_SetEnabledMask,
-					(1 << level));
+					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
 		break;
 	case PP_PCIE:
+	{
+		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+		uint32_t level = 0;
+
+		while (tmp >>= 1)
+			level++;
+
 		if (!data->pcie_dpm_key_disabled)
 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 					PPSMC_MSG_PCIeDPM_ForceLevel,
-					(1 << level));
+					level);
 		break;
+	}
 	default:
 		break;
 	}
@@ -6173,6 +6192,7 @@
 	.powergate_uvd = tonga_phm_powergate_uvd,
 	.powergate_vce = tonga_phm_powergate_vce,
 	.disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
+	.update_clock_gatings = tonga_phm_update_clock_gatings,
 	.notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
 	.display_config_changed = tonga_display_configuration_changed_task,
 	.set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index f88d3bb..573cd39 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -74,7 +74,7 @@
 };
 
 struct _phw_tonga_dpm_level {
-	bool  		enabled;
+	bool		enabled;
 	uint32_t    value;
 	uint32_t    param1;
 };
@@ -237,20 +237,20 @@
 	irq_handler_func_t             ctf_callback;
 	void                             *ctf_context;
 
-	phw_tonga_clock_registers      	  clock_registers;
+	phw_tonga_clock_registers	  clock_registers;
 	phw_tonga_voltage_smio_registers  voltage_smio_registers;
 
-	bool                         	is_memory_GDDR5;
+	bool	is_memory_GDDR5;
 	uint16_t                          acpi_vddc;
-	bool                         	pspp_notify_required;        /* Flag to indicate if PSPP notification to SBIOS is required */
+	bool	pspp_notify_required;        /* Flag to indicate if PSPP notification to SBIOS is required */
 	uint16_t                          force_pcie_gen;            /* The forced PCI-E speed if not 0xffff */
 	uint16_t                          acpi_pcie_gen;             /* The PCI-E speed at ACPI time */
 	uint32_t                           pcie_gen_cap;             /* The PCI-E speed capabilities bitmap from CAIL */
 	uint32_t                           pcie_lane_cap;            /* The PCI-E lane capabilities bitmap from CAIL */
 	uint32_t                           pcie_spc_cap;             /* Symbol Per Clock Capabilities from registry */
-	phw_tonga_leakage_voltage       	vddc_leakage;            /* The Leakage VDDC supported (based on leakage ID).*/
-	phw_tonga_leakage_voltage       	vddcgfx_leakage;         /* The Leakage VDDC supported (based on leakage ID). */
-	phw_tonga_leakage_voltage       	vddci_leakage;           /* The Leakage VDDCI supported (based on leakage ID). */
+	phw_tonga_leakage_voltage	vddc_leakage;            /* The Leakage VDDC supported (based on leakage ID).*/
+	phw_tonga_leakage_voltage	vddcgfx_leakage;         /* The Leakage VDDC supported (based on leakage ID). */
+	phw_tonga_leakage_voltage	vddci_leakage;           /* The Leakage VDDCI supported (based on leakage ID). */
 
 	uint32_t                           mvdd_control;
 	uint32_t                           vddc_mask_low;
@@ -263,8 +263,8 @@
 	uint32_t                           mclk_stutter_mode_threshold;
 	uint32_t                           mclk_edc_enable_threshold;
 	uint32_t                           mclk_edc_wr_enable_threshold;
-	bool                         	is_uvd_enabled;
-	bool                         	is_xdma_enabled;
+	bool	is_uvd_enabled;
+	bool	is_xdma_enabled;
 	phw_tonga_vbios_boot_state      vbios_boot_state;
 
 	bool                         battery_state;
@@ -353,6 +353,8 @@
 	bool                           acp_power_gated;  /* 1: gated, 0:not gated */
 	bool                           pg_acp_init;
 
+	/* soft pptable for re-uploading into smu */
+	void *soft_pp_table;
 };
 
 typedef struct tonga_hwmgr tonga_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
index 9a4456e..1b44f4e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
@@ -209,6 +209,20 @@
 	ATOM_Tonga_PCIE_Record entries[1];							/* Dynamically allocate entries. */
 } ATOM_Tonga_PCIE_Table;
 
+typedef struct _ATOM_Polaris10_PCIE_Record {
+	UCHAR ucPCIEGenSpeed;
+	UCHAR usPCIELaneWidth;
+	UCHAR ucReserved[2];
+	ULONG ulPCIE_Sclk;
+} ATOM_Polaris10_PCIE_Record;
+
+typedef struct _ATOM_Polaris10_PCIE_Table {
+	UCHAR ucRevId;
+	UCHAR ucNumEntries;                                         /* Number of entries. */
+	ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
+} ATOM_Polaris10_PCIE_Table;
+
 typedef struct _ATOM_Tonga_MM_Dependency_Record {
 	UCHAR   ucVddcInd;											 /* VDDC voltage */
 	USHORT  usVddgfxOffset;									  /* Offset relative to VDDC voltage */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index b156481..296ec7e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -138,12 +138,15 @@
 
 	u16 size;
 	u8 frev, crev;
-	void *table_address;
+	void *table_address = (void *)hwmgr->soft_pp_table;
 
-	table_address = (ATOM_Tonga_POWERPLAYTABLE *)
-		cgs_atom_get_data_table(hwmgr->device, index, &size, &frev, &crev);
-
-	hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
+	if (!table_address) {
+		table_address = (ATOM_Tonga_POWERPLAYTABLE *)
+				cgs_atom_get_data_table(hwmgr->device,
+						index, &size, &frev, &crev);
+		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
+		hwmgr->soft_pp_table_size = size;
+	}
 
 	return table_address;
 }
@@ -448,48 +451,91 @@
 static int get_pcie_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
-		const ATOM_Tonga_PCIE_Table * atom_pcie_table
+		const PPTable_Generic_SubTable_Header * pTable
 		)
 {
 	uint32_t table_size, i, pcie_count;
 	phm_ppt_v1_pcie_table *pcie_table;
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	PP_ASSERT_WITH_CODE((0 != atom_pcie_table->ucNumEntries),
-		"Invalid PowerPlay Table!", return -1);
 
-	table_size = sizeof(uint32_t) +
-		sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+	if (pTable->ucRevId < 1) {
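+		/* Revision 0: original Tonga-style PCIE table layout. */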
+		const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
+		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+			"Invalid PowerPlay Table!", return -1);
 
-	pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+		table_size = sizeof(uint32_t) +
+			sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
 
-	if (NULL == pcie_table)
-		return -ENOMEM;
+		pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
 
-	memset(pcie_table, 0x00, table_size);
+		if (pcie_table == NULL)
+			return -ENOMEM;
 
-	/*
-	* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
-	* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
-	*/
-	pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
-	if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
-		pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
-	else
-		printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
-		Disregarding the excess entries... \n");
+		memset(pcie_table, 0x00, table_size);
 
-	pcie_table->count = pcie_count;
+		/*
+		* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
+		* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+		*/
+		pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+		if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+			pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+		else
+			printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+			Disregarding the excess entries... \n");
 
-	for (i = 0; i < pcie_count; i++) {
-		pcie_table->entries[i].gen_speed =
-			atom_pcie_table->entries[i].ucPCIEGenSpeed;
-		pcie_table->entries[i].lane_width =
-			atom_pcie_table->entries[i].usPCIELaneWidth;
+		pcie_table->count = pcie_count;
+
+		for (i = 0; i < pcie_count; i++) {
+			pcie_table->entries[i].gen_speed =
+				atom_pcie_table->entries[i].ucPCIEGenSpeed;
+			pcie_table->entries[i].lane_width =
+				atom_pcie_table->entries[i].usPCIELaneWidth;
+		}
+
+		*pp_tonga_pcie_table = pcie_table;
+	} else {
+		/* Polaris10/Polaris11 and newer. */
+		const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
+		PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+			"Invalid PowerPlay Table!", return -1);
+
+		table_size = sizeof(uint32_t) +
+			sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+		pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+
+		if (pcie_table == NULL)
+			return -ENOMEM;
+
+		memset(pcie_table, 0x00, table_size);
+
+		/*
+		* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
+		* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+		*/
+		pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+		if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+			pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+		else
+			printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+			Disregarding the excess entries... \n");
+
+		pcie_table->count = pcie_count;
+
+		for (i = 0; i < pcie_count; i++) {
+			pcie_table->entries[i].gen_speed =
+				atom_pcie_table->entries[i].ucPCIEGenSpeed;
+			pcie_table->entries[i].lane_width =
+				atom_pcie_table->entries[i].usPCIELaneWidth;
+			pcie_table->entries[i].pcie_sclk =
+				atom_pcie_table->entries[i].ulPCIE_Sclk;
+		}
+
+		*pp_tonga_pcie_table = pcie_table;
 	}
 
-	*pp_tonga_pcie_table = pcie_table;
-
 	return 0;
 }
 
@@ -668,8 +714,8 @@
 	const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
 		(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usHardLimitTableOffset));
-	const ATOM_Tonga_PCIE_Table *pcie_table =
-		(const ATOM_Tonga_PCIE_Table *)(((unsigned long) powerplay_table) +
+	const PPTable_Generic_SubTable_Header *pcie_table =
+		(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usPCIETableOffset));
 
 	pp_table_information->vdd_dep_on_sclk = NULL;
@@ -994,48 +1040,44 @@
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
 
-	if (NULL != hwmgr->soft_pp_table) {
-		kfree(hwmgr->soft_pp_table);
+	if (NULL != hwmgr->soft_pp_table)
 		hwmgr->soft_pp_table = NULL;
-	}
 
-	if (NULL != pp_table_information->vdd_dep_on_sclk)
-		pp_table_information->vdd_dep_on_sclk = NULL;
+	kfree(pp_table_information->vdd_dep_on_sclk);
+	pp_table_information->vdd_dep_on_sclk = NULL;
 
-	if (NULL != pp_table_information->vdd_dep_on_mclk)
-		pp_table_information->vdd_dep_on_mclk = NULL;
+	kfree(pp_table_information->vdd_dep_on_mclk);
+	pp_table_information->vdd_dep_on_mclk = NULL;
 
-	if (NULL != pp_table_information->valid_mclk_values)
-		pp_table_information->valid_mclk_values = NULL;
+	kfree(pp_table_information->valid_mclk_values);
+	pp_table_information->valid_mclk_values = NULL;
 
-	if (NULL != pp_table_information->valid_sclk_values)
-		pp_table_information->valid_sclk_values = NULL;
+	kfree(pp_table_information->valid_sclk_values);
+	pp_table_information->valid_sclk_values = NULL;
 
-	if (NULL != pp_table_information->vddc_lookup_table)
-		pp_table_information->vddc_lookup_table = NULL;
+	kfree(pp_table_information->vddc_lookup_table);
+	pp_table_information->vddc_lookup_table = NULL;
 
-	if (NULL != pp_table_information->vddgfx_lookup_table)
-		pp_table_information->vddgfx_lookup_table = NULL;
+	kfree(pp_table_information->vddgfx_lookup_table);
+	pp_table_information->vddgfx_lookup_table = NULL;
 
-	if (NULL != pp_table_information->mm_dep_table)
-		pp_table_information->mm_dep_table = NULL;
+	kfree(pp_table_information->mm_dep_table);
+	pp_table_information->mm_dep_table = NULL;
 
-	if (NULL != pp_table_information->cac_dtp_table)
-		pp_table_information->cac_dtp_table = NULL;
+	kfree(pp_table_information->cac_dtp_table);
+	pp_table_information->cac_dtp_table = NULL;
 
-	if (NULL != hwmgr->dyn_state.cac_dtp_table)
-		hwmgr->dyn_state.cac_dtp_table = NULL;
+	kfree(hwmgr->dyn_state.cac_dtp_table);
+	hwmgr->dyn_state.cac_dtp_table = NULL;
 
-	if (NULL != pp_table_information->ppm_parameter_table)
-		pp_table_information->ppm_parameter_table = NULL;
+	kfree(pp_table_information->ppm_parameter_table);
+	pp_table_information->ppm_parameter_table = NULL;
 
-	if (NULL != pp_table_information->pcie_table)
-		pp_table_information->pcie_table = NULL;
+	kfree(pp_table_information->pcie_table);
+	pp_table_information->pcie_table = NULL;
 
-	if (NULL != hwmgr->pptable) {
-		kfree(hwmgr->pptable);
-		hwmgr->pptable = NULL;
-	}
+	kfree(hwmgr->pptable);
+	hwmgr->pptable = NULL;
 
 	return result;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
index a188174..47ef1ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
@@ -195,8 +195,8 @@
 	if (0 == duty100)
 		return -EINVAL;
 
-	tmp64 = (uint64_t)speed * 100;
-	do_div(tmp64, duty100);
+	tmp64 = (uint64_t)speed * duty100;
+	do_div(tmp64, 100);
 	duty = (uint32_t)tmp64;
 
 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
@@ -525,7 +525,7 @@
 	return tonga_thermal_disable_alert(hwmgr);
 }
 
-static struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
+static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
 	{ NULL, tf_tonga_thermal_initialize },
 	{ NULL, tf_tonga_thermal_set_temperature_range },
 	{ NULL, tf_tonga_thermal_enable_alert },
@@ -538,20 +538,20 @@
 	{ NULL, NULL }
 };
 
-static struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
+static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	tonga_thermal_start_thermal_controller_master_list
 };
 
-static struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
+static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
 	{ NULL, tf_tonga_thermal_disable_alert},
 	{ NULL, tf_tonga_thermal_set_temperature_range},
 	{ NULL, tf_tonga_thermal_enable_alert},
 	{ NULL, NULL }
 };
 
-struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
+static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
 	0,
 	PHM_MasterTableFlag_None,
 	tonga_thermal_set_temperature_range_master_list
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 7255f7d..50b367d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -289,6 +289,9 @@
 
 #define PP_BLOCK_GFX_CG         0x01
 #define PP_BLOCK_GFX_MG         0x02
+#define PP_BLOCK_GFX_3D         0x04
+#define PP_BLOCK_GFX_RLC        0x08
+#define PP_BLOCK_GFX_CP         0x10
 #define PP_BLOCK_SYS_BIF        0x01
 #define PP_BLOCK_SYS_MC         0x02
 #define PP_BLOCK_SYS_ROM        0x04
@@ -337,7 +340,7 @@
 	int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
 	int (*get_pp_table)(void *handle, char **table);
 	int (*set_pp_table)(void *handle, const char *buf, size_t size);
-	int (*force_clock_level)(void *handle, enum pp_clock_type type, int level);
+	int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
 	int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
index 10437dc..d63ef83 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
@@ -37,7 +37,7 @@
 
 struct action_chain {
 	const char *description;  /* action chain description for debugging purpose */
-	const pem_event_action **action_chain; /* pointer to chain of event actions */
+	const pem_event_action * const *action_chain; /* pointer to chain of event actions */
 };
 
 struct pem_power_source_ui_state_info {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
index 0262ad3..8a31665 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
@@ -46,7 +46,7 @@
 typedef struct PWR_Command_Table PWR_Command_Table;
 
 #define PWR_VIRUS_TABLE_SIZE  10243
-static PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
+static const PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
 {
     { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX                               },
     { PwrCmdWrite, 0x00000000, mmPCIE_DATA                                },
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 040d3f7..56f712c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -211,6 +211,7 @@
 	PHM_PlatformCaps_ClockStretcher,
 	PHM_PlatformCaps_TablelessHardwareInterface,
 	PHM_PlatformCaps_EnableDriverEVV,
+	PHM_PlatformCaps_SPLLShutdownSupport,
 	PHM_PlatformCaps_Max
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 928f5a7..28f5714 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -141,7 +141,7 @@
 struct phm_master_table_header {
 	uint32_t storage_size;
 	uint32_t flags;
-	struct phm_master_table_item *master_list;
+	const struct phm_master_table_item *master_list;
 };
 
 struct phm_runtime_table_header {
@@ -199,7 +199,7 @@
 			      void *input, void *output);
 
 extern int phm_construct_table(struct pp_hwmgr *hwmgr,
-			       struct phm_master_table_header *master_table,
+			       const struct phm_master_table_header *master_table,
 			       struct phm_runtime_table_header *rt_table);
 
 extern int phm_destroy_table(struct pp_hwmgr *hwmgr,
@@ -335,8 +335,9 @@
 	int (*power_off_asic)(struct pp_hwmgr *hwmgr);
 	int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
 	int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
-	int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level);
+	int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
 	int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
+	int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
 };
 
 struct pp_table_func {
@@ -499,7 +500,7 @@
 	struct phm_ppm_table                          *ppm_parameter_table;
 	struct phm_cac_tdp_table                      *cac_dtp_table;
 	struct phm_clock_voltage_dependency_table	  *vdd_gfx_dependency_on_sclk;
-	struct phm_vq_budgeting_table		  		  *vq_budgeting_table;
+	struct phm_vq_budgeting_table				  *vq_budgeting_table;
 };
 
 struct pp_fan_info {
@@ -576,6 +577,7 @@
 	void *device;
 	struct pp_smumgr *smumgr;
 	const void *soft_pp_table;
+	uint32_t soft_pp_table_size;
 	bool need_pp_table_upload;
 	enum amd_dpm_forced_level dpm_level;
 	bool block_hw_access;
@@ -671,7 +673,7 @@
 extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
 extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
 extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-
+extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
 
 #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
new file mode 100644
index 0000000..0c6a413
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef POLARIS10_PP_SMC_H
+#define POLARIS10_PP_SMC_H
+
+
+#pragma pack(push, 1)
+
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02
+#define PPSMC_DPM2FLAGS_OCP                             0x04
+
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    0x01
+#define PPSMC_STATEFLAG_POWERBOOST         0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT         0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN   0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   0x40
+
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+	FAN_CONTROL_FUZZY,
+	FAN_CONTROL_TABLE
+};
+
+
+#define PPSMC_Result_OK             ((uint16_t)0x01)
+#define PPSMC_Result_NoMore         ((uint16_t)0x02)
+
+#define PPSMC_Result_NotNow         ((uint16_t)0x03)
+#define PPSMC_Result_Failed         ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+
+#define PPSMC_MSG_Halt                      ((uint16_t)0x10)
+#define PPSMC_MSG_Resume                    ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel            ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled        ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC               ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                   ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                 ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters          ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState           ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast       ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState      ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                 ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                 ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart          ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop           ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart               ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive             ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive           ((uint16_t)0x5A)
+#define PPSMC_StartFanControl               ((uint16_t)0x5B)
+#define PPSMC_StopFanControl                ((uint16_t)0x5C)
+#define PPSMC_NoDisplay                     ((uint16_t)0x5D)
+#define PPSMC_HasDisplay                    ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                 ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                  ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                   ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive              ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive            ((uint16_t)0x6B)
+#define PPSMC_OCPActive                     ((uint16_t)0x6C)
+#define PPSMC_OCPInactive                   ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable          ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start  ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop   ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState    ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState       ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start       ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop        ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState   ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState  ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest        ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping          ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib    ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly         ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+
+#define PPSMC_MSG_ExtremitiesTest_Start     ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop      ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache               ((uint16_t)0x81)
+
+#define PPSMC_MSG_SetEnabledLevels          ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           ((uint16_t)0x83)
+
+#define PPSMC_MSG_ResetToDefaults           ((uint16_t)0x84)
+
+#define PPSMC_MSG_SetForcedLevelsAndJump      ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode           ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                   ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                  ((uint16_t)0x88)
+
+#define PPSMC_MSG_SmcSpaceSetAddress          ((uint16_t)0x89)
+#define PPSM_MSG_SmcSpaceWriteDWordInc        ((uint16_t)0x8A)
+#define PPSM_MSG_SmcSpaceWriteWordInc         ((uint16_t)0x8B)
+#define PPSM_MSG_SmcSpaceWriteByteInc         ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK                       ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                        ((uint16_t) 0x100)
+#define PPSMC_MSG_DPM_Voltage_Pwrmgt          ((uint16_t) 0x101)
+#define PPSMC_MSG_DPM_Config                  ((uint16_t) 0x102)
+#define PPSMC_MSG_PM_Controller_Start         ((uint16_t) 0x103)
+#define PPSMC_MSG_DPM_ForceState              ((uint16_t) 0x104)
+#define PPSMC_MSG_PG_PowerDownSIMD            ((uint16_t) 0x105)
+#define PPSMC_MSG_PG_PowerUpSIMD              ((uint16_t) 0x106)
+#define PPSMC_MSG_PM_Controller_Stop          ((uint16_t) 0x107)
+#define PPSMC_MSG_PG_SIMD_Config              ((uint16_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable         ((uint16_t) 0x109)
+#define PPSMC_MSG_Thermal_Cntl_Enable         ((uint16_t) 0x10a)
+#define PPSMC_MSG_Reset_Service               ((uint16_t) 0x10b)
+#define PPSMC_MSG_VCEPowerOFF                 ((uint16_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON                  ((uint16_t) 0x10f)
+#define PPSMC_MSG_DPM_Disable_VCE_HS          ((uint16_t) 0x110)
+#define PPSMC_MSG_DPM_Enable_VCE_HS           ((uint16_t) 0x111)
+#define PPSMC_MSG_DPM_N_LevelsDisabled        ((uint16_t) 0x112)
+#define PPSMC_MSG_DCEPowerOFF                 ((uint16_t) 0x113)
+#define PPSMC_MSG_DCEPowerON                  ((uint16_t) 0x114)
+#define PPSMC_MSG_PCIE_DDIPowerDown           ((uint16_t) 0x117)
+#define PPSMC_MSG_PCIE_DDIPowerUp             ((uint16_t) 0x118)
+#define PPSMC_MSG_PCIE_CascadePLLPowerDown    ((uint16_t) 0x119)
+#define PPSMC_MSG_PCIE_CascadePLLPowerUp      ((uint16_t) 0x11a)
+#define PPSMC_MSG_SYSPLLPowerOff              ((uint16_t) 0x11b)
+#define PPSMC_MSG_SYSPLLPowerOn               ((uint16_t) 0x11c)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment  ((uint16_t) 0x11e)
+#define PPSMC_MSG_DISPLAYPHYStatusNotify      ((uint16_t) 0x11f)
+#define PPSMC_MSG_EnableBAPM                  ((uint16_t) 0x120)
+#define PPSMC_MSG_DisableBAPM                 ((uint16_t) 0x121)
+#define PPSMC_MSG_Spmi_Enable                 ((uint16_t) 0x122)
+#define PPSMC_MSG_Spmi_Timer                  ((uint16_t) 0x123)
+#define PPSMC_MSG_LCLK_DPM_Config             ((uint16_t) 0x124)
+#define PPSMC_MSG_VddNB_Request               ((uint16_t) 0x125)
+#define PPSMC_MSG_PCIE_DDIPhyPowerDown        ((uint16_t) 0x126)
+#define PPSMC_MSG_PCIE_DDIPhyPowerUp          ((uint16_t) 0x127)
+#define PPSMC_MSG_MCLKDPM_Config              ((uint16_t) 0x128)
+
+#define PPSMC_MSG_UVDDPM_Config               ((uint16_t) 0x129)
+#define PPSMC_MSG_VCEDPM_Config               ((uint16_t) 0x12A)
+#define PPSMC_MSG_ACPDPM_Config               ((uint16_t) 0x12B)
+#define PPSMC_MSG_SAMUDPM_Config              ((uint16_t) 0x12C)
+#define PPSMC_MSG_UVDDPM_SetEnabledMask       ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask       ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask       ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask      ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState          ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel       ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable        ((uint16_t) 0x133)
+#define PPSMC_MSG_SetTDPLimit                 ((uint16_t) 0x134)
+#define PPSMC_MSG_Voltage_Cntl_Disable        ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable              ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF                 ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON                  ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF                 ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON                  ((uint16_t) 0x13a)
+#define PPSMC_MSG_SDMAPowerOFF                ((uint16_t) 0x13b)
+#define PPSMC_MSG_SDMAPowerON                 ((uint16_t) 0x13c)
+#define PPSMC_MSG_PCIeDPM_Disable             ((uint16_t) 0x13d)
+#define PPSMC_MSG_IOMMUPowerOFF               ((uint16_t) 0x13e)
+#define PPSMC_MSG_IOMMUPowerON                ((uint16_t) 0x13f)
+#define PPSMC_MSG_NBDPM_Enable                ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable               ((uint16_t) 0x141)
+#define PPSMC_MSG_NBDPM_ForceNominal          ((uint16_t) 0x142)
+#define PPSMC_MSG_NBDPM_ForcePerformance      ((uint16_t) 0x143)
+#define PPSMC_MSG_NBDPM_UnForce               ((uint16_t) 0x144)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask      ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask      ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel          ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel        ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt     ((uint16_t) 0x149)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt    ((uint16_t) 0x14a)
+#define PPSMC_MSG_SwitchToAC                  ((uint16_t) 0x14b)
+#define PPSMC_MSG_XDMAPowerOFF                ((uint16_t) 0x14c)
+#define PPSMC_MSG_XDMAPowerON                 ((uint16_t) 0x14d)
+
+#define PPSMC_MSG_DPM_Enable                  ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable                 ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable              ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable             ((uint16_t) 0x151)
+#define PPSMC_MSG_LCLKDPM_Enable              ((uint16_t) 0x152)
+#define PPSMC_MSG_LCLKDPM_Disable             ((uint16_t) 0x153)
+#define PPSMC_MSG_UVDDPM_Enable               ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable              ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable              ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable             ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable               ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable              ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable               ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable              ((uint16_t) 0x15b)
+#define PPSMC_MSG_LCLKDPM_SetEnabledMask      ((uint16_t) 0x15c)
+#define PPSMC_MSG_DPM_FPS_Mode                ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode           ((uint16_t) 0x15e)
+#define PPSMC_MSG_VddC_Request                ((uint16_t) 0x15f)
+#define PPSMC_MSG_MCLKDPM_GetEnabledMask      ((uint16_t) 0x160)
+#define PPSMC_MSG_LCLKDPM_GetEnabledMask      ((uint16_t) 0x161)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask      ((uint16_t) 0x162)
+#define PPSMC_MSG_UVDDPM_GetEnabledMask       ((uint16_t) 0x163)
+#define PPSMC_MSG_SAMUDPM_GetEnabledMask      ((uint16_t) 0x164)
+#define PPSMC_MSG_ACPDPM_GetEnabledMask       ((uint16_t) 0x165)
+#define PPSMC_MSG_VCEDPM_GetEnabledMask       ((uint16_t) 0x166)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask      ((uint16_t) 0x167)
+#define PPSMC_MSG_PCIeDPM_GetEnabledMask      ((uint16_t) 0x168)
+#define PPSMC_MSG_TDCLimitEnable              ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable             ((uint16_t) 0x16a)
+#define PPSMC_MSG_DPM_AutoRotate_Mode         ((uint16_t) 0x16b)
+#define PPSMC_MSG_DISPCLK_FROM_FCH            ((uint16_t) 0x16c)
+#define PPSMC_MSG_DISPCLK_FROM_DFS            ((uint16_t) 0x16d)
+#define PPSMC_MSG_DPREFCLK_FROM_FCH           ((uint16_t) 0x16e)
+#define PPSMC_MSG_DPREFCLK_FROM_DFS           ((uint16_t) 0x16f)
+#define PPSMC_MSG_PmStatusLogStart            ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample           ((uint16_t) 0x171)
+#define PPSMC_MSG_SCLK_AutoDPM_ON             ((uint16_t) 0x172)
+#define PPSMC_MSG_MCLK_AutoDPM_ON             ((uint16_t) 0x173)
+#define PPSMC_MSG_LCLK_AutoDPM_ON             ((uint16_t) 0x174)
+#define PPSMC_MSG_UVD_AutoDPM_ON              ((uint16_t) 0x175)
+#define PPSMC_MSG_SAMU_AutoDPM_ON             ((uint16_t) 0x176)
+#define PPSMC_MSG_ACP_AutoDPM_ON              ((uint16_t) 0x177)
+#define PPSMC_MSG_VCE_AutoDPM_ON              ((uint16_t) 0x178)
+#define PPSMC_MSG_PCIe_AutoDPM_ON             ((uint16_t) 0x179)
+#define PPSMC_MSG_MASTER_AutoDPM_ON           ((uint16_t) 0x17a)
+#define PPSMC_MSG_MASTER_AutoDPM_OFF          ((uint16_t) 0x17b)
+#define PPSMC_MSG_DYNAMICDISPPHYPOWER         ((uint16_t) 0x17c)
+#define PPSMC_MSG_CAC_COLLECTION_ON           ((uint16_t) 0x17d)
+#define PPSMC_MSG_CAC_COLLECTION_OFF          ((uint16_t) 0x17e)
+#define PPSMC_MSG_CAC_CORRELATION_ON          ((uint16_t) 0x17f)
+#define PPSMC_MSG_CAC_CORRELATION_OFF         ((uint16_t) 0x180)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON        ((uint16_t) 0x181)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF       ((uint16_t) 0x182)
+#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT     ((uint16_t) 0x184)
+#define PPSMC_MSG_PkgPwrLimitEnable           ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable          ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit              ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp       ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel         ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel       ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel         ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel       ((uint16_t) 0x18C)
+#define PPSMC_MSG_START_DRAM_LOGGING          ((uint16_t) 0x18D)
+#define PPSMC_MSG_STOP_DRAM_LOGGING           ((uint16_t) 0x18E)
+#define PPSMC_MSG_MASTER_DeepSleep_ON         ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
+#define PPSMC_MSG_DisableACDCGPIOInterrupt    ((uint16_t) 0x192)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddc       ((uint16_t) 0x193)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddci      ((uint16_t) 0x194)
+#define PPSMC_MSG_SetVidOffset_1              ((uint16_t) 0x195)
+#define PPSMC_MSG_SetVidOffset_2              ((uint16_t) 0x207)
+#define PPSMC_MSG_GetVidOffset_1              ((uint16_t) 0x196)
+#define PPSMC_MSG_GetVidOffset_2              ((uint16_t) 0x208)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable    ((uint16_t) 0x197)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable   ((uint16_t) 0x198)
+#define PPSMC_MSG_SetTjMax                    ((uint16_t) 0x199)
+#define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+#define PPSMC_MSG_WaitForMclkSwitchFinish     ((uint16_t) 0x19B)
+#define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency        ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency        ((uint16_t) 0x201)
+#define PPSMC_MSG_API_GetSclkBusy             ((uint16_t) 0x202)
+#define PPSMC_MSG_API_GetMclkBusy             ((uint16_t) 0x203)
+#define PPSMC_MSG_API_GetAsicPower            ((uint16_t) 0x204)
+#define PPSMC_MSG_SetFanRpmMax                ((uint16_t) 0x205)
+#define PPSMC_MSG_SetFanSclkTarget            ((uint16_t) 0x206)
+#define PPSMC_MSG_SetFanMinPwm                ((uint16_t) 0x209)
+#define PPSMC_MSG_SetFanTemperatureTarget     ((uint16_t) 0x20A)
+
+#define PPSMC_MSG_BACO_StartMonitor           ((uint16_t) 0x240)
+#define PPSMC_MSG_BACO_Cancel                 ((uint16_t) 0x241)
+#define PPSMC_MSG_EnableVddGfx                ((uint16_t) 0x242)
+#define PPSMC_MSG_DisableVddGfx               ((uint16_t) 0x243)
+#define PPSMC_MSG_UcodeAddressLow             ((uint16_t) 0x244)
+#define PPSMC_MSG_UcodeAddressHigh            ((uint16_t) 0x245)
+#define PPSMC_MSG_UcodeLoadStatus             ((uint16_t) 0x246)
+
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI            ((uint16_t) 0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO            ((uint16_t) 0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI            ((uint16_t) 0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO            ((uint16_t) 0x253)
+#define PPSMC_MSG_LoadUcodes                  ((uint16_t) 0x254)
+#define PPSMC_MSG_PowerStateNotify            ((uint16_t) 0x255)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI      ((uint16_t) 0x256)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO      ((uint16_t) 0x257)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI          ((uint16_t) 0x258)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO          ((uint16_t) 0x259)
+#define PPSMC_MSG_LoadVBios                   ((uint16_t) 0x25A)
+#define PPSMC_MSG_GetUcodeVersion             ((uint16_t) 0x25B)
+#define DMCUSMC_MSG_PSREntry                  ((uint16_t) 0x25C)
+#define DMCUSMC_MSG_PSRExit                   ((uint16_t) 0x25D)
+#define PPSMC_MSG_EnableClockGatingFeature    ((uint16_t) 0x260)
+#define PPSMC_MSG_DisableClockGatingFeature   ((uint16_t) 0x261)
+#define PPSMC_MSG_IsDeviceRunning             ((uint16_t) 0x262)
+#define PPSMC_MSG_LoadMetaData                ((uint16_t) 0x263)
+#define PPSMC_MSG_TMON_AutoCaliberate_Enable  ((uint16_t) 0x264)
+#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
+#define PPSMC_MSG_GetTelemetry1Slope          ((uint16_t) 0x266)
+#define PPSMC_MSG_GetTelemetry1Offset         ((uint16_t) 0x267)
+#define PPSMC_MSG_GetTelemetry2Slope          ((uint16_t) 0x268)
+#define PPSMC_MSG_GetTelemetry2Offset         ((uint16_t) 0x269)
+#define PPSMC_MSG_EnableAvfs                  ((uint16_t) 0x26A)
+#define PPSMC_MSG_DisableAvfs                 ((uint16_t) 0x26B)
+
+#define PPSMC_MSG_PerformBtc                  ((uint16_t) 0x26C)
+#define PPSMC_MSG_VftTableIsValid             ((uint16_t) 0x275)
+#define PPSMC_MSG_UseNewGPIOScheme            ((uint16_t) 0x277)
+#define PPSMC_MSG_GetEnabledPsm               ((uint16_t) 0x400)
+#define PPSMC_MSG_AgmStartPsm                 ((uint16_t) 0x401)
+#define PPSMC_MSG_AgmReadPsm                  ((uint16_t) 0x402)
+#define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
+#define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
+
+#define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
+#define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
+#define PPSMC_MSG_GetCurrPkgPwr               ((uint16_t) 0x282)
+
+#define PPSMC_MSG_SetGpuPllDfsForSclk         ((uint16_t) 0x300)
+#define PPSMC_MSG_Didt_Block_Function         ((uint16_t) 0x301)
+
+#define PPSMC_MSG_SecureSRBMWrite             ((uint16_t) 0x600)
+#define PPSMC_MSG_SecureSRBMRead              ((uint16_t) 0x601)
+#define PPSMC_MSG_SetAddress                  ((uint16_t) 0x800)
+#define PPSMC_MSG_GetData                     ((uint16_t) 0x801)
+#define PPSMC_MSG_SetData                     ((uint16_t) 0x802)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL          0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT     0x00000002
+#define PPSMC_EVENT_STATUS_DC               0x00000004
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
new file mode 100644
index 0000000..f497e7d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -0,0 +1,10088 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _POLARIS10_PWRVIRUS_H
+#define _POLARIS10_PWRVIRUS_H
+
+#define mmSMC_IND_INDEX_11                              0x01AC
+#define mmSMC_IND_DATA_11                               0x01AD
+#define mmCP_HYP_MEC1_UCODE_ADDR	0xf81a
+#define mmCP_HYP_MEC1_UCODE_DATA	0xf81b
+#define mmCP_HYP_MEC2_UCODE_ADDR	0xf81c
+#define mmCP_HYP_MEC2_UCODE_DATA	0xf81d
+
+enum PWR_Command {
+	PwrCmdNull = 0,
+	PwrCmdWrite,
+	PwrCmdEnd,
+	PwrCmdMax
+};
+
+typedef enum PWR_Command PWR_Command;
+
+struct PWR_Command_Table {
+	PWR_Command command;
+	uint32_t    data;
+	uint32_t    reg;
+};
+
+typedef struct PWR_Command_Table PWR_Command_Table;
+
+#define PWR_VIRUS_TABLE_SIZE  10031
+
+static const PWR_Command_Table pwr_virus_table[PWR_VIRUS_TABLE_SIZE] = {
+	{ PwrCmdWrite, 0x00000000, mmRLC_CNTL                                 },
+	{ PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL                             },
+	{ PwrCmdWrite, 0x15000000, mmCP_ME_CNTL                               },
+	{ PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL                              },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL                              },
+	{ PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL                                 },
+	{ PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE                          },
+	{ PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG                          },
+	{ PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO                        },
+	{ PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI                        },
+	{ PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR                   },
+	{ PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR                   },
+	{ PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI                           },
+	{ PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO                           },
+	{ PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI                           },
+	{ PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO                           },
+	{ PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI                           },
+	{ PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO                           },
+	{ PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI                           },
+	{ PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO                           },
+	{ PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI                           },
+	{ PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO                           },
+	{ PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL                              },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI                           },
+	{ PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO                           },
+	{ PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL                              },
+	{ PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL                              },
+	{ PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR                         },
+	{ PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
+	{ PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
+	{ PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
+	{ PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
+	{ PwrCmdWrite, 0x00010000, mmCP_HQD_VMID                              },
+	{ PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
+	{ PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR                         },
+	{ PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
+	{ PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
+	{ PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
+	{ PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
+	{ PwrCmdWrite, 0x00010000, mmCP_HQD_VMID                              },
+	{ PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
+	{ PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR                         },
+	{ PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
+	{ PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
+	{ PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
+	{ PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
+	{ PwrCmdWrite, 0x00010000, mmCP_HQD_VMID                              },
+	{ PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
+	{ PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR                         },
+	{ PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
+	{ PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
+	{ PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
+	{ PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
+	{ PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
+	{ PwrCmdWrite, 0x00010000, mmCP_HQD_VMID                              },
+	{ PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
+	{ PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR                           },
+	{ PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR                           },
+	{ PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE                            },
+	{ PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL                            },
+	{ PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1                    },
+	{ PwrCmdWrite, 0x00000000, mmGRBM_STATUS                              },
+	{ PwrCmdWrite, 0x00000000, mmGRBM_STATUS                              },
+	{ PwrCmdWrite, 0x00000000, mmGRBM_STATUS                              },
+	{ PwrCmdEnd,   0x00000000, 0x00000000                                 },
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
new file mode 100644
index 0000000..1a12d85
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
@@ -0,0 +1,774 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef SMU74_H
+#define SMU74_H
+
+#pragma pack(push, 1)
+
+#define SMU__DGPU_ONLY
+
+#define SMU__NUM_SCLK_DPM_STATE  8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+enum SID_OPTION {
+	SID_OPTION_HI,
+	SID_OPTION_LO,
+	SID_OPTION_COUNT
+};
+
+enum Poly3rdOrderCoeff {
+	LEAKAGE_TEMPERATURE_SCALAR,
+	LEAKAGE_VOLTAGE_SCALAR,
+	DYNAMIC_VOLTAGE_SCALAR,
+	POLY_3RD_ORDER_COUNT
+};
+
+struct SMU7_Poly3rdOrder_Data {
+	int32_t a;
+	int32_t b;
+	int32_t c;
+	int32_t d;
+	uint8_t a_shift;
+	uint8_t b_shift;
+	uint8_t c_shift;
+	uint8_t x_shift;
+};
+
+typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data;
+
+struct Power_Calculator_Data {
+	uint16_t NoLoadVoltage;
+	uint16_t LoadVoltage;
+	uint16_t Resistance;
+	uint16_t Temperature;
+	uint16_t BaseLeakage;
+	uint16_t LkgTempScalar;
+	uint16_t LkgVoltScalar;
+	uint16_t LkgAreaScalar;
+	uint16_t LkgPower;
+	uint16_t DynVoltScalar;
+	uint32_t Cac;
+	uint32_t DynPower;
+	uint32_t TotalCurrent;
+	uint32_t TotalPower;
+};
+
+typedef struct Power_Calculator_Data PowerCalculatorData_t;
+
+struct Gc_Cac_Weight_Data {
+	uint8_t index;
+	uint32_t value;
+};
+
+typedef struct Gc_Cac_Weight_Data GcCacWeight_Data;
+
+
+typedef struct {
+	uint32_t high;
+	uint32_t low;
+} data_64_t;
+
+typedef struct {
+	data_64_t high;
+	data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC        1
+#define SMU7_CONTEXT_ID_VBIOS      2
+
+#define SMU74_MAX_LEVELS_VDDC            16
+#define SMU74_MAX_LEVELS_VDDGFX          16
+#define SMU74_MAX_LEVELS_VDDCI           8
+#define SMU74_MAX_LEVELS_MVDD            4
+
+#define SMU_MAX_SMIO_LEVELS              4
+
+#define SMU74_MAX_LEVELS_GRAPHICS        SMU__NUM_SCLK_DPM_STATE   /* SCLK + SQ DPM + ULV */
+#define SMU74_MAX_LEVELS_MEMORY          SMU__NUM_MCLK_DPM_LEVELS   /* MCLK Levels DPM */
+#define SMU74_MAX_LEVELS_GIO             SMU__NUM_LCLK_DPM_LEVELS  /* LCLK Levels */
+#define SMU74_MAX_LEVELS_LINK            SMU__NUM_PCIE_DPM_LEVELS  /* PCIe speed and number of lanes */
+#define SMU74_MAX_LEVELS_UVD             8   /* VCLK/DCLK levels for UVD */
+#define SMU74_MAX_LEVELS_VCE             8   /* ECLK levels for VCE */
+#define SMU74_MAX_LEVELS_ACP             8   /* ACLK levels for ACP */
+#define SMU74_MAX_LEVELS_SAMU            8   /* SAMCLK levels for SAMU */
+#define SMU74_MAX_ENTRIES_SMIO           32  /* Number of entries in SMIO table */
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL    0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL      0
+
+#define GPIO_CLAMP_MODE_VRHOT      1
+#define GPIO_CLAMP_MODE_THERM      2
+#define GPIO_CLAMP_MODE_DC         4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT  6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK   (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT  9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK   (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT  12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK   (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT  15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK   (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT  18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK   (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT  21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK   (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+/* Virtualization Defines */
+#define CG_XDMA_MASK  0x1
+#define CG_XDMA_SHIFT 0
+#define CG_UVD_MASK   0x2
+#define CG_UVD_SHIFT  1
+#define CG_VCE_MASK   0x4
+#define CG_VCE_SHIFT  2
+#define CG_SAMU_MASK  0x8
+#define CG_SAMU_SHIFT 3
+#define CG_GFX_MASK   0x10
+#define CG_GFX_SHIFT  4
+#define CG_SDMA_MASK  0x20
+#define CG_SDMA_SHIFT 5
+#define CG_HDP_MASK   0x40
+#define CG_HDP_SHIFT  6
+#define CG_MC_MASK    0x80
+#define CG_MC_SHIFT   7
+#define CG_DRM_MASK   0x100
+#define CG_DRM_SHIFT  8
+#define CG_ROM_MASK   0x200
+#define CG_ROM_SHIFT  9
+#define CG_BIF_MASK   0x400
+#define CG_BIF_SHIFT  10
+
+
+#define SMU74_DTE_ITERATIONS 5
+#define SMU74_DTE_SOURCES 3
+#define SMU74_DTE_SINKS 1
+#define SMU74_NUM_CPU_TES 0
+#define SMU74_NUM_GPU_TES 1
+#define SMU74_NUM_NON_TES 2
+#define SMU74_DTE_FAN_SCALAR_MIN 0x100
+#define SMU74_DTE_FAN_SCALAR_MAX 0x166
+#define SMU74_DTE_FAN_TEMP_MAX 93
+#define SMU74_DTE_FAN_TEMP_MIN 83
+
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+#endif
+
+struct SMU7_HystController_Data {
+	uint8_t waterfall_up;
+	uint8_t waterfall_down;
+	uint8_t waterfall_limit;
+	uint8_t spare;
+	uint16_t release_cnt;
+	uint16_t release_limit;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+struct SMU74_PIDController {
+	uint32_t Ki;
+	int32_t LFWindupUpperLim;
+	int32_t LFWindupLowerLim;
+	uint32_t StatePrecision;
+	uint32_t LfPrecision;
+	uint32_t LfOffset;
+	uint32_t MaxState;
+	uint32_t MaxLfFraction;
+	uint32_t StateShift;
+};
+
+typedef struct SMU74_PIDController SMU74_PIDController;
+
+struct SMU7_LocalDpmScoreboard {
+	uint32_t PercentageBusy;
+
+	int32_t  PIDError;
+	int32_t  PIDIntegral;
+	int32_t  PIDOutput;
+
+	uint32_t SigmaDeltaAccum;
+	uint32_t SigmaDeltaOutput;
+	uint32_t SigmaDeltaLevel;
+
+	uint32_t UtilizationSetpoint;
+
+	uint8_t  TdpClampMode;
+	uint8_t  TdcClampMode;
+	uint8_t  ThermClampMode;
+	uint8_t  VoltageBusy;
+
+	int8_t   CurrLevel;
+	int8_t   TargLevel;
+	uint8_t  LevelChangeInProgress;
+	uint8_t  UpHyst;
+
+	uint8_t  DownHyst;
+	uint8_t  VoltageDownHyst;
+	uint8_t  DpmEnable;
+	uint8_t  DpmRunning;
+
+	uint8_t  DpmForce;
+	uint8_t  DpmForceLevel;
+	uint8_t  DisplayWatermark;
+	uint8_t  McArbIndex;
+
+	uint32_t MinimumPerfSclk;
+
+	uint8_t  AcpiReq;
+	uint8_t  AcpiAck;
+	uint8_t  GfxClkSlow;
+	uint8_t  GpioClampMode;
+
+	uint8_t  spare2;
+	uint8_t  EnabledLevelsChange;
+	uint8_t  DteClampMode;
+	uint8_t  FpsClampMode;
+
+	uint16_t LevelResidencyCounters[SMU74_MAX_LEVELS_GRAPHICS];
+	uint16_t LevelSwitchCounters[SMU74_MAX_LEVELS_GRAPHICS];
+
+	void     (*TargetStateCalculator)(uint8_t);
+	void     (*SavedTargetStateCalculator)(uint8_t);
+
+	uint16_t AutoDpmInterval;
+	uint16_t AutoDpmRange;
+
+	uint8_t  FpsEnabled;
+	uint8_t  MaxPerfLevel;
+	uint8_t  AllowLowClkInterruptToHost;
+	uint8_t  FpsRunning;
+
+	uint32_t MaxAllowedFrequency;
+
+	uint32_t FilteredSclkFrequency;
+	uint32_t LastSclkFrequency;
+	uint32_t FilteredSclkFrequencyCnt;
+
+	uint8_t MinPerfLevel;
+	uint8_t padding[3];
+
+	uint16_t FpsAlpha;
+	uint16_t DeltaTime;
+	uint32_t CurrentFps;
+	uint32_t FilteredFps;
+	uint32_t FrameCount;
+	uint32_t FrameCountLast;
+	uint16_t FpsTargetScalar;
+	uint16_t FpsWaterfallLimitScalar;
+	uint16_t FpsAlphaScalar;
+	uint16_t spare8;
+	SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
+
+#define VDDC_MASK    0x00007FFF
+#define VDDC_SHIFT   0
+#define VDDCI_MASK   0x3FFF8000
+#define VDDCI_SHIFT  15
+#define PHASES_MASK  0xC0000000
+#define PHASES_SHIFT 30
+
+typedef uint32_t SMU_VoltageLevel;
+
+struct SMU7_VoltageScoreboard {
+
+	SMU_VoltageLevel TargetVoltage;
+	uint16_t MaxVid;
+	uint8_t  HighestVidOffset;
+	uint8_t  CurrentVidOffset;
+
+	uint16_t CurrentVddc;
+	uint16_t CurrentVddci;
+
+
+	uint8_t  ControllerBusy;
+	uint8_t  CurrentVid;
+	uint8_t  CurrentVddciVid;
+	uint8_t  padding;
+
+	SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+	SMU_VoltageLevel TargetVoltageState;
+	uint8_t  EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+
+	uint8_t  padding2;
+	uint8_t  padding3;
+	uint8_t  ControllerEnable;
+	uint8_t  ControllerRunning;
+	uint16_t CurrentStdVoltageHiSidd;
+	uint16_t CurrentStdVoltageLoSidd;
+	uint8_t  OverrideVoltage;
+	uint8_t  padding4;
+	uint8_t  padding5;
+	uint8_t  CurrentPhases;
+
+	VoltageChangeHandler_t ChangeVddc;
+
+	VoltageChangeHandler_t ChangeVddci;
+	VoltageChangeHandler_t ChangePhase;
+	VoltageChangeHandler_t ChangeMvdd;
+
+	VoltageChangeHandler_t functionLinks[6];
+
+	uint16_t *VddcFollower1;
+
+	int16_t  Driver_OD_RequestedVidOffset1;
+	int16_t  Driver_OD_RequestedVidOffset2;
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard {
+	uint8_t     DpmEnable;
+	uint8_t     DpmRunning;
+	uint8_t     DpmForce;
+	uint8_t     DpmForceLevel;
+
+	uint8_t     CurrentLinkSpeed;
+	uint8_t     EnabledLevelsChange;
+	uint16_t    AutoDpmInterval;
+
+	uint16_t    AutoDpmRange;
+	uint16_t    AutoDpmCount;
+
+	uint8_t     DpmMode;
+	uint8_t     AcpiReq;
+	uint8_t     AcpiAck;
+	uint8_t     CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I  7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard {
+	PowerCalculatorData_t VddcPowerData[SID_OPTION_COUNT];
+
+	uint32_t TotalGpuPower;
+	uint32_t TdcCurrent;
+
+	uint16_t   VddciTotalPower;
+	uint16_t   sparesasfsdfd;
+	uint16_t   Vddr1Power;
+	uint16_t   RocPower;
+
+	uint16_t   CalcMeasPowerBlend;
+	uint8_t    SidOptionPower;
+	uint8_t    SidOptionCurrent;
+
+	uint32_t   WinTime;
+
+	uint16_t Telemetry_1_slope;
+	uint16_t Telemetry_2_slope;
+	int32_t Telemetry_1_offset;
+	int32_t Telemetry_2_offset;
+
+	uint32_t VddcCurrentTelemetry;
+	uint32_t VddGfxCurrentTelemetry;
+	uint32_t VddcPowerTelemetry;
+	uint32_t VddGfxPowerTelemetry;
+	uint32_t VddciPowerTelemetry;
+
+	uint32_t VddcPower;
+	uint32_t VddGfxPower;
+	uint32_t VddciPower;
+
+	uint32_t TelemetryCurrent[2];
+	uint32_t TelemetryVoltage[2];
+	uint32_t TelemetryPower[2];
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+struct SMU7_ThermalScoreboard {
+	int16_t  GpuLimit;
+	int16_t  GpuHyst;
+	uint16_t CurrGnbTemp;
+	uint16_t FilteredGnbTemp;
+
+	uint8_t  ControllerEnable;
+	uint8_t  ControllerRunning;
+	uint8_t  AutoTmonCalInterval;
+	uint8_t  AutoTmonCalEnable;
+
+	uint8_t  ThermalDpmEnabled;
+	uint8_t  SclkEnabledMask;
+	uint8_t  spare[2];
+	int32_t  temperature_gradient;
+
+	SMU7_HystController_Data HystControllerData;
+	int32_t  WeightedSensorTemperature;
+	uint16_t TemperatureLimit[SMU74_MAX_LEVELS_GRAPHICS];
+	uint32_t Alpha;
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
+#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
+#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
+#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
+
+/* All 'soft registers' should be uint32_t. */
+struct SMU74_SoftRegisters {
+	uint32_t        RefClockFrequency;
+	uint32_t        PmTimerPeriod;
+	uint32_t        FeatureEnables;
+
+	uint32_t        PreVBlankGap;
+	uint32_t        VBlankTimeout;
+	uint32_t        TrainTimeGap;
+
+	uint32_t        MvddSwitchTime;
+	uint32_t        LongestAcpiTrainTime;
+	uint32_t        AcpiDelay;
+	uint32_t        G5TrainTime;
+	uint32_t        DelayMpllPwron;
+	uint32_t        VoltageChangeTimeout;
+
+	uint32_t        HandshakeDisables;
+
+	uint8_t         DisplayPhy1Config;
+	uint8_t         DisplayPhy2Config;
+	uint8_t         DisplayPhy3Config;
+	uint8_t         DisplayPhy4Config;
+
+	uint8_t         DisplayPhy5Config;
+	uint8_t         DisplayPhy6Config;
+	uint8_t         DisplayPhy7Config;
+	uint8_t         DisplayPhy8Config;
+
+	uint32_t        AverageGraphicsActivity;
+	uint32_t        AverageMemoryActivity;
+	uint32_t        AverageGioActivity;
+
+	uint8_t         SClkDpmEnabledLevels;
+	uint8_t         MClkDpmEnabledLevels;
+	uint8_t         LClkDpmEnabledLevels;
+	uint8_t         PCIeDpmEnabledLevels;
+
+	uint8_t         UVDDpmEnabledLevels;
+	uint8_t         SAMUDpmEnabledLevels;
+	uint8_t         ACPDpmEnabledLevels;
+	uint8_t         VCEDpmEnabledLevels;
+
+	uint32_t        DRAM_LOG_ADDR_H;
+	uint32_t        DRAM_LOG_ADDR_L;
+	uint32_t        DRAM_LOG_PHY_ADDR_H;
+	uint32_t        DRAM_LOG_PHY_ADDR_L;
+	uint32_t        DRAM_LOG_BUFF_SIZE;
+	uint32_t        UlvEnterCount;
+	uint32_t        UlvTime;
+	uint32_t        UcodeLoadStatus;
+	uint32_t        AllowMvddSwitch;
+	uint8_t         Activity_Weight;
+	uint8_t         Reserved8[3];
+};
+
+typedef struct SMU74_SoftRegisters SMU74_SoftRegisters;
+
+struct SMU74_Firmware_Header {
+	uint32_t Digest[5];
+	uint32_t Version;
+	uint32_t HeaderSize;
+	uint32_t Flags;
+	uint32_t EntryPoint;
+	uint32_t CodeSize;
+	uint32_t ImageSize;
+
+	uint32_t Rtos;
+	uint32_t SoftRegisters;
+	uint32_t DpmTable;
+	uint32_t FanTable;
+	uint32_t CacConfigTable;
+	uint32_t CacStatusTable;
+
+
+	uint32_t mcRegisterTable;
+
+
+	uint32_t mcArbDramTimingTable;
+
+
+
+
+	uint32_t PmFuseTable;
+	uint32_t Globals;
+	uint32_t ClockStretcherTable;
+	uint32_t VftTable;
+	uint32_t Reserved[21];
+	uint32_t Signature;
+};
+
+typedef struct SMU74_Firmware_Header SMU74_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum  DisplayConfig {
+	PowerDown = 1,
+	DP54x4,
+	DP54x2,
+	DP54x1,
+	DP27x4,
+	DP27x2,
+	DP27x1,
+	HDMI297,
+	HDMI162,
+	LVDS,
+	DP324x4,
+	DP324x2,
+	DP324x1
+};
+
+
+#define MC_BLOCK_COUNT 1
+#define CPL_BLOCK_COUNT 5
+#define SE_BLOCK_COUNT 15
+#define GC_BLOCK_COUNT 24
+
+struct SMU7_Local_Cac {
+	uint8_t BlockId;
+	uint8_t SignalId;
+	uint8_t Threshold;
+	uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+
+	SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+	SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+	SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
+	SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#pragma pack(pop)
+
+/* Description of Clock Gating bitmask for Tonga:
+ * System Clock Gating
+ */
+#define CG_SYS_BITMASK_FIRST_BIT      0  /* First bit of Sys CG bitmask */
+#define CG_SYS_BITMASK_LAST_BIT       9  /* Last bit of Sys CG bitmask */
+#define CG_SYS_BIF_MGLS_SHIFT         0
+#define CG_SYS_ROM_SHIFT              1
+#define CG_SYS_MC_MGCG_SHIFT          2
+#define CG_SYS_MC_MGLS_SHIFT          3
+#define CG_SYS_SDMA_MGCG_SHIFT        4
+#define CG_SYS_SDMA_MGLS_SHIFT        5
+#define CG_SYS_DRM_MGCG_SHIFT         6
+#define CG_SYS_HDP_MGCG_SHIFT         7
+#define CG_SYS_HDP_MGLS_SHIFT         8
+#define CG_SYS_DRM_MGLS_SHIFT         9
+#define CG_SYS_BIF_MGCG_SHIFT         10
+
+#define CG_SYS_BIF_MGLS_MASK          0x1
+#define CG_SYS_ROM_MASK               0x2
+#define CG_SYS_MC_MGCG_MASK           0x4
+#define CG_SYS_MC_MGLS_MASK           0x8
+#define CG_SYS_SDMA_MGCG_MASK         0x10
+#define CG_SYS_SDMA_MGLS_MASK         0x20
+#define CG_SYS_DRM_MGCG_MASK          0x40
+#define CG_SYS_HDP_MGCG_MASK          0x80
+#define CG_SYS_HDP_MGLS_MASK          0x100
+#define CG_SYS_DRM_MGLS_MASK          0x200
+#define CG_SYS_BIF_MGCG_MASK          0x400
+
+/* Graphics Clock Gating */
+#define CG_GFX_BITMASK_FIRST_BIT      16 /* First bit of Gfx CG bitmask */
+#define CG_GFX_BITMASK_LAST_BIT       24 /* Last bit of Gfx CG bitmask */
+
+#define CG_GFX_CGCG_SHIFT             16
+#define CG_GFX_CGLS_SHIFT             17
+#define CG_CPF_MGCG_SHIFT             18
+#define CG_RLC_MGCG_SHIFT             19
+#define CG_GFX_OTHERS_MGCG_SHIFT      20
+#define CG_GFX_3DCG_SHIFT             21
+#define CG_GFX_3DLS_SHIFT             22
+#define CG_GFX_RLC_LS_SHIFT           23
+#define CG_GFX_CP_LS_SHIFT            24
+
+#define CG_GFX_CGCG_MASK              0x00010000
+#define CG_GFX_CGLS_MASK              0x00020000
+#define CG_CPF_MGCG_MASK              0x00040000
+#define CG_RLC_MGCG_MASK              0x00080000
+#define CG_GFX_OTHERS_MGCG_MASK       0x00100000
+#define CG_GFX_3DCG_MASK              0x00200000
+#define CG_GFX_3DLS_MASK              0x00400000
+#define CG_GFX_RLC_LS_MASK            0x00800000
+#define CG_GFX_CP_LS_MASK             0x01000000
+
+
+/* Voltage Regulator Configuration
+VR Config info is contained in dpmTable.VRConfig */
+
+#define VRCONF_VDDC_MASK         0x000000FF
+#define VRCONF_VDDC_SHIFT        0
+#define VRCONF_VDDGFX_MASK       0x0000FF00
+#define VRCONF_VDDGFX_SHIFT      8
+#define VRCONF_VDDCI_MASK        0x00FF0000
+#define VRCONF_VDDCI_SHIFT       16
+#define VRCONF_MVDD_MASK         0xFF000000
+#define VRCONF_MVDD_SHIFT        24
+
+#define VR_MERGED_WITH_VDDC      0
+#define VR_SVI2_PLANE_1          1
+#define VR_SVI2_PLANE_2          2
+#define VR_SMIO_PATTERN_1        3
+#define VR_SMIO_PATTERN_2        4
+#define VR_STATIC_VOLTAGE        5
+
+/* Clock Stretcher Configuration */
+
+#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
+#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
+
+/* The 'settings' field is subdivided in the following way: */
+#define CLOCK_STRETCHER_SETTING_DDT_MASK             0x01
+#define CLOCK_STRETCHER_SETTING_DDT_SHIFT            0x0
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK  0x1E
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
+#define CLOCK_STRETCHER_SETTING_ENABLE_MASK          0x80
+#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT         0x7
+
+struct SMU_ClockStretcherDataTableEntry {
+	uint8_t minVID;
+	uint8_t maxVID;
+
+
+	uint16_t setting;
+};
+typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
+
+struct SMU_ClockStretcherDataTable {
+	SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
+};
+typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
+
+struct SMU_CKS_LOOKUPTableEntry {
+	uint16_t minFreq;
+	uint16_t maxFreq;
+
+	uint8_t setting;
+	uint8_t padding[3];
+};
+typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
+
+struct SMU_CKS_LOOKUPTable {
+	SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
+};
+typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
+
+struct AgmAvfsData_t {
+	uint16_t avgPsmCount[28];
+	uint16_t minPsmCount[28];
+};
+
+typedef struct AgmAvfsData_t AgmAvfsData_t;
+
+enum VFT_COLUMNS {
+	SCLK0,
+	SCLK1,
+	SCLK2,
+	SCLK3,
+	SCLK4,
+	SCLK5,
+	SCLK6,
+	SCLK7,
+
+	NUM_VFT_COLUMNS
+};
+
+#define VFT_TABLE_DEFINED
+
+#define TEMP_RANGE_MAXSTEPS 12
+
+struct VFT_CELL_t {
+	uint16_t Voltage;
+};
+
+typedef struct VFT_CELL_t VFT_CELL_t;
+
+struct VFT_TABLE_t {
+	VFT_CELL_t    Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
+	uint16_t      AvfsGbv[NUM_VFT_COLUMNS];
+	uint16_t      BtcGbv[NUM_VFT_COLUMNS];
+	uint16_t      Temperature[TEMP_RANGE_MAXSTEPS];
+
+	uint8_t       NumTemperatureSteps;
+	uint8_t       padding[3];
+};
+
+typedef struct VFT_TABLE_t VFT_TABLE_t;
+
+
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
new file mode 100644
index 0000000..0dfe823
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
@@ -0,0 +1,828 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU74_DISCRETE_H
+#define SMU74_DISCRETE_H
+
+#include "smu74.h"
+
+#pragma pack(push, 1)
+
+
+#define NUM_SCLK_RANGE 8
+
+#define VCO_3_6 1
+#define VCO_2_4 3
+
+#define POSTDIV_DIV_BY_1  0
+#define POSTDIV_DIV_BY_2  1
+#define POSTDIV_DIV_BY_4  2
+#define POSTDIV_DIV_BY_8  3
+#define POSTDIV_DIV_BY_16 4
+
+struct sclkFcwRange_t {
+	uint8_t  vco_setting;
+	uint8_t  postdiv;
+	uint16_t fcw_pcc;
+
+	uint16_t fcw_trans_upper;
+	uint16_t fcw_trans_lower;
+};
+typedef struct sclkFcwRange_t sclkFcwRange_t;
+
+struct SMIO_Pattern {
+	uint16_t Voltage;
+	uint8_t  Smio;
+	uint8_t  padding;
+};
+
+typedef struct SMIO_Pattern SMIO_Pattern;
+
+struct SMIO_Table {
+	SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
+};
+
+typedef struct SMIO_Table SMIO_Table;
+
+struct SMU_SclkSetting {
+	uint32_t    SclkFrequency;
+	uint16_t    Fcw_int;
+	uint16_t    Fcw_frac;
+	uint16_t    Pcc_fcw_int;
+	uint8_t     PllRange;
+	uint8_t     SSc_En;
+	uint16_t    Sclk_slew_rate;
+	uint16_t    Pcc_up_slew_rate;
+	uint16_t    Pcc_down_slew_rate;
+	uint16_t    Fcw1_int;
+	uint16_t    Fcw1_frac;
+	uint16_t    Sclk_ss_slew_rate;
+};
+typedef struct SMU_SclkSetting SMU_SclkSetting;
+
+struct SMU74_Discrete_GraphicsLevel {
+	SMU_VoltageLevel MinVoltage;
+	uint8_t     pcieDpmLevel;
+	uint8_t     DeepSleepDivId;
+	uint16_t    ActivityLevel;
+	uint32_t    CgSpllFuncCntl3;
+	uint32_t    CgSpllFuncCntl4;
+	uint32_t    CcPwrDynRm;
+	uint32_t    CcPwrDynRm1;
+	uint8_t     SclkDid;
+	uint8_t     padding;
+	uint8_t     EnabledForActivity;
+	uint8_t     EnabledForThrottle;
+	uint8_t     UpHyst;
+	uint8_t     DownHyst;
+	uint8_t     VoltageDownHyst;
+	uint8_t     PowerThrottle;
+	SMU_SclkSetting SclkSetting;
+};
+
+typedef struct SMU74_Discrete_GraphicsLevel SMU74_Discrete_GraphicsLevel;
+
+struct SMU74_Discrete_ACPILevel {
+	uint32_t    Flags;
+	SMU_VoltageLevel MinVoltage;
+	uint32_t    SclkFrequency;
+	uint8_t     SclkDid;
+	uint8_t     DisplayWatermark;
+	uint8_t     DeepSleepDivId;
+	uint8_t     padding;
+	uint32_t    CcPwrDynRm;
+	uint32_t    CcPwrDynRm1;
+
+	SMU_SclkSetting SclkSetting;
+};
+
+typedef struct SMU74_Discrete_ACPILevel SMU74_Discrete_ACPILevel;
+
+struct SMU74_Discrete_Ulv {
+	uint32_t    CcPwrDynRm;
+	uint32_t    CcPwrDynRm1;
+	uint16_t    VddcOffset;
+	uint8_t     VddcOffsetVid;
+	uint8_t     VddcPhase;
+	uint16_t    BifSclkDfs;
+	uint16_t    Reserved;
+};
+
+typedef struct SMU74_Discrete_Ulv SMU74_Discrete_Ulv;
+
+struct SMU74_Discrete_MemoryLevel {
+	SMU_VoltageLevel MinVoltage;
+	uint32_t    MinMvdd;
+
+	uint32_t    MclkFrequency;
+
+	uint8_t     StutterEnable;
+	uint8_t     EnabledForThrottle;
+	uint8_t     EnabledForActivity;
+	uint8_t     padding_0;
+
+	uint8_t     UpHyst;
+	uint8_t     DownHyst;
+	uint8_t     VoltageDownHyst;
+	uint8_t     padding_1;
+
+	uint16_t    ActivityLevel;
+	uint8_t     DisplayWatermark;
+	uint8_t     Reserved;
+};
+
+typedef struct SMU74_Discrete_MemoryLevel SMU74_Discrete_MemoryLevel;
+
+struct SMU74_Discrete_LinkLevel {
+	uint8_t     PcieGenSpeed;
+	uint8_t     PcieLaneCount;
+	uint8_t     EnabledForActivity;
+	uint8_t     SPC;
+	uint32_t    DownThreshold;
+	uint32_t    UpThreshold;
+	uint16_t    BifSclkDfs;
+	uint16_t    Reserved;
+};
+
+typedef struct SMU74_Discrete_LinkLevel SMU74_Discrete_LinkLevel;
+
+struct SMU74_Discrete_MCArbDramTimingTableEntry {
+	uint32_t McArbDramTiming;
+	uint32_t McArbDramTiming2;
+	uint8_t  McArbBurstTime;
+	uint8_t  padding[3];
+};
+
+typedef struct SMU74_Discrete_MCArbDramTimingTableEntry SMU74_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU74_Discrete_MCArbDramTimingTable {
+	SMU74_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU74_Discrete_MCArbDramTimingTable SMU74_Discrete_MCArbDramTimingTable;
+
+struct SMU74_Discrete_UvdLevel {
+	uint32_t VclkFrequency;
+	uint32_t DclkFrequency;
+	SMU_VoltageLevel MinVoltage;
+	uint8_t  VclkDivider;
+	uint8_t  DclkDivider;
+	uint8_t  padding[2];
+};
+
+typedef struct SMU74_Discrete_UvdLevel SMU74_Discrete_UvdLevel;
+
+struct SMU74_Discrete_ExtClkLevel {
+	uint32_t Frequency;
+	SMU_VoltageLevel MinVoltage;
+	uint8_t  Divider;
+	uint8_t  padding[3];
+};
+
+typedef struct SMU74_Discrete_ExtClkLevel SMU74_Discrete_ExtClkLevel;
+
+struct SMU74_Discrete_StateInfo {
+	uint32_t SclkFrequency;
+	uint32_t MclkFrequency;
+	uint32_t VclkFrequency;
+	uint32_t DclkFrequency;
+	uint32_t SamclkFrequency;
+	uint32_t AclkFrequency;
+	uint32_t EclkFrequency;
+	uint16_t MvddVoltage;
+	uint16_t padding16;
+	uint8_t  DisplayWatermark;
+	uint8_t  McArbIndex;
+	uint8_t  McRegIndex;
+	uint8_t  SeqIndex;
+	uint8_t  SclkDid;
+	int8_t   SclkIndex;
+	int8_t   MclkIndex;
+	uint8_t  PCIeGen;
+};
+
+typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
+
+struct SMU74_Discrete_DpmTable {
+
+	SMU74_PIDController                  GraphicsPIDController;
+	SMU74_PIDController                  MemoryPIDController;
+	SMU74_PIDController                  LinkPIDController;
+
+	uint32_t                            SystemFlags;
+
+	uint32_t                            VRConfig;
+	uint32_t                            SmioMask1;
+	uint32_t                            SmioMask2;
+	SMIO_Table                          SmioTable1;
+	SMIO_Table                          SmioTable2;
+
+	uint32_t                            MvddLevelCount;
+
+
+	uint8_t                             BapmVddcVidHiSidd[SMU74_MAX_LEVELS_VDDC];
+	uint8_t                             BapmVddcVidLoSidd[SMU74_MAX_LEVELS_VDDC];
+	uint8_t                             BapmVddcVidHiSidd2[SMU74_MAX_LEVELS_VDDC];
+
+	uint8_t                             GraphicsDpmLevelCount;
+	uint8_t                             MemoryDpmLevelCount;
+	uint8_t                             LinkLevelCount;
+	uint8_t                             MasterDeepSleepControl;
+
+	uint8_t                             UvdLevelCount;
+	uint8_t                             VceLevelCount;
+	uint8_t                             AcpLevelCount;
+	uint8_t                             SamuLevelCount;
+
+	uint8_t                             ThermOutGpio;
+	uint8_t                             ThermOutPolarity;
+	uint8_t                             ThermOutMode;
+	uint8_t                             BootPhases;
+	uint32_t                            Reserved[4];
+
+	SMU74_Discrete_GraphicsLevel        GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
+	SMU74_Discrete_MemoryLevel          MemoryACPILevel;
+	SMU74_Discrete_MemoryLevel          MemoryLevel[SMU74_MAX_LEVELS_MEMORY];
+	SMU74_Discrete_LinkLevel            LinkLevel[SMU74_MAX_LEVELS_LINK];
+	SMU74_Discrete_ACPILevel            ACPILevel;
+	SMU74_Discrete_UvdLevel             UvdLevel[SMU74_MAX_LEVELS_UVD];
+	SMU74_Discrete_ExtClkLevel          VceLevel[SMU74_MAX_LEVELS_VCE];
+	SMU74_Discrete_ExtClkLevel          AcpLevel[SMU74_MAX_LEVELS_ACP];
+	SMU74_Discrete_ExtClkLevel          SamuLevel[SMU74_MAX_LEVELS_SAMU];
+	SMU74_Discrete_Ulv                  Ulv;
+
+	uint8_t                             DisplayWatermark[SMU74_MAX_LEVELS_MEMORY][SMU74_MAX_LEVELS_GRAPHICS];
+
+	uint32_t                            SclkStepSize;
+	uint32_t                            Smio[SMU74_MAX_ENTRIES_SMIO];
+
+	uint8_t                             UvdBootLevel;
+	uint8_t                             VceBootLevel;
+	uint8_t                             AcpBootLevel;
+	uint8_t                             SamuBootLevel;
+
+	uint8_t                             GraphicsBootLevel;
+	uint8_t                             GraphicsVoltageChangeEnable;
+	uint8_t                             GraphicsThermThrottleEnable;
+	uint8_t                             GraphicsInterval;
+
+	uint8_t                             VoltageInterval;
+	uint8_t                             ThermalInterval;
+	uint16_t                            TemperatureLimitHigh;
+
+	uint16_t                            TemperatureLimitLow;
+	uint8_t                             MemoryBootLevel;
+	uint8_t                             MemoryVoltageChangeEnable;
+
+	uint16_t                            BootMVdd;
+	uint8_t                             MemoryInterval;
+	uint8_t                             MemoryThermThrottleEnable;
+
+	uint16_t                            VoltageResponseTime;
+	uint16_t                            PhaseResponseTime;
+
+	uint8_t                             PCIeBootLinkLevel;
+	uint8_t                             PCIeGenInterval;
+	uint8_t                             DTEInterval;
+	uint8_t                             DTEMode;
+
+	uint8_t                             SVI2Enable;
+	uint8_t                             VRHotGpio;
+	uint8_t                             AcDcGpio;
+	uint8_t                             ThermGpio;
+
+	uint16_t                            PPM_PkgPwrLimit;
+	uint16_t                            PPM_TemperatureLimit;
+
+	uint16_t                            DefaultTdp;
+	uint16_t                            TargetTdp;
+
+	uint16_t                            FpsHighThreshold;
+	uint16_t                            FpsLowThreshold;
+
+	uint16_t                            BAPMTI_R[SMU74_DTE_ITERATIONS][SMU74_DTE_SOURCES][SMU74_DTE_SINKS];
+	uint16_t                            BAPMTI_RC[SMU74_DTE_ITERATIONS][SMU74_DTE_SOURCES][SMU74_DTE_SINKS];
+
+	uint16_t                            TemperatureLimitEdge;
+	uint16_t                            TemperatureLimitHotspot;
+
+	uint16_t                            BootVddc;
+	uint16_t                            BootVddci;
+
+	uint16_t                            FanGainEdge;
+	uint16_t                            FanGainHotspot;
+
+	uint32_t                            LowSclkInterruptThreshold;
+	uint32_t                            VddGfxReChkWait;
+
+	uint8_t                             ClockStretcherAmount;
+	uint8_t                             Sclk_CKS_masterEn0_7;
+	uint8_t                             Sclk_CKS_masterEn8_15;
+	uint8_t                             DPMFreezeAndForced;
+
+	uint8_t                             Sclk_voltageOffset[8];
+
+	SMU_ClockStretcherDataTable         ClockStretcherDataTable;
+	SMU_CKS_LOOKUPTable                 CKS_LOOKUPTable;
+
+	uint32_t                            CurrSclkPllRange;
+	sclkFcwRange_t                      SclkFcwRangeTable[NUM_SCLK_RANGE];
+};
+
+typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
+
+
+struct SMU74_Discrete_FanTable {
+	uint16_t FdoMode;
+	int16_t  TempMin;
+	int16_t  TempMed;
+	int16_t  TempMax;
+	int16_t  Slope1;
+	int16_t  Slope2;
+	int16_t  FdoMin;
+	int16_t  HystUp;
+	int16_t  HystDown;
+	int16_t  HystSlope;
+	int16_t  TempRespLim;
+	int16_t  TempCurr;
+	int16_t  SlopeCurr;
+	int16_t  PwmCurr;
+	uint32_t RefreshPeriod;
+	int16_t  FdoMax;
+	uint8_t  TempSrc;
+	int8_t   Padding;
+};
+
+typedef struct SMU74_Discrete_FanTable SMU74_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG             4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT         (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+
+struct SMU7_MclkDpmScoreboard {
+	uint32_t PercentageBusy;
+
+	int32_t  PIDError;
+	int32_t  PIDIntegral;
+	int32_t  PIDOutput;
+
+	uint32_t SigmaDeltaAccum;
+	uint32_t SigmaDeltaOutput;
+	uint32_t SigmaDeltaLevel;
+
+	uint32_t UtilizationSetpoint;
+
+	uint8_t  TdpClampMode;
+	uint8_t  TdcClampMode;
+	uint8_t  ThermClampMode;
+	uint8_t  VoltageBusy;
+
+	int8_t   CurrLevel;
+	int8_t   TargLevel;
+	uint8_t  LevelChangeInProgress;
+	uint8_t  UpHyst;
+
+	uint8_t  DownHyst;
+	uint8_t  VoltageDownHyst;
+	uint8_t  DpmEnable;
+	uint8_t  DpmRunning;
+
+	uint8_t  DpmForce;
+	uint8_t  DpmForceLevel;
+	uint8_t  padding2;
+	uint8_t  McArbIndex;
+
+	uint32_t MinimumPerfMclk;
+
+	uint8_t  AcpiReq;
+	uint8_t  AcpiAck;
+	uint8_t  MclkSwitchInProgress;
+	uint8_t  MclkSwitchCritical;
+
+	uint8_t  IgnoreVBlank;
+	uint8_t  TargetMclkIndex;
+	uint16_t VbiFailureCount;
+	uint8_t  VbiWaitCounter;
+	uint8_t  EnabledLevelsChange;
+
+	uint16_t LevelResidencyCounters[SMU74_MAX_LEVELS_MEMORY];
+	uint16_t LevelSwitchCounters[SMU74_MAX_LEVELS_MEMORY];
+
+	void     (*TargetStateCalculator)(uint8_t);
+	void     (*SavedTargetStateCalculator)(uint8_t);
+
+	uint16_t AutoDpmInterval;
+	uint16_t AutoDpmRange;
+
+	uint16_t VbiTimeoutCount;
+	uint16_t MclkSwitchingTime;
+
+	uint8_t  fastSwitch;
+	uint8_t  Save_PIC_VDDGFX_EXIT;
+	uint8_t  Save_PIC_VDDGFX_ENTER;
+	uint8_t  padding;
+};
+
+typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
+
+struct SMU7_UlvScoreboard {
+	uint8_t     EnterUlv;
+	uint8_t     ExitUlv;
+	uint8_t     UlvActive;
+	uint8_t     WaitingForUlv;
+	uint8_t     UlvEnable;
+	uint8_t     UlvRunning;
+	uint8_t     UlvMasterEnable;
+	uint8_t     padding;
+	uint32_t    UlvAbortedCount;
+	uint32_t    UlvTimeStamp;
+};
+
+typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
+
+struct VddgfxSavedRegisters {
+	uint32_t GPU_DBG[3];
+	uint32_t MEC_BaseAddress_Hi;
+	uint32_t MEC_BaseAddress_Lo;
+	uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
+	uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
+	uint32_t CP_INT_CNTL;
+};
+
+typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
+
+struct SMU7_VddGfxScoreboard {
+	uint8_t     VddGfxEnable;
+	uint8_t     VddGfxActive;
+	uint8_t     VPUResetOccured;
+	uint8_t     padding;
+
+	uint32_t    VddGfxEnteredCount;
+	uint32_t    VddGfxAbortedCount;
+
+	uint32_t    VddGfxVid;
+
+	VddgfxSavedRegisters SavedRegisters;
+};
+
+typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
+
+struct SMU7_TdcLimitScoreboard {
+	uint8_t  Enable;
+	uint8_t  Running;
+	uint16_t Alpha;
+	uint32_t FilteredIddc;
+	uint32_t IddcLimit;
+	uint32_t IddcHyst;
+	SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
+
+struct SMU7_PkgPwrLimitScoreboard {
+	uint8_t  Enable;
+	uint8_t  Running;
+	uint16_t Alpha;
+	uint32_t FilteredPkgPwr;
+	uint32_t Limit;
+	uint32_t Hyst;
+	uint32_t LimitFromDriver;
+	SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
+
+struct SMU7_BapmScoreboard {
+	uint32_t source_powers[SMU74_DTE_SOURCES];
+	uint32_t source_powers_last[SMU74_DTE_SOURCES];
+	int32_t entity_temperatures[SMU74_NUM_GPU_TES];
+	int32_t initial_entity_temperatures[SMU74_NUM_GPU_TES];
+	int32_t Limit;
+	int32_t Hyst;
+	int32_t therm_influence_coeff_table[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS * 2];
+	int32_t therm_node_table[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+	uint16_t ConfigTDPPowerScalar;
+	uint16_t FanSpeedPowerScalar;
+	uint16_t OverDrivePowerScalar;
+	uint16_t OverDriveLimitScalar;
+	uint16_t FinalPowerScalar;
+	uint8_t VariantID;
+	uint8_t spare997;
+
+	SMU7_HystController_Data HystControllerData;
+
+	int32_t temperature_gradient_slope;
+	int32_t temperature_gradient;
+	uint32_t measured_temperature;
+};
+
+
+typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
+
+struct SMU7_AcpiScoreboard {
+	uint32_t SavedInterruptMask[2];
+	uint8_t LastACPIRequest;
+	uint8_t CgBifResp;
+	uint8_t RequestType;
+	uint8_t Padding;
+	SMU74_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
+
+struct SMU_QuadraticCoeffs {
+	int32_t m1;
+	uint32_t b;
+
+	int16_t m2;
+	uint8_t m1_shift;
+	uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
+struct SMU74_Discrete_PmFuses {
+	uint8_t BapmVddCVidHiSidd[8];
+	uint8_t BapmVddCVidLoSidd[8];
+	uint8_t VddCVid[8];
+	uint8_t SviLoadLineEn;
+	uint8_t SviLoadLineVddC;
+	uint8_t SviLoadLineTrimVddC;
+	uint8_t SviLoadLineOffsetVddC;
+	uint16_t TDC_VDDC_PkgLimit;
+	uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t TDC_MAWt;
+	uint8_t TdcWaterfallCtl;
+	uint8_t LPMLTemperatureMin;
+	uint8_t LPMLTemperatureMax;
+	uint8_t Reserved;
+
+	uint8_t LPMLTemperatureScaler[16];
+
+	int16_t FuzzyFan_ErrorSetDelta;
+	int16_t FuzzyFan_ErrorRateSetDelta;
+	int16_t FuzzyFan_PwmSetDelta;
+	uint16_t Reserved6;
+
+	uint8_t GnbLPML[16];
+
+	uint8_t GnbLPMLMaxVid;
+	uint8_t GnbLPMLMinVid;
+	uint8_t Reserved1[2];
+
+	uint16_t BapmVddCBaseLeakageHiSidd;
+	uint16_t BapmVddCBaseLeakageLoSidd;
+
+	uint16_t  VFT_Temp[3];
+	uint16_t  padding;
+
+	SMU_QuadraticCoeffs VFT_ATE[3];
+
+	SMU_QuadraticCoeffs AVFS_GB;
+	SMU_QuadraticCoeffs ATE_ACBTC_GB;
+
+	SMU_QuadraticCoeffs P2V;
+
+	uint32_t PsmCharzFreq;
+
+	uint16_t InversionVoltage;
+	uint16_t PsmCharzTemp;
+
+	uint32_t EnabledAvfsModules;
+};
+
+typedef struct SMU74_Discrete_PmFuses SMU74_Discrete_PmFuses;
+
+struct SMU7_Discrete_Log_Header_Table {
+	uint32_t    version;
+	uint32_t    asic_id;
+	uint16_t    flags;
+	uint16_t    entry_size;
+	uint32_t    total_size;
+	uint32_t    num_of_entries;
+	uint8_t     type;
+	uint8_t     mode;
+	uint8_t     filler_0[2];
+	uint32_t    filler_1[2];
+};
+
+typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
+
+struct SMU7_Discrete_Log_Cntl {
+	uint8_t             Enabled;
+	uint8_t             Type;
+	uint8_t             padding[2];
+	uint32_t            BufferSize;
+	uint32_t            SamplesLogged;
+	uint32_t            SampleSize;
+	uint32_t            AddrL;
+	uint32_t            AddrH;
+};
+
+typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+#define CAC_ACC_NW_NUM_OF_SIGNALS 87
+#endif
+
+
+struct SMU7_Discrete_Cac_Collection_Table {
+	uint32_t temperature;
+	uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+};
+
+typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
+
+struct SMU7_Discrete_Cac_Verification_Table {
+	uint32_t VddcTotalPower;
+	uint32_t VddcLeakagePower;
+	uint32_t VddcConstantPower;
+	uint32_t VddcGfxDynamicPower;
+	uint32_t VddcUvdDynamicPower;
+	uint32_t VddcVceDynamicPower;
+	uint32_t VddcAcpDynamicPower;
+	uint32_t VddcPcieDynamicPower;
+	uint32_t VddcDceDynamicPower;
+	uint32_t VddcCurrent;
+	uint32_t VddcVoltage;
+	uint32_t VddciTotalPower;
+	uint32_t VddciLeakagePower;
+	uint32_t VddciConstantPower;
+	uint32_t VddciDynamicPower;
+	uint32_t Vddr1TotalPower;
+	uint32_t Vddr1LeakagePower;
+	uint32_t Vddr1ConstantPower;
+	uint32_t Vddr1DynamicPower;
+	uint32_t spare[4];
+	uint32_t temperature;
+};
+
+typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
+
+struct SMU7_Discrete_Pm_Status_Table {
+	int32_t T_meas_max;
+	int32_t T_meas_acc;
+	int32_t T_calc_max;
+	int32_t T_calc_acc;
+	uint32_t P_scalar_acc;
+	uint32_t P_calc_max;
+	uint32_t P_calc_acc;
+
+	uint32_t I_calc_max;
+	uint32_t I_calc_acc;
+	uint32_t I_calc_acc_vddci;
+	uint32_t V_calc_noload_acc;
+	uint32_t V_calc_load_acc;
+	uint32_t V_calc_noload_acc_vddci;
+	uint32_t P_meas_acc;
+	uint32_t V_meas_noload_acc;
+	uint32_t V_meas_load_acc;
+	uint32_t I_meas_acc;
+	uint32_t P_meas_acc_vddci;
+	uint32_t V_meas_noload_acc_vddci;
+	uint32_t V_meas_load_acc_vddci;
+	uint32_t I_meas_acc_vddci;
+
+	uint16_t Sclk_dpm_residency[8];
+	uint16_t Uvd_dpm_residency[8];
+	uint16_t Vce_dpm_residency[8];
+	uint16_t Mclk_dpm_residency[4];
+
+	uint32_t P_vddci_acc;
+	uint32_t P_vddr1_acc;
+	uint32_t P_nte1_acc;
+	uint32_t PkgPwr_max;
+	uint32_t PkgPwr_acc;
+	uint32_t MclkSwitchingTime_max;
+	uint32_t MclkSwitchingTime_acc;
+	uint32_t FanPwm_acc;
+	uint32_t FanRpm_acc;
+
+	uint32_t AccCnt;
+};
+
+typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
+
+#define SMU7_MAX_GFX_CU_COUNT 16
+
+struct SMU7_GfxCuPgScoreboard {
+	uint8_t Enabled;
+	uint8_t WaterfallUp;
+	uint8_t WaterfallDown;
+	uint8_t WaterfallLimit;
+	uint8_t CurrMaxCu;
+	uint8_t TargMaxCu;
+	uint8_t ClampMode;
+	uint8_t Active;
+	uint8_t MaxSupportedCu;
+	uint8_t MinSupportedCu;
+	uint8_t PendingGfxCuHostInterrupt;
+	uint8_t LastFilteredMaxCuInteger;
+	uint16_t FilteredMaxCu;
+	uint16_t FilteredMaxCuAlpha;
+	uint16_t FilterResetCount;
+	uint16_t FilterResetCountLimit;
+	uint8_t ForceCu;
+	uint8_t ForceCuCount;
+	uint8_t spare[2];
+};
+
+typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
+
+#define SMU7_SCLK_CAC 0x561
+#define SMU7_MCLK_CAC 0xF9
+#define SMU7_VCLK_CAC 0x2DE
+#define SMU7_DCLK_CAC 0x2DE
+#define SMU7_ECLK_CAC 0x25E
+#define SMU7_ACLK_CAC 0x25E
+#define SMU7_SAMCLK_CAC 0x25E
+#define SMU7_DISPCLK_CAC 0x100
+#define SMU7_CAC_CONSTANT 0x2EE3430
+#define SMU7_CAC_CONSTANT_SHIFT 18
+
+#define SMU7_VDDCI_MCLK_CONST        1765
+#define SMU7_VDDCI_MCLK_CONST_SHIFT  16
+#define SMU7_VDDCI_VDDCI_CONST       50958
+#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
+#define SMU7_VDDCI_CONST             11781
+#define SMU7_VDDCI_STROBE_PWR        1331
+
+#define SMU7_VDDR1_CONST            693
+#define SMU7_VDDR1_CAC_WEIGHT       20
+#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
+#define SMU7_VDDR1_STROBE_PWR       512
+
+#define SMU7_AREA_COEFF_UVD 0xA78
+#define SMU7_AREA_COEFF_VCE 0x190A
+#define SMU7_AREA_COEFF_ACP 0x22D1
+#define SMU7_AREA_COEFF_SAMU 0x534
+
+#define SMU7_THERM_OUT_MODE_DISABLE       0x0
+#define SMU7_THERM_OUT_MODE_THERM_ONLY    0x1
+#define SMU7_THERM_OUT_MODE_THERM_VRHOT   0x2
+
+// DIDT Defines
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
+
+#pragma pack(pop)
+
+
+#endif
+
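The DIDT defines above pack the per-block (SQ, TCP, TD, DB) Enable/IR/PCC/EDC controls into a single 32-bit word as mask/shift pairs. As a rough sketch only (not part of this patch; the helper names are hypothetical and assume <stdint.h>, <stdbool.h> and the defines above), field access could look like:

#include <stdint.h>
#include <stdbool.h>

/* Test the SQ EDC enable bit using the SQ_EDC mask/shift pair defined above. */
static inline bool didt_sq_edc_enabled(uint32_t didt_ctrl)
{
	return (didt_ctrl & SQ_EDC_MASK) >> SQ_EDC_SHIFT;
}

/* Clear, then conditionally set, the TD PCC bit (mask 0x40000, shift 18). */
static inline uint32_t didt_set_td_pcc(uint32_t didt_ctrl, bool enable)
{
	didt_ctrl &= ~TD_PCC_MASK;
	didt_ctrl |= ((uint32_t)enable << TD_PCC_SHIFT) & TD_PCC_MASK;
	return didt_ctrl;
}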
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
index f8ba071..eb0f79f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 // CZ Ucode Loading Definitions
 #ifndef SMU_UCODE_XFER_CZ_H
 #define SMU_UCODE_XFER_CZ_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
index c24a81e..880152c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
@@ -44,6 +44,7 @@
 #define UCODE_ID_IH_REG_RESTORE   11
 #define UCODE_ID_VBIOS            12
 #define UCODE_ID_MISC_METADATA    13
+#define UCODE_ID_SMU_SK		      14
 #define UCODE_ID_RLC_SCRATCH      32
 #define UCODE_ID_RLC_SRM_ARAM     33
 #define UCODE_ID_RLC_SRM_DRAM     34
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 6c4ef13..f10fb64 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index ec222c6..87c023e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -39,7 +39,7 @@
 
 #define SIZE_ALIGN_32(x)    (((x) + 31) / 32 * 32)
 
-static enum cz_scratch_entry firmware_list[] = {
+static const enum cz_scratch_entry firmware_list[] = {
 	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
 	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
 	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
@@ -639,7 +639,7 @@
 
 	cz_smu->driver_buffer_length = 0;
 
-	for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
+	for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
 
 		firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
 					firmware_list[i]);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index cdbb9f8..8e52a2e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -44,7 +44,7 @@
 
 #define FIJI_SMC_SIZE 0x20000
 
-struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
+static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
 		/*  Min        Sclk       pcie     DeepSleep Activity  CgSpll      CgSpll    spllSpread  SpllSpread   CcPwr  CcPwr  Sclk   Display     Enabled     Enabled                       Voltage    Power */
 		/* Voltage,  Frequency,  DpmLevel,  DivId,    Level,  FuncCntl3,  FuncCntl4,  Spectrum,   Spectrum2,  DynRm, DynRm1  Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
 		{ 0x3c0fd047, 0x30750000,   0x00,     0x03,   0x1e00, 0x00200410, 0x87020000, 0x21680000, 0x0c000000,   0,      0,   0x16,   0x00,       0x01,        0x01,      0x00,   0x00,      0x00,     0x00 },
@@ -189,7 +189,7 @@
 
 int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
 {
-	static unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
+	static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
 
 	fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
 
@@ -665,7 +665,7 @@
 {
 	int i, result = -1;
 	uint32_t reg, data;
-	PWR_Command_Table *virus = PwrVirusTable;
+	const PWR_Command_Table *virus = PwrVirusTable;
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
 	priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS;
@@ -1006,10 +1006,16 @@
 
 static int fiji_smu_fini(struct pp_smumgr *smumgr)
 {
+	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
 	if (smumgr->backend) {
 		kfree(smumgr->backend);
 		smumgr->backend = NULL;
 	}
+
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
new file mode 100644
index 0000000..043b6ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -0,0 +1,985 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "smu74.h"
+#include "smu_ucode_xfer_vi.h"
+#include "polaris10_smumgr.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "polaris10_pwrvirus.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_SCALE 4
+
+/* Microcode file is stored in this buffer */
+#define BUFFER_SIZE                 80000
+#define MAX_STRING_SIZE             15
+#define BUFFER_SIZETWO              131072  /* 128 *1024 */
+
+#define SMC_RAM_END 0x40000
+
+static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
+	/*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
+	/* Voltage, DpmLevel, DivId,  Level,  FuncCntl3,  FuncCntl4,  DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
+	{ 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } },
+	{ 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } }
+};
+
+static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
+	{0x50140000, 0x50140000, 0x00320000, 0x00, 0x00,
+	 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
+
+/**
+* Set the address for reading/writing the SMC SRAM space.
+* @param    smumgr  the address of the powerplay hardware manager.
+* @param    smc_addr the address in the SMC RAM to access.
+*/
+static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+{
+	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
+	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+	return 0;
+}
+
+/**
+* Copy bytes from SMC RAM space into driver memory.
+*
+* @param    smumgr  the address of the powerplay SMU manager.
+* @param    smc_start_address the start address in the SMC RAM to copy bytes from
+* @param    dest the destination buffer to copy the bytes to.
+* @param    byte_count the number of bytes to copy.
+*/
+int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+{
+	uint32_t data;
+	uint32_t addr;
+	uint8_t *dest_byte;
+	uint8_t i, data_byte[4] = {0};
+	uint32_t *pdata = (uint32_t *)&data_byte;
+
+	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
+	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
+
+	addr = smc_start_address;
+
+	while (byte_count >= 4) {
+		polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
+
+		*dest = PP_SMC_TO_HOST_UL(data);
+
+		dest += 1;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (byte_count) {
+		polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
+		*pdata = PP_SMC_TO_HOST_UL(data);
+	/* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
+		dest_byte = (uint8_t *)dest;
+		for (i = 0; i < byte_count; i++)
+			dest_byte[i] = data_byte[i];
+	}
+
+	return 0;
+}
+
+/**
+* Copy bytes from an array into the SMC RAM space.
+*
+* @param    smumgr  the address of the powerplay SMU manager.
+* @param    smc_start_address the start address in the SMC RAM to copy bytes to.
+* @param    src the byte array to copy the bytes from.
+* @param    byte_count the number of bytes to copy.
+*/
+int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+				const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+	int result;
+	uint32_t data = 0;
+	uint32_t original_data;
+	uint32_t addr = 0;
+	uint32_t extra_shift;
+
+	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
+	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
+
+	addr = smc_start_address;
+
+	while (byte_count >= 4) {
+		/* Bytes are written into the SMC address space with the MSB first. */
+		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+		result = polaris10_set_smc_sram_address(smumgr, addr, limit);
+
+		if (0 != result)
+			return result;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (0 != byte_count) {
+
+		data = 0;
+
+		result = polaris10_set_smc_sram_address(smumgr, addr, limit);
+
+		if (0 != result)
+			return result;
+
+
+		original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			/* Bytes are written into the SMC address space with the MSB first. */
+			data = (0x100 * data) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		result = polaris10_set_smc_sram_address(smumgr, addr, limit);
+
+		if (0 != result)
+			return result;
+
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+	}
+
+	return 0;
+}
+
+
+static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+	polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+
+	return 0;
+}
+
+/**
+* Return whether the SMC is currently running.
+*
+* @param    smumgr  the address of the powerplay hardware manager.
+*/
+bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+	return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+	&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+/**
+* Send a message to the SMC, and wait for its response.
+*
+* @param    smumgr  the address of the powerplay hardware manager.
+* @param    msg the message to send.
+* @return   The response that came from the SMC.
+*/
+int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	if (!polaris10_is_smc_ram_running(smumgr))
+		return -1;
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+		printk("Failed to send Previous Message.\n");
+
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+		printk("Failed to send Message.\n");
+
+	return 0;
+}
+
+
+/**
+* Send a message to the SMC, and do not wait for its response.
+*
+* @param    smumgr  the address of the powerplay hardware manager.
+* @param    msg the message to send.
+* @return   Always return 0.
+*/
+int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+{
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
+/**
+* Send a message to the SMC with parameter
+*
+* @param    smumgr:  the address of the powerplay hardware manager.
+* @param    msg: the message to send.
+* @param    parameter: the parameter to send
+* @return   The response that came from the SMC.
+*/
+int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+	if (!polaris10_is_smc_ram_running(smumgr)) {
+		return -1;
+	}
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+	return polaris10_send_msg_to_smc(smumgr, msg);
+}
+
+
+/**
+* Send a message to the SMC with parameter, do not wait for response
+*
+* @param    smumgr:  the address of the powerplay hardware manager.
+* @param    msg: the message to send.
+* @param    parameter: the parameter to send
+* @return   The response that came from the SMC.
+*/
+int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+	return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
+}
+
+int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+{
+	cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+
+	cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+	SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+	if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+		printk("Failed to send Message.\n");
+
+	return 0;
+}
+
+/**
+* Wait until the SMC is doing nothing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
+*
+* @param    smumgr  the address of the powerplay hardware manager.
+* @return   0 once the SMC has reached the idle state,
+*           -1 if the SMC is not running at all.
+*/
+int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+{
+	/* If the SMC is not even on, it qualifies as inactive. */
+	if (!polaris10_is_smc_ram_running(smumgr))
+		return -1;
+
+	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+	return 0;
+}
+
+
+/**
+* Upload the SMC firmware to the SMC microcontroller.
+*
+* @param    smumgr  the address of the powerplay hardware manager.
+* @param    src the firmware image data to upload; length gives its size in bytes.
+*/
+static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+{
+	uint32_t byte_count = length;
+
+	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
+
+	cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+
+	for (; byte_count >= 4; byte_count -= 4)
+		cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+
+	SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -1);
+
+	return 0;
+}
+
+static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case UCODE_ID_SMU:
+		result = CGS_UCODE_ID_SMU;
+		break;
+	case UCODE_ID_SMU_SK:
+		result = CGS_UCODE_ID_SMU_SK;
+		break;
+	case UCODE_ID_SDMA0:
+		result = CGS_UCODE_ID_SDMA0;
+		break;
+	case UCODE_ID_SDMA1:
+		result = CGS_UCODE_ID_SDMA1;
+		break;
+	case UCODE_ID_CP_CE:
+		result = CGS_UCODE_ID_CP_CE;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = CGS_UCODE_ID_CP_PFP;
+		break;
+	case UCODE_ID_CP_ME:
+		result = CGS_UCODE_ID_CP_ME;
+		break;
+	case UCODE_ID_CP_MEC:
+		result = CGS_UCODE_ID_CP_MEC;
+		break;
+	case UCODE_ID_CP_MEC_JT1:
+		result = CGS_UCODE_ID_CP_MEC_JT1;
+		break;
+	case UCODE_ID_CP_MEC_JT2:
+		result = CGS_UCODE_ID_CP_MEC_JT2;
+		break;
+	case UCODE_ID_RLC_G:
+		result = CGS_UCODE_ID_RLC_G;
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+{
+	int result = 0;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	struct cgs_firmware_info info = {0};
+
+	if (smu_data->security_hard_key == 1)
+		cgs_get_firmware_info(smumgr->device,
+			polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+	else
+		cgs_get_firmware_info(smumgr->device,
+			polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+
+	/* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
+	result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
+
+	return result;
+}
+
+/**
+* Read a 32bit value from the SMC SRAM space.
+* ALL PARAMETERS ARE IN HOST BYTE ORDER.
+* @param    smumgr  the address of the powerplay hardware manager.
+* @param    smc_addr the address in the SMC RAM to access.
+* @param    value an output parameter for the data read from the SMC SRAM.
+*/
+int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+{
+	int result;
+
+	result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
+
+	if (result)
+		return result;
+
+	*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+	return 0;
+}
+
+/**
+* Write a 32bit value to the SMC SRAM space.
+* ALL PARAMETERS ARE IN HOST BYTE ORDER.
+* @param    smumgr  the address of the powerplay hardware manager.
+* @param    smc_addr the address in the SMC RAM to access.
+* @param    value the value to write to the SMC SRAM.
+*/
+int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+{
+	int result;
+
+	result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
+
+	if (result)
+		return result;
+
+	cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+
+	return 0;
+}
+
+
+int polaris10_smu_fini(struct pp_smumgr *smumgr)
+{
+	if (smumgr->backend) {
+		kfree(smumgr->backend);
+		smumgr->backend = NULL;
+	}
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+	return 0;
+}
+
+/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
+static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
+{
+	uint32_t result = 0;
+
+	switch (fw_type) {
+	case UCODE_ID_SDMA0:
+		result = UCODE_ID_SDMA0_MASK;
+		break;
+	case UCODE_ID_SDMA1:
+		result = UCODE_ID_SDMA1_MASK;
+		break;
+	case UCODE_ID_CP_CE:
+		result = UCODE_ID_CP_CE_MASK;
+		break;
+	case UCODE_ID_CP_PFP:
+		result = UCODE_ID_CP_PFP_MASK;
+		break;
+	case UCODE_ID_CP_ME:
+		result = UCODE_ID_CP_ME_MASK;
+		break;
+	case UCODE_ID_CP_MEC_JT1:
+	case UCODE_ID_CP_MEC_JT2:
+		result = UCODE_ID_CP_MEC_MASK;
+		break;
+	case UCODE_ID_RLC_G:
+		result = UCODE_ID_RLC_G_MASK;
+		break;
+	default:
+		printk("UCode type is out of range!\n");
+		result = 0;
+	}
+
+	return result;
+}
+
+/* Populate one firmware image into the data structure */
+
+static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+						uint32_t fw_type,
+						struct SMU_Entry *entry)
+{
+	int result = 0;
+	struct cgs_firmware_info info = {0};
+
+	result = cgs_get_firmware_info(smumgr->device,
+				polaris10_convert_fw_type_to_cgs(fw_type),
+				&info);
+
+	if (!result) {
+		entry->version = info.version;
+		entry->id = (uint16_t)fw_type;
+		entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+		entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+		entry->meta_data_addr_high = 0;
+		entry->meta_data_addr_low = 0;
+		entry->data_size_byte = info.image_size;
+		entry->num_register_entries = 0;
+	}
+
+	if (fw_type == UCODE_ID_RLC_G)
+		entry->flags = 1;
+	else
+		entry->flags = 0;
+
+	return 0;
+}
+
+static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	uint32_t fw_to_load;
+
+	int result = 0;
+	struct SMU_DRAMData_TOC *toc;
+
+	if (!smumgr->reload_fw) {
+		printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+		return 0;
+	}
+
+	if (smu_data->soft_regs_start)
+		cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+					smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
+					0x0);
+
+	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
+	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+
+	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+	toc->num_entries = 0;
+	toc->structure_version = 1;
+
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+	PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+
+	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+	polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+
+	fw_to_load = UCODE_ID_RLC_G_MASK
+		   + UCODE_ID_SDMA0_MASK
+		   + UCODE_ID_SDMA1_MASK
+		   + UCODE_ID_CP_CE_MASK
+		   + UCODE_ID_CP_ME_MASK
+		   + UCODE_ID_CP_PFP_MASK
+		   + UCODE_ID_CP_MEC_MASK;
+
+	if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+		printk(KERN_ERR "Failed to request SMU to load uCode");
+
+	return result;
+}
+
+/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
+static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
+	uint32_t ret;
+	/* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
+	ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
+					smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
+					fw_mask, fw_mask);
+
+	return ret;
+}
+
+static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
+{
+	return smumgr->smumgr_funcs->start_smu(smumgr);
+}
+
+static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
+{
+	int i;
+	int result = -1;
+	uint32_t reg, data;
+
+	const PWR_Command_Table *pvirus = pwr_virus_table;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+
+	for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
+		switch (pvirus->command) {
+		case PwrCmdWrite:
+			reg  = pvirus->reg;
+			data = pvirus->data;
+			cgs_write_register(smumgr->device, reg, data);
+			break;
+
+		case PwrCmdEnd:
+			result = 0;
+			break;
+
+		default:
+			printk("Table Exit with Invalid Command!");
+			smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+			result = -1;
+			break;
+		}
+		pvirus++;
+	}
+
+	return result;
+}
+
+static int polaris10_perform_btc(struct pp_smumgr *smumgr)
+{
+	int result = 0;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	if (0 != smu_data->avfs.avfs_btc_param) {
+		if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+			printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
+			result = -1;
+		}
+	}
+	if (smu_data->avfs.avfs_btc_param > 1) {
+		/* Soft-Reset to reset the engine before loading uCode */
+		/* halt */
+		cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+		/* reset everything */
+		cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+		cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+	}
+	return result;
+}
+
+
+int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+{
+	uint32_t vr_config;
+	uint32_t dpm_table_start;
+
+	uint16_t u16_boot_mvdd;
+	uint32_t graphics_level_address, vr_config_address, graphics_level_size;
+
+	graphics_level_size = sizeof(avfs_graphics_level_polaris10);
+	u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
+
+	PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
+				&dpm_table_start, 0x40000),
+			"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
+			return -1);
+
+	/*  Default value for VRConfig = VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI) */
+	vr_config = 0x01000500; /* Real value:0x50001 */
+
+	vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
+
+	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
+				(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
+			"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
+			return -1);
+
+	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+
+	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+				(uint8_t *)(&avfs_graphics_level_polaris10),
+				graphics_level_size, 0x40000),
+			"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
+			return -1);
+
+	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+
+	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+				(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
+				"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
+			return -1);
+
+	/* MVDD Boot value - necessary for getting rid of the hang that occurs during Mclk DPM enablement */
+
+	graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
+
+	PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+			(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
+			"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
+			return -1);
+
+	return 0;
+}
+
+int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+{
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	switch (smu_data->avfs.avfs_btc_status) {
+	case AVFS_BTC_COMPLETED_PREVIOUSLY:
+		break;
+
+	case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
+
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
+		PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
+		"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+		return -1);
+
+		if (smu_data->avfs.avfs_btc_param > 1) {
+			printk("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting.");
+			smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+			PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr),
+			"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
+			return -1);
+		}
+
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
+		PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
+					"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
+				 return -1);
+
+		break;
+
+	case AVFS_BTC_DISABLED:
+	case AVFS_BTC_NOTSUPPORTED:
+		break;
+
+	default:
+		printk("[AVFS] Something is broken. See log!");
+		break;
+	}
+
+	return 0;
+}
+
+static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+{
+	int result = 0;
+
+	/* Wait for smc boot up */
+	/* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+
+	/* Assert reset */
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+	result = polaris10_upload_smu_firmware_image(smumgr);
+	if (result != 0)
+		return result;
+
+	/* Clear status */
+	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+	/* De-assert reset */
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+
+	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+
+
+	/* Call Test SMU message with 0x20000 offset to trigger SMU start */
+	polaris10_send_msg_to_smc_offset(smumgr);
+
+	/* Wait done bit to be set */
+	/* Check pass/failed indicator */
+
+	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+
+	if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+						SMU_STATUS, SMU_PASS))
+		PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
+
+	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	/* Wait for firmware to initialize */
+	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+	return result;
+}
+
+static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+{
+	int result = 0;
+
+	/* wait for smc boot up */
+	SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+
+	/* Clear firmware interrupt enable flag */
+	/* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+	cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+				ixFIRMWARE_FLAGS, 0);
+
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL,
+					rst_reg, 1);
+
+	result = polaris10_upload_smu_firmware_image(smumgr);
+	if (result != 0)
+		return result;
+
+	/* Set SMC instruction start point at 0x0 */
+	polaris10_program_jump_on_start(smumgr);
+
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+	SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	/* Wait for firmware to initialize */
+
+	SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+					FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+	return result;
+}
+
+static int polaris10_start_smu(struct pp_smumgr *smumgr)
+{
+	int result = 0;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	bool SMU_VFT_INTACT;
+
+	/* Only start SMC if SMC RAM is not running */
+	if (!polaris10_is_smc_ram_running(smumgr)) {
+		SMU_VFT_INTACT = false;
+		smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+		smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+
+		/* Check if SMU is running in protected mode */
+		if (smu_data->protected_mode == 0) {
+			result = polaris10_start_smu_in_non_protection_mode(smumgr);
+		} else {
+			result = polaris10_start_smu_in_protection_mode(smumgr);
+
+			/* If failed, try with different security Key. */
+			if (result != 0) {
+				smu_data->security_hard_key ^= 1;
+				result = polaris10_start_smu_in_protection_mode(smumgr);
+			}
+		}
+
+		if (result != 0)
+			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
+
+		polaris10_avfs_event_mgr(smumgr, true);
+	} else
+		SMU_VFT_INTACT = true; /* Driver went offline but SMU was still alive and contains the VFT table */
+
+	smu_data->post_initial_boot = true;
+	polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
+	/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
+	polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+					&(smu_data->soft_regs_start), 0x40000);
+
+	result = polaris10_request_smu_load_fw(smumgr);
+
+	return result;
+}
+
+static int polaris10_smu_init(struct pp_smumgr *smumgr)
+{
+	struct polaris10_smumgr *smu_data;
+	uint8_t *internal_buf;
+	uint64_t mc_addr = 0;
+	/* Allocate memory for backend private data */
+	smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	smu_data->header_buffer.data_size =
+		((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+	smu_data->smu_buffer.data_size = 200*4096;
+	smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+/* Allocate FW image data structure and header buffer and
+ * send the header buffer address to SMU */
+	smu_allocate_memory(smumgr->device,
+		smu_data->header_buffer.data_size,
+		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+		PAGE_SIZE,
+		&mc_addr,
+		&smu_data->header_buffer.kaddr,
+		&smu_data->header_buffer.handle);
+
+	smu_data->header = smu_data->header_buffer.kaddr;
+	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
+		"Out of memory.",
+		kfree(smumgr->backend);
+		cgs_free_gpu_mem(smumgr->device,
+		(cgs_handle_t)smu_data->header_buffer.handle);
+		return -1);
+
+/* Allocate the SMU internal buffer and send its address to the SMU.
+ * Iceland SMU does not need an internal buffer. */
+	smu_allocate_memory(smumgr->device,
+		smu_data->smu_buffer.data_size,
+		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+		PAGE_SIZE,
+		&mc_addr,
+		&smu_data->smu_buffer.kaddr,
+		&smu_data->smu_buffer.handle);
+
+	internal_buf = smu_data->smu_buffer.kaddr;
+	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+	PP_ASSERT_WITH_CODE((NULL != internal_buf),
+		"Out of memory.",
+		kfree(smumgr->backend);
+		cgs_free_gpu_mem(smumgr->device,
+		(cgs_handle_t)smu_data->smu_buffer.handle);
+		return -1;);
+
+	return 0;
+}
+
+static const struct pp_smumgr_func ellsemere_smu_funcs = {
+	.smu_init = polaris10_smu_init,
+	.smu_fini = polaris10_smu_fini,
+	.start_smu = polaris10_start_smu,
+	.check_fw_load_finish = polaris10_check_fw_load_finish,
+	.request_smu_load_fw = polaris10_reload_firmware,
+	.request_smu_load_specific_fw = NULL,
+	.send_msg_to_smc = polaris10_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
+	.download_pptable_settings = NULL,
+	.upload_pptable_settings = NULL,
+};
+
+int polaris10_smum_init(struct pp_smumgr *smumgr)
+{
+	struct polaris10_smumgr *polaris10_smu = NULL;
+
+	polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
+
+	if (polaris10_smu == NULL)
+		return -1;
+
+	smumgr->backend = polaris10_smu;
+	smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+
+	return 0;
+}
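For reference, polaris10_copy_bytes_to_smc() above packs each 4-byte chunk MSB-first before writing it through mmSMC_IND_DATA_11, so the bytes {0x12, 0x34, 0x56, 0x78} land in SMC RAM as the 32-bit word 0x12345678. A minimal standalone sketch of just that packing step (illustrative only; pack_smc_word is a hypothetical name, not part of this patch):

#include <stdint.h>

/* Pack four bytes MSB-first, matching the byte order that
 * polaris10_copy_bytes_to_smc() uses for whole 32-bit words. */
static uint32_t pack_smc_word(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) |
	       ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |
	        (uint32_t)src[3];
}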
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
new file mode 100644
index 0000000..e5377ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _POLARIS10_SMUMANAGER_H
+#define _POLARIS10_SMUMANAGER_H
+
+#include <polaris10_ppsmc.h>
+#include <pp_endian.h>
+
+struct polaris10_avfs {
+	enum AVFS_BTC_STATUS avfs_btc_status;
+	uint32_t           avfs_btc_param;
+};
+
+struct polaris10_buffer_entry {
+	uint32_t data_size;
+	uint32_t mc_addr_low;
+	uint32_t mc_addr_high;
+	void *kaddr;
+	unsigned long  handle;
+};
+
+struct polaris10_smumgr {
+	uint8_t *header;
+	uint8_t *mec_image;
+	struct polaris10_buffer_entry smu_buffer;
+	struct polaris10_buffer_entry header_buffer;
+	uint32_t soft_regs_start;
+	uint8_t *read_rrm_straps;
+	uint32_t read_drm_straps_mc_address_high;
+	uint32_t read_drm_straps_mc_address_low;
+	uint32_t acpi_optimization;
+	bool post_initial_boot;
+	uint8_t protected_mode;
+	uint8_t security_hard_key;
+	struct polaris10_avfs  avfs;
+};
+
+
+int polaris10_smum_init(struct pp_smumgr *smumgr);
+
+int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
+int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
+int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+				const uint8_t *src, uint32_t byte_count, uint32_t limit);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 063ae71..0728c1e3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -30,6 +30,7 @@
 #include "cz_smumgr.h"
 #include "tonga_smumgr.h"
 #include "fiji_smumgr.h"
+#include "polaris10_smumgr.h"
 
 int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -62,6 +63,10 @@
 		case CHIP_FIJI:
 			fiji_smum_init(smumgr);
 			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+			polaris10_smum_init(smumgr);
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -76,6 +81,7 @@
 
 int smum_fini(struct pp_smumgr *smumgr)
 {
+	kfree(smumgr->device);
 	kfree(smumgr);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index ebdb43a..b22722e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -145,7 +145,7 @@
 
 int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
 {
-	static unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
+	static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
 
 	tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
 
@@ -328,10 +328,17 @@
 
 static int tonga_smu_fini(struct pp_smumgr *smumgr)
 {
+	struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+	smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+	smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
 	if (smumgr->backend != NULL) {
 		kfree(smumgr->backend);
 		smumgr->backend = NULL;
 	}
+
+	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a5ff945..c16248c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -319,6 +319,48 @@
 	return added;
 }
 
+static void amd_sched_free_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
+	schedule_work(&job->work_free_job);
+}
+
+/* job_finish is called after the hw fence has signaled, and
+ * the job has already been removed from the ring_mirror_list
+ */
+void amd_sched_job_finish(struct amd_sched_job *s_job)
+{
+	struct amd_sched_job *next;
+	struct amd_gpu_scheduler *sched = s_job->sched;
+
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+		if (cancel_delayed_work(&s_job->work_tdr))
+			amd_sched_job_put(s_job);
+
+		/* queue TDR for next job */
+		next = list_first_entry_or_null(&sched->ring_mirror_list,
+						struct amd_sched_job, node);
+
+		if (next) {
+			INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+			amd_sched_job_get(next);
+			schedule_delayed_work(&next->work_tdr, sched->timeout);
+		}
+	}
+}
+
+void amd_sched_job_begin(struct amd_sched_job *s_job)
+{
+	struct amd_gpu_scheduler *sched = s_job->sched;
+
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    list_first_entry_or_null(&sched->ring_mirror_list,
+				     struct amd_sched_job, node) == s_job) {
+		INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
+		amd_sched_job_get(s_job);
+		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+	}
+}
+
 /**
  * Submit a job to the job queue
  *
@@ -330,11 +372,39 @@
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
+	sched_job->use_sched = 1;
+	fence_add_callback(&sched_job->s_fence->base,
+					&sched_job->cb_free_job, amd_sched_free_job);
 	trace_amd_sched_job(sched_job);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
 
+/* init a sched_job with basic fields */
+int amd_sched_job_init(struct amd_sched_job *job,
+						struct amd_gpu_scheduler *sched,
+						struct amd_sched_entity *entity,
+						void (*timeout_cb)(struct work_struct *work),
+						void (*free_cb)(struct kref *refcount),
+						void *owner, struct fence **fence)
+{
+	INIT_LIST_HEAD(&job->node);
+	kref_init(&job->refcount);
+	job->sched = sched;
+	job->s_entity = entity;
+	job->s_fence = amd_sched_fence_create(entity, owner);
+	if (!job->s_fence)
+		return -ENOMEM;
+
+	job->s_fence->s_job = job;
+	job->timeout_callback = timeout_cb;
+	job->free_callback = free_cb;
+
+	if (fence)
+		*fence = &job->s_fence->base;
+	return 0;
+}
+
 /**
  * Return ture if we can push more jobs to the hw.
  */
@@ -383,47 +453,26 @@
 	unsigned long flags;
 
 	atomic_dec(&sched->hw_rq_count);
+
+	/* remove job from ring_mirror_list */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_del_init(&s_fence->s_job->node);
+	sched->ops->finish_job(s_fence->s_job);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 	amd_sched_fence_signal(s_fence);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-		cancel_delayed_work(&s_fence->dwork);
-		spin_lock_irqsave(&sched->fence_list_lock, flags);
-		list_del_init(&s_fence->list);
-		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-	}
+
 	trace_amd_sched_process_job(s_fence);
 	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
-static void amd_sched_fence_work_func(struct work_struct *work)
-{
-	struct amd_sched_fence *s_fence =
-		container_of(work, struct amd_sched_fence, dwork.work);
-	struct amd_gpu_scheduler *sched = s_fence->sched;
-	struct amd_sched_fence *entity, *tmp;
-	unsigned long flags;
-
-	DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
-
-	/* Clean all pending fences */
-	spin_lock_irqsave(&sched->fence_list_lock, flags);
-	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
-		DRM_ERROR("  fence no %d\n", entity->base.seqno);
-		cancel_delayed_work(&entity->dwork);
-		list_del_init(&entity->list);
-		fence_put(&entity->base);
-	}
-	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-}
-
 static int amd_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
 	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
 	int r, count;
 
-	spin_lock_init(&sched->fence_list_lock);
-	INIT_LIST_HEAD(&sched->fence_list);
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
@@ -431,7 +480,6 @@
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
 		struct fence *fence;
-		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
 			(entity = amd_sched_select_entity(sched)) ||
@@ -446,15 +494,8 @@
 
 		s_fence = sched_job->s_fence;
 
-		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
-			schedule_delayed_work(&s_fence->dwork, sched->timeout);
-			spin_lock_irqsave(&sched->fence_list_lock, flags);
-			list_add_tail(&s_fence->list, &sched->fence_list);
-			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-		}
-
 		atomic_inc(&sched->hw_rq_count);
+		amd_sched_job_pre_schedule(sched, sched_job);
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
@@ -489,7 +530,7 @@
  * Return 0 on success, otherwise error code.
 */
 int amd_sched_init(struct amd_gpu_scheduler *sched,
-		   struct amd_sched_backend_ops *ops,
+		   const struct amd_sched_backend_ops *ops,
 		   unsigned hw_submission, long timeout, const char *name)
 {
 	int i;
@@ -502,6 +543,8 @@
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
+	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
 		sched_fence_slab = kmem_cache_create(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 9403145..070095a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -37,7 +37,7 @@
 
 /**
  * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their 
+ * of other entities. Entities take turns emitting jobs from their
  * job queues to corresponding hardware ring based on scheduling
  * policy.
 */
@@ -74,14 +74,21 @@
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void                            *owner;
-	struct delayed_work		dwork;
-	struct list_head		list;
+	struct amd_sched_job	*s_job;
 };
 
 struct amd_sched_job {
+	struct kref refcount;
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
+	bool	use_sched;	/* true if the job goes to scheduler */
+	struct fence_cb                cb_free_job;
+	struct work_struct             work_free_job;
+	struct list_head		node;
+	struct delayed_work		work_tdr;
+	void (*timeout_callback)(struct work_struct *work);
+	void (*free_callback)(struct kref *refcount);
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -102,6 +109,8 @@
 struct amd_sched_backend_ops {
 	struct fence *(*dependency)(struct amd_sched_job *sched_job);
 	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+	void (*begin_job)(struct amd_sched_job *sched_job);
+	void (*finish_job)(struct amd_sched_job *sched_job);
 };
 
 enum amd_sched_priority {
@@ -114,7 +123,7 @@
  * One scheduler is implemented for each hardware ring
 */
 struct amd_gpu_scheduler {
-	struct amd_sched_backend_ops	*ops;
+	const struct amd_sched_backend_ops	*ops;
 	uint32_t			hw_submission_limit;
 	long				timeout;
 	const char			*name;
@@ -122,13 +131,13 @@
 	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
 	atomic_t			hw_rq_count;
-	struct list_head		fence_list;
-	spinlock_t			fence_list_lock;
 	struct task_struct		*thread;
+	struct list_head	ring_mirror_list;
+	spinlock_t			job_list_lock;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
-		   struct amd_sched_backend_ops *ops,
+		   const struct amd_sched_backend_ops *ops,
 		   uint32_t hw_submission, long timeout, const char *name);
 void amd_sched_fini(struct amd_gpu_scheduler *sched);
 
@@ -144,5 +153,24 @@
 	struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
+int amd_sched_job_init(struct amd_sched_job *job,
+					struct amd_gpu_scheduler *sched,
+					struct amd_sched_entity *entity,
+					void (*timeout_cb)(struct work_struct *work),
+					void (*free_cb)(struct kref* refcount),
+					void *owner, struct fence **fence);
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+								struct amd_sched_job *s_job);
+void amd_sched_job_finish(struct amd_sched_job *s_job);
+void amd_sched_job_begin(struct amd_sched_job *s_job);
+static inline void amd_sched_job_get(struct amd_sched_job *job) {
+	if (job)
+		kref_get(&job->refcount);
+}
+
+static inline void amd_sched_job_put(struct amd_sched_job *job) {
+	if (job)
+		kref_put(&job->refcount, job->free_callback);
+}
 
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index dc115ae..2a732c4 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -57,6 +57,16 @@
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+				struct amd_sched_job *s_job)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	sched->ops->begin_job(s_job);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
 void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
 {
 	struct fence_cb *cur, *tmp;
diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig
new file mode 100644
index 0000000..f9a13b6
--- /dev/null
+++ b/drivers/gpu/drm/arc/Kconfig
@@ -0,0 +1,10 @@
+config DRM_ARCPGU
+	tristate "ARC PGU"
+	depends on DRM && OF
+	select DRM_KMS_CMA_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_KMS_HELPER
+	help
+	  Choose this option if you have an ARC PGU controller.
+
+	  If M is selected the module will be called arcpgu.
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
new file mode 100644
index 0000000..d48fda7
--- /dev/null
+++ b/drivers/gpu/drm/arc/Makefile
@@ -0,0 +1,2 @@
+arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o
+obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
new file mode 100644
index 0000000..86574b6
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -0,0 +1,50 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCPGU_H_
+#define _ARCPGU_H_
+
+struct arcpgu_drm_private {
+	void __iomem		*regs;
+	struct clk		*clk;
+	struct drm_fbdev_cma	*fbdev;
+	struct drm_framebuffer	*fb;
+	struct list_head	event_list;
+	struct drm_crtc		crtc;
+	struct drm_plane	*plane;
+};
+
+#define crtc_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, crtc)
+
+static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu,
+				 unsigned int reg, u32 value)
+{
+	iowrite32(value, arcpgu->regs + reg);
+}
+
+static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
+			       unsigned int reg)
+{
+	return ioread32(arcpgu->regs + reg);
+}
+
+int arc_pgu_setup_crtc(struct drm_device *dev);
+int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
+struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count);
+
+#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
new file mode 100644
index 0000000..92f8bef
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -0,0 +1,257 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/platform_data/simplefb.h>
+
+#include "arcpgu.h"
+#include "arcpgu_regs.h"
+
+#define ENCODE_PGU_XY(x, y)	((((x) - 1) << 16) | ((y) - 1))
+
+static struct simplefb_format supported_formats[] = {
+	{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 },
+	{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 },
+};
+
+static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
+{
+	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+	uint32_t pixel_format = crtc->primary->state->fb->pixel_format;
+	struct simplefb_format *format = NULL;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
+		if (supported_formats[i].fourcc == pixel_format)
+			format = &supported_formats[i];
+	}
+
+	if (WARN_ON(!format))
+		return;
+
+	if (format->fourcc == DRM_FORMAT_RGB888)
+		arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+			      arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
+					   ARCPGU_MODE_RGB888_MASK);
+
+}
+
+static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+	struct drm_display_mode *m = &crtc->state->adjusted_mode;
+	u32 val;
+
+	arc_pgu_write(arcpgu, ARCPGU_REG_FMT,
+		      ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal));
+
+	arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC,
+		      ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay,
+				    m->crtc_hsync_end - m->crtc_hdisplay));
+
+	arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC,
+		      ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay,
+				    m->crtc_vsync_end - m->crtc_vdisplay));
+
+	arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE,
+		      ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start,
+				    m->crtc_vblank_end - m->crtc_vblank_start));
+
+	val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+
+	if (m->flags & DRM_MODE_FLAG_PVSYNC)
+		val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST;
+	else
+		val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST);
+
+	if (m->flags & DRM_MODE_FLAG_PHSYNC)
+		val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST;
+	else
+		val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST);
+
+	arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val);
+	arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0);
+	arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1);
+
+	arc_pgu_set_pxl_fmt(crtc);
+
+	clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
+}
+
+static void arc_pgu_crtc_enable(struct drm_crtc *crtc)
+{
+	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+
+	clk_prepare_enable(arcpgu->clk);
+	arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+		      arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
+		      ARCPGU_CTRL_ENABLE_MASK);
+}
+
+static void arc_pgu_crtc_disable(struct drm_crtc *crtc)
+{
+	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+
+	if (!crtc->primary->fb)
+		return;
+
+	clk_disable_unprepare(arcpgu->clk);
+	arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+			      arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
+			      ~ARCPGU_CTRL_ENABLE_MASK);
+}
+
+static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
+				     struct drm_crtc_state *state)
+{
+	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+	struct drm_display_mode *mode = &state->adjusted_mode;
+	long rate, clk_rate = mode->clock * 1000;
+
+	rate = clk_round_rate(arcpgu->clk, clk_rate);
+	if (rate != clk_rate)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
+				      struct drm_crtc_state *state)
+{
+	struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+	unsigned long flags;
+
+	if (crtc->state->event) {
+		struct drm_pending_vblank_event *event = crtc->state->event;
+
+		crtc->state->event = NULL;
+		event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		list_add_tail(&event->base.link, &arcpgu->event_list);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+}
+
+static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
+	.mode_set	= drm_helper_crtc_mode_set,
+	.mode_set_base	= drm_helper_crtc_mode_set_base,
+	.mode_set_nofb	= arc_pgu_crtc_mode_set_nofb,
+	.enable		= arc_pgu_crtc_enable,
+	.disable	= arc_pgu_crtc_disable,
+	.prepare	= arc_pgu_crtc_disable,
+	.commit		= arc_pgu_crtc_enable,
+	.atomic_check	= arc_pgu_crtc_atomic_check,
+	.atomic_begin	= arc_pgu_crtc_atomic_begin,
+};
+
+static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
+					struct drm_plane_state *state)
+{
+	struct arcpgu_drm_private *arcpgu;
+	struct drm_gem_cma_object *gem;
+
+	if (!plane->state->crtc || !plane->state->fb)
+		return;
+
+	arcpgu = crtc_to_arcpgu_priv(plane->state->crtc);
+	gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
+	arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
+}
+
+static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
+	.prepare_fb = NULL,
+	.cleanup_fb = NULL,
+	.atomic_update = arc_pgu_plane_atomic_update,
+};
+
+static void arc_pgu_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_helper_disable(plane);
+	drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs arc_pgu_plane_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= arc_pgu_plane_destroy,
+	.reset			= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
+{
+	struct arcpgu_drm_private *arcpgu = drm->dev_private;
+	struct drm_plane *plane = NULL;
+	u32 formats[ARRAY_SIZE(supported_formats)], i;
+	int ret;
+
+	plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
+		formats[i] = supported_formats[i].fourcc;
+
+	ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
+				       formats, ARRAY_SIZE(formats),
+				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret)
+		return ERR_PTR(ret);
+
+	drm_plane_helper_add(plane, &arc_pgu_plane_helper_funcs);
+	arcpgu->plane = plane;
+
+	return plane;
+}
+
+int arc_pgu_setup_crtc(struct drm_device *drm)
+{
+	struct arcpgu_drm_private *arcpgu = drm->dev_private;
+	struct drm_plane *primary;
+	int ret;
+
+	primary = arc_pgu_plane_init(drm);
+	if (IS_ERR(primary))
+		return PTR_ERR(primary);
+
+	ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
+					&arc_pgu_crtc_funcs, NULL);
+	if (ret) {
+		arc_pgu_plane_destroy(primary);
+		return ret;
+	}
+
+	drm_crtc_helper_add(&arcpgu->crtc, &arc_pgu_crtc_helper_funcs);
+	return 0;
+}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
new file mode 100644
index 0000000..76e187a
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -0,0 +1,288 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <linux/of_reserved_mem.h>
+
+#include "arcpgu.h"
+#include "arcpgu_regs.h"
+
+static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
+{
+	struct arcpgu_drm_private *arcpgu = dev->dev_private;
+
+	if (arcpgu->fbdev)
+		drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
+}
+
+static int arcpgu_atomic_commit(struct drm_device *dev,
+				    struct drm_atomic_state *state, bool async)
+{
+	return drm_atomic_helper_commit(dev, state, false);
+}
+
+static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
+	.fb_create  = drm_fb_cma_create,
+	.output_poll_changed = arcpgu_fb_output_poll_changed,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = arcpgu_atomic_commit,
+};
+
+static void arcpgu_setup_mode_config(struct drm_device *drm)
+{
+	drm_mode_config_init(drm);
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+	drm->mode_config.max_width = 1920;
+	drm->mode_config.max_height = 1080;
+	drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
+}
+
+int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+	return 0;
+}
+
+static const struct file_operations arcpgu_drm_ops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.poll = drm_poll,
+	.read = drm_read,
+	.llseek = no_llseek,
+	.mmap = arcpgu_gem_mmap,
+};
+
+static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
+{
+	struct arcpgu_drm_private *arcpgu = drm->dev_private;
+	struct drm_pending_vblank_event *e, *t;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm->event_lock, flags);
+	list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
+		if (e->base.file_priv != file)
+			continue;
+		list_del(&e->base.link);
+		e->base.destroy(&e->base);
+	}
+	spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static void arcpgu_lastclose(struct drm_device *drm)
+{
+	struct arcpgu_drm_private *arcpgu = drm->dev_private;
+
+	drm_fbdev_cma_restore_mode(arcpgu->fbdev);
+}
+
+static int arcpgu_load(struct drm_device *drm)
+{
+	struct platform_device *pdev = to_platform_device(drm->dev);
+	struct arcpgu_drm_private *arcpgu;
+	struct device_node *encoder_node;
+	struct resource *res;
+	int ret;
+
+	arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL);
+	if (arcpgu == NULL)
+		return -ENOMEM;
+
+	drm->dev_private = arcpgu;
+
+	arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
+	if (IS_ERR(arcpgu->clk))
+		return PTR_ERR(arcpgu->clk);
+
+	INIT_LIST_HEAD(&arcpgu->event_list);
+
+	arcpgu_setup_mode_config(drm);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(arcpgu->regs)) {
+		dev_err(drm->dev, "Could not remap IO mem\n");
+		return PTR_ERR(arcpgu->regs);
+	}
+
+	dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
+		 arc_pgu_read(arcpgu, ARCPGU_REG_ID));
+
+	/* Get the optional framebuffer memory resource */
+	ret = of_reserved_mem_device_init(drm->dev);
+	if (ret && ret != -ENODEV)
+		return ret;
+
+	if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
+		return -ENODEV;
+
+	if (arc_pgu_setup_crtc(drm) < 0)
+		return -ENODEV;
+
+	/* find the encoder node and initialize it */
+	encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
+	if (!encoder_node) {
+		dev_err(drm->dev, "failed to get an encoder slave node\n");
+		return -ENODEV;
+	}
+
+	ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+	if (ret < 0)
+		return ret;
+
+	drm_mode_config_reset(drm);
+	drm_kms_helper_poll_init(drm);
+
+	arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
+					      drm->mode_config.num_crtc,
+					      drm->mode_config.num_connector);
+	if (IS_ERR(arcpgu->fbdev)) {
+		ret = PTR_ERR(arcpgu->fbdev);
+		arcpgu->fbdev = NULL;
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, arcpgu);
+	return 0;
+}
+
+int arcpgu_unload(struct drm_device *drm)
+{
+	struct arcpgu_drm_private *arcpgu = drm->dev_private;
+
+	if (arcpgu->fbdev) {
+		drm_fbdev_cma_fini(arcpgu->fbdev);
+		arcpgu->fbdev = NULL;
+	}
+	drm_kms_helper_poll_fini(drm);
+	drm_vblank_cleanup(drm);
+	drm_mode_config_cleanup(drm);
+
+	return 0;
+}
+
+static struct drm_driver arcpgu_drm_driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
+	.preclose = arcpgu_preclose,
+	.lastclose = arcpgu_lastclose,
+	.name = "drm-arcpgu",
+	.desc = "ARC PGU Controller",
+	.date = "20160219",
+	.major = 1,
+	.minor = 0,
+	.patchlevel = 0,
+	.fops = &arcpgu_drm_ops,
+	.dumb_create = drm_gem_cma_dumb_create,
+	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.get_vblank_counter = drm_vblank_no_hw_counter,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_free_object = drm_gem_cma_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+};
+
+static int arcpgu_probe(struct platform_device *pdev)
+{
+	struct drm_device *drm;
+	int ret;
+
+	drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
+	if (!drm)
+		return -ENOMEM;
+
+	ret = arcpgu_load(drm);
+	if (ret)
+		goto err_unref;
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto err_unload;
+
+	ret = drm_connector_register_all(drm);
+	if (ret)
+		goto err_unregister;
+
+	return 0;
+
+err_unregister:
+	drm_dev_unregister(drm);
+
+err_unload:
+	arcpgu_unload(drm);
+
+err_unref:
+	drm_dev_unref(drm);
+
+	return ret;
+}
+
+static int arcpgu_remove(struct platform_device *pdev)
+{
+	struct drm_device *drm = platform_get_drvdata(pdev);
+
+	drm_connector_unregister_all(drm);
+	drm_dev_unregister(drm);
+	arcpgu_unload(drm);
+	drm_dev_unref(drm);
+
+	return 0;
+}
+
+static const struct of_device_id arcpgu_of_table[] = {
+	{.compatible = "snps,arcpgu"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, arcpgu_of_table);
+
+static struct platform_driver arcpgu_platform_driver = {
+	.probe = arcpgu_probe,
+	.remove = arcpgu_remove,
+	.driver = {
+		   .name = "arcpgu",
+		   .of_match_table = arcpgu_of_table,
+		   },
+};
+
+module_platform_driver(arcpgu_platform_driver);
+
+MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>");
+MODULE_DESCRIPTION("ARC PGU DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
new file mode 100644
index 0000000..08b6bae
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -0,0 +1,201 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "arcpgu.h"
+
+struct arcpgu_drm_connector {
+	struct drm_connector connector;
+	struct drm_encoder_slave *encoder_slave;
+};
+
+static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
+{
+	const struct drm_encoder_slave_funcs *sfuncs;
+	struct drm_encoder_slave *slave;
+	struct arcpgu_drm_connector *con =
+		container_of(connector, struct arcpgu_drm_connector, connector);
+
+	slave = con->encoder_slave;
+	if (slave == NULL) {
+		dev_err(connector->dev->dev,
+			"connector_get_modes: cannot find slave encoder for connector\n");
+		return 0;
+	}
+
+	sfuncs = slave->slave_funcs;
+	if (sfuncs->get_modes == NULL)
+		return 0;
+
+	return sfuncs->get_modes(&slave->base, connector);
+}
+
+struct drm_encoder *
+arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct drm_encoder_slave *slave;
+	struct arcpgu_drm_connector *con =
+		container_of(connector, struct arcpgu_drm_connector, connector);
+
+	slave = con->encoder_slave;
+	if (slave == NULL) {
+		dev_err(connector->dev->dev,
+			"connector_best_encoder: cannot find slave encoder for connector\n");
+		return NULL;
+	}
+
+	return &slave->base;
+}
+
+static enum drm_connector_status
+arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	const struct drm_encoder_slave_funcs *sfuncs;
+	struct drm_encoder_slave *slave;
+
+	struct arcpgu_drm_connector *con =
+		container_of(connector, struct arcpgu_drm_connector, connector);
+
+	slave = con->encoder_slave;
+	if (slave == NULL) {
+		dev_err(connector->dev->dev,
+			"connector_detect: cannot find slave encoder for connector\n");
+		return status;
+	}
+
+	sfuncs = slave->slave_funcs;
+	if (sfuncs && sfuncs->detect)
+		return sfuncs->detect(&slave->base, connector);
+
+	dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
+	return status;
+}
+
+static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_helper_funcs
+arcpgu_drm_connector_helper_funcs = {
+	.get_modes = arcpgu_drm_connector_get_modes,
+	.best_encoder = arcpgu_drm_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.reset = drm_atomic_helper_connector_reset,
+	.detect = arcpgu_drm_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = arcpgu_drm_connector_destroy,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
+	.dpms = drm_i2c_encoder_dpms,
+	.mode_fixup = drm_i2c_encoder_mode_fixup,
+	.mode_set = drm_i2c_encoder_mode_set,
+	.prepare = drm_i2c_encoder_prepare,
+	.commit = drm_i2c_encoder_commit,
+	.detect = drm_i2c_encoder_detect,
+};
+
+static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
+{
+	struct arcpgu_drm_connector *arcpgu_connector;
+	struct drm_i2c_encoder_driver *driver;
+	struct drm_encoder_slave *encoder;
+	struct drm_connector *connector;
+	struct i2c_client *i2c_slave;
+	int ret;
+
+	encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+	if (encoder == NULL)
+		return -ENOMEM;
+
+	i2c_slave = of_find_i2c_device_by_node(np);
+	if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
+		dev_err(drm->dev, "failed to find i2c slave encoder\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (i2c_slave->dev.driver == NULL) {
+		dev_err(drm->dev, "failed to find i2c slave driver\n");
+		return -EPROBE_DEFER;
+	}
+
+	driver =
+	    to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
+	ret = driver->encoder_init(i2c_slave, drm, encoder);
+	if (ret) {
+		dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
+		return ret;
+	}
+
+	encoder->base.possible_crtcs = 1;
+	encoder->base.possible_clones = 0;
+	ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS, NULL);
+	if (ret)
+		return ret;
+
+	drm_encoder_helper_add(&encoder->base,
+			       &arcpgu_drm_encoder_helper_funcs);
+
+	arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
+					GFP_KERNEL);
+	if (!arcpgu_connector) {
+		ret = -ENOMEM;
+		goto error_encoder_cleanup;
+	}
+
+	connector = &arcpgu_connector->connector;
+	drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
+	ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
+			DRM_MODE_CONNECTOR_HDMIA);
+	if (ret < 0) {
+		dev_err(drm->dev, "failed to initialize drm connector\n");
+		goto error_encoder_cleanup;
+	}
+
+	ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
+	if (ret < 0) {
+		dev_err(drm->dev, "could not attach connector to encoder\n");
+		drm_connector_unregister(connector);
+		goto error_connector_cleanup;
+	}
+
+	arcpgu_connector->encoder_slave = encoder;
+
+	return 0;
+
+error_connector_cleanup:
+	drm_connector_cleanup(connector);
+
+error_encoder_cleanup:
+	drm_encoder_cleanup(&encoder->base);
+	return ret;
+}
diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h
new file mode 100644
index 0000000..95a13a8
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_regs.h
@@ -0,0 +1,40 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARC_PGU_REGS_H_
+#define _ARC_PGU_REGS_H_
+
+#define ARCPGU_REG_CTRL		0x00
+#define ARCPGU_REG_STAT		0x04
+#define ARCPGU_REG_FMT		0x10
+#define ARCPGU_REG_HSYNC	0x14
+#define ARCPGU_REG_VSYNC	0x18
+#define ARCPGU_REG_ACTIVE	0x1c
+#define ARCPGU_REG_BUF0_ADDR	0x40
+#define ARCPGU_REG_STRIDE	0x50
+#define ARCPGU_REG_START_SET	0x84
+
+#define ARCPGU_REG_ID		0x3FC
+
+#define ARCPGU_CTRL_ENABLE_MASK	0x02
+#define ARCPGU_CTRL_VS_POL_MASK	0x1
+#define ARCPGU_CTRL_VS_POL_OFST	0x3
+#define ARCPGU_CTRL_HS_POL_MASK	0x1
+#define ARCPGU_CTRL_HS_POL_OFST	0x4
+#define ARCPGU_MODE_RGB888_MASK	0x04
+#define ARCPGU_STAT_BUSY_MASK	0x02
+
+#endif
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index fef1b04..0813c2f 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -33,8 +33,17 @@
  *
  */
 
+static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+	/* stop the controller on cleanup */
+	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+	drm_crtc_cleanup(crtc);
+}
+
 static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
-	.destroy = drm_crtc_cleanup,
+	.destroy = hdlcd_crtc_cleanup,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
 	.reset = drm_atomic_helper_crtc_reset,
@@ -97,7 +106,7 @@
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 	struct drm_display_mode *m = &crtc->state->adjusted_mode;
 	struct videomode vm;
-	unsigned int polarities, line_length, err;
+	unsigned int polarities, err;
 
 	vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
 	vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
@@ -113,23 +122,18 @@
 	if (m->flags & DRM_MODE_FLAG_PVSYNC)
 		polarities |= HDLCD_POLARITY_VSYNC;
 
-	line_length = crtc->primary->state->fb->pitches[0];
-
 	/* Allow max number of outstanding requests and largest burst size */
 	hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
 		    HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
 
-	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
-	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
-	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
-	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
 
 	err = hdlcd_set_pxl_fmt(crtc);
@@ -144,20 +148,19 @@
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
 	clk_prepare_enable(hdlcd->clk);
+	hdlcd_crtc_mode_set_nofb(crtc);
 	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
-	drm_crtc_vblank_on(crtc);
 }
 
 static void hdlcd_crtc_disable(struct drm_crtc *crtc)
 {
 	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
-	if (!crtc->primary->fb)
+	if (!crtc->state->active)
 		return;
 
-	clk_disable_unprepare(hdlcd->clk);
 	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
-	drm_crtc_vblank_off(crtc);
+	clk_disable_unprepare(hdlcd->clk);
 }
 
 static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
@@ -179,20 +182,17 @@
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
 				    struct drm_crtc_state *state)
 {
-	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
-	unsigned long flags;
+	struct drm_pending_vblank_event *event = crtc->state->event;
 
-	if (crtc->state->event) {
-		struct drm_pending_vblank_event *event = crtc->state->event;
-
+	if (event) {
 		crtc->state->event = NULL;
-		event->pipe = drm_crtc_index(crtc);
 
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-		list_add_tail(&event->base.link, &hdlcd->event_list);
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		spin_lock_irq(&crtc->dev->event_lock);
+		if (drm_crtc_vblank_get(crtc) == 0)
+			drm_crtc_arm_vblank_event(crtc, event);
+		else
+			drm_crtc_send_vblank_event(crtc, event);
+		spin_unlock_irq(&crtc->dev->event_lock);
 	}
 }
 
@@ -225,6 +225,15 @@
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
+	u32 src_w, src_h;
+
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
+
+	/* we can't do any scaling of the plane source */
+	if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -233,20 +242,31 @@
 {
 	struct hdlcd_drm_private *hdlcd;
 	struct drm_gem_cma_object *gem;
+	unsigned int depth, bpp;
+	u32 src_w, src_h, dest_w, dest_h;
 	dma_addr_t scanout_start;
 
-	if (!plane->state->crtc || !plane->state->fb)
+	if (!plane->state->fb)
 		return;
 
-	hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+	drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
+	src_w = plane->state->src_w >> 16;
+	src_h = plane->state->src_h >> 16;
+	dest_w = plane->state->crtc_w;
+	dest_h = plane->state->crtc_h;
 	gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
-	scanout_start = gem->paddr;
+	scanout_start = gem->paddr + plane->state->fb->offsets[0] +
+		plane->state->crtc_y * plane->state->fb->pitches[0] +
+		plane->state->crtc_x * bpp / 8;
+
+	hdlcd = plane->dev->dev_private;
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
 	hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
 }
 
 static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
-	.prepare_fb = NULL,
-	.cleanup_fb = NULL,
 	.atomic_check = hdlcd_plane_atomic_check,
 	.atomic_update = hdlcd_plane_atomic_update,
 };
@@ -294,16 +314,6 @@
 	return plane;
 }
 
-void hdlcd_crtc_suspend(struct drm_crtc *crtc)
-{
-	hdlcd_crtc_disable(crtc);
-}
-
-void hdlcd_crtc_resume(struct drm_crtc *crtc)
-{
-	hdlcd_crtc_enable(crtc);
-}
-
 int hdlcd_setup_crtc(struct drm_device *drm)
 {
 	struct hdlcd_drm_private *hdlcd = drm->dev_private;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 3ac1ae4..a6ca36f 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -49,8 +49,6 @@
 	atomic_set(&hdlcd->dma_end_count, 0);
 #endif
 
-	INIT_LIST_HEAD(&hdlcd->event_list);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
 	if (IS_ERR(hdlcd->mmio)) {
@@ -84,11 +82,7 @@
 		goto setup_fail;
 	}
 
-	pm_runtime_enable(drm->dev);
-
-	pm_runtime_get_sync(drm->dev);
 	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
-	pm_runtime_put_sync(drm->dev);
 	if (ret < 0) {
 		DRM_ERROR("failed to install IRQ handler\n");
 		goto irq_fail;
@@ -113,7 +107,7 @@
 }
 
 static int hdlcd_atomic_commit(struct drm_device *dev,
-			       struct drm_atomic_state *state, bool async)
+			       struct drm_atomic_state *state, bool nonblock)
 {
 	return drm_atomic_helper_commit(dev, state, false);
 }
@@ -164,24 +158,9 @@
 		atomic_inc(&hdlcd->vsync_count);
 
 #endif
-	if (irq_status & HDLCD_INTERRUPT_VSYNC) {
-		bool events_sent = false;
-		unsigned long flags;
-		struct drm_pending_vblank_event	*e, *t;
-
+	if (irq_status & HDLCD_INTERRUPT_VSYNC)
 		drm_crtc_handle_vblank(&hdlcd->crtc);
 
-		spin_lock_irqsave(&drm->event_lock, flags);
-		list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
-			list_del(&e->base.link);
-			drm_crtc_send_vblank_event(&hdlcd->crtc, e);
-			events_sent = true;
-		}
-		if (events_sent)
-			drm_crtc_vblank_put(&hdlcd->crtc);
-		spin_unlock_irqrestore(&drm->event_lock, flags);
-	}
-
 	/* acknowledge interrupt(s) */
 	hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
 
@@ -275,6 +254,7 @@
 static struct drm_info_list hdlcd_debugfs_list[] = {
 	{ "interrupt_count", hdlcd_show_underrun_count, 0 },
 	{ "clocks", hdlcd_show_pxlclock, 0 },
+	{ "fb", drm_fb_cma_debugfs_show, 0 },
 };
 
 static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -357,6 +337,8 @@
 		return -ENOMEM;
 
 	drm->dev_private = hdlcd;
+	dev_set_drvdata(dev, drm);
+
 	hdlcd_setup_mode_config(drm);
 	ret = hdlcd_load(drm, 0);
 	if (ret)
@@ -366,20 +348,23 @@
 	if (ret)
 		goto err_unload;
 
-	dev_set_drvdata(dev, drm);
-
 	ret = component_bind_all(dev, drm);
 	if (ret) {
 		DRM_ERROR("Failed to bind all components\n");
 		goto err_unregister;
 	}
 
+	ret = pm_runtime_set_active(dev);
+	if (ret)
+		goto err_pm_active;
+
+	pm_runtime_enable(dev);
+
 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialise vblank\n");
 		goto err_vblank;
 	}
-	drm->vblank_disable_allowed = true;
 
 	drm_mode_config_reset(drm);
 	drm_kms_helper_poll_init(drm);
@@ -400,16 +385,16 @@
 	drm_mode_config_cleanup(drm);
 	drm_vblank_cleanup(drm);
 err_vblank:
+	pm_runtime_disable(drm->dev);
+err_pm_active:
 	component_unbind_all(dev, drm);
 err_unregister:
 	drm_dev_unregister(drm);
 err_unload:
-	pm_runtime_get_sync(drm->dev);
 	drm_irq_uninstall(drm);
-	pm_runtime_put_sync(drm->dev);
-	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
 err_free:
+	dev_set_drvdata(dev, NULL);
 	drm_dev_unref(drm);
 
 	return ret;
@@ -496,30 +481,34 @@
 static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct drm_crtc *crtc;
+	struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-	if (pm_runtime_suspended(dev))
+	if (!hdlcd)
 		return 0;
 
-	drm_modeset_lock_all(drm);
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		hdlcd_crtc_suspend(crtc);
-	drm_modeset_unlock_all(drm);
+	drm_kms_helper_poll_disable(drm);
+
+	hdlcd->state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(hdlcd->state)) {
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(hdlcd->state);
+	}
+
 	return 0;
 }
 
 static int __maybe_unused hdlcd_pm_resume(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct drm_crtc *crtc;
+	struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-	if (!pm_runtime_suspended(dev))
+	if (!hdlcd)
 		return 0;
 
-	drm_modeset_lock_all(drm);
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		hdlcd_crtc_resume(crtc);
-	drm_modeset_unlock_all(drm);
+	drm_atomic_helper_resume(drm, hdlcd->state);
+	drm_kms_helper_poll_enable(drm);
+	pm_runtime_set_active(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index aa23478..e3950a0 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,9 @@
 	void __iomem			*mmio;
 	struct clk			*clk;
 	struct drm_fbdev_cma		*fbdev;
-	struct drm_framebuffer		*fb;
-	struct list_head		event_list;
 	struct drm_crtc			crtc;
 	struct drm_plane		*plane;
+	struct drm_atomic_state		*state;
 #ifdef CONFIG_DEBUG_FS
 	atomic_t buffer_underrun_count;
 	atomic_t bus_error_count;
@@ -36,7 +35,5 @@
 
 int hdlcd_setup_crtc(struct drm_device *dev);
 void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
-void hdlcd_crtc_suspend(struct drm_crtc *crtc);
-void hdlcd_crtc_resume(struct drm_crtc *crtc);
 
 #endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 0293eb7..3130aa8 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -897,7 +897,6 @@
 static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
 	struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
 {
-	struct drm_device *dev = crtc->dev;
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 	struct armada_gem_object *obj = NULL;
 	int ret;
@@ -911,7 +910,7 @@
 		if (w > 64 || h > 64 || (w > 32 && h > 32))
 			return -ENOMEM;
 
-		obj = armada_gem_object_lookup(dev, file, handle);
+		obj = armada_gem_object_lookup(file, handle);
 		if (!obj)
 			return -ENOENT;
 
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 82043c2..439824a 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -113,7 +113,6 @@
 		goto err_comp;
 
 	dev->irq_enabled = true;
-	dev->vblank_disable_allowed = 1;
 
 	ret = armada_fbdev_init(dev);
 	if (ret)
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 5fa4bf2..f03c212 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -120,7 +120,7 @@
 		goto err;
 	}
 
-	obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+	obj = armada_gem_object_lookup(dfile, mode->handles[0]);
 	if (!obj) {
 		ret = -ENOENT;
 		goto err;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index aca7f9c..88e7fc7 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -278,7 +278,7 @@
 	struct armada_gem_object *obj;
 	int ret = 0;
 
-	obj = armada_gem_object_lookup(dev, file, handle);
+	obj = armada_gem_object_lookup(file, handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object\n");
 		return -EINVAL;
@@ -348,7 +348,7 @@
 	struct armada_gem_object *dobj;
 	unsigned long addr;
 
-	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	dobj = armada_gem_object_lookup(file, args->handle);
 	if (dobj == NULL)
 		return -ENOENT;
 
@@ -391,7 +391,7 @@
 	if (ret)
 		return ret;
 
-	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	dobj = armada_gem_object_lookup(file, args->handle);
 	if (dobj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index b000ea3..b88d2b9 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -45,9 +45,9 @@
 int armada_gem_map_import(struct armada_gem_object *);
 
 static inline struct armada_gem_object *armada_gem_object_lookup(
-	struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+	struct drm_file *dfile, unsigned handle)
 {
-	struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+	struct drm_gem_object *obj = drm_gem_object_lookup(dfile, handle);
 
 	return obj ? drm_to_armada_gem(obj) : NULL;
 }
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 9a32d9d..fcd9c07 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -218,10 +218,8 @@
 
 static int __init ast_init(void)
 {
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && ast_modeset == -1)
 		return -EINVAL;
-#endif
 
 	if (ast_modeset == 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index eb57159..908011d 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -367,7 +367,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b1480ac..7bc3aa6 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -333,7 +333,7 @@
 	struct ast_framebuffer *ast_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
@@ -574,7 +574,7 @@
 	struct drm_gem_object *obj;
 	struct ast_bo *bo;
 
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a965e7e..c337922 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1141,7 +1141,7 @@
 	if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
 		return -EINVAL;
 
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
 		return -ENOENT;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 08f82ea..59f2f93 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -245,6 +245,8 @@
 	.verify_access = ast_bo_verify_access,
 	.io_mem_reserve = &ast_ttm_io_mem_reserve,
 	.io_mem_free = &ast_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int ast_mm_init(struct ast_private *ast)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 58c4f78..bd12231 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -32,6 +32,23 @@
 #include "atmel_hlcdc_dc.h"
 
 /**
+ * Atmel HLCDC CRTC state structure
+ *
+ * @base: base CRTC state
+ * @output_mode: RGBXXX output mode
+ */
+struct atmel_hlcdc_crtc_state {
+	struct drm_crtc_state base;
+	unsigned int output_mode;
+};
+
+static inline struct atmel_hlcdc_crtc_state *
+drm_crtc_state_to_atmel_hlcdc_crtc_state(struct drm_crtc_state *state)
+{
+	return container_of(state, struct atmel_hlcdc_crtc_state, base);
+}
+
+/**
  * Atmel HLCDC CRTC structure
  *
  * @base: base DRM CRTC structure
@@ -59,6 +76,7 @@
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 	struct regmap *regmap = crtc->dc->hlcdc->regmap;
 	struct drm_display_mode *adj = &c->state->adjusted_mode;
+	struct atmel_hlcdc_crtc_state *state;
 	unsigned long mode_rate;
 	struct videomode vm;
 	unsigned long prate;
@@ -112,15 +130,27 @@
 	if (adj->flags & DRM_MODE_FLAG_NHSYNC)
 		cfg |= ATMEL_HLCDC_HSPOL;
 
+	state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
+	cfg |= state->output_mode << 8;
+
 	regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
 			   ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
 			   ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
 			   ATMEL_HLCDC_DISPPOL | ATMEL_HLCDC_DISPDLY |
 			   ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
-			   ATMEL_HLCDC_GUARDTIME_MASK,
+			   ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
 			   cfg);
 }
 
+static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *c,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+	return atmel_hlcdc_dc_mode_valid(crtc->dc, adjusted_mode) == MODE_OK;
+}
+
 static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
 {
 	struct drm_device *dev = c->dev;
@@ -221,15 +251,79 @@
 	}
 }
 
+#define ATMEL_HLCDC_RGB444_OUTPUT	BIT(0)
+#define ATMEL_HLCDC_RGB565_OUTPUT	BIT(1)
+#define ATMEL_HLCDC_RGB666_OUTPUT	BIT(2)
+#define ATMEL_HLCDC_RGB888_OUTPUT	BIT(3)
+#define ATMEL_HLCDC_OUTPUT_MODE_MASK	GENMASK(3, 0)
+
+static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
+{
+	unsigned int output_fmts = ATMEL_HLCDC_OUTPUT_MODE_MASK;
+	struct atmel_hlcdc_crtc_state *hstate;
+	struct drm_connector_state *cstate;
+	struct drm_connector *connector;
+	struct atmel_hlcdc_crtc *crtc;
+	int i;
+
+	crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
+
+	for_each_connector_in_state(state->state, connector, cstate, i) {
+		struct drm_display_info *info = &connector->display_info;
+		unsigned int supported_fmts = 0;
+		int j;
+
+		if (!cstate->crtc)
+			continue;
+
+		for (j = 0; j < info->num_bus_formats; j++) {
+			switch (info->bus_formats[j]) {
+			case MEDIA_BUS_FMT_RGB444_1X12:
+				supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
+				break;
+			case MEDIA_BUS_FMT_RGB565_1X16:
+				supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
+				break;
+			case MEDIA_BUS_FMT_RGB666_1X18:
+				supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
+				break;
+			case MEDIA_BUS_FMT_RGB888_1X24:
+				supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
+				break;
+			default:
+				break;
+			}
+		}
+
+		if (crtc->dc->desc->conflicting_output_formats)
+			output_fmts &= supported_fmts;
+		else
+			output_fmts |= supported_fmts;
+	}
+
+	if (!output_fmts)
+		return -EINVAL;
+
+	hstate = drm_crtc_state_to_atmel_hlcdc_crtc_state(state);
+	hstate->output_mode = fls(output_fmts) - 1;
+
+	return 0;
+}
+
 static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 					 struct drm_crtc_state *s)
 {
-	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+	int ret;
 
-	if (atmel_hlcdc_dc_mode_valid(crtc->dc, &s->adjusted_mode) != MODE_OK)
-		return -EINVAL;
+	ret = atmel_hlcdc_crtc_select_output_mode(s);
+	if (ret)
+		return ret;
 
-	return atmel_hlcdc_plane_prepare_disc_area(s);
+	ret = atmel_hlcdc_plane_prepare_disc_area(s);
+	if (ret)
+		return ret;
+
+	return atmel_hlcdc_plane_prepare_ahb_routing(s);
 }
 
 static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
@@ -254,6 +348,7 @@
 }
 
 static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
+	.mode_fixup = atmel_hlcdc_crtc_mode_fixup,
 	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
 	.mode_set_base = drm_helper_crtc_mode_set_base,
@@ -292,13 +387,60 @@
 	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
+void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+{
+	struct atmel_hlcdc_crtc_state *state;
+
+	if (crtc->state) {
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
+		kfree(state);
+		crtc->state = NULL;
+	}
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state) {
+		crtc->state = &state->base;
+		crtc->state->crtc = crtc;
+	}
+}
+
+static struct drm_crtc_state *
+atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct atmel_hlcdc_crtc_state *state, *cur;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+	cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
+	state->output_mode = cur->output_mode;
+
+	return &state->base;
+}
+
+static void atmel_hlcdc_crtc_destroy_state(struct drm_crtc *crtc,
+					   struct drm_crtc_state *s)
+{
+	struct atmel_hlcdc_crtc_state *state;
+
+	state = drm_crtc_state_to_atmel_hlcdc_crtc_state(s);
+	__drm_atomic_helper_crtc_destroy_state(s);
+	kfree(state);
+}
+
 static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = atmel_hlcdc_crtc_destroy,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state =  drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.reset = atmel_hlcdc_crtc_reset,
+	.atomic_duplicate_state =  atmel_hlcdc_crtc_duplicate_state,
+	.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
 };
 
 int atmel_hlcdc_crtc_create(struct drm_device *dev)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 3d8d164..8ded764 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -50,6 +50,10 @@
 	.min_height = 0,
 	.max_width = 1280,
 	.max_height = 860,
+	.max_spw = 0x3f,
+	.max_vpw = 0x3f,
+	.max_hpw = 0xff,
+	.conflicting_output_formats = true,
 	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
 	.layers = atmel_hlcdc_at91sam9n12_layers,
 };
@@ -134,6 +138,10 @@
 	.min_height = 0,
 	.max_width = 800,
 	.max_height = 600,
+	.max_spw = 0x3f,
+	.max_vpw = 0x3f,
+	.max_hpw = 0xff,
+	.conflicting_output_formats = true,
 	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
 	.layers = atmel_hlcdc_at91sam9x5_layers,
 };
@@ -237,6 +245,10 @@
 	.min_height = 0,
 	.max_width = 2048,
 	.max_height = 2048,
+	.max_spw = 0x3f,
+	.max_vpw = 0x3f,
+	.max_hpw = 0x1ff,
+	.conflicting_output_formats = true,
 	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers),
 	.layers = atmel_hlcdc_sama5d3_layers,
 };
@@ -320,6 +332,9 @@
 	.min_height = 0,
 	.max_width = 2048,
 	.max_height = 2048,
+	.max_spw = 0xff,
+	.max_vpw = 0xff,
+	.max_hpw = 0x3ff,
 	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
 	.layers = atmel_hlcdc_sama5d4_layers,
 };
@@ -358,19 +373,19 @@
 	int hback_porch = mode->htotal - mode->hsync_end;
 	int hsync_len = mode->hsync_end - mode->hsync_start;
 
-	if (hsync_len > 0x40 || hsync_len < 1)
+	if (hsync_len > dc->desc->max_spw + 1 || hsync_len < 1)
 		return MODE_HSYNC;
 
-	if (vsync_len > 0x40 || vsync_len < 1)
+	if (vsync_len > dc->desc->max_spw + 1 || vsync_len < 1)
 		return MODE_VSYNC;
 
-	if (hfront_porch > 0x200 || hfront_porch < 1 ||
-	    hback_porch > 0x200 || hback_porch < 1 ||
+	if (hfront_porch > dc->desc->max_hpw + 1 || hfront_porch < 1 ||
+	    hback_porch > dc->desc->max_hpw + 1 || hback_porch < 1 ||
 	    mode->hdisplay < 1)
 		return MODE_H_ILLEGAL;
 
-	if (vfront_porch > 0x40 || vfront_porch < 1 ||
-	    vback_porch > 0x40 || vback_porch < 0 ||
+	if (vfront_porch > dc->desc->max_vpw + 1 || vfront_porch < 1 ||
+	    vback_porch > dc->desc->max_vpw || vback_porch < 0 ||
 	    mode->vdisplay < 1)
 		return MODE_V_ILLEGAL;
 
@@ -427,11 +442,102 @@
 	}
 }
 
+struct atmel_hlcdc_dc_commit {
+	struct work_struct work;
+	struct drm_device *dev;
+	struct drm_atomic_state *state;
+};
+
+static void
+atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
+{
+	struct drm_device *dev = commit->dev;
+	struct atmel_hlcdc_dc *dc = dev->dev_private;
+	struct drm_atomic_state *old_state = commit->state;
+
+	/* Apply the atomic update. */
+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
+	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+	drm_atomic_helper_cleanup_planes(dev, old_state);
+
+	drm_atomic_state_free(old_state);
+
+	/* Complete the commit, wake up any waiter. */
+	spin_lock(&dc->commit.wait.lock);
+	dc->commit.pending = false;
+	wake_up_all_locked(&dc->commit.wait);
+	spin_unlock(&dc->commit.wait.lock);
+
+	kfree(commit);
+}
+
+static void atmel_hlcdc_dc_atomic_work(struct work_struct *work)
+{
+	struct atmel_hlcdc_dc_commit *commit =
+		container_of(work, struct atmel_hlcdc_dc_commit, work);
+
+	atmel_hlcdc_dc_atomic_complete(commit);
+}
+
+static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
+					struct drm_atomic_state *state,
+					bool async)
+{
+	struct atmel_hlcdc_dc *dc = dev->dev_private;
+	struct atmel_hlcdc_dc_commit *commit;
+	int ret;
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret)
+		return ret;
+
+	/* Allocate the commit object. */
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (!commit) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work);
+	commit->dev = dev;
+	commit->state = state;
+
+	spin_lock(&dc->commit.wait.lock);
+	ret = wait_event_interruptible_locked(dc->commit.wait,
+					      !dc->commit.pending);
+	if (ret == 0)
+		dc->commit.pending = true;
+	spin_unlock(&dc->commit.wait.lock);
+
+	if (ret) {
+		kfree(commit);
+		goto error;
+	}
+
+	/* Swap the state, this is the point of no return. */
+	drm_atomic_helper_swap_state(dev, state);
+
+	if (async)
+		queue_work(dc->wq, &commit->work);
+	else
+		atmel_hlcdc_dc_atomic_complete(commit);
+
+	return 0;
+
+error:
+	drm_atomic_helper_cleanup_planes(dev, state);
+	return ret;
+}
+
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = atmel_hlcdc_fb_create,
 	.output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_commit = atmel_hlcdc_dc_atomic_commit,
 };
 
 static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@@ -445,7 +551,7 @@
 
 	ret = atmel_hlcdc_create_outputs(dev);
 	if (ret) {
-		dev_err(dev->dev, "failed to create panel: %d\n", ret);
+		dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret);
 		return ret;
 	}
 
@@ -509,6 +615,7 @@
 	if (!dc->wq)
 		return -ENOMEM;
 
+	init_waitqueue_head(&dc->commit.wait);
 	dc->desc = match->data;
 	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
 	dev->dev_private = dc;
@@ -584,38 +691,10 @@
 	destroy_workqueue(dc->wq);
 }
 
-static int atmel_hlcdc_dc_connector_plug_all(struct drm_device *dev)
-{
-	struct drm_connector *connector, *failed;
-	int ret;
-
-	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		ret = drm_connector_register(connector);
-		if (ret) {
-			failed = connector;
-			goto err;
-		}
-	}
-	mutex_unlock(&dev->mode_config.mutex);
-	return 0;
-
-err:
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (failed == connector)
-			break;
-
-		drm_connector_unregister(connector);
-	}
-	mutex_unlock(&dev->mode_config.mutex);
-
-	return ret;
-}
-
 static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
 {
 	mutex_lock(&dev->mode_config.mutex);
-	drm_connector_unplug_all(dev);
+	drm_connector_unregister_all(dev);
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
@@ -736,7 +815,7 @@
 	if (ret)
 		goto err_unload;
 
-	ret = atmel_hlcdc_dc_connector_plug_all(ddev);
+	ret = drm_connector_register_all(ddev);
 	if (ret)
 		goto err_unregister;
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index fed517f..7a47f8c 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -50,6 +50,11 @@
  * @min_height: minimum height supported by the Display Controller
  * @max_width: maximum width supported by the Display Controller
  * @max_height: maximum height supported by the Display Controller
+ * @max_spw: maximum vertical/horizontal pulse width
+ * @max_vpw: maximum vertical back/front porch width
+ * @max_hpw: maximum horizontal back/front porch width
+ * @conflicting_output_formats: true if RGBXXX output formats conflict with
+ *				each other.
  * @layers: a layer description table describing available layers
  * @nlayers: layer description table size
  */
@@ -58,6 +63,10 @@
 	int min_height;
 	int max_width;
 	int max_height;
+	int max_spw;
+	int max_vpw;
+	int max_hpw;
+	bool conflicting_output_formats;
 	const struct atmel_hlcdc_layer_desc *layers;
 	int nlayers;
 };
@@ -128,6 +137,7 @@
  * @planes: instantiated planes
  * @layers: active HLCDC layer
  * @wq: display controller workqueue
+ * @commit: used for async commit handling
  */
 struct atmel_hlcdc_dc {
 	const struct atmel_hlcdc_dc_desc *desc;
@@ -137,6 +147,10 @@
 	struct atmel_hlcdc_planes *planes;
 	struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
 	struct workqueue_struct *wq;
+	struct {
+		wait_queue_head_t wait;
+		bool pending;
+	} commit;
 };
 
 extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;
@@ -149,6 +163,7 @@
 atmel_hlcdc_create_planes(struct drm_device *dev);
 
 int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
+int atmel_hlcdc_plane_prepare_ahb_routing(struct drm_crtc_state *c_state);
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
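The new max_spw/max_vpw/max_hpw limits above describe per-SoC sync-pulse and porch constraints. As a hedged sketch, a mode check against those limits could look like the helper below; it is hypothetical, not taken from this patch, and only assumes the standard struct drm_display_mode timing fields.

	/* Hypothetical timing check against the new descriptor limits. */
	static bool hlcdc_mode_fits(const struct atmel_hlcdc_dc_desc *desc,
				    const struct drm_display_mode *mode)
	{
		int hfp = mode->hsync_start - mode->hdisplay;	/* front porch */
		int hbp = mode->htotal - mode->hsync_end;	/* back porch */
		int hsw = mode->hsync_end - mode->hsync_start;	/* sync pulse */
		int vfp = mode->vsync_start - mode->vdisplay;
		int vbp = mode->vtotal - mode->vsync_end;
		int vsw = mode->vsync_end - mode->vsync_start;

		return hsw <= desc->max_spw && vsw <= desc->max_spw &&
		       hfp <= desc->max_hpw && hbp <= desc->max_hpw &&
		       vfp <= desc->max_vpw && vbp <= desc->max_vpw;
	}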
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 0f7ec01..39802c0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -27,16 +27,6 @@
 #include "atmel_hlcdc_dc.h"
 
 /**
- * Atmel HLCDC RGB output mode
- */
-enum atmel_hlcdc_connector_rgb_mode {
-	ATMEL_HLCDC_CONNECTOR_RGB444,
-	ATMEL_HLCDC_CONNECTOR_RGB565,
-	ATMEL_HLCDC_CONNECTOR_RGB666,
-	ATMEL_HLCDC_CONNECTOR_RGB888,
-};
-
-/**
  * Atmel HLCDC RGB connector structure
  *
  * This structure stores RGB slave device information.
@@ -44,13 +34,13 @@
  * @connector: DRM connector
  * @encoder: DRM encoder
  * @dc: pointer to the atmel_hlcdc_dc structure
- * @dpms: current DPMS mode
+ * @panel: panel connected on the RGB output
  */
 struct atmel_hlcdc_rgb_output {
 	struct drm_connector connector;
 	struct drm_encoder encoder;
 	struct atmel_hlcdc_dc *dc;
-	int dpms;
+	struct drm_panel *panel;
 };
 
 static inline struct atmel_hlcdc_rgb_output *
@@ -66,91 +56,31 @@
 	return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
 }
 
-/**
- * Atmel HLCDC Panel device structure
- *
- * This structure is specialization of the slave device structure to
- * interface with drm panels.
- *
- * @base: base slave device fields
- * @panel: drm panel attached to this slave device
- */
-struct atmel_hlcdc_panel {
-	struct atmel_hlcdc_rgb_output base;
-	struct drm_panel *panel;
-};
-
-static inline struct atmel_hlcdc_panel *
-atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
-{
-	return container_of(output, struct atmel_hlcdc_panel, base);
-}
-
-static void atmel_hlcdc_panel_encoder_enable(struct drm_encoder *encoder)
+static void atmel_hlcdc_rgb_encoder_enable(struct drm_encoder *encoder)
 {
 	struct atmel_hlcdc_rgb_output *rgb =
 			drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
-	struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
 
-	drm_panel_enable(panel->panel);
-}
-
-static void atmel_hlcdc_panel_encoder_disable(struct drm_encoder *encoder)
-{
-	struct atmel_hlcdc_rgb_output *rgb =
-			drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
-	struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
-
-	drm_panel_disable(panel->panel);
-}
-
-static bool
-atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
-				     const struct drm_display_mode *mode,
-				     struct drm_display_mode *adjusted)
-{
-	return true;
-}
-
-static void
-atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
-				 struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted)
-{
-	struct atmel_hlcdc_rgb_output *rgb =
-			drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
-	struct drm_display_info *info = &rgb->connector.display_info;
-	unsigned int cfg;
-
-	cfg = 0;
-
-	if (info->num_bus_formats) {
-		switch (info->bus_formats[0]) {
-		case MEDIA_BUS_FMT_RGB565_1X16:
-			cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
-			break;
-		case MEDIA_BUS_FMT_RGB666_1X18:
-			cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
-			break;
-		case MEDIA_BUS_FMT_RGB888_1X24:
-			cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
-			break;
-		case MEDIA_BUS_FMT_RGB444_1X12:
-		default:
-			break;
-		}
+	if (rgb->panel) {
+		drm_panel_prepare(rgb->panel);
+		drm_panel_enable(rgb->panel);
 	}
+}
 
-	regmap_update_bits(rgb->dc->hlcdc->regmap, ATMEL_HLCDC_CFG(5),
-			   ATMEL_HLCDC_MODE_MASK,
-			   cfg);
+static void atmel_hlcdc_rgb_encoder_disable(struct drm_encoder *encoder)
+{
+	struct atmel_hlcdc_rgb_output *rgb =
+			drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
+
+	if (rgb->panel) {
+		drm_panel_disable(rgb->panel);
+		drm_panel_unprepare(rgb->panel);
+	}
 }
 
 static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
-	.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
-	.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
-	.disable = atmel_hlcdc_panel_encoder_disable,
-	.enable = atmel_hlcdc_panel_encoder_enable,
+	.disable = atmel_hlcdc_rgb_encoder_disable,
+	.enable = atmel_hlcdc_rgb_encoder_enable,
 };
 
 static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
@@ -167,9 +97,11 @@
 {
 	struct atmel_hlcdc_rgb_output *rgb =
 			drm_connector_to_atmel_hlcdc_rgb_output(connector);
-	struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
 
-	return panel->panel->funcs->get_modes(panel->panel);
+	if (rgb->panel)
+		return rgb->panel->funcs->get_modes(rgb->panel);
+
+	return 0;
 }
 
 static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
@@ -201,7 +133,13 @@
 static enum drm_connector_status
 atmel_hlcdc_panel_connector_detect(struct drm_connector *connector, bool force)
 {
-	return connector_status_connected;
+	struct atmel_hlcdc_rgb_output *rgb =
+			drm_connector_to_atmel_hlcdc_rgb_output(connector);
+
+	if (rgb->panel)
+		return connector_status_connected;
+
+	return connector_status_disconnected;
 }
 
 static void
@@ -209,9 +147,10 @@
 {
 	struct atmel_hlcdc_rgb_output *rgb =
 			drm_connector_to_atmel_hlcdc_rgb_output(connector);
-	struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
 
-	drm_panel_detach(panel->panel);
+	if (rgb->panel)
+		drm_panel_detach(rgb->panel);
+
 	drm_connector_cleanup(connector);
 }
 
@@ -225,88 +164,122 @@
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
-					   struct of_endpoint *ep)
+static int atmel_hlcdc_check_endpoint(struct drm_device *dev,
+				      const struct of_endpoint *ep)
 {
-	struct atmel_hlcdc_dc *dc = dev->dev_private;
 	struct device_node *np;
-	struct drm_panel *p = NULL;
-	struct atmel_hlcdc_panel *panel;
-	int ret;
+	void *obj;
 
 	np = of_graph_get_remote_port_parent(ep->local_node);
-	if (!np)
-		return -EINVAL;
 
-	p = of_drm_find_panel(np);
+	obj = of_drm_find_panel(np);
+	if (!obj)
+		obj = of_drm_find_bridge(np);
+
 	of_node_put(np);
 
-	if (!p)
-		return -EPROBE_DEFER;
+	return obj ? 0 : -EPROBE_DEFER;
+}
 
-	panel = devm_kzalloc(dev->dev, sizeof(*panel), GFP_KERNEL);
-	if (!panel)
+static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
+				       const struct of_endpoint *ep)
+{
+	struct atmel_hlcdc_dc *dc = dev->dev_private;
+	struct atmel_hlcdc_rgb_output *output;
+	struct device_node *np;
+	struct drm_panel *panel;
+	struct drm_bridge *bridge;
+	int ret;
+
+	output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
+	if (!output)
 		return -EINVAL;
 
-	panel->base.dpms = DRM_MODE_DPMS_OFF;
+	output->dc = dc;
 
-	panel->base.dc = dc;
-
-	drm_encoder_helper_add(&panel->base.encoder,
+	drm_encoder_helper_add(&output->encoder,
 			       &atmel_hlcdc_panel_encoder_helper_funcs);
-	ret = drm_encoder_init(dev, &panel->base.encoder,
+	ret = drm_encoder_init(dev, &output->encoder,
 			       &atmel_hlcdc_panel_encoder_funcs,
-			       DRM_MODE_ENCODER_LVDS, NULL);
+			       DRM_MODE_ENCODER_NONE, NULL);
 	if (ret)
 		return ret;
 
-	panel->base.connector.dpms = DRM_MODE_DPMS_OFF;
-	panel->base.connector.polled = DRM_CONNECTOR_POLL_CONNECT;
-	drm_connector_helper_add(&panel->base.connector,
-				 &atmel_hlcdc_panel_connector_helper_funcs);
-	ret = drm_connector_init(dev, &panel->base.connector,
-				 &atmel_hlcdc_panel_connector_funcs,
-				 DRM_MODE_CONNECTOR_LVDS);
-	if (ret)
-		goto err_encoder_cleanup;
+	output->encoder.possible_crtcs = 0x1;
 
-	drm_mode_connector_attach_encoder(&panel->base.connector,
-					  &panel->base.encoder);
-	panel->base.encoder.possible_crtcs = 0x1;
+	np = of_graph_get_remote_port_parent(ep->local_node);
 
-	drm_panel_attach(p, &panel->base.connector);
-	panel->panel = p;
+	ret = -EPROBE_DEFER;
 
-	return 0;
+	panel = of_drm_find_panel(np);
+	if (panel) {
+		of_node_put(np);
+		output->connector.dpms = DRM_MODE_DPMS_OFF;
+		output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+		drm_connector_helper_add(&output->connector,
+				&atmel_hlcdc_panel_connector_helper_funcs);
+		ret = drm_connector_init(dev, &output->connector,
+					 &atmel_hlcdc_panel_connector_funcs,
+					 DRM_MODE_CONNECTOR_Unknown);
+		if (ret)
+			goto err_encoder_cleanup;
+
+		drm_mode_connector_attach_encoder(&output->connector,
+						  &output->encoder);
+
+		ret = drm_panel_attach(panel, &output->connector);
+		if (ret) {
+			drm_connector_cleanup(&output->connector);
+			goto err_encoder_cleanup;
+		}
+
+		output->panel = panel;
+
+		return 0;
+	}
+
+	bridge = of_drm_find_bridge(np);
+	of_node_put(np);
+
+	if (bridge) {
+		output->encoder.bridge = bridge;
+		bridge->encoder = &output->encoder;
+		ret = drm_bridge_attach(dev, bridge);
+		if (!ret)
+			return 0;
+	}
 
 err_encoder_cleanup:
-	drm_encoder_cleanup(&panel->base.encoder);
+	drm_encoder_cleanup(&output->encoder);
 
 	return ret;
 }
 
 int atmel_hlcdc_create_outputs(struct drm_device *dev)
 {
-	struct device_node *port_np, *np;
+	struct device_node *ep_np = NULL;
 	struct of_endpoint ep;
 	int ret;
 
-	port_np = of_get_child_by_name(dev->dev->of_node, "port");
-	if (!port_np)
-		return -EINVAL;
+	for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
+		ret = of_graph_parse_endpoint(ep_np, &ep);
+		if (!ret)
+			ret = atmel_hlcdc_check_endpoint(dev, &ep);
 
-	np = of_get_child_by_name(port_np, "endpoint");
-	of_node_put(port_np);
+		of_node_put(ep_np);
+		if (ret)
+			return ret;
+	}
 
-	if (!np)
-		return -EINVAL;
+	for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
+		ret = of_graph_parse_endpoint(ep_np, &ep);
+		if (!ret)
+			ret = atmel_hlcdc_attach_endpoint(dev, &ep);
 
-	ret = of_graph_parse_endpoint(np, &ep);
-	of_node_put(port_np);
+		of_node_put(ep_np);
+		if (ret)
+			return ret;
+	}
 
-	if (ret)
-		return ret;
-
-	/* We currently only support panel output */
-	return atmel_hlcdc_create_panel_output(dev, &ep);
+	return 0;
 }
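atmel_hlcdc_create_outputs() now walks every OF-graph endpoint twice: the first pass returns -EPROBE_DEFER as soon as one remote panel or bridge driver has not registered yet, so nothing gets attached until every output can be; only then does the second pass create the connectors and bridges. A short caller-side sketch of how that deferral is meant to propagate (illustrative only; the patch itself keeps the unconditional dev_err() shown earlier):

	ret = atmel_hlcdc_create_outputs(dev);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev->dev,
				"failed to create HLCDC outputs: %d\n", ret);
		/* -EPROBE_DEFER makes the driver core retry the probe later */
		return ret;
	}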
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index d65dcae..aef3ca8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -37,6 +37,7 @@
  * @xstride: value to add to the pixel pointer between each line
  * @pstride: value to add to the pixel pointer between each pixel
  * @nplanes: number of planes (deduced from pixel_format)
+ * @prepared: plane update has been prepared
  */
 struct atmel_hlcdc_plane_state {
 	struct drm_plane_state base;
@@ -58,12 +59,15 @@
 	int disc_w;
 	int disc_h;
 
+	int ahb_id;
+
 	/* These fields are private and should not be touched */
 	int bpp[ATMEL_HLCDC_MAX_PLANES];
 	unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
 	int xstride[ATMEL_HLCDC_MAX_PLANES];
 	int pstride[ATMEL_HLCDC_MAX_PLANES];
 	int nplanes;
+	bool prepared;
 };
 
 static inline struct atmel_hlcdc_plane_state *
@@ -359,8 +363,10 @@
 
 	atmel_hlcdc_layer_update_cfg(&plane->layer,
 				     ATMEL_HLCDC_LAYER_DMA_CFG_ID,
-				     ATMEL_HLCDC_LAYER_DMA_BLEN_MASK,
-				     ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16);
+				     ATMEL_HLCDC_LAYER_DMA_BLEN_MASK |
+				     ATMEL_HLCDC_LAYER_DMA_SIF,
+				     ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 |
+				     state->ahb_id);
 
 	atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
 				     ATMEL_HLCDC_LAYER_ITER2BL |
@@ -435,6 +441,41 @@
 	}
 }
 
+int atmel_hlcdc_plane_prepare_ahb_routing(struct drm_crtc_state *c_state)
+{
+	unsigned int ahb_load[2] = { };
+	struct drm_plane *plane;
+
+	drm_atomic_crtc_state_for_each_plane(plane, c_state) {
+		struct atmel_hlcdc_plane_state *plane_state;
+		struct drm_plane_state *plane_s;
+		unsigned int pixels, load = 0;
+		int i;
+
+		plane_s = drm_atomic_get_plane_state(c_state->state, plane);
+		if (IS_ERR(plane_s))
+			return PTR_ERR(plane_s);
+
+		plane_state =
+			drm_plane_state_to_atmel_hlcdc_plane_state(plane_s);
+
+		pixels = (plane_state->src_w * plane_state->src_h) -
+			 (plane_state->disc_w * plane_state->disc_h);
+
+		for (i = 0; i < plane_state->nplanes; i++)
+			load += pixels * plane_state->bpp[i];
+
+		if (ahb_load[0] <= ahb_load[1])
+			plane_state->ahb_id = 0;
+		else
+			plane_state->ahb_id = 1;
+
+		ahb_load[plane_state->ahb_id] += load;
+	}
+
+	return 0;
+}
+
 int
 atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
 {
@@ -714,12 +755,54 @@
 static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
 					const struct drm_plane_state *new_state)
 {
+	/*
+	 * FIXME: we should avoid this const -> non-const cast but it's
+	 * currently the only solution we have to modify the ->prepared
+	 * state and roll back the update request.
+	 * Ideally, we should rework the code to attach all the resources
+	 * to atmel_hlcdc_plane_state (including the DMA desc allocation),
+	 * but this requires a complete rework of the atmel_hlcdc_layer
+	 * code.
+	 */
+	struct drm_plane_state *s = (struct drm_plane_state *)new_state;
 	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+	struct atmel_hlcdc_plane_state *state =
+			drm_plane_state_to_atmel_hlcdc_plane_state(s);
+	int ret;
 
-	if (!new_state->fb)
-		return 0;
+	ret = atmel_hlcdc_layer_update_start(&plane->layer);
+	if (!ret)
+		state->prepared = true;
 
-	return atmel_hlcdc_layer_update_start(&plane->layer);
+	return ret;
+}
+
+static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p,
+				const struct drm_plane_state *old_state)
+{
+	/*
+	 * FIXME: we should avoid this const -> non-const cast but it's
+	 * currently the only solution we have to modify the ->prepared
+	 * state and roll back the update request.
+	 * Ideally, we should rework the code to attach all the resources
+	 * to atmel_hlcdc_plane_state (including the DMA desc allocation),
+	 * but this requires a complete rework of the atmel_hlcdc_layer
+	 * code.
+	 */
+	struct drm_plane_state *s = (struct drm_plane_state *)old_state;
+	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+	struct atmel_hlcdc_plane_state *state =
+			drm_plane_state_to_atmel_hlcdc_plane_state(s);
+
+	/*
+	 * The request has already been applied or cancelled; nothing to do
+	 * here.
+	 */
+	if (!state->prepared)
+		return;
+
+	atmel_hlcdc_layer_update_rollback(&plane->layer);
+	state->prepared = false;
 }
 
 static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
@@ -844,6 +927,7 @@
 
 static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
 	.prepare_fb = atmel_hlcdc_plane_prepare_fb,
+	.cleanup_fb = atmel_hlcdc_plane_cleanup_fb,
 	.atomic_check = atmel_hlcdc_plane_atomic_check,
 	.atomic_update = atmel_hlcdc_plane_atomic_update,
 	.atomic_disable = atmel_hlcdc_plane_atomic_disable,
@@ -883,6 +967,7 @@
 		return NULL;
 
 	copy->disc_updated = false;
+	copy->prepared = false;
 
 	if (copy->base.fb)
 		drm_framebuffer_reference(copy->base.fb);
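atmel_hlcdc_plane_prepare_ahb_routing() above balances plane DMA fetches across the two AHB masters: each plane's load is estimated as its visible pixels (minus the discarded area) times bytes per pixel, and the plane is routed to whichever master currently carries the lighter load. A standalone sketch of that greedy assignment, using simplified hypothetical types:

	/*
	 * Illustrative greedy bus assignment mirroring the arithmetic in
	 * atmel_hlcdc_plane_prepare_ahb_routing(); the single bpp value is a
	 * simplification (the driver sums over all planes of the format).
	 */
	struct plane_load {
		unsigned int src_w, src_h;	/* visible source size */
		unsigned int disc_w, disc_h;	/* discarded (hidden) area */
		unsigned int bpp;		/* bytes per pixel */
		int ahb_id;			/* chosen AHB master: 0 or 1 */
	};

	static void assign_ahb_masters(struct plane_load *planes, int n)
	{
		unsigned int ahb_load[2] = { };
		int i;

		for (i = 0; i < n; i++) {
			unsigned int pixels = planes[i].src_w * planes[i].src_h -
					      planes[i].disc_w * planes[i].disc_h;

			/* Route to whichever master is currently lighter. */
			planes[i].ahb_id = ahb_load[0] <= ahb_load[1] ? 0 : 1;
			ahb_load[planes[i].ahb_id] += pixels * planes[i].bpp;
		}
	}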
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 7520bf8..e1ec498 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -82,7 +82,7 @@
 
 	bo = gem_to_bochs_bo(gobj);
 
-	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
+	ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
 	if (ret)
 		return ret;
 
@@ -162,22 +162,7 @@
 	return 0;
 }
 
-void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-			u16 blue, int regno)
-{
-}
-
-void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-			u16 *blue, int regno)
-{
-	*red   = regno;
-	*green = regno;
-	*blue  = regno;
-}
-
 static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
-	.gamma_set = bochs_fb_gamma_set,
-	.gamma_get = bochs_fb_gamma_get,
 	.fb_probe = bochsfb_create,
 };
 
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 96926f0..207a2cb 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -43,7 +43,7 @@
 	if (old_fb) {
 		bochs_fb = to_bochs_framebuffer(old_fb);
 		bo = gem_to_bochs_bo(bochs_fb->obj);
-		ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
+		ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
 		if (ret) {
 			DRM_ERROR("failed to reserve old_fb bo\n");
 		} else {
@@ -57,7 +57,7 @@
 
 	bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
 	bo = gem_to_bochs_bo(bochs_fb->obj);
-	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
+	ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
 	if (ret)
 		return ret;
 
@@ -93,11 +93,6 @@
 {
 }
 
-static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				 u16 *blue, uint32_t start, uint32_t size)
-{
-}
-
 static int bochs_crtc_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event,
@@ -120,7 +115,6 @@
 
 /* These provide the minimum set of functions required to handle a CRTC */
 static const struct drm_crtc_funcs bochs_crtc_funcs = {
-	.gamma_set = bochs_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = drm_crtc_cleanup,
 	.page_flip = bochs_crtc_page_flip,
@@ -140,7 +134,6 @@
 	struct drm_crtc *crtc = &bochs->crtc;
 
 	drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
-	drm_mode_crtc_set_gamma_size(crtc, 256);
 	drm_crtc_helper_add(crtc, &bochs_helper_funcs);
 }
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index d812ad0..6cf912c 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -212,6 +212,8 @@
 	.verify_access = bochs_bo_verify_access,
 	.io_mem_reserve = &bochs_ttm_io_mem_reserve,
 	.io_mem_free = &bochs_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int bochs_mm_init(struct bochs_device *bochs)
@@ -456,7 +458,7 @@
 	struct drm_gem_object *obj;
 	struct bochs_bo *bo;
 
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL)
 		return -ENOENT;
 
@@ -518,7 +520,7 @@
 	if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
 		return ERR_PTR(-ENOENT);
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 27e2022..8f7423f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -7,6 +7,16 @@
 menu "Display Interface Bridges"
 	depends on DRM && DRM_BRIDGE
 
+config DRM_ANALOGIX_ANX78XX
+	tristate "Analogix ANX78XX bridge"
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	---help---
+	  ANX78XX is an ultra-low-power Full-HD SlimPort transmitter
+	  designed for portable devices. The ANX78XX transforms
+	  the HDMI output of an application processor to MyDP
+	  or DisplayPort.
+
 config DRM_DW_HDMI
 	tristate
 	select DRM_KMS_HELPER
@@ -40,4 +50,6 @@
 	---help---
 	  Parade eDP-LVDS bridge chip driver.
 
+source "drivers/gpu/drm/bridge/analogix/Kconfig"
+
 endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index f13c33d..96b13b3 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,8 @@
 ccflags-y := -Iinclude/drm
 
+obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
new file mode 100644
index 0000000..d087b05
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -0,0 +1,1514 @@
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+
+#include "analogix-anx78xx.h"
+
+#define I2C_NUM_ADDRESSES	5
+#define I2C_IDX_TX_P0		0
+#define I2C_IDX_TX_P1		1
+#define I2C_IDX_TX_P2		2
+#define I2C_IDX_RX_P0		3
+#define I2C_IDX_RX_P1		4
+
+#define XTAL_CLK		270 /* 27M */
+#define AUX_CH_BUFFER_SIZE	16
+#define AUX_WAIT_TIMEOUT_MS	15
+
+static const u8 anx78xx_i2c_addresses[] = {
+	[I2C_IDX_TX_P0] = TX_P0,
+	[I2C_IDX_TX_P1] = TX_P1,
+	[I2C_IDX_TX_P2] = TX_P2,
+	[I2C_IDX_RX_P0] = RX_P0,
+	[I2C_IDX_RX_P1] = RX_P1,
+};
+
+struct anx78xx_platform_data {
+	struct regulator *dvdd10;
+	struct gpio_desc *gpiod_hpd;
+	struct gpio_desc *gpiod_pd;
+	struct gpio_desc *gpiod_reset;
+
+	int hpd_irq;
+	int intp_irq;
+};
+
+struct anx78xx {
+	struct drm_dp_aux aux;
+	struct drm_bridge bridge;
+	struct i2c_client *client;
+	struct edid *edid;
+	struct drm_connector connector;
+	struct drm_dp_link link;
+	struct anx78xx_platform_data pdata;
+	struct mutex lock;
+
+	/*
+	 * I2C Slave addresses of ANX7814 are mapped as TX_P0, TX_P1, TX_P2,
+	 * RX_P0 and RX_P1.
+	 */
+	struct i2c_client *i2c_dummy[I2C_NUM_ADDRESSES];
+	struct regmap *map[I2C_NUM_ADDRESSES];
+
+	u16 chipid;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+	bool powered;
+};
+
+static inline struct anx78xx *connector_to_anx78xx(struct drm_connector *c)
+{
+	return container_of(c, struct anx78xx, connector);
+}
+
+static inline struct anx78xx *bridge_to_anx78xx(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct anx78xx, bridge);
+}
+
+static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, mask);
+}
+
+static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+	return regmap_update_bits(map, reg, mask, 0);
+}
+
+static bool anx78xx_aux_op_finished(struct anx78xx *anx78xx)
+{
+	unsigned int value;
+	int err;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
+			  &value);
+	if (err < 0)
+		return false;
+
+	return (value & SP_AUX_EN) == 0;
+}
+
+static int anx78xx_aux_wait(struct anx78xx *anx78xx)
+{
+	unsigned long timeout;
+	unsigned int status;
+	int err;
+
+	timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+	while (!anx78xx_aux_op_finished(anx78xx)) {
+		if (time_after(jiffies, timeout)) {
+			if (!anx78xx_aux_op_finished(anx78xx)) {
+				DRM_ERROR("Timed out waiting AUX to finish\n");
+				return -ETIMEDOUT;
+			}
+
+			break;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	/* Read the AUX channel access status */
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_CH_STATUS_REG,
+			  &status);
+	if (err < 0) {
+		DRM_ERROR("Failed to read from AUX channel: %d\n", err);
+		return err;
+	}
+
+	if (status & SP_AUX_STATUS) {
+		DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
+			  status);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int anx78xx_aux_address(struct anx78xx *anx78xx, unsigned int addr)
+{
+	int err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_7_0_REG,
+			   addr & 0xff);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_15_8_REG,
+			   (addr & 0xff00) >> 8);
+	if (err)
+		return err;
+
+	/*
+	 * DP AUX CH Address Register #2, only update bits[3:0]
+	 * [7:4] RESERVED
+	 * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
+	 */
+	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_AUX_ADDR_19_16_REG,
+				 SP_AUX_ADDR_19_16_MASK,
+				 (addr & 0xf0000) >> 16);
+
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
+				    struct drm_dp_aux_msg *msg)
+{
+	struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);
+	u8 ctrl1 = msg->request;
+	u8 ctrl2 = SP_AUX_EN;
+	u8 *buffer = msg->buffer;
+	int err;
+
+	/* The DP AUX transmit and receive buffer has 16 bytes. */
+	if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
+		return -E2BIG;
+
+	/* Zero-sized messages specify address-only transactions. */
+	if (msg->size < 1)
+		ctrl2 |= SP_ADDR_ONLY;
+	else	/* For non-zero-sized messages, set the length field. */
+		ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
+
+	if ((msg->request & DP_AUX_I2C_READ) == 0) {
+		/* For write requests (WRITE | MOT), write values to the data buffer */
+		err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P0],
+					SP_DP_BUF_DATA0_REG, buffer,
+					msg->size);
+		if (err)
+			return err;
+	}
+
+	/* Write address and request */
+	err = anx78xx_aux_address(anx78xx, msg->address);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL1_REG,
+			   ctrl1);
+	if (err)
+		return err;
+
+	/* Start transaction */
+	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY |
+				 SP_AUX_EN, ctrl2);
+	if (err)
+		return err;
+
+	err = anx78xx_aux_wait(anx78xx);
+	if (err)
+		return err;
+
+	msg->reply = DP_AUX_I2C_REPLY_ACK;
+
+	if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
+		/* Read values from data buffer */
+		err = regmap_bulk_read(anx78xx->map[I2C_IDX_TX_P0],
+				       SP_DP_BUF_DATA0_REG, buffer,
+				       msg->size);
+		if (err)
+			return err;
+	}
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY);
+	if (err)
+		return err;
+
+	return msg->size;
+}
+
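anx78xx_aux_transfer() above becomes the backend of the drm_dp_aux channel registered in anx78xx_bridge_attach() further down, so every DPCD access in this driver goes through the generic helpers. A short usage sketch, mirroring what anx78xx_dp_link_training() does later in the file:

	u8 dp_bw;
	int err;

	/* Reads DPCD 0x001 (DP_MAX_LINK_RATE) via anx78xx_aux_transfer(). */
	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_MAX_LINK_RATE, &dp_bw);
	if (err < 0)
		return err;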
+static int anx78xx_set_hpd(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+				 SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
+			       SP_HPD_OUT);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_clear_hpd(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
+				 SP_HPD_OUT);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static const struct reg_sequence tmds_phy_initialization[] = {
+	{ SP_TMDS_CTRL_BASE +  1, 0x90 },
+	{ SP_TMDS_CTRL_BASE +  2, 0xa9 },
+	{ SP_TMDS_CTRL_BASE +  6, 0x92 },
+	{ SP_TMDS_CTRL_BASE +  7, 0x80 },
+	{ SP_TMDS_CTRL_BASE + 20, 0xf2 },
+	{ SP_TMDS_CTRL_BASE + 22, 0xc4 },
+	{ SP_TMDS_CTRL_BASE + 23, 0x18 },
+};
+
+static int anx78xx_rx_initialization(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
+			   SP_AUD_MUTE | SP_VID_MUTE);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG,
+			       SP_MAN_HDMI5V_DET | SP_PLLLOCK_CKDT_EN |
+			       SP_DIGITAL_CKDT_EN);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
+			       SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+				 SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
+				 SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
+	if (err)
+		return err;
+
+	/* Sync detect change, GP set mute */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_AUD_EXCEPTION_ENABLE_BASE + 1, BIT(5) |
+			       BIT(6));
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_AUD_EXCEPTION_ENABLE_BASE + 3,
+			       SP_AEC_EN21);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG,
+			       SP_AVC_EN | SP_AAC_OE | SP_AAC_EN);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+				 SP_SYSTEM_POWER_DOWN1_REG, SP_PWDN_CTRL);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+			       SP_VID_DATA_RANGE_CTRL_REG, SP_R2Y_INPUT_LIMIT);
+	if (err)
+		return err;
+
+	/* Enable DDC stretch */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_DP_EXTRA_I2C_DEV_ADDR_REG, SP_I2C_EXTRA_ADDR);
+	if (err)
+		return err;
+
+	/* TMDS phy initialization */
+	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0],
+				     tmds_phy_initialization,
+				     ARRAY_SIZE(tmds_phy_initialization));
+	if (err)
+		return err;
+
+	err = anx78xx_clear_hpd(anx78xx);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static const u8 dp_tx_output_precise_tune_bits[20] = {
+	0x01, 0x03, 0x07, 0x7f, 0x71, 0x6b, 0x7f,
+	0x73, 0x7f, 0x7f, 0x00, 0x00, 0x00, 0x00,
+	0x0c, 0x42, 0x1e, 0x3e, 0x72, 0x7e,
+};
+
+static int anx78xx_link_phy_initialization(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/*
+	 * REVISIT: this writes to RESERVED bits in the Analog Control 0
+	 * register.
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG,
+			   0x02);
+	if (err)
+		return err;
+
+	/*
+	 * Write DP TX output emphasis precise tune bits.
+	 */
+	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1],
+				SP_DP_TX_LT_CTRL0_REG,
+				dp_tx_output_precise_tune_bits,
+				ARRAY_SIZE(dp_tx_output_precise_tune_bits));
+
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_xtal_clk_sel(struct anx78xx *anx78xx)
+{
+	unsigned int value;
+	int err;
+
+	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_ANALOG_DEBUG2_REG,
+				 SP_XTAL_FRQ | SP_FORCE_SW_OFF_BYPASS,
+				 SP_XTAL_FRQ_27M);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG,
+			   XTAL_CLK & SP_WAIT_COUNTER_7_0_MASK);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG,
+			   ((XTAL_CLK & 0xff00) >> 2) | (XTAL_CLK / 10));
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_I2C_GEN_10US_TIMER0_REG, XTAL_CLK & 0xff);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_I2C_GEN_10US_TIMER1_REG,
+			   (XTAL_CLK & 0xff00) >> 8);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG,
+			   XTAL_CLK / 10 - 1);
+	if (err)
+		return err;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
+			  SP_HDMI_US_TIMER_CTRL_REG,
+			  &value);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0],
+			   SP_HDMI_US_TIMER_CTRL_REG,
+			   (value & SP_MS_TIMER_MARGIN_10_8_MASK) |
+			   ((((XTAL_CLK / 10) >> 1) - 2) << 3));
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static const struct reg_sequence otp_key_protect[] = {
+	{ SP_OTP_KEY_PROTECT1_REG, SP_OTP_PSW1 },
+	{ SP_OTP_KEY_PROTECT2_REG, SP_OTP_PSW2 },
+	{ SP_OTP_KEY_PROTECT3_REG, SP_OTP_PSW3 },
+};
+
+static int anx78xx_tx_initialization(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/* Set terminal resistor to 50 ohm */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
+			   0x30);
+	if (err)
+		return err;
+
+	/* Enable aux double diff output */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_AUX_CH_CTRL2_REG, 0x08);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_DP_HDCP_CTRL_REG, SP_AUTO_EN |
+				 SP_AUTO_START);
+	if (err)
+		return err;
+
+	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0],
+				     otp_key_protect,
+				     ARRAY_SIZE(otp_key_protect));
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_HDCP_KEY_COMMAND_REG, SP_DISABLE_SYNC_HDCP);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG,
+			   SP_VID_VRES_TH);
+	if (err)
+		return err;
+
+	/*
+	 * DP HDCP auto authentication wait timer (when downstream starts to
+	 * auth, DP side will wait for this period then do auth automatically)
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG,
+			   0x00);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_HDCP_CTRL_REG, SP_LINK_POLLING);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_LINK_DEBUG_CTRL_REG, SP_M_VID_DEBUG);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2],
+			       SP_ANALOG_DEBUG2_REG, SP_POWERON_TIME_1P5MS);
+	if (err)
+		return err;
+
+	err = anx78xx_xtal_clk_sel(anx78xx);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG,
+			   SP_DEFER_CTRL_EN | 0x0c);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_POLLING_CTRL_REG,
+			       SP_AUTO_POLLING_DISABLE);
+	if (err)
+		return err;
+
+	/*
+	 * Shorten the link integrity check timer to speed up bstatus
+	 * polling for HDCP CTS item 1A-07
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_HDCP_LINK_CHECK_TIMER_REG, 0x1d);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_MISC_CTRL_REG, SP_EQ_TRAINING_LOOP);
+	if (err)
+		return err;
+
+	/* Power down the main link by default */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+	if (err)
+		return err;
+
+	err = anx78xx_link_phy_initialization(anx78xx);
+	if (err)
+		return err;
+
+	/* Gen m_clk with downspreading */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_DP_M_CALCULATION_CTRL_REG, SP_M_GEN_CLK_SEL);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_enable_interrupts(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/*
+	 * BIT0: INT pin assertion polarity: 1 = assert high
+	 * BIT1: INT pin output type: 0 = push/pull
+	 */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
+			   SP_COMMON_INT_MASK4_REG, SP_HPD_LOST | SP_HPD_PLUG);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG,
+			   SP_TRAINING_FINISH);
+	if (err)
+		return err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG,
+			   SP_CKDT_CHG | SP_SCDT_CHG);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void anx78xx_poweron(struct anx78xx *anx78xx)
+{
+	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+	int err;
+
+	if (WARN_ON(anx78xx->powered))
+		return;
+
+	if (pdata->dvdd10) {
+		err = regulator_enable(pdata->dvdd10);
+		if (err) {
+			DRM_ERROR("Failed to enable DVDD10 regulator: %d\n",
+				  err);
+			return;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+	usleep_range(1000, 2000);
+
+	gpiod_set_value_cansleep(pdata->gpiod_pd, 0);
+	usleep_range(1000, 2000);
+
+	gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+
+	/* Power on registers module */
+	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+	anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+			   SP_REGISTER_PD | SP_TOTAL_PD);
+
+	anx78xx->powered = true;
+}
+
+static void anx78xx_poweroff(struct anx78xx *anx78xx)
+{
+	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+	int err;
+
+	if (WARN_ON(!anx78xx->powered))
+		return;
+
+	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+	usleep_range(1000, 2000);
+
+	gpiod_set_value_cansleep(pdata->gpiod_pd, 1);
+	usleep_range(1000, 2000);
+
+	if (pdata->dvdd10) {
+		err = regulator_disable(pdata->dvdd10);
+		if (err) {
+			DRM_ERROR("Failed to disable DVDD10 regulator: %d\n",
+				  err);
+			return;
+		}
+
+		usleep_range(1000, 2000);
+	}
+
+	anx78xx->powered = false;
+}
+
+static int anx78xx_start(struct anx78xx *anx78xx)
+{
+	int err;
+
+	/* Power on all modules */
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_POWERDOWN_CTRL_REG,
+				 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD |
+				 SP_LINK_PD);
+
+	err = anx78xx_enable_interrupts(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to enable interrupts: %d\n", err);
+		goto err_poweroff;
+	}
+
+	err = anx78xx_rx_initialization(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed receiver initialization: %d\n", err);
+		goto err_poweroff;
+	}
+
+	err = anx78xx_tx_initialization(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed transmitter initialization: %d\n", err);
+		goto err_poweroff;
+	}
+
+	/*
+	 * This delay seems to help keep the hardware in a good state. Without
+	 * it, there are times when it fails silently.
+	 */
+	usleep_range(10000, 15000);
+
+	return 0;
+
+err_poweroff:
+	DRM_ERROR("Failed SlimPort transmitter initialization: %d\n", err);
+	anx78xx_poweroff(anx78xx);
+
+	return err;
+}
+
+static int anx78xx_init_pdata(struct anx78xx *anx78xx)
+{
+	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+	struct device *dev = &anx78xx->client->dev;
+
+	/* 1.0V digital core power regulator  */
+	pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
+	if (IS_ERR(pdata->dvdd10)) {
+		DRM_ERROR("DVDD10 regulator not found\n");
+		return PTR_ERR(pdata->dvdd10);
+	}
+
+	/* GPIO for HPD */
+	pdata->gpiod_hpd = devm_gpiod_get(dev, "hpd", GPIOD_IN);
+	if (IS_ERR(pdata->gpiod_hpd))
+		return PTR_ERR(pdata->gpiod_hpd);
+
+	/* GPIO for chip power down */
+	pdata->gpiod_pd = devm_gpiod_get(dev, "pd", GPIOD_OUT_HIGH);
+	if (IS_ERR(pdata->gpiod_pd))
+		return PTR_ERR(pdata->gpiod_pd);
+
+	/* GPIO for chip reset */
+	pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+
+	return PTR_ERR_OR_ZERO(pdata->gpiod_reset);
+}
+
+static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
+{
+	u8 dp_bw, value;
+	int err;
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
+			   0x0);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_POWERDOWN_CTRL_REG,
+				 SP_TOTAL_PD);
+	if (err)
+		return err;
+
+	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_MAX_LINK_RATE, &dp_bw);
+	if (err < 0)
+		return err;
+
+	switch (dp_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+	case DP_LINK_BW_5_4:
+		break;
+
+	default:
+		DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
+		return -EINVAL;
+	}
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+			       SP_VIDEO_MUTE);
+	if (err)
+		return err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+				 SP_VID_CTRL1_REG, SP_VIDEO_EN);
+	if (err)
+		return err;
+
+	/* Get DPCD info */
+	err = drm_dp_dpcd_read(&anx78xx->aux, DP_DPCD_REV,
+			       &anx78xx->dpcd, DP_RECEIVER_CAP_SIZE);
+	if (err < 0) {
+		DRM_ERROR("Failed to read DPCD: %d\n", err);
+		return err;
+	}
+
+	/* Clear channel x SERDES power down */
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+	if (err)
+		return err;
+
+	/* Check link capabilities */
+	err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
+	if (err < 0) {
+		DRM_ERROR("Failed to probe link capabilities: %d\n", err);
+		return err;
+	}
+
+	/* Power up the sink */
+	err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
+	if (err < 0) {
+		DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
+		return err;
+	}
+
+	/* Possibly enable downspread on the sink */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_DP_DOWNSPREAD_CTRL1_REG, 0);
+	if (err)
+		return err;
+
+	if (anx78xx->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+		DRM_DEBUG("Enable downspread on the sink\n");
+		/* 4000PPM */
+		err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+				   SP_DP_DOWNSPREAD_CTRL1_REG, 8);
+		if (err)
+			return err;
+
+		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL,
+					 DP_SPREAD_AMP_0_5);
+		if (err < 0)
+			return err;
+	} else {
+		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL, 0);
+		if (err < 0)
+			return err;
+	}
+
+	/* Set the lane count and the link rate on the sink */
+	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+		err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+				       SP_DP_SYSTEM_CTRL_BASE + 4,
+				       SP_ENHANCED_MODE);
+	else
+		err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+					 SP_DP_SYSTEM_CTRL_BASE + 4,
+					 SP_ENHANCED_MODE);
+	if (err)
+		return err;
+
+	value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+			   SP_DP_MAIN_LINK_BW_SET_REG, value);
+	if (err)
+		return err;
+
+	err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+	if (err < 0) {
+		DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+		return err;
+	}
+
+	/* Start training on the source */
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG,
+			   SP_LT_EN);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_config_dp_output(struct anx78xx *anx78xx)
+{
+	int err;
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+				 SP_VIDEO_MUTE);
+	if (err)
+		return err;
+
+	/* Enable DP output */
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+			       SP_VIDEO_EN);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_send_video_infoframe(struct anx78xx *anx78xx,
+					struct hdmi_avi_infoframe *frame)
+{
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	int err;
+
+	err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("Failed to pack AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+				 SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
+	if (err)
+		return err;
+
+	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2],
+				SP_INFOFRAME_AVI_DB1_REG, buffer,
+				frame->length);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_UD);
+	if (err)
+		return err;
+
+	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int anx78xx_get_downstream_info(struct anx78xx *anx78xx)
+{
+	u8 value;
+	int err;
+
+	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SINK_COUNT, &value);
+	if (err < 0) {
+		DRM_ERROR("Get sink count failed %d\n", err);
+		return err;
+	}
+
+	if (!DP_GET_SINK_COUNT(value)) {
+		DRM_ERROR("Downstream disconnected\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int anx78xx_get_modes(struct drm_connector *connector)
+{
+	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+	int err, num_modes = 0;
+
+	if (WARN_ON(!anx78xx->powered))
+		return 0;
+
+	if (anx78xx->edid)
+		return drm_add_edid_modes(connector, anx78xx->edid);
+
+	mutex_lock(&anx78xx->lock);
+
+	err = anx78xx_get_downstream_info(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to get downstream info: %d\n", err);
+		goto unlock;
+	}
+
+	anx78xx->edid = drm_get_edid(connector, &anx78xx->aux.ddc);
+	if (!anx78xx->edid) {
+		DRM_ERROR("Failed to read EDID\n");
+		goto unlock;
+	}
+
+	err = drm_mode_connector_update_edid_property(connector,
+						      anx78xx->edid);
+	if (err) {
+		DRM_ERROR("Failed to update EDID property: %d\n", err);
+		goto unlock;
+	}
+
+	num_modes = drm_add_edid_modes(connector, anx78xx->edid);
+	/* Store the ELD */
+	drm_edid_to_eld(connector, anx78xx->edid);
+
+unlock:
+	mutex_unlock(&anx78xx->lock);
+
+	return num_modes;
+}
+
+static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
+{
+	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+
+	return anx78xx->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
+	.get_modes = anx78xx_get_modes,
+	.best_encoder = anx78xx_best_encoder,
+};
+
+static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
+						bool force)
+{
+	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+
+	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
+		return connector_status_disconnected;
+
+	return connector_status_connected;
+}
+
+static void anx78xx_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs anx78xx_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = anx78xx_detect,
+	.destroy = anx78xx_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+	int err;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	/* Register aux channel */
+	anx78xx->aux.name = "DP-AUX";
+	anx78xx->aux.dev = &anx78xx->client->dev;
+	anx78xx->aux.transfer = anx78xx_aux_transfer;
+
+	err = drm_dp_aux_register(&anx78xx->aux);
+	if (err < 0) {
+		DRM_ERROR("Failed to register aux channel: %d\n", err);
+		return err;
+	}
+
+	err = drm_connector_init(bridge->dev, &anx78xx->connector,
+				 &anx78xx_connector_funcs,
+				 DRM_MODE_CONNECTOR_DisplayPort);
+	if (err) {
+		DRM_ERROR("Failed to initialize connector: %d\n", err);
+		return err;
+	}
+
+	drm_connector_helper_add(&anx78xx->connector,
+				 &anx78xx_connector_helper_funcs);
+
+	err = drm_connector_register(&anx78xx->connector);
+	if (err) {
+		DRM_ERROR("Failed to register connector: %d\n", err);
+		return err;
+	}
+
+	anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+	err = drm_mode_connector_attach_encoder(&anx78xx->connector,
+						bridge->encoder);
+	if (err) {
+		DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static bool anx78xx_bridge_mode_fixup(struct drm_bridge *bridge,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	/* Max 1200p at 5.4 GHz, one lane */
+	if (mode->clock > 154000)
+		return false;
+
+	return true;
+}
+
+static void anx78xx_bridge_disable(struct drm_bridge *bridge)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+
+	/* Power off all modules except configuration registers access */
+	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+}
+
+static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+	struct hdmi_avi_infoframe frame;
+	int err;
+
+	if (WARN_ON(!anx78xx->powered))
+		return;
+
+	mutex_lock(&anx78xx->lock);
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode);
+	if (err) {
+		DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
+		goto unlock;
+	}
+
+	err = anx78xx_send_video_infoframe(anx78xx, &frame);
+	if (err)
+		DRM_ERROR("Failed to send AVI infoframe: %d\n", err);
+
+unlock:
+	mutex_unlock(&anx78xx->lock);
+}
+
+static void anx78xx_bridge_enable(struct drm_bridge *bridge)
+{
+	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+	int err;
+
+	err = anx78xx_start(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to initialize: %d\n", err);
+		return;
+	}
+
+	err = anx78xx_set_hpd(anx78xx);
+	if (err)
+		DRM_ERROR("Failed to set HPD: %d\n", err);
+}
+
+static const struct drm_bridge_funcs anx78xx_bridge_funcs = {
+	.attach = anx78xx_bridge_attach,
+	.mode_fixup = anx78xx_bridge_mode_fixup,
+	.disable = anx78xx_bridge_disable,
+	.mode_set = anx78xx_bridge_mode_set,
+	.enable = anx78xx_bridge_enable,
+};
+
+static irqreturn_t anx78xx_hpd_threaded_handler(int irq, void *data)
+{
+	struct anx78xx *anx78xx = data;
+	int err;
+
+	if (anx78xx->powered)
+		return IRQ_HANDLED;
+
+	mutex_lock(&anx78xx->lock);
+
+	/* Cable has been plugged in, power on the chip */
+	anx78xx_poweron(anx78xx);
+
+	err = anx78xx_enable_interrupts(anx78xx);
+	if (err)
+		DRM_ERROR("Failed to enable interrupts: %d\n", err);
+
+	mutex_unlock(&anx78xx->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int anx78xx_handle_dp_int_1(struct anx78xx *anx78xx, u8 irq)
+{
+	int err;
+
+	DRM_DEBUG_KMS("Handle DP interrupt 1: %02x\n", irq);
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
+			   irq);
+	if (err)
+		return err;
+
+	if (irq & SP_TRAINING_FINISH) {
+		DRM_DEBUG_KMS("IRQ: hardware link training finished\n");
+		err = anx78xx_config_dp_output(anx78xx);
+	}
+
+	return err;
+}
+
+static bool anx78xx_handle_common_int_4(struct anx78xx *anx78xx, u8 irq)
+{
+	bool event = false;
+	int err;
+
+	DRM_DEBUG_KMS("Handle common interrupt 4: %02x\n", irq);
+
+	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
+			   SP_COMMON_INT_STATUS4_REG, irq);
+	if (err) {
+		DRM_ERROR("Failed to write SP_COMMON_INT_STATUS4 %d\n", err);
+		return event;
+	}
+
+	if (irq & SP_HPD_LOST) {
+		DRM_DEBUG_KMS("IRQ: Hot plug detect - cable is pulled out\n");
+		event = true;
+		anx78xx_poweroff(anx78xx);
+		/* Free cached EDID */
+		kfree(anx78xx->edid);
+		anx78xx->edid = NULL;
+	} else if (irq & SP_HPD_PLUG) {
+		DRM_DEBUG_KMS("IRQ: Hot plug detect - cable plug\n");
+		event = true;
+	}
+
+	return event;
+}
+
+static void anx78xx_handle_hdmi_int_1(struct anx78xx *anx78xx, u8 irq)
+{
+	unsigned int value;
+	int err;
+
+	DRM_DEBUG_KMS("Handle HDMI interrupt 1: %02x\n", irq);
+
+	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
+			   irq);
+	if (err) {
+		DRM_ERROR("Write HDMI int 1 failed: %d\n", err);
+		return;
+	}
+
+	if ((irq & SP_CKDT_CHG) || (irq & SP_SCDT_CHG)) {
+		DRM_DEBUG_KMS("IRQ: HDMI input detected\n");
+
+		err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
+				  SP_SYSTEM_STATUS_REG, &value);
+		if (err) {
+			DRM_ERROR("Read system status reg failed: %d\n", err);
+			return;
+		}
+
+		if (!(value & SP_TMDS_CLOCK_DET)) {
+			DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI clock ***\n");
+			return;
+		}
+
+		if (!(value & SP_TMDS_DE_DET)) {
+			DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI signal ***\n");
+			return;
+		}
+
+		err = anx78xx_dp_link_training(anx78xx);
+		if (err)
+			DRM_ERROR("Failed to start link training: %d\n", err);
+	}
+}
+
+static irqreturn_t anx78xx_intp_threaded_handler(int unused, void *data)
+{
+	struct anx78xx *anx78xx = data;
+	bool event = false;
+	unsigned int irq;
+	int err;
+
+	mutex_lock(&anx78xx->lock);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
+			  &irq);
+	if (err) {
+		DRM_ERROR("Failed to read DP interrupt 1 status: %d\n", err);
+		goto unlock;
+	}
+
+	if (irq)
+		anx78xx_handle_dp_int_1(anx78xx, irq);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2],
+			  SP_COMMON_INT_STATUS4_REG, &irq);
+	if (err) {
+		DRM_ERROR("Failed to read common interrupt 4 status: %d\n",
+			  err);
+		goto unlock;
+	}
+
+	if (irq)
+		event = anx78xx_handle_common_int_4(anx78xx, irq);
+
+	/* Make sure we are still powered after handling HPD events */
+	if (!anx78xx->powered)
+		goto unlock;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
+			  &irq);
+	if (err) {
+		DRM_ERROR("Failed to read HDMI int 1 status: %d\n", err);
+		goto unlock;
+	}
+
+	if (irq)
+		anx78xx_handle_hdmi_int_1(anx78xx, irq);
+
+unlock:
+	mutex_unlock(&anx78xx->lock);
+
+	if (event)
+		drm_helper_hpd_irq_event(anx78xx->connector.dev);
+
+	return IRQ_HANDLED;
+}
+
+static void unregister_i2c_dummy_clients(struct anx78xx *anx78xx)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(anx78xx->i2c_dummy); i++)
+		if (anx78xx->i2c_dummy[i])
+			i2c_unregister_device(anx78xx->i2c_dummy[i]);
+}
+
+static const struct regmap_config anx78xx_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static const u16 anx78xx_chipid_list[] = {
+	0x7812,
+	0x7814,
+	0x7818,
+};
+
+static int anx78xx_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct anx78xx *anx78xx;
+	struct anx78xx_platform_data *pdata;
+	unsigned int i, idl, idh, version;
+	bool found = false;
+	int err;
+
+	anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
+	if (!anx78xx)
+		return -ENOMEM;
+
+	pdata = &anx78xx->pdata;
+
+	mutex_init(&anx78xx->lock);
+
+#if IS_ENABLED(CONFIG_OF)
+	anx78xx->bridge.of_node = client->dev.of_node;
+#endif
+
+	anx78xx->client = client;
+	i2c_set_clientdata(client, anx78xx);
+
+	err = anx78xx_init_pdata(anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to initialize pdata: %d\n", err);
+		return err;
+	}
+
+	pdata->hpd_irq = gpiod_to_irq(pdata->gpiod_hpd);
+	if (pdata->hpd_irq < 0) {
+		DRM_ERROR("Failed to get HPD IRQ: %d\n", pdata->hpd_irq);
+		return -ENODEV;
+	}
+
+	pdata->intp_irq = client->irq;
+	if (!pdata->intp_irq) {
+		DRM_ERROR("Failed to get CABLE_DET and INTP IRQ\n");
+		return -ENODEV;
+	}
+
+	/* Map slave addresses of ANX7814 */
+	for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
+		anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter,
+						anx78xx_i2c_addresses[i] >> 1);
+		if (!anx78xx->i2c_dummy[i]) {
+			err = -ENOMEM;
+			DRM_ERROR("Failed to reserve I2C bus %02x\n",
+				  anx78xx_i2c_addresses[i]);
+			goto err_unregister_i2c;
+		}
+
+		anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
+						       &anx78xx_regmap_config);
+		if (IS_ERR(anx78xx->map[i])) {
+			err = PTR_ERR(anx78xx->map[i]);
+			DRM_ERROR("Failed regmap initialization %02x\n",
+				  anx78xx_i2c_addresses[i]);
+			goto err_unregister_i2c;
+		}
+	}
+
+	/* Look for supported chip ID */
+	anx78xx_poweron(anx78xx);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG,
+			  &idl);
+	if (err)
+		goto err_poweroff;
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG,
+			  &idh);
+	if (err)
+		goto err_poweroff;
+
+	anx78xx->chipid = (u8)idl | ((u8)idh << 8);
+
+	err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG,
+			  &version);
+	if (err)
+		goto err_poweroff;
+
+	for (i = 0; i < ARRAY_SIZE(anx78xx_chipid_list); i++) {
+		if (anx78xx->chipid == anx78xx_chipid_list[i]) {
+			DRM_INFO("Found ANX%x (ver. %d) SlimPort Transmitter\n",
+				 anx78xx->chipid, version);
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
+			  anx78xx->chipid, version);
+		err = -ENODEV;
+		goto err_poweroff;
+	}
+
+	err = devm_request_threaded_irq(&client->dev, pdata->hpd_irq, NULL,
+					anx78xx_hpd_threaded_handler,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					"anx78xx-hpd", anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to request CABLE_DET threaded IRQ: %d\n",
+			  err);
+		goto err_poweroff;
+	}
+
+	err = devm_request_threaded_irq(&client->dev, pdata->intp_irq, NULL,
+					anx78xx_intp_threaded_handler,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					"anx78xx-intp", anx78xx);
+	if (err) {
+		DRM_ERROR("Failed to request INTP threaded IRQ: %d\n", err);
+		goto err_poweroff;
+	}
+
+	anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
+
+	err = drm_bridge_add(&anx78xx->bridge);
+	if (err < 0) {
+		DRM_ERROR("Failed to add drm bridge: %d\n", err);
+		goto err_poweroff;
+	}
+
+	/* If the cable is pulled out, just power off and wait for an HPD event */
+	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
+		anx78xx_poweroff(anx78xx);
+
+	return 0;
+
+err_poweroff:
+	anx78xx_poweroff(anx78xx);
+
+err_unregister_i2c:
+	unregister_i2c_dummy_clients(anx78xx);
+	return err;
+}
+
+static int anx78xx_i2c_remove(struct i2c_client *client)
+{
+	struct anx78xx *anx78xx = i2c_get_clientdata(client);
+
+	drm_bridge_remove(&anx78xx->bridge);
+
+	unregister_i2c_dummy_clients(anx78xx);
+
+	kfree(anx78xx->edid);
+
+	return 0;
+}
+
+static const struct i2c_device_id anx78xx_id[] = {
+	{ "anx7814", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, anx78xx_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id anx78xx_match_table[] = {
+	{ .compatible = "analogix,anx7814", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, anx78xx_match_table);
+#endif
+
+static struct i2c_driver anx78xx_driver = {
+	.driver = {
+		   .name = "anx7814",
+		   .of_match_table = of_match_ptr(anx78xx_match_table),
+		  },
+	.probe = anx78xx_i2c_probe,
+	.remove = anx78xx_i2c_remove,
+	.id_table = anx78xx_id,
+};
+module_i2c_driver(anx78xx_driver);
+
+MODULE_DESCRIPTION("ANX78xx SlimPort Transmitter driver");
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
new file mode 100644
index 0000000..38753c8
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h
@@ -0,0 +1,719 @@
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ANX78xx_H
+#define __ANX78xx_H
+
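+/* 8-bit I2C slave addresses of the chip's TX and RX register pages */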
+#define TX_P0				0x70
+#define TX_P1				0x7a
+#define TX_P2				0x72
+
+#define RX_P0				0x7e
+#define RX_P1				0x80
+
+/***************************************************************/
+/* Register definition of device address 0x7e                  */
+/***************************************************************/
+
+/*
+ * System Control and Status
+ */
+
+/* Software Reset Register 1 */
+#define SP_SOFTWARE_RESET1_REG		0x11
+#define SP_VIDEO_RST			BIT(4)
+#define SP_HDCP_MAN_RST			BIT(2)
+#define SP_TMDS_RST			BIT(1)
+#define SP_SW_MAN_RST			BIT(0)
+
+/* System Status Register */
+#define SP_SYSTEM_STATUS_REG		0x14
+#define SP_TMDS_CLOCK_DET		BIT(1)
+#define SP_TMDS_DE_DET			BIT(0)
+
+/* HDMI Status Register */
+#define SP_HDMI_STATUS_REG		0x15
+#define SP_HDMI_AUD_LAYOUT		BIT(3)
+#define SP_HDMI_DET			BIT(0)
+#  define SP_DVI_MODE			0
+#  define SP_HDMI_MODE			1
+
+/* HDMI Mute Control Register */
+#define SP_HDMI_MUTE_CTRL_REG		0x16
+#define SP_AUD_MUTE			BIT(1)
+#define SP_VID_MUTE			BIT(0)
+
+/* System Power Down Register 1 */
+#define SP_SYSTEM_POWER_DOWN1_REG	0x18
+#define SP_PWDN_CTRL			BIT(0)
+
+/*
+ * Audio and Video Auto Control
+ */
+
+/* Auto Audio and Video Control register */
+#define SP_AUDVID_CTRL_REG		0x20
+#define SP_AVC_OE			BIT(7)
+#define SP_AAC_OE			BIT(6)
+#define SP_AVC_EN			BIT(1)
+#define SP_AAC_EN			BIT(0)
+
+/* Audio Exception Enable Registers */
+#define SP_AUD_EXCEPTION_ENABLE_BASE	(0x24 - 1)
+/* Bits for Audio Exception Enable Register 3 */
+#define SP_AEC_EN21			BIT(5)
+
+/*
+ * Interrupt
+ */
+
+/* Interrupt Status Register 1 */
+#define SP_INT_STATUS1_REG		0x31
+/* Bits for Interrupt Status Register 1 */
+#define SP_HDMI_DVI			BIT(7)
+#define SP_CKDT_CHG			BIT(6)
+#define SP_SCDT_CHG			BIT(5)
+#define SP_PCLK_CHG			BIT(4)
+#define SP_PLL_UNLOCK			BIT(3)
+#define SP_CABLE_PLUG_CHG		BIT(2)
+#define SP_SET_MUTE			BIT(1)
+#define SP_SW_INTR			BIT(0)
+/* Bits for Interrupt Status Register 2 */
+#define SP_HDCP_ERR			BIT(5)
+#define SP_AUDIO_SAMPLE_CHG		BIT(0)	/* undocumented */
+/* Bits for Interrupt Status Register 3 */
+#define SP_AUD_MODE_CHG			BIT(0)
+/* Bits for Interrupt Status Register 5 */
+#define SP_AUDIO_RCV			BIT(0)
+/* Bits for Interrupt Status Register 6 */
+#define SP_INT_STATUS6_REG		0x36
+#define SP_CTS_RCV			BIT(7)
+#define SP_NEW_AUD_PKT			BIT(4)
+#define SP_NEW_AVI_PKT			BIT(1)
+#define SP_NEW_CP_PKT			BIT(0)
+/* Bits for Interrupt Status Register 7 */
+#define SP_NO_VSI			BIT(7)
+#define SP_NEW_VS			BIT(4)
+
+/* Interrupt Mask 1 Status Registers */
+#define SP_INT_MASK1_REG		0x41
+
+/* HDMI US TIMER Control Register */
+#define SP_HDMI_US_TIMER_CTRL_REG	0x49
+#define SP_MS_TIMER_MARGIN_10_8_MASK	0x07
+
+/*
+ * TMDS Control
+ */
+
+/* TMDS Control Registers */
+#define SP_TMDS_CTRL_BASE		(0x50 - 1)
+/* Bits for TMDS Control Register 7 */
+#define SP_PD_RT			BIT(0)
+
+/*
+ * Video Control
+ */
+
+/* Video Status Register */
+#define SP_VIDEO_STATUS_REG		0x70
+#define SP_COLOR_DEPTH_MASK		0xf0
+#define SP_COLOR_DEPTH_SHIFT		4
+#  define SP_COLOR_DEPTH_MODE_LEGACY	0x00
+#  define SP_COLOR_DEPTH_MODE_24BIT	0x04
+#  define SP_COLOR_DEPTH_MODE_30BIT	0x05
+#  define SP_COLOR_DEPTH_MODE_36BIT	0x06
+#  define SP_COLOR_DEPTH_MODE_48BIT	0x07
+
+/* Video Data Range Control Register */
+#define SP_VID_DATA_RANGE_CTRL_REG	0x83
+#define SP_R2Y_INPUT_LIMIT		BIT(1)
+
+/* Pixel Clock High Resolution Counter Registers */
+#define SP_PCLK_HIGHRES_CNT_BASE	(0x8c - 1)
+
+/*
+ * Audio Control
+ */
+
+/* Number of Audio Channels Status Registers */
+#define SP_AUD_CH_STATUS_REG_NUM	6
+
+/* Audio IN S/PDIF Channel Status Registers */
+#define SP_AUD_SPDIF_CH_STATUS_BASE	0xc7
+
+/* Audio IN S/PDIF Channel Status Register 4 */
+#define SP_FS_FREQ_MASK			0x0f
+#  define SP_FS_FREQ_44100HZ		0x00
+#  define SP_FS_FREQ_48000HZ		0x02
+#  define SP_FS_FREQ_32000HZ		0x03
+#  define SP_FS_FREQ_88200HZ		0x08
+#  define SP_FS_FREQ_96000HZ		0x0a
+#  define SP_FS_FREQ_176400HZ		0x0c
+#  define SP_FS_FREQ_192000HZ		0x0e
+
+/*
+ * Miscellaneous Control Block
+ */
+
+/* CHIP Control Register */
+#define SP_CHIP_CTRL_REG		0xe3
+#define SP_MAN_HDMI5V_DET		BIT(3)
+#define SP_PLLLOCK_CKDT_EN		BIT(2)
+#define SP_ANALOG_CKDT_EN		BIT(1)
+#define SP_DIGITAL_CKDT_EN		BIT(0)
+
+/* Packet Receiving Status Register */
+#define SP_PACKET_RECEIVING_STATUS_REG	0xf3
+#define SP_AVI_RCVD			BIT(5)
+#define SP_VSI_RCVD			BIT(1)
+
+/***************************************************************/
+/* Register definition of device address 0x80                  */
+/***************************************************************/
+
+/* HDCP BCAPS Shadow Register */
+#define SP_HDCP_BCAPS_SHADOW_REG	0x2a
+#define SP_BCAPS_REPEATER		BIT(5)
+
+/* HDCP Status Register */
+#define SP_RX_HDCP_STATUS_REG		0x3f
+#define SP_AUTH_EN			BIT(4)
+
+/*
+ * InfoFrame and Control Packet Registers
+ */
+
+/* AVI InfoFrame packet checksum */
+#define SP_AVI_INFOFRAME_CHECKSUM	0xa3
+
+/* AVI InfoFrame Registers */
+#define SP_AVI_INFOFRAME_DATA_BASE	0xa4
+
+#define SP_AVI_COLOR_F_MASK		0x60
+#define SP_AVI_COLOR_F_SHIFT		5
+
+/* Audio InfoFrame Registers */
+#define SP_AUD_INFOFRAME_DATA_BASE	0xc4
+#define SP_AUD_INFOFRAME_LAYOUT_MASK	0x0f
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
+#define SP_MPEG_VS_INFOFRAME_TYPE_REG	0xe0
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
+#define SP_MPEG_VS_INFOFRAME_LEN_REG	0xe2
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
+#define SP_MPEG_VS_INFOFRAME_VER_REG	0xe1
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
+#define SP_MPEG_VS_INFOFRAME_DATA_BASE	0xe4
+
+/* General Control Packet Register */
+#define SP_GENERAL_CTRL_PACKET_REG	0x9f
+#define SP_CLEAR_AVMUTE			BIT(4)
+#define SP_SET_AVMUTE			BIT(0)
+
+/***************************************************************/
+/* Register definition of device address 0x70                  */
+/***************************************************************/
+
+/* HDCP Status Register */
+#define SP_TX_HDCP_STATUS_REG		0x00
+#define SP_AUTH_FAIL			BIT(5)
+#define SP_AUTHEN_PASS			BIT(1)
+
+/* HDCP Control Register 0 */
+#define SP_HDCP_CTRL0_REG		0x01
+#define SP_RX_REPEATER			BIT(6)
+#define SP_RE_AUTH			BIT(5)
+#define SP_SW_AUTH_OK			BIT(4)
+#define SP_HARD_AUTH_EN			BIT(3)
+#define SP_HDCP_ENC_EN			BIT(2)
+#define SP_BKSV_SRM_PASS		BIT(1)
+#define SP_KSVLIST_VLD			BIT(0)
+/* HDCP Function Enabled */
+#define SP_HDCP_FUNCTION_ENABLED	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* HDCP Receiver BSTATUS Register 0 */
+#define	SP_HDCP_RX_BSTATUS0_REG		0x1b
+/* HDCP Receiver BSTATUS Register 1 */
+#define	SP_HDCP_RX_BSTATUS1_REG		0x1c
+
+/* HDCP Embedded "Blue Screen" Content Registers */
+#define SP_HDCP_VID0_BLUE_SCREEN_REG	0x2c
+#define SP_HDCP_VID1_BLUE_SCREEN_REG	0x2d
+#define SP_HDCP_VID2_BLUE_SCREEN_REG	0x2e
+
+/* HDCP Wait R0 Timing Register */
+#define SP_HDCP_WAIT_R0_TIME_REG	0x40
+
+/* HDCP Link Integrity Check Timer Register */
+#define SP_HDCP_LINK_CHECK_TIMER_REG	0x41
+
+/* HDCP Repeater Ready Wait Timer Register */
+#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG	0x42
+
+/* HDCP Auto Timer Register */
+#define SP_HDCP_AUTO_TIMER_REG		0x51
+
+/* HDCP Key Status Register */
+#define SP_HDCP_KEY_STATUS_REG		0x5e
+
+/* HDCP Key Command Register */
+#define SP_HDCP_KEY_COMMAND_REG		0x5f
+#define SP_DISABLE_SYNC_HDCP		BIT(2)
+
+/* OTP Memory Key Protection Registers */
+#define SP_OTP_KEY_PROTECT1_REG		0x60
+#define SP_OTP_KEY_PROTECT2_REG		0x61
+#define SP_OTP_KEY_PROTECT3_REG		0x62
+#define SP_OTP_PSW1			0xa2
+#define SP_OTP_PSW2			0x7e
+#define SP_OTP_PSW3			0xc6
+
+/* DP System Control Registers */
+#define SP_DP_SYSTEM_CTRL_BASE		(0x80 - 1)
+/* Bits for DP System Control Register 2 */
+#define SP_CHA_STA			BIT(2)
+/* Bits for DP System Control Register 3 */
+#define SP_HPD_STATUS			BIT(6)
+#define SP_STRM_VALID			BIT(2)
+/* Bits for DP System Control Register 4 */
+#define SP_ENHANCED_MODE		BIT(3)
+
+/* DP Video Control Register */
+#define SP_DP_VIDEO_CTRL_REG		0x84
+#define SP_COLOR_F_MASK			0x06
+#define SP_COLOR_F_SHIFT		1
+#define SP_BPC_MASK			0xe0
+#define SP_BPC_SHIFT			5
+#  define SP_BPC_6BITS			0x00
+#  define SP_BPC_8BITS			0x01
+#  define SP_BPC_10BITS			0x02
+#  define SP_BPC_12BITS			0x03
+
+/* DP Audio Control Register */
+#define SP_DP_AUDIO_CTRL_REG		0x87
+#define SP_AUD_EN			BIT(0)
+
+/* 10us Pulse Generate Timer Registers */
+#define SP_I2C_GEN_10US_TIMER0_REG	0x88
+#define SP_I2C_GEN_10US_TIMER1_REG	0x89
+
+/* Packet Send Control Register */
+#define SP_PACKET_SEND_CTRL_REG		0x90
+#define SP_AUD_IF_UP			BIT(7)
+#define SP_AVI_IF_UD			BIT(6)
+#define SP_MPEG_IF_UD			BIT(5)
+#define SP_SPD_IF_UD			BIT(4)
+#define SP_AUD_IF_EN			BIT(3)
+#define SP_AVI_IF_EN			BIT(2)
+#define SP_MPEG_IF_EN			BIT(1)
+#define SP_SPD_IF_EN			BIT(0)
+
+/* DP HDCP Control Register */
+#define SP_DP_HDCP_CTRL_REG		0x92
+#define SP_AUTO_EN			BIT(7)
+#define SP_AUTO_START			BIT(5)
+#define SP_LINK_POLLING			BIT(1)
+
+/* DP Main Link Bandwidth Setting Register */
+#define SP_DP_MAIN_LINK_BW_SET_REG	0xa0
+#define SP_LINK_BW_SET_MASK		0x1f
+#define SP_INITIAL_SLIM_M_AUD_SEL	BIT(5)
+
+/* DP Training Pattern Set Register */
+#define SP_DP_TRAINING_PATTERN_SET_REG	0xa2
+
+/* DP Lane 0 Link Training Control Register */
+#define SP_DP_LANE0_LT_CTRL_REG		0xa3
+#define SP_TX_SW_SET_MASK		0x1b
+#define SP_MAX_PRE_REACH		BIT(5)
+#define SP_MAX_DRIVE_REACH		BIT(4)
+#define SP_PRE_EMP_LEVEL1		BIT(3)
+#define SP_DRVIE_CURRENT_LEVEL1		BIT(0)
+
+/* DP Link Training Control Register */
+#define SP_DP_LT_CTRL_REG		0xa8
+#define SP_LT_ERROR_TYPE_MASK		0x70
+#  define SP_LT_NO_ERROR		0x00
+#  define SP_LT_AUX_WRITE_ERROR		0x01
+#  define SP_LT_MAX_DRIVE_REACHED	0x02
+#  define SP_LT_WRONG_LANE_COUNT_SET	0x03
+#  define SP_LT_LOOP_SAME_5_TIME	0x04
+#  define SP_LT_CR_FAIL_IN_EQ		0x05
+#  define SP_LT_EQ_LOOP_5_TIME		0x06
+#define SP_LT_EN			BIT(0)
+
+/* DP CEP Training Control Registers */
+#define SP_DP_CEP_TRAINING_CTRL0_REG	0xa9
+#define SP_DP_CEP_TRAINING_CTRL1_REG	0xaa
+
+/* DP Debug Register 1 */
+#define SP_DP_DEBUG1_REG		0xb0
+#define SP_DEBUG_PLL_LOCK		BIT(4)
+#define SP_POLLING_EN			BIT(1)
+
+/* DP Polling Control Register */
+#define SP_DP_POLLING_CTRL_REG		0xb4
+#define SP_AUTO_POLLING_DISABLE		BIT(0)
+
+/* DP Link Debug Control Register */
+#define SP_DP_LINK_DEBUG_CTRL_REG	0xb8
+#define SP_M_VID_DEBUG			BIT(5)
+#define SP_NEW_PRBS7			BIT(4)
+#define SP_INSERT_ER			BIT(1)
+#define SP_PRBS31_EN			BIT(0)
+
+/* AUX Misc control Register */
+#define SP_AUX_MISC_CTRL_REG		0xbf
+
+/* DP PLL control Register */
+#define SP_DP_PLL_CTRL_REG		0xc7
+#define SP_PLL_RST			BIT(6)
+
+/* DP Analog Power Down Register */
+#define SP_DP_ANALOG_POWER_DOWN_REG	0xc8
+#define SP_CH0_PD			BIT(0)
+
+/* DP Misc Control Register */
+#define SP_DP_MISC_CTRL_REG		0xcd
+#define SP_EQ_TRAINING_LOOP		BIT(6)
+
+/* DP Extra I2C Device Address Register */
+#define SP_DP_EXTRA_I2C_DEV_ADDR_REG	0xce
+#define SP_I2C_STRETCH_DISABLE		BIT(7)
+
+#define SP_I2C_EXTRA_ADDR		0x50
+
+/* DP Downspread Control Register 1 */
+#define SP_DP_DOWNSPREAD_CTRL1_REG	0xd0
+
+/* DP M Value Calculation Control Register */
+#define SP_DP_M_CALCULATION_CTRL_REG	0xd9
+#define SP_M_GEN_CLK_SEL		BIT(0)
+
+/* AUX Channel Access Status Register */
+#define SP_AUX_CH_STATUS_REG		0xe0
+#define SP_AUX_STATUS			0x0f
+
+/* AUX Channel DEFER Control Register */
+#define SP_AUX_DEFER_CTRL_REG		0xe2
+#define SP_DEFER_CTRL_EN		BIT(7)
+
+/* DP Buffer Data Count Register */
+#define SP_BUF_DATA_COUNT_REG		0xe4
+#define SP_BUF_DATA_COUNT_MASK		0x1f
+#define SP_BUF_CLR			BIT(7)
+
+/* DP AUX Channel Control Register 1 */
+#define SP_DP_AUX_CH_CTRL1_REG		0xe5
+#define SP_AUX_TX_COMM_MASK		0x0f
+#define SP_AUX_LENGTH_MASK		0xf0
+#define SP_AUX_LENGTH_SHIFT		4
+
+/* DP AUX CH Address Register 0 */
+#define SP_AUX_ADDR_7_0_REG		0xe6
+
+/* DP AUX CH Address Register 1 */
+#define SP_AUX_ADDR_15_8_REG		0xe7
+
+/* DP AUX CH Address Register 2 */
+#define SP_AUX_ADDR_19_16_REG		0xe8
+#define SP_AUX_ADDR_19_16_MASK		0x0f
+
+/* DP AUX Channel Control Register 2 */
+#define SP_DP_AUX_CH_CTRL2_REG		0xe9
+#define SP_AUX_SEL_RXCM			BIT(6)
+#define SP_AUX_CHSEL			BIT(3)
+#define SP_AUX_PN_INV			BIT(2)
+#define SP_ADDR_ONLY			BIT(1)
+#define SP_AUX_EN			BIT(0)
+
+/* DP Video Stream Control InfoFrame Register */
+#define SP_DP_3D_VSC_CTRL_REG		0xea
+#define SP_INFO_FRAME_VSC_EN		BIT(0)
+
+/* DP Video Stream Data Byte 1 Register */
+#define SP_DP_VSC_DB1_REG		0xeb
+
+/* DP AUX Channel Control Register 3 */
+#define SP_DP_AUX_CH_CTRL3_REG		0xec
+#define SP_WAIT_COUNTER_7_0_MASK	0xff
+
+/* DP AUX Channel Control Register 4 */
+#define SP_DP_AUX_CH_CTRL4_REG		0xed
+
+/* DP AUX Buffer Data Registers */
+#define SP_DP_BUF_DATA0_REG		0xf0
+
+/***************************************************************/
+/* Register definition of device address 0x72                  */
+/***************************************************************/
+
+/*
+ * Core Register Definitions
+ */
+
+/* Device ID Low Byte Register */
+#define SP_DEVICE_IDL_REG		0x02
+
+/* Device ID High Byte Register */
+#define SP_DEVICE_IDH_REG		0x03
+
+/* Device version register */
+#define SP_DEVICE_VERSION_REG		0x04
+
+/* Power Down Control Register */
+#define SP_POWERDOWN_CTRL_REG		0x05
+#define SP_REGISTER_PD			BIT(7)
+#define SP_HDCP_PD			BIT(5)
+#define SP_AUDIO_PD			BIT(4)
+#define SP_VIDEO_PD			BIT(3)
+#define SP_LINK_PD			BIT(2)
+#define SP_TOTAL_PD			BIT(1)
+
+/* Reset Control Register 1 */
+#define SP_RESET_CTRL1_REG		0x06
+#define SP_MISC_RST			BIT(7)
+#define SP_VIDCAP_RST			BIT(6)
+#define SP_VIDFIF_RST			BIT(5)
+#define SP_AUDFIF_RST			BIT(4)
+#define SP_AUDCAP_RST			BIT(3)
+#define SP_HDCP_RST			BIT(2)
+#define SP_SW_RST			BIT(1)
+#define SP_HW_RST			BIT(0)
+
+/* Reset Control Register 2 */
+#define SP_RESET_CTRL2_REG		0x07
+#define SP_AUX_RST			BIT(2)
+#define SP_SERDES_FIFO_RST		BIT(1)
+#define SP_I2C_REG_RST			BIT(0)
+
+/* Video Control Register 1 */
+#define SP_VID_CTRL1_REG		0x08
+#define SP_VIDEO_EN			BIT(7)
+#define SP_VIDEO_MUTE			BIT(2)
+#define SP_DE_GEN			BIT(1)
+#define SP_DEMUX			BIT(0)
+
+/* Video Control Register 2 */
+#define SP_VID_CTRL2_REG		0x09
+#define SP_IN_COLOR_F_MASK		0x03
+#define SP_IN_YC_BIT_SEL		BIT(2)
+#define SP_IN_BPC_MASK			0x70
+#define SP_IN_BPC_SHIFT			4
+#  define SP_IN_BPC_12BIT		0x03
+#  define SP_IN_BPC_10BIT		0x02
+#  define SP_IN_BPC_8BIT		0x01
+#  define SP_IN_BPC_6BIT		0x00
+#define SP_IN_D_RANGE			BIT(7)
+
+/* Video Control Register 3 */
+#define SP_VID_CTRL3_REG		0x0a
+#define SP_HPD_OUT			BIT(6)
+
+/* Video Control Register 5 */
+#define SP_VID_CTRL5_REG		0x0c
+#define SP_CSC_STD_SEL			BIT(7)
+#define SP_XVYCC_RNG_LMT		BIT(6)
+#define SP_RANGE_Y2R			BIT(5)
+#define SP_CSPACE_Y2R			BIT(4)
+#define SP_RGB_RNG_LMT			BIT(3)
+#define SP_Y_RNG_LMT			BIT(2)
+#define SP_RANGE_R2Y			BIT(1)
+#define SP_CSPACE_R2Y			BIT(0)
+
+/* Video Control Register 6 */
+#define SP_VID_CTRL6_REG		0x0d
+#define SP_TEST_PATTERN_EN		BIT(7)
+#define SP_VIDEO_PROCESS_EN		BIT(6)
+#define SP_VID_US_MODE			BIT(3)
+#define SP_VID_DS_MODE			BIT(2)
+#define SP_UP_SAMPLE			BIT(1)
+#define SP_DOWN_SAMPLE			BIT(0)
+
+/* Video Control Register 8 */
+#define SP_VID_CTRL8_REG		0x0f
+#define SP_VID_VRES_TH			BIT(0)
+
+/* Total Line Status Low Byte Register */
+#define SP_TOTAL_LINE_STAL_REG		0x24
+
+/* Total Line Status High Byte Register */
+#define SP_TOTAL_LINE_STAH_REG		0x25
+
+/* Active Line Status Low Byte Register */
+#define SP_ACT_LINE_STAL_REG		0x26
+
+/* Active Line Status High Byte Register */
+#define SP_ACT_LINE_STAH_REG		0x27
+
+/* Vertical Front Porch Status Register */
+#define SP_V_F_PORCH_STA_REG		0x28
+
+/* Vertical SYNC Width Status Register */
+#define SP_V_SYNC_STA_REG		0x29
+
+/* Vertical Back Porch Status Register */
+#define SP_V_B_PORCH_STA_REG		0x2a
+
+/* Total Pixel Status Low Byte Register */
+#define SP_TOTAL_PIXEL_STAL_REG		0x2b
+
+/* Total Pixel Status High Byte Register */
+#define SP_TOTAL_PIXEL_STAH_REG		0x2c
+
+/* Active Pixel Status Low Byte Register */
+#define SP_ACT_PIXEL_STAL_REG		0x2d
+
+/* Active Pixel Status High Byte Register */
+#define SP_ACT_PIXEL_STAH_REG		0x2e
+
+/* Horizontal Front Porch Status Low Byte Register */
+#define SP_H_F_PORCH_STAL_REG		0x2f
+
+/* Horizontal Front Porch Status High Byte Register */
+#define SP_H_F_PORCH_STAH_REG		0x30
+
+/* Horizontal SYNC Width Status Low Byte Register */
+#define SP_H_SYNC_STAL_REG		0x31
+
+/* Horizontal SYNC Width Status High Byte Register */
+#define SP_H_SYNC_STAH_REG		0x32
+
+/* Horizontal Back Porch Status Low Byte Register */
+#define SP_H_B_PORCH_STAL_REG		0x33
+
+/* Horizontal Back Porch Status High Byte Register */
+#define SP_H_B_PORCH_STAH_REG		0x34
+
+/* InfoFrame AVI Packet DB1 Register */
+#define SP_INFOFRAME_AVI_DB1_REG	0x70
+
+/* Bit Control Specific Register */
+#define SP_BIT_CTRL_SPECIFIC_REG	0x80
+#define SP_BIT_CTRL_SELECT_SHIFT	1
+#define SP_ENABLE_BIT_CTRL		BIT(0)
+
+/* InfoFrame Audio Packet DB1 Register */
+#define SP_INFOFRAME_AUD_DB1_REG	0x83
+
+/* InfoFrame MPEG Packet DB1 Register */
+#define SP_INFOFRAME_MPEG_DB1_REG	0xb0
+
+/* Audio Channel Status Registers */
+#define SP_AUD_CH_STATUS_BASE		0xd0
+
+/* Audio Channel Num Register 5 */
+#define SP_I2S_CHANNEL_NUM_MASK		0xe0
+#  define SP_I2S_CH_NUM_1		(0x00 << 5)
+#  define SP_I2S_CH_NUM_2		(0x01 << 5)
+#  define SP_I2S_CH_NUM_3		(0x02 << 5)
+#  define SP_I2S_CH_NUM_4		(0x03 << 5)
+#  define SP_I2S_CH_NUM_5		(0x04 << 5)
+#  define SP_I2S_CH_NUM_6		(0x05 << 5)
+#  define SP_I2S_CH_NUM_7		(0x06 << 5)
+#  define SP_I2S_CH_NUM_8		(0x07 << 5)
+#define SP_EXT_VUCP			BIT(2)
+#define SP_VBIT				BIT(1)
+#define SP_AUDIO_LAYOUT			BIT(0)
+
+/* Analog Debug Register 2 */
+#define SP_ANALOG_DEBUG2_REG		0xdd
+#define SP_FORCE_SW_OFF_BYPASS		0x20
+#define SP_XTAL_FRQ			0x1c
+#  define SP_XTAL_FRQ_19M2		(0x00 << 2)
+#  define SP_XTAL_FRQ_24M		(0x01 << 2)
+#  define SP_XTAL_FRQ_25M		(0x02 << 2)
+#  define SP_XTAL_FRQ_26M		(0x03 << 2)
+#  define SP_XTAL_FRQ_27M		(0x04 << 2)
+#  define SP_XTAL_FRQ_38M4		(0x05 << 2)
+#  define SP_XTAL_FRQ_52M		(0x06 << 2)
+#define SP_POWERON_TIME_1P5MS		0x03
+
+/* Analog Control 0 Register */
+#define SP_ANALOG_CTRL0_REG		0xe1
+
+/* Common Interrupt Status Register 1 */
+#define SP_COMMON_INT_STATUS_BASE	(0xf1 - 1)
+#define SP_PLL_LOCK_CHG			0x40
+
+/* Common Interrupt Status Register 2 */
+#define SP_COMMON_INT_STATUS2		0xf2
+#define SP_HDCP_AUTH_CHG		BIT(1)
+#define SP_HDCP_AUTH_DONE		BIT(0)
+
+#define SP_HDCP_LINK_CHECK_FAIL		BIT(0)
+
+/* Common Interrupt Status Register 4 */
+#define SP_COMMON_INT_STATUS4_REG	0xf4
+#define SP_HPD_IRQ			BIT(6)
+#define SP_HPD_ESYNC_ERR		BIT(4)
+#define SP_HPD_CHG			BIT(2)
+#define SP_HPD_LOST			BIT(1)
+#define SP_HPD_PLUG			BIT(0)
+
+/* DP Interrupt Status Register */
+#define SP_DP_INT_STATUS1_REG		0xf7
+#define SP_TRAINING_FINISH		BIT(5)
+#define SP_POLLING_ERR			BIT(4)
+
+/* Common Interrupt Mask Register */
+#define SP_COMMON_INT_MASK_BASE		(0xf8 - 1)
+
+#define SP_COMMON_INT_MASK4_REG		0xfb
+
+/* DP Interrupts Mask Register */
+#define SP_DP_INT_MASK1_REG		0xfe
+
+/* Interrupt Control Register */
+#define SP_INT_CTRL_REG			0xff
+
+/***************************************************************/
+/* Register definition of device address 0x7a                  */
+/***************************************************************/
+
+/* DP TX Link Training Control Register */
+#define SP_DP_TX_LT_CTRL0_REG		0x30
+
+/* DP 1.2 Link Training 80-bit Pattern Register */
+#define SP_DP_LT_80BIT_PATTERN0_REG	0x80
+#define SP_DP_LT_80BIT_PATTERN_REG_NUM	10
+
+/* Audio Interface Control Register 0 */
+#define SP_AUD_INTERFACE_CTRL0_REG	0x5f
+#define SP_AUD_INTERFACE_DISABLE	0x80
+
+/* Audio Interface Control Register 2 */
+#define SP_AUD_INTERFACE_CTRL2_REG	0x60
+#define SP_M_AUD_ADJUST_ST		0x04
+
+/* Audio Interface Control Register 3 */
+#define SP_AUD_INTERFACE_CTRL3_REG	0x62
+
+/* Audio Interface Control Register 4 */
+#define SP_AUD_INTERFACE_CTRL4_REG	0x67
+
+/* Audio Interface Control Register 5 */
+#define SP_AUD_INTERFACE_CTRL5_REG	0x68
+
+/* Audio Interface Control Register 6 */
+#define SP_AUD_INTERFACE_CTRL6_REG	0x69
+
+/* Firmware Version Register */
+#define SP_FW_VER_REG			0xb7
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
new file mode 100644
index 0000000..80f286f
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -0,0 +1,3 @@
+config DRM_ANALOGIX_DP
+	tristate
+	depends on DRM
diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile
new file mode 100644
index 0000000..cd4010b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/Makefile
@@ -0,0 +1,2 @@
+analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o
+obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
new file mode 100644
index 0000000..7699597
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -0,0 +1,1430 @@
+/*
+ * Analogix DP (DisplayPort) core interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/component.h>
+#include <linux/phy/phy.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "analogix_dp_core.h"
+
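+/* Resolve the containing analogix_dp_device from an embedded member */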
+#define to_dp(nm)	container_of(nm, struct analogix_dp_device, nm)
+
+struct bridge_init {
+	struct i2c_client *client;
+	struct device_node *node;
+};
+
+static void analogix_dp_init_dp(struct analogix_dp_device *dp)
+{
+	analogix_dp_reset(dp);
+
+	analogix_dp_swreset(dp);
+
+	analogix_dp_init_analog_param(dp);
+	analogix_dp_init_interrupt(dp);
+
+	/* SW-defined function: normal operation */
+	analogix_dp_enable_sw_function(dp);
+
+	analogix_dp_config_interrupt(dp);
+	analogix_dp_init_analog_func(dp);
+
+	analogix_dp_init_hpd(dp);
+	analogix_dp_init_aux(dp);
+}
+
+static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
+{
+	int timeout_loop = 0;
+
+	while (timeout_loop < DP_TIMEOUT_LOOP_COUNT) {
+		if (analogix_dp_get_plug_in_status(dp) == 0)
+			return 0;
+
+		timeout_loop++;
+		usleep_range(10, 11);
+	}
+
+	/*
+	 * Some eDP screens do not have an HPD signal, so we can't just
+	 * return an error when HPD plug-in detection fails. The DT property
+	 * "force-hpd" indicates whether the driver needs this workaround.
+	 */
+	if (!dp->force_hpd)
+		return -ETIMEDOUT;
+
+	/*
+	 * The eDP TRM indicates that if HPD_STATUS (RO) is 0, the AUX CH
+	 * will not work, so we need to force HPD in order to set
+	 * HPD_STATUS manually.
+	 */
+	dev_dbg(dp->dev, "failed to get hpd plug status, try to force hpd\n");
+
+	analogix_dp_force_hpd(dp);
+
+	if (analogix_dp_get_plug_in_status(dp) != 0) {
+		dev_err(dp->dev, "failed to get hpd plug in status\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(dp->dev, "success to get plug in status after force hpd\n");
+
+	return 0;
+}
+
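+/*
+ * Every 128-byte EDID block ends with a checksum byte chosen so that all
+ * bytes in the block sum to zero modulo 256; a non-zero sum means the read
+ * data is corrupt.
+ */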
+static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
+{
+	int i;
+	unsigned char sum = 0;
+
+	for (i = 0; i < EDID_BLOCK_LENGTH; i++)
+		sum = sum + edid_data[i];
+
+	return sum;
+}
+
+static int analogix_dp_read_edid(struct analogix_dp_device *dp)
+{
+	unsigned char *edid = dp->edid;
+	unsigned int extend_block = 0;
+	unsigned char sum;
+	unsigned char test_vector;
+	int retval;
+
+	/*
+	 * The EDID device address is 0x50.
+	 * However, if necessary, the upper address must first be set
+	 * via the E-EDID segment pointer at I2C address 0x30.
+	 */
+
+	/* Read Extension Flag, Number of 128-byte EDID extension blocks */
+	retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
+						EDID_EXTENSION_FLAG,
+						&extend_block);
+	if (retval)
+		return retval;
+
+	if (extend_block > 0) {
+		dev_dbg(dp->dev, "EDID data includes a single extension!\n");
+
+		/* Read EDID data */
+		retval = analogix_dp_read_bytes_from_i2c(dp,
+						I2C_EDID_DEVICE_ADDR,
+						EDID_HEADER_PATTERN,
+						EDID_BLOCK_LENGTH,
+						&edid[EDID_HEADER_PATTERN]);
+		if (retval != 0) {
+			dev_err(dp->dev, "EDID Read failed!\n");
+			return -EIO;
+		}
+		sum = analogix_dp_calc_edid_check_sum(edid);
+		if (sum != 0) {
+			dev_err(dp->dev, "EDID bad checksum!\n");
+			return -EIO;
+		}
+
+		/* Read additional EDID data */
+		retval = analogix_dp_read_bytes_from_i2c(dp,
+				I2C_EDID_DEVICE_ADDR,
+				EDID_BLOCK_LENGTH,
+				EDID_BLOCK_LENGTH,
+				&edid[EDID_BLOCK_LENGTH]);
+		if (retval != 0) {
+			dev_err(dp->dev, "EDID Read failed!\n");
+			return -EIO;
+		}
+		sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
+		if (sum != 0) {
+			dev_err(dp->dev, "EDID bad checksum!\n");
+			return -EIO;
+		}
+
+		analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
+						&test_vector);
+		if (test_vector & DP_TEST_LINK_EDID_READ) {
+			analogix_dp_write_byte_to_dpcd(dp,
+				DP_TEST_EDID_CHECKSUM,
+				edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
+			analogix_dp_write_byte_to_dpcd(dp,
+				DP_TEST_RESPONSE,
+				DP_TEST_EDID_CHECKSUM_WRITE);
+		}
+	} else {
+		dev_info(dp->dev, "EDID data does not include any extensions.\n");
+
+		/* Read EDID data */
+		retval = analogix_dp_read_bytes_from_i2c(dp,
+				I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN,
+				EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]);
+		if (retval != 0) {
+			dev_err(dp->dev, "EDID Read failed!\n");
+			return -EIO;
+		}
+		sum = analogix_dp_calc_edid_check_sum(edid);
+		if (sum != 0) {
+			dev_err(dp->dev, "EDID bad checksum!\n");
+			return -EIO;
+		}
+
+		analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
+						&test_vector);
+		if (test_vector & DP_TEST_LINK_EDID_READ) {
+			analogix_dp_write_byte_to_dpcd(dp,
+				DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]);
+			analogix_dp_write_byte_to_dpcd(dp,
+				DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
+		}
+	}
+
+	dev_dbg(dp->dev, "EDID Read success!\n");
+	return 0;
+}
+
+static int analogix_dp_handle_edid(struct analogix_dp_device *dp)
+{
+	u8 buf[12];
+	int i;
+	int retval;
+
+	/* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
+	retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf);
+	if (retval)
+		return retval;
+
+	/* Read EDID */
+	for (i = 0; i < 3; i++) {
+		retval = analogix_dp_read_edid(dp);
+		if (!retval)
+			break;
+	}
+
+	return retval;
+}
+
+static void
+analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
+				       bool enable)
+{
+	u8 data;
+
+	analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+
+	if (enable)
+		analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+					       DP_LANE_COUNT_ENHANCED_FRAME_EN |
+					       DPCD_LANE_COUNT_SET(data));
+	else
+		analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+					       DPCD_LANE_COUNT_SET(data));
+}
+
+static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
+{
+	u8 data;
+	int retval;
+
+	analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+	retval = DPCD_ENHANCED_FRAME_CAP(data);
+
+	return retval;
+}
+
+static void analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
+{
+	u8 data;
+
+	data = analogix_dp_is_enhanced_mode_available(dp);
+	analogix_dp_enable_rx_to_enhanced_mode(dp, data);
+	analogix_dp_enable_enhanced_mode(dp, data);
+}
+
+static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
+{
+	analogix_dp_set_training_pattern(dp, DP_NONE);
+
+	analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET,
+				       DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp,
+				       int pre_emphasis, int lane)
+{
+	switch (lane) {
+	case 0:
+		analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
+		break;
+	case 1:
+		analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
+		break;
+
+	case 2:
+		analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
+		break;
+
+	case 3:
+		analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
+		break;
+	}
+}
+
+static int analogix_dp_link_start(struct analogix_dp_device *dp)
+{
+	u8 buf[4];
+	int lane, lane_count, pll_tries, retval;
+
+	lane_count = dp->link_train.lane_count;
+
+	dp->link_train.lt_state = CLOCK_RECOVERY;
+	dp->link_train.eq_loop = 0;
+
+	for (lane = 0; lane < lane_count; lane++)
+		dp->link_train.cr_loop[lane] = 0;
+
+	/* Set the link rate and lane count to establish */
+	analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
+	analogix_dp_set_lane_count(dp, dp->link_train.lane_count);
+
+	/* Setup RX configuration */
+	buf[0] = dp->link_train.link_rate;
+	buf[1] = dp->link_train.lane_count;
+	retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf);
+	if (retval)
+		return retval;
+
+	/* Set TX pre-emphasis to minimum */
+	for (lane = 0; lane < lane_count; lane++)
+		analogix_dp_set_lane_lane_pre_emphasis(dp,
+			PRE_EMPHASIS_LEVEL_0, lane);
+
+	/* Wait for PLL lock */
+	pll_tries = 0;
+	while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+		if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
+			dev_err(dp->dev, "Wait for PLL lock timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		pll_tries++;
+		usleep_range(90, 120);
+	}
+
+	/* Set training pattern 1 */
+	analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
+
+	/* Set RX training pattern */
+	retval = analogix_dp_write_byte_to_dpcd(dp,
+			DP_TRAINING_PATTERN_SET,
+			DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
+	if (retval)
+		return retval;
+
+	for (lane = 0; lane < lane_count; lane++)
+		buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
+			    DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+	retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
+						 lane_count, buf);
+
+	return retval;
+}
+
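+/*
+ * Each DP_LANE0_1_STATUS / DP_LANE2_3_STATUS byte packs two lanes: the low
+ * nibble holds the even-numbered lane and the high nibble the odd one.
+ */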
+static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane)
+{
+	int shift = (lane & 1) * 4;
+	u8 link_value = link_status[lane >> 1];
+
+	return (link_value >> shift) & 0xf;
+}
+
+static int analogix_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = analogix_dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int analogix_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
+				     int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return -EINVAL;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = analogix_dp_get_lane_status(link_status, lane);
+		lane_status &= DP_CHANNEL_EQ_BITS;
+		if (lane_status != DP_CHANNEL_EQ_BITS)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
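+/*
+ * DP_ADJUST_REQUEST_LANE0_1 also packs two lanes per byte: within each
+ * nibble, bits [1:0] request the voltage swing and bits [3:2] the
+ * pre-emphasis level.
+ */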
+static unsigned char
+analogix_dp_get_adjust_request_voltage(u8 adjust_request[2], int lane)
+{
+	int shift = (lane & 1) * 4;
+	u8 link_value = adjust_request[lane >> 1];
+
+	return (link_value >> shift) & 0x3;
+}
+
+static unsigned char analogix_dp_get_adjust_request_pre_emphasis(
+					u8 adjust_request[2],
+					int lane)
+{
+	int shift = (lane & 1) * 4;
+	u8 link_value = adjust_request[lane >> 1];
+
+	return ((link_value >> shift) & 0xc) >> 2;
+}
+
+static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp,
+					       u8 training_lane_set, int lane)
+{
+	switch (lane) {
+	case 0:
+		analogix_dp_set_lane0_link_training(dp, training_lane_set);
+		break;
+	case 1:
+		analogix_dp_set_lane1_link_training(dp, training_lane_set);
+		break;
+
+	case 2:
+		analogix_dp_set_lane2_link_training(dp, training_lane_set);
+		break;
+
+	case 3:
+		analogix_dp_set_lane3_link_training(dp, training_lane_set);
+		break;
+	}
+}
+
+static unsigned int
+analogix_dp_get_lane_link_training(struct analogix_dp_device *dp,
+				   int lane)
+{
+	u32 reg;
+
+	switch (lane) {
+	case 0:
+		reg = analogix_dp_get_lane0_link_training(dp);
+		break;
+	case 1:
+		reg = analogix_dp_get_lane1_link_training(dp);
+		break;
+	case 2:
+		reg = analogix_dp_get_lane2_link_training(dp);
+		break;
+	case 3:
+		reg = analogix_dp_get_lane3_link_training(dp);
+		break;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+
+	return reg;
+}
+
+static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp)
+{
+	analogix_dp_training_pattern_dis(dp);
+	analogix_dp_set_enhanced_mode(dp);
+
+	dp->link_train.lt_state = FAILED;
+}
+
+static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp,
+						 u8 adjust_request[2])
+{
+	int lane, lane_count;
+	u8 voltage_swing, pre_emphasis, training_lane;
+
+	lane_count = dp->link_train.lane_count;
+	for (lane = 0; lane < lane_count; lane++) {
+		voltage_swing = analogix_dp_get_adjust_request_voltage(
+						adjust_request, lane);
+		pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis(
+						adjust_request, lane);
+		training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
+				DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+
+		if (voltage_swing == VOLTAGE_LEVEL_3)
+			training_lane |= DP_TRAIN_MAX_SWING_REACHED;
+		if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
+			training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+		dp->link_train.training_lane[lane] = training_lane;
+	}
+}
+
+static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
+{
+	int lane, lane_count, retval;
+	u8 voltage_swing, pre_emphasis, training_lane;
+	u8 link_status[2], adjust_request[2];
+
+	usleep_range(100, 101);
+
+	lane_count = dp->link_train.lane_count;
+
+	retval = analogix_dp_read_bytes_from_dpcd(dp,
+			DP_LANE0_1_STATUS, 2, link_status);
+	if (retval)
+		return retval;
+
+	retval = analogix_dp_read_bytes_from_dpcd(dp,
+			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+	if (retval)
+		return retval;
+
+	if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
+		/* set training pattern 2 for EQ */
+		analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
+
+		retval = analogix_dp_write_byte_to_dpcd(dp,
+				DP_TRAINING_PATTERN_SET,
+				DP_LINK_SCRAMBLING_DISABLE |
+				DP_TRAINING_PATTERN_2);
+		if (retval)
+			return retval;
+
+		dev_info(dp->dev, "Link Training Clock Recovery success\n");
+		dp->link_train.lt_state = EQUALIZER_TRAINING;
+	} else {
+		for (lane = 0; lane < lane_count; lane++) {
+			training_lane = analogix_dp_get_lane_link_training(
+							dp, lane);
+			voltage_swing = analogix_dp_get_adjust_request_voltage(
+							adjust_request, lane);
+			pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis(
+							adjust_request, lane);
+
+			if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
+					voltage_swing &&
+			    DPCD_PRE_EMPHASIS_GET(training_lane) ==
+					pre_emphasis)
+				dp->link_train.cr_loop[lane]++;
+
+			if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
+			    voltage_swing == VOLTAGE_LEVEL_3 ||
+			    pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
+				dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
+					dp->link_train.cr_loop[lane],
+					voltage_swing, pre_emphasis);
+				analogix_dp_reduce_link_rate(dp);
+				return -EIO;
+			}
+		}
+	}
+
+	analogix_dp_get_adjust_training_lane(dp, adjust_request);
+
+	for (lane = 0; lane < lane_count; lane++)
+		analogix_dp_set_lane_link_training(dp,
+			dp->link_train.training_lane[lane], lane);
+
+	retval = analogix_dp_write_bytes_to_dpcd(dp,
+			DP_TRAINING_LANE0_SET, lane_count,
+			dp->link_train.training_lane);
+	if (retval)
+		return retval;
+
+	return retval;
+}
+
+static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
+{
+	int lane, lane_count, retval;
+	u32 reg;
+	u8 link_align, link_status[2], adjust_request[2];
+
+	usleep_range(400, 401);
+
+	lane_count = dp->link_train.lane_count;
+
+	retval = analogix_dp_read_bytes_from_dpcd(dp,
+			DP_LANE0_1_STATUS, 2, link_status);
+	if (retval)
+		return retval;
+
+	if (analogix_dp_clock_recovery_ok(link_status, lane_count)) {
+		analogix_dp_reduce_link_rate(dp);
+		return -EIO;
+	}
+
+	retval = analogix_dp_read_bytes_from_dpcd(dp,
+			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+	if (retval)
+		return retval;
+
+	retval = analogix_dp_read_byte_from_dpcd(dp,
+			DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
+	if (retval)
+		return retval;
+
+	analogix_dp_get_adjust_training_lane(dp, adjust_request);
+
+	if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) {
+		/* training pattern set to normal */
+		analogix_dp_training_pattern_dis(dp);
+
+		dev_info(dp->dev, "Link Training success!\n");
+
+		analogix_dp_get_link_bandwidth(dp, &reg);
+		dp->link_train.link_rate = reg;
+		dev_dbg(dp->dev, "final bandwidth = %.2x\n",
+			dp->link_train.link_rate);
+
+		analogix_dp_get_lane_count(dp, &reg);
+		dp->link_train.lane_count = reg;
+		dev_dbg(dp->dev, "final lane count = %.2x\n",
+			dp->link_train.lane_count);
+
+		/* set enhanced mode if available */
+		analogix_dp_set_enhanced_mode(dp);
+		dp->link_train.lt_state = FINISHED;
+
+		return 0;
+	}
+
+	/* not all locked */
+	dp->link_train.eq_loop++;
+
+	if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
+		dev_err(dp->dev, "EQ Max loop\n");
+		analogix_dp_reduce_link_rate(dp);
+		return -EIO;
+	}
+
+	for (lane = 0; lane < lane_count; lane++)
+		analogix_dp_set_lane_link_training(dp,
+			dp->link_train.training_lane[lane], lane);
+
+	retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
+			lane_count, dp->link_train.training_lane);
+
+	return retval;
+}
+
+static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
+					     u8 *bandwidth)
+{
+	u8 data;
+
+	/*
+	 * For DP rev.1.1, Maximum link rate of Main Link lanes
+	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
+	 * For DP rev.1.2, Maximum link rate of Main Link lanes
+	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps
+	 */
+	analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+	*bandwidth = data;
+}
+
+static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
+					      u8 *lane_count)
+{
+	u8 data;
+
+	/*
+	 * For DP rev.1.1, Maximum number of Main Link lanes
+	 * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
+	 */
+	analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+	*lane_count = DPCD_MAX_LANE_COUNT(data);
+}
+
+static void analogix_dp_init_training(struct analogix_dp_device *dp,
+				      enum link_lane_count_type max_lane,
+				      int max_rate)
+{
+	/*
+	 * MACRO_RST must be applied for at least 10 us after PLL_LOCK
+	 * to avoid the DP inter-pair skew issue.
+	 */
+	analogix_dp_reset_macro(dp);
+
+	/* Initialize by reading RX's DPCD */
+	analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate);
+	analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count);
+
+	if ((dp->link_train.link_rate != DP_LINK_BW_1_62) &&
+	    (dp->link_train.link_rate != DP_LINK_BW_2_7) &&
+	    (dp->link_train.link_rate != DP_LINK_BW_5_4)) {
+		dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n",
+			dp->link_train.link_rate);
+		dp->link_train.link_rate = DP_LINK_BW_1_62;
+	}
+
+	if (dp->link_train.lane_count == 0) {
+		dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n",
+			dp->link_train.lane_count);
+		dp->link_train.lane_count = (u8)LANE_COUNT1;
+	}
+
+	/* Setup TX lane count & rate */
+	if (dp->link_train.lane_count > max_lane)
+		dp->link_train.lane_count = max_lane;
+	if (dp->link_train.link_rate > max_rate)
+		dp->link_train.link_rate = max_rate;
+
+	/* All DP analog module power up */
+	analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
+}
+
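+/*
+ * Software link training state machine: START -> CLOCK_RECOVERY ->
+ * EQUALIZER_TRAINING -> FINISHED; any stage may drop to FAILED, which
+ * aborts the attempt.
+ */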
+static int analogix_dp_sw_link_training(struct analogix_dp_device *dp)
+{
+	int retval = 0, training_finished = 0;
+
+	dp->link_train.lt_state = START;
+
+	/* Process here */
+	while (!retval && !training_finished) {
+		switch (dp->link_train.lt_state) {
+		case START:
+			retval = analogix_dp_link_start(dp);
+			if (retval)
+				dev_err(dp->dev, "LT link start failed!\n");
+			break;
+		case CLOCK_RECOVERY:
+			retval = analogix_dp_process_clock_recovery(dp);
+			if (retval)
+				dev_err(dp->dev, "LT CR failed!\n");
+			break;
+		case EQUALIZER_TRAINING:
+			retval = analogix_dp_process_equalizer_training(dp);
+			if (retval)
+				dev_err(dp->dev, "LT EQ failed!\n");
+			break;
+		case FINISHED:
+			training_finished = 1;
+			break;
+		case FAILED:
+			return -EREMOTEIO;
+		}
+	}
+	if (retval)
+		dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
+
+	return retval;
+}
+
+static int analogix_dp_set_link_train(struct analogix_dp_device *dp,
+				      u32 count, u32 bwtype)
+{
+	int i;
+	int retval;
+
+	for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
+		analogix_dp_init_training(dp, count, bwtype);
+		retval = analogix_dp_sw_link_training(dp);
+		if (retval == 0)
+			break;
+
+		usleep_range(100, 110);
+	}
+
+	return retval;
+}
+
+static int analogix_dp_config_video(struct analogix_dp_device *dp)
+{
+	int retval = 0;
+	int timeout_loop = 0;
+	int done_count = 0;
+
+	analogix_dp_config_video_slave_mode(dp);
+
+	analogix_dp_set_video_color_format(dp);
+
+	if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+		dev_err(dp->dev, "PLL is not locked yet.\n");
+		return -EINVAL;
+	}
+
+	for (;;) {
+		timeout_loop++;
+		if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0)
+			break;
+		if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+			dev_err(dp->dev, "Timeout of video streamclk ok\n");
+			return -ETIMEDOUT;
+		}
+
+		usleep_range(1, 2);
+	}
+
+	/* Set to use the register calculated M/N video */
+	analogix_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0);
+
+	/* For video bist, Video timing must be generated by register */
+	analogix_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE);
+
+	/* Disable video mute */
+	analogix_dp_enable_video_mute(dp, 0);
+
+	/* Configure video slave mode */
+	analogix_dp_enable_video_master(dp, 0);
+
+	timeout_loop = 0;
+
+	for (;;) {
+		timeout_loop++;
+		if (analogix_dp_is_video_stream_on(dp) == 0) {
+			done_count++;
+			if (done_count > 10)
+				break;
+		} else if (done_count) {
+			done_count = 0;
+		}
+		if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+			dev_err(dp->dev, "Timeout of video streamclk ok\n");
+			return -ETIMEDOUT;
+		}
+
+		usleep_range(1000, 1001);
+	}
+
+	if (retval != 0)
+		dev_err(dp->dev, "Video stream is not detected!\n");
+
+	return retval;
+}
+
+static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
+					bool enable)
+{
+	u8 data;
+
+	if (enable) {
+		analogix_dp_enable_scrambling(dp);
+
+		analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
+						&data);
+		analogix_dp_write_byte_to_dpcd(dp,
+			DP_TRAINING_PATTERN_SET,
+			(u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+	} else {
+		analogix_dp_disable_scrambling(dp);
+
+		analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
+						&data);
+		analogix_dp_write_byte_to_dpcd(dp,
+			DP_TRAINING_PATTERN_SET,
+			(u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+	}
+}
+
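+/*
+ * The hard IRQ handler only masks further HPD interrupts and wakes the
+ * threaded handler, which does the work that may sleep (hotplug
+ * notification) and then re-arms the interrupt.
+ */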
+static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
+{
+	struct analogix_dp_device *dp = arg;
+	irqreturn_t ret = IRQ_NONE;
+	enum dp_irq_type irq_type;
+
+	irq_type = analogix_dp_get_irq_type(dp);
+	if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
+		analogix_dp_mute_hpd_interrupt(dp);
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	return ret;
+}
+
+static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
+{
+	struct analogix_dp_device *dp = arg;
+	enum dp_irq_type irq_type;
+
+	irq_type = analogix_dp_get_irq_type(dp);
+	if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN ||
+	    irq_type & DP_IRQ_TYPE_HP_CABLE_OUT) {
+		dev_dbg(dp->dev, "Detected cable status changed!\n");
+		if (dp->drm_dev)
+			drm_helper_hpd_irq_event(dp->drm_dev);
+	}
+
+	if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
+		analogix_dp_clear_hotplug_interrupts(dp);
+		analogix_dp_unmute_hpd_interrupt(dp);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void analogix_dp_commit(struct analogix_dp_device *dp)
+{
+	int ret;
+
+	/* Keep the panel disabled while we configure video */
+	if (dp->plat_data->panel) {
+		if (drm_panel_disable(dp->plat_data->panel))
+			DRM_ERROR("failed to disable the panel\n");
+	}
+
+	ret = analogix_dp_set_link_train(dp, dp->video_info.max_lane_count,
+					 dp->video_info.max_link_rate);
+	if (ret) {
+		dev_err(dp->dev, "unable to do link train\n");
+		return;
+	}
+
+	analogix_dp_enable_scramble(dp, 1);
+	analogix_dp_enable_rx_to_enhanced_mode(dp, 1);
+	analogix_dp_enable_enhanced_mode(dp, 1);
+
+	analogix_dp_init_video(dp);
+	ret = analogix_dp_config_video(dp);
+	if (ret)
+		dev_err(dp->dev, "unable to config video\n");
+
+	/* Safe to enable the panel now */
+	if (dp->plat_data->panel) {
+		if (drm_panel_enable(dp->plat_data->panel))
+			DRM_ERROR("failed to enable the panel\n");
+	}
+
+	/* Enable video */
+	analogix_dp_start_video(dp);
+}
+
+int analogix_dp_get_modes(struct drm_connector *connector)
+{
+	struct analogix_dp_device *dp = to_dp(connector);
+	struct edid *edid = (struct edid *)dp->edid;
+	int num_modes = 0;
+
+	if (analogix_dp_handle_edid(dp) == 0) {
+		drm_mode_connector_update_edid_property(&dp->connector, edid);
+		num_modes += drm_add_edid_modes(&dp->connector, edid);
+	}
+
+	if (dp->plat_data->panel)
+		num_modes += drm_panel_get_modes(dp->plat_data->panel);
+
+	if (dp->plat_data->get_modes)
+		num_modes += dp->plat_data->get_modes(dp->plat_data);
+
+	return num_modes;
+}
+
+static struct drm_encoder *
+analogix_dp_best_encoder(struct drm_connector *connector)
+{
+	struct analogix_dp_device *dp = to_dp(connector);
+
+	return dp->encoder;
+}
+
+static const struct drm_connector_helper_funcs analogix_dp_connector_helper_funcs = {
+	.get_modes = analogix_dp_get_modes,
+	.best_encoder = analogix_dp_best_encoder,
+};
+
+enum drm_connector_status
+analogix_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct analogix_dp_device *dp = to_dp(connector);
+
+	if (analogix_dp_detect_hpd(dp))
+		return connector_status_disconnected;
+
+	return connector_status_connected;
+}
+
+static void analogix_dp_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs analogix_dp_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = analogix_dp_detect,
+	.destroy = analogix_dp_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	struct drm_encoder *encoder = dp->encoder;
+	struct drm_connector *connector = &dp->connector;
+	int ret;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	ret = drm_connector_init(dp->drm_dev, connector,
+				 &analogix_dp_connector_funcs,
+				 DRM_MODE_CONNECTOR_eDP);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector with drm\n");
+		return ret;
+	}
+
+	drm_connector_helper_add(connector,
+				 &analogix_dp_connector_helper_funcs);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	/*
+	 * NOTE: the connector registration is implemented in the analogix
+	 * platform driver; that is to say, the connector only exists after
+	 * plat_data->attach() returns, which is why we record the connector
+	 * pointer after the platform attach.
+	 */
+	if (dp->plat_data->attach) {
+		ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
+		if (ret) {
+			DRM_ERROR("Failed at platform attach func\n");
+			return ret;
+		}
+	}
+
+	if (dp->plat_data->panel) {
+		ret = drm_panel_attach(dp->plat_data->panel, &dp->connector);
+		if (ret) {
+			DRM_ERROR("Failed to attach panel\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+
+	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+		return;
+
+	pm_runtime_get_sync(dp->dev);
+
+	if (dp->plat_data->power_on)
+		dp->plat_data->power_on(dp->plat_data);
+
+	phy_power_on(dp->phy);
+	analogix_dp_init_dp(dp);
+	enable_irq(dp->irq);
+	analogix_dp_commit(dp);
+
+	dp->dpms_mode = DRM_MODE_DPMS_ON;
+}
+
+static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+
+	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
+		return;
+
+	if (dp->plat_data->panel) {
+		if (drm_panel_disable(dp->plat_data->panel)) {
+			DRM_ERROR("failed to disable the panel\n");
+			return;
+		}
+	}
+
+	disable_irq(dp->irq);
+	phy_power_off(dp->phy);
+
+	if (dp->plat_data->power_off)
+		dp->plat_data->power_off(dp->plat_data);
+
+	pm_runtime_put_sync(dp->dev);
+
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
+}
+
+static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
+					struct drm_display_mode *orig_mode,
+					struct drm_display_mode *mode)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	struct drm_display_info *display_info = &dp->connector.display_info;
+	struct video_info *video = &dp->video_info;
+	struct device_node *dp_node = dp->dev->of_node;
+	int vic;
+
+	/* Input video interlace, hsync polarity and vsync polarity */
+	video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+	/* Input video dynamic_range & colorimetry */
+	vic = drm_match_cea_mode(mode);
+	if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) ||
+	    (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) {
+		video->dynamic_range = CEA;
+		video->ycbcr_coeff = COLOR_YCBCR601;
+	} else if (vic) {
+		video->dynamic_range = CEA;
+		video->ycbcr_coeff = COLOR_YCBCR709;
+	} else {
+		video->dynamic_range = VESA;
+		video->ycbcr_coeff = COLOR_YCBCR709;
+	}
+
+	/* Input video bpc and color_formats */
+	switch (display_info->bpc) {
+	case 12:
+		video->color_depth = COLOR_12;
+		break;
+	case 10:
+		video->color_depth = COLOR_10;
+		break;
+	case 8:
+		video->color_depth = COLOR_8;
+		break;
+	case 6:
+		video->color_depth = COLOR_6;
+		break;
+	default:
+		video->color_depth = COLOR_8;
+		break;
+	}
+	if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+		video->color_space = COLOR_YCBCR444;
+	else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+		video->color_space = COLOR_YCBCR422;
+	else if (display_info->color_formats & DRM_COLOR_FORMAT_RGB444)
+		video->color_space = COLOR_RGB;
+	else
+		video->color_space = COLOR_RGB;
+
+	/*
+	 * NOTE: this property parsing code provides backward compatibility
+	 * for the samsung platform.
+	 * Because the "of_property_read_u32" interfaces are used, "video_info"
+	 * keeps its original values and is not modified when a property is
+	 * absent.
+	 */
+	of_property_read_u32(dp_node, "samsung,color-space",
+			     &video->color_space);
+	of_property_read_u32(dp_node, "samsung,dynamic-range",
+			     &video->dynamic_range);
+	of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
+			     &video->ycbcr_coeff);
+	of_property_read_u32(dp_node, "samsung,color-depth",
+			     &video->color_depth);
+	if (of_property_read_bool(dp_node, "hsync-active-high"))
+		video->h_sync_polarity = true;
+	if (of_property_read_bool(dp_node, "vsync-active-high"))
+		video->v_sync_polarity = true;
+	if (of_property_read_bool(dp_node, "interlaced"))
+		video->interlaced = true;
+}
+
+static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
+{
+	/* do nothing */
+}
+
+static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+	.enable = analogix_dp_bridge_enable,
+	.disable = analogix_dp_bridge_disable,
+	.pre_enable = analogix_dp_bridge_nop,
+	.post_disable = analogix_dp_bridge_nop,
+	.mode_set = analogix_dp_bridge_mode_set,
+	.attach = analogix_dp_bridge_attach,
+};
+
+static int analogix_dp_create_bridge(struct drm_device *drm_dev,
+				     struct analogix_dp_device *dp)
+{
+	struct drm_bridge *bridge;
+	int ret;
+
+	bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		DRM_ERROR("failed to allocate for drm bridge\n");
+		return -ENOMEM;
+	}
+
+	dp->bridge = bridge;
+
+	dp->encoder->bridge = bridge;
+	bridge->driver_private = dp;
+	bridge->encoder = dp->encoder;
+	bridge->funcs = &analogix_dp_bridge_funcs;
+
+	ret = drm_bridge_attach(drm_dev, bridge);
+	if (ret) {
+		DRM_ERROR("failed to attach drm bridge\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
+{
+	struct device_node *dp_node = dp->dev->of_node;
+	struct video_info *video_info = &dp->video_info;
+
+	switch (dp->plat_data->dev_type) {
+	case RK3288_DP:
+		/*
+		 * The RK3288 DisplayPort TRM indicates a "Main link
+		 * containing 4 physical lanes of 2.7/1.62 Gbps/lane".
+		 */
+		video_info->max_link_rate = 0x0A;
+		video_info->max_lane_count = 0x04;
+		break;
+	case EXYNOS_DP:
+		/*
+		 * NOTE: this property parsing code provides backward
+		 * compatibility for the samsung platform.
+		 */
+		of_property_read_u32(dp_node, "samsung,link-rate",
+				     &video_info->max_link_rate);
+		of_property_read_u32(dp_node, "samsung,lane-count",
+				     &video_info->max_lane_count);
+		break;
+	}
+
+	return 0;
+}
+
+int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+		     struct analogix_dp_plat_data *plat_data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct analogix_dp_device *dp;
+	struct resource *res;
+	unsigned int irq_flags;
+	int ret;
+
+	if (!plat_data) {
+		dev_err(dev, "Invalided input plat_data\n");
+		return -EINVAL;
+	}
+
+	dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, dp);
+
+	dp->dev = &pdev->dev;
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
+
+	/*
+	 * The platform dp driver needs to container_of() the plat_data to
+	 * get its driver private data, so store the pointer to plat_data
+	 * rather than a copy of its contents.
+	 */
+	dp->plat_data = plat_data;
+
+	ret = analogix_dp_dt_parse_pdata(dp);
+	if (ret)
+		return ret;
+
+	dp->phy = devm_phy_get(dp->dev, "dp");
+	if (IS_ERR(dp->phy)) {
+		dev_err(dp->dev, "no DP phy configured\n");
+		ret = PTR_ERR(dp->phy);
+		if (ret) {
+			/*
+			 * The phy is not available, so continue with a
+			 * NULL phy pointer.
+			 */
+			if (ret == -ENOSYS || ret == -ENODEV)
+				dp->phy = NULL;
+			else
+				return ret;
+		}
+	}
+
+	dp->clock = devm_clk_get(&pdev->dev, "dp");
+	if (IS_ERR(dp->clock)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(dp->clock);
+	}
+
+	clk_prepare_enable(dp->clock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dp->reg_base))
+		return PTR_ERR(dp->reg_base);
+
+	dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
+
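+	/*
+	 * Look up the hotplug-detect GPIO: try the generic "hpd-gpios"
+	 * property first, then fall back to the legacy "samsung,hpd-gpio"
+	 * name for older device trees.
+	 */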
+	dp->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
+	if (!gpio_is_valid(dp->hpd_gpio))
+		dp->hpd_gpio = of_get_named_gpio(dev->of_node,
+						 "samsung,hpd-gpio", 0);
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		/*
+		 * Set up the hotplug GPIO from the device tree as an interrupt.
+		 * Simply specifying a different interrupt in the device tree
+		 * doesn't work since we handle hotplug rather differently when
+		 * using a GPIO.  We also need the actual GPIO specifier so
+		 * that we can get the current state of the GPIO.
+		 */
+		ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
+					    "hpd_gpio");
+		if (ret) {
+			dev_err(&pdev->dev, "failed to get hpd gpio\n");
+			return ret;
+		}
+		dp->irq = gpio_to_irq(dp->hpd_gpio);
+		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+	} else {
+		dp->hpd_gpio = -ENODEV;
+		dp->irq = platform_get_irq(pdev, 0);
+		irq_flags = 0;
+	}
+
+	if (dp->irq == -ENXIO) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return -ENODEV;
+	}
+
+	pm_runtime_enable(dev);
+
+	phy_power_on(dp->phy);
+
+	if (dp->plat_data->panel) {
+		if (drm_panel_prepare(dp->plat_data->panel)) {
+			DRM_ERROR("failed to set up the panel\n");
+			return -EBUSY;
+		}
+	}
+
+	analogix_dp_init_dp(dp);
+
+	ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
+					analogix_dp_hardirq,
+					analogix_dp_irq_thread,
+					irq_flags, "analogix-dp", dp);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		goto err_disable_pm_runtime;
+	}
+	disable_irq(dp->irq);
+
+	dp->drm_dev = drm_dev;
+	dp->encoder = dp->plat_data->encoder;
+
+	ret = analogix_dp_create_bridge(drm_dev, dp);
+	if (ret) {
+		DRM_ERROR("failed to create bridge (%d)\n", ret);
+		drm_encoder_cleanup(dp->encoder);
+		goto err_disable_pm_runtime;
+	}
+
+	return 0;
+
+err_disable_pm_runtime:
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_bind);
+
+void analogix_dp_unbind(struct device *dev, struct device *master,
+			void *data)
+{
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+
+	analogix_dp_bridge_disable(dp->bridge);
+
+	if (dp->plat_data->panel) {
+		if (drm_panel_unprepare(dp->plat_data->panel))
+			DRM_ERROR("failed to turn off the panel\n");
+	}
+
+	pm_runtime_disable(dev);
+}
+EXPORT_SYMBOL_GPL(analogix_dp_unbind);
+
+#ifdef CONFIG_PM
+int analogix_dp_suspend(struct device *dev)
+{
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dp->clock);
+
+	if (dp->plat_data->panel) {
+		if (drm_panel_unprepare(dp->plat_data->panel))
+			DRM_ERROR("failed to turn off the panel\n");
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_suspend);
+
+int analogix_dp_resume(struct device *dev)
+{
+	struct analogix_dp_device *dp = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(dp->clock);
+	if (ret < 0) {
+		DRM_ERROR("Failed to prepare_enable the clock [%d]\n", ret);
+		return ret;
+	}
+
+	if (dp->plat_data->panel) {
+		if (drm_panel_prepare(dp->plat_data->panel)) {
+			DRM_ERROR("failed to set up the panel\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_resume);
+#endif
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Analogix DP Core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
new file mode 100644
index 0000000..f09275d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -0,0 +1,281 @@
+/*
+ * Header file for Analogix DP (Display Port) core interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _ANALOGIX_DP_CORE_H
+#define _ANALOGIX_DP_CORE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+
+#define DP_TIMEOUT_LOOP_COUNT 100
+#define MAX_CR_LOOP 5
+#define MAX_EQ_LOOP 5
+
+/* I2C EDID Chip ID, Slave Address */
+#define I2C_EDID_DEVICE_ADDR			0x50
+#define I2C_E_EDID_DEVICE_ADDR			0x30
+
+#define EDID_BLOCK_LENGTH			0x80
+#define EDID_HEADER_PATTERN			0x00
+#define EDID_EXTENSION_FLAG			0x7e
+#define EDID_CHECKSUM				0x7f
+
+/* DP_MAX_LANE_COUNT */
+#define DPCD_ENHANCED_FRAME_CAP(x)		(((x) >> 7) & 0x1)
+#define DPCD_MAX_LANE_COUNT(x)			((x) & 0x1f)
+
+/* DP_LANE_COUNT_SET */
+#define DPCD_LANE_COUNT_SET(x)			((x) & 0x1f)
+
+/* DP_TRAINING_LANE0_SET */
+#define DPCD_PRE_EMPHASIS_SET(x)		(((x) & 0x3) << 3)
+#define DPCD_PRE_EMPHASIS_GET(x)		(((x) >> 3) & 0x3)
+#define DPCD_VOLTAGE_SWING_SET(x)		(((x) & 0x3) << 0)
+#define DPCD_VOLTAGE_SWING_GET(x)		(((x) >> 0) & 0x3)
+
+enum link_lane_count_type {
+	LANE_COUNT1 = 1,
+	LANE_COUNT2 = 2,
+	LANE_COUNT4 = 4
+};
+
+enum link_training_state {
+	START,
+	CLOCK_RECOVERY,
+	EQUALIZER_TRAINING,
+	FINISHED,
+	FAILED
+};
+
+enum voltage_swing_level {
+	VOLTAGE_LEVEL_0,
+	VOLTAGE_LEVEL_1,
+	VOLTAGE_LEVEL_2,
+	VOLTAGE_LEVEL_3,
+};
+
+enum pre_emphasis_level {
+	PRE_EMPHASIS_LEVEL_0,
+	PRE_EMPHASIS_LEVEL_1,
+	PRE_EMPHASIS_LEVEL_2,
+	PRE_EMPHASIS_LEVEL_3,
+};
+
+enum pattern_set {
+	PRBS7,
+	D10_2,
+	TRAINING_PTN1,
+	TRAINING_PTN2,
+	DP_NONE
+};
+
+enum color_space {
+	COLOR_RGB,
+	COLOR_YCBCR422,
+	COLOR_YCBCR444
+};
+
+enum color_depth {
+	COLOR_6,
+	COLOR_8,
+	COLOR_10,
+	COLOR_12
+};
+
+enum color_coefficient {
+	COLOR_YCBCR601,
+	COLOR_YCBCR709
+};
+
+enum dynamic_range {
+	VESA,
+	CEA
+};
+
+enum pll_status {
+	PLL_UNLOCKED,
+	PLL_LOCKED
+};
+
+enum clock_recovery_m_value_type {
+	CALCULATED_M,
+	REGISTER_M
+};
+
+enum video_timing_recognition_type {
+	VIDEO_TIMING_FROM_CAPTURE,
+	VIDEO_TIMING_FROM_REGISTER
+};
+
+enum analog_power_block {
+	AUX_BLOCK,
+	CH0_BLOCK,
+	CH1_BLOCK,
+	CH2_BLOCK,
+	CH3_BLOCK,
+	ANALOG_TOTAL,
+	POWER_ALL
+};
+
+enum dp_irq_type {
+	DP_IRQ_TYPE_HP_CABLE_IN,
+	DP_IRQ_TYPE_HP_CABLE_OUT,
+	DP_IRQ_TYPE_HP_CHANGE,
+	DP_IRQ_TYPE_UNKNOWN,
+};
+
+struct video_info {
+	char *name;
+
+	bool h_sync_polarity;
+	bool v_sync_polarity;
+	bool interlaced;
+
+	enum color_space color_space;
+	enum dynamic_range dynamic_range;
+	enum color_coefficient ycbcr_coeff;
+	enum color_depth color_depth;
+
+	int max_link_rate;
+	enum link_lane_count_type max_lane_count;
+};
+
+struct link_train {
+	int eq_loop;
+	int cr_loop[4];
+
+	u8 link_rate;
+	u8 lane_count;
+	u8 training_lane[4];
+
+	enum link_training_state lt_state;
+};
+
+struct analogix_dp_device {
+	struct drm_encoder	*encoder;
+	struct device		*dev;
+	struct drm_device	*drm_dev;
+	struct drm_connector	connector;
+	struct drm_bridge	*bridge;
+	struct clk		*clock;
+	unsigned int		irq;
+	void __iomem		*reg_base;
+
+	struct video_info	video_info;
+	struct link_train	link_train;
+	struct phy		*phy;
+	int			dpms_mode;
+	int			hpd_gpio;
+	bool                    force_hpd;
+	unsigned char           edid[EDID_BLOCK_LENGTH * 2];
+
+	struct analogix_dp_plat_data *plat_data;
+};
+
+/* analogix_dp_reg.c */
+void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable);
+void analogix_dp_stop_video(struct analogix_dp_device *dp);
+void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable);
+void analogix_dp_init_analog_param(struct analogix_dp_device *dp);
+void analogix_dp_init_interrupt(struct analogix_dp_device *dp);
+void analogix_dp_reset(struct analogix_dp_device *dp);
+void analogix_dp_swreset(struct analogix_dp_device *dp);
+void analogix_dp_config_interrupt(struct analogix_dp_device *dp);
+void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp);
+void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp);
+enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp);
+void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable);
+void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
+				       enum analog_power_block block,
+				       bool enable);
+void analogix_dp_init_analog_func(struct analogix_dp_device *dp);
+void analogix_dp_init_hpd(struct analogix_dp_device *dp);
+void analogix_dp_force_hpd(struct analogix_dp_device *dp);
+enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp);
+void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp);
+void analogix_dp_reset_aux(struct analogix_dp_device *dp);
+void analogix_dp_init_aux(struct analogix_dp_device *dp);
+int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp);
+void analogix_dp_enable_sw_function(struct analogix_dp_device *dp);
+int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp);
+int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
+				   unsigned int reg_addr,
+				   unsigned char data);
+int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
+				    unsigned int reg_addr,
+				    unsigned char *data);
+int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
+				    unsigned int reg_addr,
+				    unsigned int count,
+				    unsigned char data[]);
+int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
+				     unsigned int reg_addr,
+				     unsigned int count,
+				     unsigned char data[]);
+int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
+				  unsigned int device_addr,
+				  unsigned int reg_addr);
+int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
+				   unsigned int device_addr,
+				   unsigned int reg_addr,
+				   unsigned int *data);
+int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
+				    unsigned int device_addr,
+				    unsigned int reg_addr,
+				    unsigned int count,
+				    unsigned char edid[]);
+void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype);
+void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype);
+void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count);
+void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count);
+void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
+				      bool enable);
+void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
+				      enum pattern_set pattern);
+void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level);
+void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level);
+void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level);
+void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level);
+void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane);
+void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane);
+void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane);
+void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane);
+u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp);
+u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp);
+u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp);
+u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp);
+void analogix_dp_reset_macro(struct analogix_dp_device *dp);
+void analogix_dp_init_video(struct analogix_dp_device *dp);
+
+void analogix_dp_set_video_color_format(struct analogix_dp_device *dp);
+int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp);
+void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp,
+				 enum clock_recovery_m_value_type type,
+				 u32 m_value,
+				 u32 n_value);
+void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type);
+void analogix_dp_enable_video_master(struct analogix_dp_device *dp,
+				     bool enable);
+void analogix_dp_start_video(struct analogix_dp_device *dp);
+int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp);
+void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
+void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
+void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
+#endif /* _ANALOGIX_DP_CORE_H */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
new file mode 100644
index 0000000..49205ef
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -0,0 +1,1320 @@
+/*
+ * Analogix DP (Display port) core register interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "analogix_dp_core.h"
+#include "analogix_dp_reg.h"
+
+#define COMMON_INT_MASK_1	0
+#define COMMON_INT_MASK_2	0
+#define COMMON_INT_MASK_3	0
+#define COMMON_INT_MASK_4	(HOTPLUG_CHG | HPD_LOST | PLUG)
+#define INT_STA_MASK		INT_HPD
+
+void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable)
+{
+	u32 reg;
+
+	if (enable) {
+		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+		reg |= HDCP_VIDEO_MUTE;
+		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+		reg &= ~HDCP_VIDEO_MUTE;
+		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+	}
+}
+
+void analogix_dp_stop_video(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+	reg &= ~VIDEO_EN;
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+}
+
+void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable)
+{
+	u32 reg;
+
+	if (enable)
+		reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 |
+		      LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3;
+	else
+		reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
+		      LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;
+
+	writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP);
+}
+
+void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = TX_TERMINAL_CTRL_50_OHM;
+	writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1);
+
+	reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+	writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
+
+	if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
+		writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+		writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
+		writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
+		writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
+		writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5);
+	}
+
+	reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+	writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3);
+
+	reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+		TX_CUR1_2X | TX_CUR_16_MA;
+	writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1);
+
+	reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+		CH1_AMP_400_MV | CH0_AMP_400_MV;
+	writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL);
+}
+
+void analogix_dp_init_interrupt(struct analogix_dp_device *dp)
+{
+	/* Set interrupt pin assertion polarity as high */
+	writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL);
+
+	/* Clear pending registers */
+	writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
+	writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2);
+	writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3);
+	writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+	writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+	/* 0: mask, 1: unmask */
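+	/* Mask all interrupt sources for now */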
+	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1);
+	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2);
+	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3);
+	writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+	writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+void analogix_dp_reset(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	analogix_dp_stop_video(dp);
+	analogix_dp_enable_video_mute(dp, 0);
+
+	reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
+		AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
+		HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+
+	reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
+		SERDES_FIFO_FUNC_EN_N |
+		LS_CLK_DOMAIN_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+
+	usleep_range(20, 30);
+
+	analogix_dp_lane_swap(dp, 0);
+
+	writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+	writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+	writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+	writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+
+	writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+	writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL);
+
+	writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L);
+	writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H);
+
+	writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL);
+
+	writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST);
+
+	writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD);
+	writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN);
+
+	writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH);
+	writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH);
+
+	writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+}
+
+void analogix_dp_swreset(struct analogix_dp_device *dp)
+{
+	writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET);
+}
+
+void analogix_dp_config_interrupt(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* 0: mask, 1: unmask */
+	reg = COMMON_INT_MASK_1;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1);
+
+	reg = COMMON_INT_MASK_2;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2);
+
+	reg = COMMON_INT_MASK_3;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3);
+
+	reg = COMMON_INT_MASK_4;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+
+	reg = INT_STA_MASK;
+	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* 0: mask, 1: unmask */
+	reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+	reg &= ~COMMON_INT_MASK_4;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+	reg &= ~INT_STA_MASK;
+	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* 0: mask, 1: unmask */
+	reg = COMMON_INT_MASK_4;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+
+	reg = INT_STA_MASK;
+	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
+	if (reg & PLL_LOCK)
+		return PLL_LOCKED;
+	else
+		return PLL_UNLOCKED;
+}
+
+void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable)
+{
+	u32 reg;
+
+	if (enable) {
+		reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
+		reg |= DP_PLL_PD;
+		writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
+		reg &= ~DP_PLL_PD;
+		writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
+	}
+}
+
+void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
+				       enum analog_power_block block,
+				       bool enable)
+{
+	u32 reg;
+	u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
+
+	if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+		phy_pd_addr = ANALOGIX_DP_PD;
+
+	switch (block) {
+	case AUX_BLOCK:
+		if (enable) {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg |= AUX_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg &= ~AUX_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	case CH0_BLOCK:
+		if (enable) {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg |= CH0_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg &= ~CH0_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	case CH1_BLOCK:
+		if (enable) {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg |= CH1_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg &= ~CH1_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	case CH2_BLOCK:
+		if (enable) {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg |= CH2_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg &= ~CH2_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	case CH3_BLOCK:
+		if (enable) {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg |= CH3_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg &= ~CH3_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	case ANALOG_TOTAL:
+		if (enable) {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg |= DP_PHY_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			reg = readl(dp->reg_base + phy_pd_addr);
+			reg &= ~DP_PHY_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	case POWER_ALL:
+		if (enable) {
+			reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
+				CH1_PD | CH0_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+		} else {
+			writel(0x00, dp->reg_base + phy_pd_addr);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
+{
+	u32 reg;
+	int timeout_loop = 0;
+
+	analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
+
+	reg = PLL_LOCK_CHG;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
+	reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
+	writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
+
+	/* Power up PLL */
+	if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+		analogix_dp_set_pll_power_down(dp, 0);
+
+		while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+			timeout_loop++;
+			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+				dev_err(dp->dev, "failed to get pll lock status\n");
+				return;
+			}
+			usleep_range(10, 20);
+		}
+	}
+
+	/* Enable Serdes FIFO function and Link symbol clock domain module */
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
+		| AUX_FUNC_EN_N);
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	if (gpio_is_valid(dp->hpd_gpio))
+		return;
+
+	reg = HOTPLUG_CHG | HPD_LOST | PLUG;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+
+	reg = INT_HPD;
+	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
+}
+
+void analogix_dp_init_hpd(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	if (gpio_is_valid(dp->hpd_gpio))
+		return;
+
+	analogix_dp_clear_hotplug_interrupts(dp);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+	reg &= ~(F_HPD | HPD_CTRL);
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+}
+
+void analogix_dp_force_hpd(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+	reg = (F_HPD | HPD_CTRL);
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+}
+
+enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		reg = gpio_get_value(dp->hpd_gpio);
+		if (reg)
+			return DP_IRQ_TYPE_HP_CABLE_IN;
+		else
+			return DP_IRQ_TYPE_HP_CABLE_OUT;
+	} else {
+		/* Parse hotplug interrupt status register */
+		reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+
+		if (reg & PLUG)
+			return DP_IRQ_TYPE_HP_CABLE_IN;
+
+		if (reg & HPD_LOST)
+			return DP_IRQ_TYPE_HP_CABLE_OUT;
+
+		if (reg & HOTPLUG_CHG)
+			return DP_IRQ_TYPE_HP_CHANGE;
+
+		return DP_IRQ_TYPE_UNKNOWN;
+	}
+}
+
+void analogix_dp_reset_aux(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* Disable AUX channel module */
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+	reg |= AUX_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+void analogix_dp_init_aux(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* Clear interrupts related to AUX channel */
+	reg = RPLY_RECEIV | AUX_ERR;
+	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+	analogix_dp_reset_aux(dp);
+
+	/* Disable AUX transaction H/W retry */
+	if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
+		      AUX_HW_RETRY_COUNT_SEL(3) |
+		      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+	else
+		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) |
+		      AUX_HW_RETRY_COUNT_SEL(0) |
+		      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
+
+	/* Receive AUX Channel DEFER commands equal to DEFER_COUNT*64 */
+	reg = DEFER_CTRL_EN | DEFER_COUNT(1);
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_DEFER_CTL);
+
+	/* Enable AUX channel module */
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+	reg &= ~AUX_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		if (gpio_get_value(dp->hpd_gpio))
+			return 0;
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+		if (reg & HPD_STATUS)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+	reg &= ~SW_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+}
+
+int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
+{
+	int reg;
+	int retval = 0;
+	int timeout_loop = 0;
+
+	/* Enable AUX CH operation */
+	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+	reg |= AUX_EN;
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+	/* Is AUX CH command reply received? */
+	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+	while (!(reg & RPLY_RECEIV)) {
+		timeout_loop++;
+		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+			dev_err(dp->dev, "AUX CH command reply failed!\n");
+			return -ETIMEDOUT;
+		}
+		reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+		usleep_range(10, 11);
+	}
+
+	/* Clear interrupt source for AUX CH command reply */
+	writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+	/* Clear interrupt source for AUX CH access error */
+	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+	if (reg & AUX_ERR) {
+		writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
+		return -EREMOTEIO;
+	}
+
+	/* Check AUX CH error access status */
+	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+	if ((reg & AUX_STATUS_MASK) != 0) {
+		dev_err(dp->dev, "AUX CH error occurred: %d\n",
+			reg & AUX_STATUS_MASK);
+		return -EREMOTEIO;
+	}
+
+	return retval;
+}
+
+int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
+				   unsigned int reg_addr,
+				   unsigned char data)
+{
+	u32 reg;
+	int i;
+	int retval;
+
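+	/* Retry the AUX transaction up to three times before giving up */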
+	for (i = 0; i < 3; i++) {
+		/* Clear AUX CH data buffer */
+		reg = BUF_CLR;
+		writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+		/* Select DPCD device address */
+		reg = AUX_ADDR_7_0(reg_addr);
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+		reg = AUX_ADDR_15_8(reg_addr);
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+		reg = AUX_ADDR_19_16(reg_addr);
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+		/* Write data buffer */
+		reg = (unsigned int)data;
+		writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+
+		/*
+		 * Set DisplayPort transaction and write 1 byte
+		 * If bit 3 is 1, DisplayPort transaction.
+		 * If Bit 3 is 0, I2C transaction.
+		 */
+		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+		/* Start AUX transaction */
+		retval = analogix_dp_start_aux_transaction(dp);
+		if (retval == 0)
+			break;
+
+		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+	}
+
+	return retval;
+}
+
+int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
+				    unsigned int reg_addr,
+				    unsigned char *data)
+{
+	u32 reg;
+	int i;
+	int retval;
+
+	for (i = 0; i < 3; i++) {
+		/* Clear AUX CH data buffer */
+		reg = BUF_CLR;
+		writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+		/* Select DPCD device address */
+		reg = AUX_ADDR_7_0(reg_addr);
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+		reg = AUX_ADDR_15_8(reg_addr);
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+		reg = AUX_ADDR_19_16(reg_addr);
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+		/*
+		 * Set DisplayPort transaction and read 1 byte
+		 * If bit 3 is 1, DisplayPort transaction.
+		 * If Bit 3 is 0, I2C transaction.
+		 */
+		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+		/* Start AUX transaction */
+		retval = analogix_dp_start_aux_transaction(dp);
+		if (retval == 0)
+			break;
+
+		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+	}
+
+	/* Read data buffer */
+	reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+	*data = (unsigned char)(reg & 0xff);
+
+	return retval;
+}
+
+int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
+				    unsigned int reg_addr,
+				    unsigned int count,
+				    unsigned char data[])
+{
+	u32 reg;
+	unsigned int start_offset;
+	unsigned int cur_data_count;
+	unsigned int cur_data_idx;
+	int i;
+	int retval = 0;
+
+	/* Clear AUX CH data buffer */
+	reg = BUF_CLR;
+	writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+	start_offset = 0;
+	while (start_offset < count) {
+		/* Buffer size of AUX CH is 16 * 4bytes */
+		if ((count - start_offset) > 16)
+			cur_data_count = 16;
+		else
+			cur_data_count = count - start_offset;
+
+		for (i = 0; i < 3; i++) {
+			/* Select DPCD device address */
+			reg = AUX_ADDR_7_0(reg_addr + start_offset);
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+			reg = AUX_ADDR_15_8(reg_addr + start_offset);
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+			reg = AUX_ADDR_19_16(reg_addr + start_offset);
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
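+			/* One payload byte per 32-bit slot in the AUX data buffer */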
+			for (cur_data_idx = 0; cur_data_idx < cur_data_count;
+			     cur_data_idx++) {
+				reg = data[start_offset + cur_data_idx];
+				writel(reg, dp->reg_base +
+				       ANALOGIX_DP_BUF_DATA_0 +
+				       4 * cur_data_idx);
+			}
+
+			/*
+			 * Set DisplayPort transaction and write
+			 * If bit 3 is 1, DisplayPort transaction.
+			 * If Bit 3 is 0, I2C transaction.
+			 */
+			reg = AUX_LENGTH(cur_data_count) |
+				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+			/* Start AUX transaction */
+			retval = analogix_dp_start_aux_transaction(dp);
+			if (retval == 0)
+				break;
+
+			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
+				__func__);
+		}
+
+		start_offset += cur_data_count;
+	}
+
+	return retval;
+}
+
+int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
+				     unsigned int reg_addr,
+				     unsigned int count,
+				     unsigned char data[])
+{
+	u32 reg;
+	unsigned int start_offset;
+	unsigned int cur_data_count;
+	unsigned int cur_data_idx;
+	int i;
+	int retval = 0;
+
+	/* Clear AUX CH data buffer */
+	reg = BUF_CLR;
+	writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+	start_offset = 0;
+	while (start_offset < count) {
+		/* Buffer size of AUX CH is 16 * 4bytes */
+		if ((count - start_offset) > 16)
+			cur_data_count = 16;
+		else
+			cur_data_count = count - start_offset;
+
+		/* AUX CH Request Transaction process */
+		for (i = 0; i < 3; i++) {
+			/* Select DPCD device address */
+			reg = AUX_ADDR_7_0(reg_addr + start_offset);
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+			reg = AUX_ADDR_15_8(reg_addr + start_offset);
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+			reg = AUX_ADDR_19_16(reg_addr + start_offset);
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+			/*
+			 * Set DisplayPort transaction and read
+			 * If bit 3 is 1, DisplayPort transaction.
+			 * If Bit 3 is 0, I2C transaction.
+			 */
+			reg = AUX_LENGTH(cur_data_count) |
+				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+			/* Start AUX transaction */
+			retval = analogix_dp_start_aux_transaction(dp);
+			if (retval == 0)
+				break;
+
+			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
+				__func__);
+		}
+
+		for (cur_data_idx = 0; cur_data_idx < cur_data_count;
+		    cur_data_idx++) {
+			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
+						 + 4 * cur_data_idx);
+			data[start_offset + cur_data_idx] =
+				(unsigned char)reg;
+		}
+
+		start_offset += cur_data_count;
+	}
+
+	return retval;
+}
+
+int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
+				  unsigned int device_addr,
+				  unsigned int reg_addr)
+{
+	u32 reg;
+	int retval;
+
+	/* Set EDID device address */
+	reg = device_addr;
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+	writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+	writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+	/* Set offset from base address of EDID device */
+	writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+
+	/*
+	 * Set I2C transaction and write address
+	 * If bit 3 is 1, DisplayPort transaction.
+	 * If Bit 3 is 0, I2C transaction.
+	 */
+	reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
+		AUX_TX_COMM_WRITE;
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+	/* Start AUX transaction */
+	retval = analogix_dp_start_aux_transaction(dp);
+	if (retval != 0)
+		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+
+	return retval;
+}
+
+int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
+				   unsigned int device_addr,
+				   unsigned int reg_addr,
+				   unsigned int *data)
+{
+	u32 reg;
+	int i;
+	int retval;
+
+	for (i = 0; i < 3; i++) {
+		/* Clear AUX CH data buffer */
+		reg = BUF_CLR;
+		writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+		/* Select EDID device */
+		retval = analogix_dp_select_i2c_device(dp, device_addr,
+						       reg_addr);
+		if (retval != 0)
+			continue;
+
+		/*
+		 * Set I2C transaction and read data
+		 * If bit 3 is 1, DisplayPort transaction.
+		 * If Bit 3 is 0, I2C transaction.
+		 */
+		reg = AUX_TX_COMM_I2C_TRANSACTION |
+			AUX_TX_COMM_READ;
+		writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+		/* Start AUX transaction */
+		retval = analogix_dp_start_aux_transaction(dp);
+		if (retval == 0)
+			break;
+
+		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+	}
+
+	/* Read data */
+	if (retval == 0)
+		*data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+
+	return retval;
+}
+
+int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
+				    unsigned int device_addr,
+				    unsigned int reg_addr,
+				    unsigned int count,
+				    unsigned char edid[])
+{
+	u32 reg;
+	unsigned int i, j;
+	unsigned int cur_data_idx;
+	unsigned int defer = 0;
+	int retval = 0;
+
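+	/*
+	 * Read the EDID in 16-byte chunks, retrying each chunk up to
+	 * three times.
+	 */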
+	for (i = 0; i < count; i += 16) {
+		for (j = 0; j < 3; j++) {
+			/* Clear AUX CH data buffer */
+			reg = BUF_CLR;
+			writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+			/* Set normal AUX CH command */
+			reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+			reg &= ~ADDR_ONLY;
+			writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+			/*
+			/*
+			 * If Rx sends a defer, Tx sends only the read
+			 * request without resending the address.
+			 */
+			if (!defer)
+				retval = analogix_dp_select_i2c_device(dp,
+						device_addr, reg_addr + i);
+			else
+				defer = 0;
+
+			if (retval == 0) {
+				/*
+				 * Set I2C transaction and write data
+				 * If bit 3 is 1, DisplayPort transaction.
+				 * If Bit 3 is 0, I2C transaction.
+				 */
+				reg = AUX_LENGTH(16) |
+					AUX_TX_COMM_I2C_TRANSACTION |
+					AUX_TX_COMM_READ;
+				writel(reg, dp->reg_base +
+					ANALOGIX_DP_AUX_CH_CTL_1);
+
+				/* Start AUX transaction */
+				retval = analogix_dp_start_aux_transaction(dp);
+				if (retval == 0)
+					break;
+
+				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
+					__func__);
+			}
+			/* Check if Rx sends defer */
+			reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
+			if (reg == AUX_RX_COMM_AUX_DEFER ||
+			    reg == AUX_RX_COMM_I2C_DEFER) {
+				dev_err(dp->dev, "Defer: %d\n", reg);
+				defer = 1;
+			}
+		}
+
+		for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
+			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
+						 + 4 * cur_data_idx);
+			edid[i + cur_data_idx] = (unsigned char)reg;
+		}
+	}
+
+	return retval;
+}
+
+void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
+{
+	u32 reg;
+
+	reg = bwtype;
+	if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
+		writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+}
+
+void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+	*bwtype = reg;
+}
+
+void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
+{
+	u32 reg;
+
+	reg = count;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+}
+
+void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+	*count = reg;
+}
+
+void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
+				      bool enable)
+{
+	u32 reg;
+
+	if (enable) {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+		reg |= ENHANCED;
+		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+		reg &= ~ENHANCED;
+		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+	}
+}
+
+void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
+				      enum pattern_set pattern)
+{
+	u32 reg;
+
+	switch (pattern) {
+	case PRBS7:
+		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
+		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+		break;
+	case D10_2:
+		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
+		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+		break;
+	case TRAINING_PTN1:
+		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
+		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+		break;
+	case TRAINING_PTN2:
+		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
+		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+		break;
+	case DP_NONE:
+		reg = SCRAMBLING_ENABLE |
+			LINK_QUAL_PATTERN_SET_DISABLE |
+			SW_TRAINING_PATTERN_SET_NORMAL;
+		writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+		break;
+	default:
+		break;
+	}
+}
+
+void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+	reg &= ~PRE_EMPHASIS_SET_MASK;
+	reg |= level << PRE_EMPHASIS_SET_SHIFT;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+	reg &= ~PRE_EMPHASIS_SET_MASK;
+	reg |= level << PRE_EMPHASIS_SET_SHIFT;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+	reg &= ~PRE_EMPHASIS_SET_MASK;
+	reg |= level << PRE_EMPHASIS_SET_SHIFT;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
+					u32 level)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+	reg &= ~PRE_EMPHASIS_SET_MASK;
+	reg |= level << PRE_EMPHASIS_SET_SHIFT;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane)
+{
+	u32 reg;
+
+	reg = training_lane;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane)
+{
+	u32 reg;
+
+	reg = training_lane;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane)
+{
+	u32 reg;
+
+	reg = training_lane;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
+					 u32 training_lane)
+{
+	u32 reg;
+
+	reg = training_lane;
+	writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+}
+
+u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+	return reg;
+}
+
+u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+	return reg;
+}
+
+u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+	return reg;
+}
+
+u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+	return reg;
+}
+
+void analogix_dp_reset_macro(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST);
+	reg |= MACRO_RST;
+	writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST);
+
+	/* 10 us is the minimum reset time. */
+	usleep_range(10, 20);
+
+	reg &= ~MACRO_RST;
+	writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST);
+}
+
+void analogix_dp_init_video(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG;
+	writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
+
+	reg = 0x0;
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+
+	reg = CHA_CRI(4) | CHA_CTRL;
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+
+	reg = 0x0;
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+
+	reg = VID_HRES_TH(2) | VID_VRES_TH(0);
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8);
+}
+
+void analogix_dp_set_video_color_format(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* Configure the input color depth, color space, dynamic range */
+	reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) |
+		(dp->video_info.color_depth << IN_BPC_SHIFT) |
+		(dp->video_info.color_space << IN_COLOR_F_SHIFT);
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2);
+
+	/* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
+	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+	reg &= ~IN_YC_COEFFI_MASK;
+	if (dp->video_info.ycbcr_coeff)
+		reg |= IN_YC_COEFFI_ITU709;
+	else
+		reg |= IN_YC_COEFFI_ITU601;
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+}
+
+int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+
+	if (!(reg & DET_STA)) {
+		dev_dbg(dp->dev, "Input stream clock not detected.\n");
+		return -EINVAL;
+	}
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+	dev_dbg(dp->dev, "wait SYS_CTL_2.\n");
+
+	if (reg & CHA_STA) {
+		dev_dbg(dp->dev, "Input stream clk is changing\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp,
+				 enum clock_recovery_m_value_type type,
+				 u32 m_value, u32 n_value)
+{
+	u32 reg;
+
+	if (type == REGISTER_M) {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+		reg |= FIX_M_VID;
+		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
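+		/* Program the fixed M and N values, one byte per register */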
+		reg = m_value & 0xff;
+		writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0);
+		reg = (m_value >> 8) & 0xff;
+		writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1);
+		reg = (m_value >> 16) & 0xff;
+		writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2);
+
+		reg = n_value & 0xff;
+		writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0);
+		reg = (n_value >> 8) & 0xff;
+		writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1);
+		reg = (n_value >> 16) & 0xff;
+		writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2);
+	} else  {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+		reg &= ~FIX_M_VID;
+		writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+
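+		/* M is calculated by hardware; N is fixed at 0x8000 */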
+		writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0);
+		writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1);
+		writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2);
+	}
+}
+
+void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type)
+{
+	u32 reg;
+
+	if (type == VIDEO_TIMING_FROM_CAPTURE) {
+		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+		reg &= ~FORMAT_SEL;
+		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+		reg |= FORMAT_SEL;
+		writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+	}
+}
+
+void analogix_dp_enable_video_master(struct analogix_dp_device *dp, bool enable)
+{
+	u32 reg;
+
+	if (enable) {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+		reg &= ~VIDEO_MODE_MASK;
+		reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
+		writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+		reg &= ~VIDEO_MODE_MASK;
+		reg |= VIDEO_MODE_SLAVE_MODE;
+		writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+	}
+}
+
+void analogix_dp_start_video(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+	reg |= VIDEO_EN;
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+}
+
+int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+	writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+	if (!(reg & STRM_VALID)) {
+		dev_dbg(dp->dev, "Input video stream is not detected.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+	reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
+	reg |= MASTER_VID_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+	reg &= ~INTERACE_SCAN_CFG;
+	reg |= (dp->video_info.interlaced << 2);
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+	reg &= ~VSYNC_POLARITY_CFG;
+	reg |= (dp->video_info.v_sync_polarity << 1);
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+	reg &= ~HSYNC_POLARITY_CFG;
+	reg |= (dp->video_info.h_sync_polarity << 0);
+	writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+
+	reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
+	writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+}
+
+void analogix_dp_enable_scrambling(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+	reg &= ~SCRAMBLING_DISABLE;
+	writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+}
+
+void analogix_dp_disable_scrambling(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+	reg |= SCRAMBLING_DISABLE;
+	writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
similarity index 62%
rename from drivers/gpu/drm/exynos/exynos_dp_reg.h
rename to drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 2e9bd0e..337912b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -1,5 +1,5 @@
 /*
- * Register definition file for Samsung DP driver
+ * Register definition file for Analogix DP core driver
  *
  * Copyright (C) 2012 Samsung Electronics Co., Ltd.
  * Author: Jingoo Han <jg1.han@samsung.com>
@@ -9,96 +9,104 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef _EXYNOS_DP_REG_H
-#define _EXYNOS_DP_REG_H
+#ifndef _ANALOGIX_DP_REG_H
+#define _ANALOGIX_DP_REG_H
 
-#define EXYNOS_DP_TX_SW_RESET			0x14
-#define EXYNOS_DP_FUNC_EN_1			0x18
-#define EXYNOS_DP_FUNC_EN_2			0x1C
-#define EXYNOS_DP_VIDEO_CTL_1			0x20
-#define EXYNOS_DP_VIDEO_CTL_2			0x24
-#define EXYNOS_DP_VIDEO_CTL_3			0x28
+#define ANALOGIX_DP_TX_SW_RESET			0x14
+#define ANALOGIX_DP_FUNC_EN_1			0x18
+#define ANALOGIX_DP_FUNC_EN_2			0x1C
+#define ANALOGIX_DP_VIDEO_CTL_1			0x20
+#define ANALOGIX_DP_VIDEO_CTL_2			0x24
+#define ANALOGIX_DP_VIDEO_CTL_3			0x28
 
-#define EXYNOS_DP_VIDEO_CTL_8			0x3C
-#define EXYNOS_DP_VIDEO_CTL_10			0x44
+#define ANALOGIX_DP_VIDEO_CTL_8			0x3C
+#define ANALOGIX_DP_VIDEO_CTL_10		0x44
 
-#define EXYNOS_DP_LANE_MAP			0x35C
+#define ANALOGIX_DP_PLL_REG_1			0xfc
+#define ANALOGIX_DP_PLL_REG_2			0x9e4
+#define ANALOGIX_DP_PLL_REG_3			0x9e8
+#define ANALOGIX_DP_PLL_REG_4			0x9ec
+#define ANALOGIX_DP_PLL_REG_5			0xa00
 
-#define EXYNOS_DP_ANALOG_CTL_1			0x370
-#define EXYNOS_DP_ANALOG_CTL_2			0x374
-#define EXYNOS_DP_ANALOG_CTL_3			0x378
-#define EXYNOS_DP_PLL_FILTER_CTL_1		0x37C
-#define EXYNOS_DP_TX_AMP_TUNING_CTL		0x380
+#define ANALOGIX_DP_PD				0x12c
 
-#define EXYNOS_DP_AUX_HW_RETRY_CTL		0x390
+#define ANALOGIX_DP_LANE_MAP			0x35C
 
-#define EXYNOS_DP_COMMON_INT_STA_1		0x3C4
-#define EXYNOS_DP_COMMON_INT_STA_2		0x3C8
-#define EXYNOS_DP_COMMON_INT_STA_3		0x3CC
-#define EXYNOS_DP_COMMON_INT_STA_4		0x3D0
-#define EXYNOS_DP_INT_STA			0x3DC
-#define EXYNOS_DP_COMMON_INT_MASK_1		0x3E0
-#define EXYNOS_DP_COMMON_INT_MASK_2		0x3E4
-#define EXYNOS_DP_COMMON_INT_MASK_3		0x3E8
-#define EXYNOS_DP_COMMON_INT_MASK_4		0x3EC
-#define EXYNOS_DP_INT_STA_MASK			0x3F8
-#define EXYNOS_DP_INT_CTL			0x3FC
+#define ANALOGIX_DP_ANALOG_CTL_1		0x370
+#define ANALOGIX_DP_ANALOG_CTL_2		0x374
+#define ANALOGIX_DP_ANALOG_CTL_3		0x378
+#define ANALOGIX_DP_PLL_FILTER_CTL_1		0x37C
+#define ANALOGIX_DP_TX_AMP_TUNING_CTL		0x380
 
-#define EXYNOS_DP_SYS_CTL_1			0x600
-#define EXYNOS_DP_SYS_CTL_2			0x604
-#define EXYNOS_DP_SYS_CTL_3			0x608
-#define EXYNOS_DP_SYS_CTL_4			0x60C
+#define ANALOGIX_DP_AUX_HW_RETRY_CTL		0x390
 
-#define EXYNOS_DP_PKT_SEND_CTL			0x640
-#define EXYNOS_DP_HDCP_CTL			0x648
+#define ANALOGIX_DP_COMMON_INT_STA_1		0x3C4
+#define ANALOGIX_DP_COMMON_INT_STA_2		0x3C8
+#define ANALOGIX_DP_COMMON_INT_STA_3		0x3CC
+#define ANALOGIX_DP_COMMON_INT_STA_4		0x3D0
+#define ANALOGIX_DP_INT_STA			0x3DC
+#define ANALOGIX_DP_COMMON_INT_MASK_1		0x3E0
+#define ANALOGIX_DP_COMMON_INT_MASK_2		0x3E4
+#define ANALOGIX_DP_COMMON_INT_MASK_3		0x3E8
+#define ANALOGIX_DP_COMMON_INT_MASK_4		0x3EC
+#define ANALOGIX_DP_INT_STA_MASK		0x3F8
+#define ANALOGIX_DP_INT_CTL			0x3FC
 
-#define EXYNOS_DP_LINK_BW_SET			0x680
-#define EXYNOS_DP_LANE_COUNT_SET		0x684
-#define EXYNOS_DP_TRAINING_PTN_SET		0x688
-#define EXYNOS_DP_LN0_LINK_TRAINING_CTL		0x68C
-#define EXYNOS_DP_LN1_LINK_TRAINING_CTL		0x690
-#define EXYNOS_DP_LN2_LINK_TRAINING_CTL		0x694
-#define EXYNOS_DP_LN3_LINK_TRAINING_CTL		0x698
+#define ANALOGIX_DP_SYS_CTL_1			0x600
+#define ANALOGIX_DP_SYS_CTL_2			0x604
+#define ANALOGIX_DP_SYS_CTL_3			0x608
+#define ANALOGIX_DP_SYS_CTL_4			0x60C
 
-#define EXYNOS_DP_DEBUG_CTL			0x6C0
-#define EXYNOS_DP_HPD_DEGLITCH_L		0x6C4
-#define EXYNOS_DP_HPD_DEGLITCH_H		0x6C8
-#define EXYNOS_DP_LINK_DEBUG_CTL		0x6E0
+#define ANALOGIX_DP_PKT_SEND_CTL		0x640
+#define ANALOGIX_DP_HDCP_CTL			0x648
 
-#define EXYNOS_DP_M_VID_0			0x700
-#define EXYNOS_DP_M_VID_1			0x704
-#define EXYNOS_DP_M_VID_2			0x708
-#define EXYNOS_DP_N_VID_0			0x70C
-#define EXYNOS_DP_N_VID_1			0x710
-#define EXYNOS_DP_N_VID_2			0x714
+#define ANALOGIX_DP_LINK_BW_SET			0x680
+#define ANALOGIX_DP_LANE_COUNT_SET		0x684
+#define ANALOGIX_DP_TRAINING_PTN_SET		0x688
+#define ANALOGIX_DP_LN0_LINK_TRAINING_CTL	0x68C
+#define ANALOGIX_DP_LN1_LINK_TRAINING_CTL	0x690
+#define ANALOGIX_DP_LN2_LINK_TRAINING_CTL	0x694
+#define ANALOGIX_DP_LN3_LINK_TRAINING_CTL	0x698
 
-#define EXYNOS_DP_PLL_CTL			0x71C
-#define EXYNOS_DP_PHY_PD			0x720
-#define EXYNOS_DP_PHY_TEST			0x724
+#define ANALOGIX_DP_DEBUG_CTL			0x6C0
+#define ANALOGIX_DP_HPD_DEGLITCH_L		0x6C4
+#define ANALOGIX_DP_HPD_DEGLITCH_H		0x6C8
+#define ANALOGIX_DP_LINK_DEBUG_CTL		0x6E0
 
-#define EXYNOS_DP_VIDEO_FIFO_THRD		0x730
-#define EXYNOS_DP_AUDIO_MARGIN			0x73C
+#define ANALOGIX_DP_M_VID_0			0x700
+#define ANALOGIX_DP_M_VID_1			0x704
+#define ANALOGIX_DP_M_VID_2			0x708
+#define ANALOGIX_DP_N_VID_0			0x70C
+#define ANALOGIX_DP_N_VID_1			0x710
+#define ANALOGIX_DP_N_VID_2			0x714
 
-#define EXYNOS_DP_M_VID_GEN_FILTER_TH		0x764
-#define EXYNOS_DP_M_AUD_GEN_FILTER_TH		0x778
-#define EXYNOS_DP_AUX_CH_STA			0x780
-#define EXYNOS_DP_AUX_CH_DEFER_CTL		0x788
-#define EXYNOS_DP_AUX_RX_COMM			0x78C
-#define EXYNOS_DP_BUFFER_DATA_CTL		0x790
-#define EXYNOS_DP_AUX_CH_CTL_1			0x794
-#define EXYNOS_DP_AUX_ADDR_7_0			0x798
-#define EXYNOS_DP_AUX_ADDR_15_8			0x79C
-#define EXYNOS_DP_AUX_ADDR_19_16		0x7A0
-#define EXYNOS_DP_AUX_CH_CTL_2			0x7A4
+#define ANALOGIX_DP_PLL_CTL			0x71C
+#define ANALOGIX_DP_PHY_PD			0x720
+#define ANALOGIX_DP_PHY_TEST			0x724
 
-#define EXYNOS_DP_BUF_DATA_0			0x7C0
+#define ANALOGIX_DP_VIDEO_FIFO_THRD		0x730
+#define ANALOGIX_DP_AUDIO_MARGIN		0x73C
 
-#define EXYNOS_DP_SOC_GENERAL_CTL		0x800
+#define ANALOGIX_DP_M_VID_GEN_FILTER_TH		0x764
+#define ANALOGIX_DP_M_AUD_GEN_FILTER_TH		0x778
+#define ANALOGIX_DP_AUX_CH_STA			0x780
+#define ANALOGIX_DP_AUX_CH_DEFER_CTL		0x788
+#define ANALOGIX_DP_AUX_RX_COMM			0x78C
+#define ANALOGIX_DP_BUFFER_DATA_CTL		0x790
+#define ANALOGIX_DP_AUX_CH_CTL_1		0x794
+#define ANALOGIX_DP_AUX_ADDR_7_0		0x798
+#define ANALOGIX_DP_AUX_ADDR_15_8		0x79C
+#define ANALOGIX_DP_AUX_ADDR_19_16		0x7A0
+#define ANALOGIX_DP_AUX_CH_CTL_2		0x7A4
 
-/* EXYNOS_DP_TX_SW_RESET */
+#define ANALOGIX_DP_BUF_DATA_0			0x7C0
+
+#define ANALOGIX_DP_SOC_GENERAL_CTL		0x800
+
+/* ANALOGIX_DP_TX_SW_RESET */
 #define RESET_DP_TX				(0x1 << 0)
 
-/* EXYNOS_DP_FUNC_EN_1 */
+/* ANALOGIX_DP_FUNC_EN_1 */
 #define MASTER_VID_FUNC_EN_N			(0x1 << 7)
 #define SLAVE_VID_FUNC_EN_N			(0x1 << 5)
 #define AUD_FIFO_FUNC_EN_N			(0x1 << 4)
@@ -107,17 +115,17 @@
 #define CRC_FUNC_EN_N				(0x1 << 1)
 #define SW_FUNC_EN_N				(0x1 << 0)
 
-/* EXYNOS_DP_FUNC_EN_2 */
+/* ANALOGIX_DP_FUNC_EN_2 */
 #define SSC_FUNC_EN_N				(0x1 << 7)
 #define AUX_FUNC_EN_N				(0x1 << 2)
 #define SERDES_FIFO_FUNC_EN_N			(0x1 << 1)
 #define LS_CLK_DOMAIN_FUNC_EN_N			(0x1 << 0)
 
-/* EXYNOS_DP_VIDEO_CTL_1 */
+/* ANALOGIX_DP_VIDEO_CTL_1 */
 #define VIDEO_EN				(0x1 << 7)
 #define HDCP_VIDEO_MUTE				(0x1 << 6)
 
-/* EXYNOS_DP_VIDEO_CTL_1 */
+/* ANALOGIX_DP_VIDEO_CTL_1 */
 #define IN_D_RANGE_MASK				(0x1 << 7)
 #define IN_D_RANGE_SHIFT			(7)
 #define IN_D_RANGE_CEA				(0x1 << 7)
@@ -134,7 +142,7 @@
 #define IN_COLOR_F_YCBCR422			(0x1 << 0)
 #define IN_COLOR_F_RGB				(0x0 << 0)
 
-/* EXYNOS_DP_VIDEO_CTL_3 */
+/* ANALOGIX_DP_VIDEO_CTL_3 */
 #define IN_YC_COEFFI_MASK			(0x1 << 7)
 #define IN_YC_COEFFI_SHIFT			(7)
 #define IN_YC_COEFFI_ITU709			(0x1 << 7)
@@ -144,17 +152,21 @@
 #define VID_CHK_UPDATE_TYPE_1			(0x1 << 4)
 #define VID_CHK_UPDATE_TYPE_0			(0x0 << 4)
 
-/* EXYNOS_DP_VIDEO_CTL_8 */
+/* ANALOGIX_DP_VIDEO_CTL_8 */
 #define VID_HRES_TH(x)				(((x) & 0xf) << 4)
 #define VID_VRES_TH(x)				(((x) & 0xf) << 0)
 
-/* EXYNOS_DP_VIDEO_CTL_10 */
+/* ANALOGIX_DP_VIDEO_CTL_10 */
 #define FORMAT_SEL				(0x1 << 4)
 #define INTERACE_SCAN_CFG			(0x1 << 2)
 #define VSYNC_POLARITY_CFG			(0x1 << 1)
 #define HSYNC_POLARITY_CFG			(0x1 << 0)
 
-/* EXYNOS_DP_LANE_MAP */
+/* ANALOGIX_DP_PLL_REG_1 */
+#define REF_CLK_24M				(0x1 << 1)
+#define REF_CLK_27M				(0x0 << 1)
+
+/* ANALOGIX_DP_LANE_MAP */
 #define LANE3_MAP_LOGIC_LANE_0			(0x0 << 6)
 #define LANE3_MAP_LOGIC_LANE_1			(0x1 << 6)
 #define LANE3_MAP_LOGIC_LANE_2			(0x2 << 6)
@@ -172,30 +184,30 @@
 #define LANE0_MAP_LOGIC_LANE_2			(0x2 << 0)
 #define LANE0_MAP_LOGIC_LANE_3			(0x3 << 0)
 
-/* EXYNOS_DP_ANALOG_CTL_1 */
+/* ANALOGIX_DP_ANALOG_CTL_1 */
 #define TX_TERMINAL_CTRL_50_OHM			(0x1 << 4)
 
-/* EXYNOS_DP_ANALOG_CTL_2 */
+/* ANALOGIX_DP_ANALOG_CTL_2 */
 #define SEL_24M					(0x1 << 3)
 #define TX_DVDD_BIT_1_0625V			(0x4 << 0)
 
-/* EXYNOS_DP_ANALOG_CTL_3 */
+/* ANALOGIX_DP_ANALOG_CTL_3 */
 #define DRIVE_DVDD_BIT_1_0625V			(0x4 << 5)
 #define VCO_BIT_600_MICRO			(0x5 << 0)
 
-/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+/* ANALOGIX_DP_PLL_FILTER_CTL_1 */
 #define PD_RING_OSC				(0x1 << 6)
 #define AUX_TERMINAL_CTRL_50_OHM		(0x2 << 4)
 #define TX_CUR1_2X				(0x1 << 2)
 #define TX_CUR_16_MA				(0x3 << 0)
 
-/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+/* ANALOGIX_DP_TX_AMP_TUNING_CTL */
 #define CH3_AMP_400_MV				(0x0 << 24)
 #define CH2_AMP_400_MV				(0x0 << 16)
 #define CH1_AMP_400_MV				(0x0 << 8)
 #define CH0_AMP_400_MV				(0x0 << 0)
 
-/* EXYNOS_DP_AUX_HW_RETRY_CTL */
+/* ANALOGIX_DP_AUX_HW_RETRY_CTL */
 #define AUX_BIT_PERIOD_EXPECTED_DELAY(x)	(((x) & 0x7) << 8)
 #define AUX_HW_RETRY_INTERVAL_MASK		(0x3 << 3)
 #define AUX_HW_RETRY_INTERVAL_600_MICROSECONDS	(0x0 << 3)
@@ -204,7 +216,7 @@
 #define AUX_HW_RETRY_INTERVAL_1800_MICROSECONDS	(0x3 << 3)
 #define AUX_HW_RETRY_COUNT_SEL(x)		(((x) & 0x7) << 0)
 
-/* EXYNOS_DP_COMMON_INT_STA_1 */
+/* ANALOGIX_DP_COMMON_INT_STA_1 */
 #define VSYNC_DET				(0x1 << 7)
 #define PLL_LOCK_CHG				(0x1 << 6)
 #define SPDIF_ERR				(0x1 << 5)
@@ -214,19 +226,19 @@
 #define VID_CLK_CHG				(0x1 << 1)
 #define SW_INT					(0x1 << 0)
 
-/* EXYNOS_DP_COMMON_INT_STA_2 */
+/* ANALOGIX_DP_COMMON_INT_STA_2 */
 #define ENC_EN_CHG				(0x1 << 6)
 #define HW_BKSV_RDY				(0x1 << 3)
 #define HW_SHA_DONE				(0x1 << 2)
 #define HW_AUTH_STATE_CHG			(0x1 << 1)
 #define HW_AUTH_DONE				(0x1 << 0)
 
-/* EXYNOS_DP_COMMON_INT_STA_3 */
+/* ANALOGIX_DP_COMMON_INT_STA_3 */
 #define AFIFO_UNDER				(0x1 << 7)
 #define AFIFO_OVER				(0x1 << 6)
 #define R0_CHK_FLAG				(0x1 << 5)
 
-/* EXYNOS_DP_COMMON_INT_STA_4 */
+/* ANALOGIX_DP_COMMON_INT_STA_4 */
 #define PSR_ACTIVE				(0x1 << 7)
 #define PSR_INACTIVE				(0x1 << 6)
 #define SPDIF_BI_PHASE_ERR			(0x1 << 5)
@@ -234,29 +246,29 @@
 #define HPD_LOST				(0x1 << 1)
 #define PLUG					(0x1 << 0)
 
-/* EXYNOS_DP_INT_STA */
+/* ANALOGIX_DP_INT_STA */
 #define INT_HPD					(0x1 << 6)
 #define HW_TRAINING_FINISH			(0x1 << 5)
 #define RPLY_RECEIV				(0x1 << 1)
 #define AUX_ERR					(0x1 << 0)
 
-/* EXYNOS_DP_INT_CTL */
+/* ANALOGIX_DP_INT_CTL */
 #define SOFT_INT_CTRL				(0x1 << 2)
 #define INT_POL1				(0x1 << 1)
 #define INT_POL0				(0x1 << 0)
 
-/* EXYNOS_DP_SYS_CTL_1 */
+/* ANALOGIX_DP_SYS_CTL_1 */
 #define DET_STA					(0x1 << 2)
 #define FORCE_DET				(0x1 << 1)
 #define DET_CTRL				(0x1 << 0)
 
-/* EXYNOS_DP_SYS_CTL_2 */
+/* ANALOGIX_DP_SYS_CTL_2 */
 #define CHA_CRI(x)				(((x) & 0xf) << 4)
 #define CHA_STA					(0x1 << 2)
 #define FORCE_CHA				(0x1 << 1)
 #define CHA_CTRL				(0x1 << 0)
 
-/* EXYNOS_DP_SYS_CTL_3 */
+/* ANALOGIX_DP_SYS_CTL_3 */
 #define HPD_STATUS				(0x1 << 6)
 #define F_HPD					(0x1 << 5)
 #define HPD_CTRL				(0x1 << 4)
@@ -265,13 +277,13 @@
 #define F_VALID					(0x1 << 1)
 #define VALID_CTRL				(0x1 << 0)
 
-/* EXYNOS_DP_SYS_CTL_4 */
+/* ANALOGIX_DP_SYS_CTL_4 */
 #define FIX_M_AUD				(0x1 << 4)
 #define ENHANCED				(0x1 << 3)
 #define FIX_M_VID				(0x1 << 2)
 #define M_VID_UPDATE_CTRL			(0x3 << 0)
 
-/* EXYNOS_DP_TRAINING_PTN_SET */
+/* ANALOGIX_DP_TRAINING_PTN_SET */
 #define SCRAMBLER_TYPE				(0x1 << 9)
 #define HW_LINK_TRAINING_PATTERN		(0x1 << 8)
 #define SCRAMBLING_DISABLE			(0x1 << 5)
@@ -285,24 +297,24 @@
 #define SW_TRAINING_PATTERN_SET_PTN1		(0x1 << 0)
 #define SW_TRAINING_PATTERN_SET_NORMAL		(0x0 << 0)
 
-/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */
+/* ANALOGIX_DP_LN0_LINK_TRAINING_CTL */
 #define PRE_EMPHASIS_SET_MASK			(0x3 << 3)
 #define PRE_EMPHASIS_SET_SHIFT			(3)
 
-/* EXYNOS_DP_DEBUG_CTL */
+/* ANALOGIX_DP_DEBUG_CTL */
 #define PLL_LOCK				(0x1 << 4)
 #define F_PLL_LOCK				(0x1 << 3)
 #define PLL_LOCK_CTRL				(0x1 << 2)
 #define PN_INV					(0x1 << 0)
 
-/* EXYNOS_DP_PLL_CTL */
+/* ANALOGIX_DP_PLL_CTL */
 #define DP_PLL_PD				(0x1 << 7)
 #define DP_PLL_RESET				(0x1 << 6)
 #define DP_PLL_LOOP_BIT_DEFAULT			(0x1 << 4)
 #define DP_PLL_REF_BIT_1_1250V			(0x5 << 0)
 #define DP_PLL_REF_BIT_1_2500V			(0x7 << 0)
 
-/* EXYNOS_DP_PHY_PD */
+/* ANALOGIX_DP_PHY_PD */
 #define DP_PHY_PD				(0x1 << 5)
 #define AUX_PD					(0x1 << 4)
 #define CH3_PD					(0x1 << 3)
@@ -310,28 +322,28 @@
 #define CH1_PD					(0x1 << 1)
 #define CH0_PD					(0x1 << 0)
 
-/* EXYNOS_DP_PHY_TEST */
+/* ANALOGIX_DP_PHY_TEST */
 #define MACRO_RST				(0x1 << 5)
 #define CH1_TEST				(0x1 << 1)
 #define CH0_TEST				(0x1 << 0)
 
-/* EXYNOS_DP_AUX_CH_STA */
+/* ANALOGIX_DP_AUX_CH_STA */
 #define AUX_BUSY				(0x1 << 4)
 #define AUX_STATUS_MASK				(0xf << 0)
 
-/* EXYNOS_DP_AUX_CH_DEFER_CTL */
+/* ANALOGIX_DP_AUX_CH_DEFER_CTL */
 #define DEFER_CTRL_EN				(0x1 << 7)
 #define DEFER_COUNT(x)				(((x) & 0x7f) << 0)
 
-/* EXYNOS_DP_AUX_RX_COMM */
+/* ANALOGIX_DP_AUX_RX_COMM */
 #define AUX_RX_COMM_I2C_DEFER			(0x2 << 2)
 #define AUX_RX_COMM_AUX_DEFER			(0x2 << 0)
 
-/* EXYNOS_DP_BUFFER_DATA_CTL */
+/* ANALOGIX_DP_BUFFER_DATA_CTL */
 #define BUF_CLR					(0x1 << 7)
 #define BUF_DATA_COUNT(x)			(((x) & 0x1f) << 0)
 
-/* EXYNOS_DP_AUX_CH_CTL_1 */
+/* ANALOGIX_DP_AUX_CH_CTL_1 */
 #define AUX_LENGTH(x)				(((x - 1) & 0xf) << 4)
 #define AUX_TX_COMM_MASK			(0xf << 0)
 #define AUX_TX_COMM_DP_TRANSACTION		(0x1 << 3)
@@ -340,20 +352,20 @@
 #define AUX_TX_COMM_WRITE			(0x0 << 0)
 #define AUX_TX_COMM_READ			(0x1 << 0)
 
-/* EXYNOS_DP_AUX_ADDR_7_0 */
+/* ANALOGIX_DP_AUX_ADDR_7_0 */
 #define AUX_ADDR_7_0(x)				(((x) >> 0) & 0xff)
 
-/* EXYNOS_DP_AUX_ADDR_15_8 */
+/* ANALOGIX_DP_AUX_ADDR_15_8 */
 #define AUX_ADDR_15_8(x)			(((x) >> 8) & 0xff)
 
-/* EXYNOS_DP_AUX_ADDR_19_16 */
+/* ANALOGIX_DP_AUX_ADDR_19_16 */
 #define AUX_ADDR_19_16(x)			(((x) >> 16) & 0x0f)
 
-/* EXYNOS_DP_AUX_CH_CTL_2 */
+/* ANALOGIX_DP_AUX_CH_CTL_2 */
 #define ADDR_ONLY				(0x1 << 1)
 #define AUX_EN					(0x1 << 0)
 
-/* EXYNOS_DP_SOC_GENERAL_CTL */
+/* ANALOGIX_DP_SOC_GENERAL_CTL */
 #define AUDIO_MODE_SPDIF_MODE			(0x1 << 8)
 #define AUDIO_MODE_MASTER_MODE			(0x0 << 8)
 #define MASTER_VIDEO_INTERLACE_EN		(0x1 << 4)
@@ -363,4 +375,4 @@
 #define VIDEO_MODE_SLAVE_MODE			(0x1 << 0)
 #define VIDEO_MODE_MASTER_MODE			(0x0 << 0)
 
-#endif /* _EXYNOS_DP_REG_H */
+#endif /* _ANALOGIX_DP_REG_H */
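
As a rough illustration (not part of the patch): the AUX_ADDR_* helpers above
split a 20-bit DPCD/AUX address across the three address registers, so a write
sequence in the driver might look roughly like this, assuming an __iomem
register base and a 20-bit address in addr:

	writel(AUX_ADDR_7_0(addr),   base + ANALOGIX_DP_AUX_ADDR_7_0);
	writel(AUX_ADDR_15_8(addr),  base + ANALOGIX_DP_AUX_ADDR_15_8);
	writel(AUX_ADDR_19_16(addr), base + ANALOGIX_DP_AUX_ADDR_19_16);
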
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 9795b72..c9d9412 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1413,11 +1413,6 @@
 	mutex_unlock(&hdmi->mutex);
 }
 
-static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
-{
-	/* do nothing */
-}
-
 static enum drm_connector_status
 dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -1536,8 +1531,6 @@
 static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
 	.enable = dw_hdmi_bridge_enable,
 	.disable = dw_hdmi_bridge_disable,
-	.pre_enable = dw_hdmi_bridge_nop,
-	.post_disable = dw_hdmi_bridge_nop,
 	.mode_set = dw_hdmi_bridge_mode_set,
 };
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 7bc394e..dc83f69 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -163,10 +163,8 @@
 
 static int __init cirrus_init(void)
 {
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && cirrus_modeset == -1)
 		return -EINVAL;
-#endif
 
 	if (cirrus_modeset == 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index b774d63..2188d6b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -245,7 +245,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 0907715..32d32c5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -61,7 +61,7 @@
 				      bpp, mode_cmd->pitches[0]))
 		return ERR_PTR(-EINVAL);
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
@@ -295,7 +295,7 @@
 	struct drm_gem_object *obj;
 	struct cirrus_bo *bo;
 
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index dfffd52..6768b7b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -245,6 +245,8 @@
 	.verify_access = cirrus_bo_verify_access,
 	.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
 	.io_mem_free = &cirrus_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int cirrus_mm_init(struct cirrus_device *cirrus)
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index a10ea6a..605bd24 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -423,7 +423,7 @@
 }
 
 /**
- * drm_agp_clear - Clear AGP resource list
+ * drm_legacy_agp_clear - Clear AGP resource list
  * @dev: DRM device
  *
  * Iterate over all AGP resources and remove them. But keep the AGP head
@@ -434,7 +434,7 @@
  * resources from getting destroyed. Drivers are responsible of cleaning them up
  * during device shutdown.
  */
-void drm_agp_clear(struct drm_device *dev)
+void drm_legacy_agp_clear(struct drm_device *dev)
 {
 	struct drm_agp_mem *entry, *tempe;
 
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 8ee1db8..c204ef3 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -31,6 +31,8 @@
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
 
+#include "drm_crtc_internal.h"
+
 /**
  * drm_atomic_state_default_release -
  * release memory initialized by drm_atomic_state_init
@@ -142,18 +144,11 @@
 		if (!connector)
 			continue;
 
-		/*
-		 * FIXME: Async commits can race with connector unplugging and
-		 * there's currently nothing that prevents cleanup up state for
-		 * deleted connectors. As long as the callback doesn't look at
-		 * the connector we'll be fine though, so make sure that's the
-		 * case by setting all connector pointers to NULL.
-		 */
-		state->connector_states[i]->connector = NULL;
-		connector->funcs->atomic_destroy_state(NULL,
+		connector->funcs->atomic_destroy_state(connector,
 						       state->connector_states[i]);
 		state->connectors[i] = NULL;
 		state->connector_states[i] = NULL;
+		drm_connector_unreference(connector);
 	}
 
 	for (i = 0; i < config->num_crtc; i++) {
@@ -261,6 +256,8 @@
 	int ret, index = drm_crtc_index(crtc);
 	struct drm_crtc_state *crtc_state;
 
+	WARN_ON(!state->acquire_ctx);
+
 	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
 	if (crtc_state)
 		return crtc_state;
@@ -354,6 +351,8 @@
 	drm_property_unreference_blob(state->mode_blob);
 	state->mode_blob = NULL;
 
+	memset(&state->mode, 0, sizeof(state->mode));
+
 	if (blob) {
 		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
 		    drm_mode_convert_umode(&state->mode,
@@ -366,7 +365,6 @@
 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
 				 state->mode.name, state);
 	} else {
-		memset(&state->mode, 0, sizeof(state->mode));
 		state->enable = false;
 		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
 				 state);
@@ -620,6 +618,8 @@
 	int ret, index = drm_plane_index(plane);
 	struct drm_plane_state *plane_state;
 
+	WARN_ON(!state->acquire_ctx);
+
 	plane_state = drm_atomic_get_existing_plane_state(state, plane);
 	if (plane_state)
 		return plane_state;
@@ -888,6 +888,8 @@
 	struct drm_mode_config *config = &connector->dev->mode_config;
 	struct drm_connector_state *connector_state;
 
+	WARN_ON(!state->acquire_ctx);
+
 	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
 	if (ret)
 		return ERR_PTR(ret);
@@ -924,6 +926,7 @@
 	if (!connector_state)
 		return ERR_PTR(-ENOMEM);
 
+	drm_connector_reference(connector);
 	state->connector_states[index] = connector_state;
 	state->connectors[index] = connector;
 	connector_state->state = state;
@@ -1158,12 +1161,18 @@
 {
 	struct drm_crtc_state *crtc_state;
 
-	if (conn_state->crtc && conn_state->crtc != crtc) {
+	if (conn_state->crtc == crtc)
+		return 0;
+
+	if (conn_state->crtc) {
 		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
 								conn_state->crtc);
 
 		crtc_state->connector_mask &=
 			~(1 << drm_connector_index(conn_state->connector));
+
+		drm_connector_unreference(conn_state->connector);
+		conn_state->crtc = NULL;
 	}
 
 	if (crtc) {
@@ -1173,16 +1182,16 @@
 
 		crtc_state->connector_mask |=
 			1 << drm_connector_index(conn_state->connector);
-	}
 
-	conn_state->crtc = crtc;
+		drm_connector_reference(conn_state->connector);
+		conn_state->crtc = crtc;
 
-	if (crtc)
 		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
 				 conn_state, crtc->base.id, crtc->name);
-	else
+	} else {
 		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
 				 conn_state);
+	}
 
 	return 0;
 }
@@ -1388,7 +1397,7 @@
 EXPORT_SYMBOL(drm_atomic_commit);
 
 /**
- * drm_atomic_async_commit - atomic&async configuration commit
+ * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit
  * @state: atomic configuration to check
  *
  * Note that this function can return -EDEADLK if the driver needed to acquire
@@ -1403,7 +1412,7 @@
  * Returns:
  * 0 on success, negative error code on failure.
  */
-int drm_atomic_async_commit(struct drm_atomic_state *state)
+int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
 {
 	struct drm_mode_config *config = &state->dev->mode_config;
 	int ret;
@@ -1412,11 +1421,11 @@
 	if (ret)
 		return ret;
 
-	DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
+	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
 
 	return config->funcs->atomic_commit(state->dev, state, true);
 }
-EXPORT_SYMBOL(drm_atomic_async_commit);
+EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
 
 /*
  * The big monstor ioctl
@@ -1614,12 +1623,19 @@
 		}
 
 		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
-		if (!obj || !obj->properties) {
+		if (!obj) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		if (!obj->properties) {
+			drm_mode_object_unreference(obj);
 			ret = -ENOENT;
 			goto out;
 		}
 
 		if (get_user(count_props, count_props_ptr + copied_objs)) {
+			drm_mode_object_unreference(obj);
 			ret = -EFAULT;
 			goto out;
 		}
@@ -1632,12 +1648,14 @@
 			struct drm_property *prop;
 
 			if (get_user(prop_id, props_ptr + copied_props)) {
+				drm_mode_object_unreference(obj);
 				ret = -EFAULT;
 				goto out;
 			}
 
 			prop = drm_property_find(dev, prop_id);
 			if (!prop) {
+				drm_mode_object_unreference(obj);
 				ret = -ENOENT;
 				goto out;
 			}
@@ -1645,13 +1663,16 @@
 			if (copy_from_user(&prop_value,
 					   prop_values_ptr + copied_props,
 					   sizeof(prop_value))) {
+				drm_mode_object_unreference(obj);
 				ret = -EFAULT;
 				goto out;
 			}
 
 			ret = atomic_set_prop(state, obj, prop, prop_value);
-			if (ret)
+			if (ret) {
+				drm_mode_object_unreference(obj);
 				goto out;
+			}
 
 			copied_props++;
 		}
@@ -1662,6 +1683,7 @@
 			plane_mask |= (1 << drm_plane_index(plane));
 			plane->old_fb = plane->fb;
 		}
+		drm_mode_object_unreference(obj);
 	}
 
 	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1685,7 +1707,7 @@
 		 */
 		ret = drm_atomic_check_only(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
-		ret = drm_atomic_async_commit(state);
+		ret = drm_atomic_nonblocking_commit(state);
 	} else {
 		ret = drm_atomic_commit(state);
 	}
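
With mode objects now reference counted, every drm_mode_object_find() in the
ioctl above is paired with a drm_mode_object_unreference() on each exit path.
A minimal sketch of the pattern callers are expected to follow (error handling
trimmed):

	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
	if (!obj)
		return -ENOENT;

	/* ... use obj, e.g. inspect obj->properties ... */

	drm_mode_object_unreference(obj);
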
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4befe25..ddfa0d1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -384,8 +384,6 @@
 		 */
 		encoder = conn_state->best_encoder;
 		funcs = encoder->helper_private;
-		if (!funcs)
-			continue;
 
 		ret = drm_bridge_mode_fixup(encoder->bridge, &crtc_state->mode,
 				&crtc_state->adjusted_mode);
@@ -394,7 +392,7 @@
 			return -EINVAL;
 		}
 
-		if (funcs->atomic_check) {
+		if (funcs && funcs->atomic_check) {
 			ret = funcs->atomic_check(encoder, crtc_state,
 						  conn_state);
 			if (ret) {
@@ -402,7 +400,7 @@
 						 encoder->base.id, encoder->name);
 				return ret;
 			}
-		} else if (funcs->mode_fixup) {
+		} else if (funcs && funcs->mode_fixup) {
 			ret = funcs->mode_fixup(encoder, &crtc_state->mode,
 						&crtc_state->adjusted_mode);
 			if (!ret) {
@@ -707,12 +705,14 @@
 		drm_bridge_disable(encoder->bridge);
 
 		/* Right function depends upon target state. */
-		if (connector->state->crtc && funcs->prepare)
-			funcs->prepare(encoder);
-		else if (funcs->disable)
-			funcs->disable(encoder);
-		else
-			funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+		if (funcs) {
+			if (connector->state->crtc && funcs->prepare)
+				funcs->prepare(encoder);
+			else if (funcs->disable)
+				funcs->disable(encoder);
+			else if (funcs->dpms)
+				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+		}
 
 		drm_bridge_post_disable(encoder->bridge);
 	}
@@ -873,7 +873,7 @@
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call mode_set hooks twice.
 		 */
-		if (funcs->mode_set)
+		if (funcs && funcs->mode_set)
 			funcs->mode_set(encoder, mode, adjusted_mode);
 
 		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
@@ -974,17 +974,29 @@
 		 */
 		drm_bridge_pre_enable(encoder->bridge);
 
-		if (funcs->enable)
-			funcs->enable(encoder);
-		else
-			funcs->commit(encoder);
+		if (funcs) {
+			if (funcs->enable)
+				funcs->enable(encoder);
+			else if (funcs->commit)
+				funcs->commit(encoder);
+		}
 
 		drm_bridge_enable(encoder->bridge);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
 
-static void wait_for_fences(struct drm_device *dev,
+/**
+ * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
+ * @dev: DRM device
+ * @state: atomic state object with old state structures
+ *
+ * For implicit sync, driver should fish the exclusive fence out from the
+ * incoming fb's and stash it in the drm_plane_state.  This is called after
+ * drm_atomic_helper_swap_state() so it uses the current plane state (and
+ * just uses the atomic state to find the changed planes)
+ */
+void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
 			    struct drm_atomic_state *state)
 {
 	struct drm_plane *plane;
@@ -1002,6 +1014,7 @@
 		plane->state->fence = NULL;
 	}
 }
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
 
 /**
  * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
@@ -1092,6 +1105,8 @@
 					drm_crtc_vblank_count(crtc),
 				msecs_to_jiffies(50));
 
+		WARN(!ret, "[CRTC:%d] vblank wait timed out\n", crtc->base.id);
+
 		drm_crtc_vblank_put(crtc);
 	}
 }
@@ -1101,13 +1116,13 @@
  * drm_atomic_helper_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: whether nonblocking behavior is requested.
  *
  * This function commits a with drm_atomic_helper_check() pre-validated state
  * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * now this doesn't implement nonblocking commits.
  *
- * Note that right now this function does not support async commits, and hence
+ * Note that right now this function does not support nonblocking commits, hence
  * driver writers must implement their own version for now. Also note that the
  * default ordering of how the various stages are called is to match the legacy
  * modeset helper library closest. One peculiarity of that is that it doesn't
@@ -1128,11 +1143,11 @@
  */
 int drm_atomic_helper_commit(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async)
+			     bool nonblock)
 {
 	int ret;
 
-	if (async)
+	if (nonblock)
 		return -EBUSY;
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -1163,7 +1178,7 @@
 	 * current layout.
 	 */
 
-	wait_for_fences(dev, state);
+	drm_atomic_helper_wait_for_fences(dev, state);
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
@@ -1182,20 +1197,20 @@
 EXPORT_SYMBOL(drm_atomic_helper_commit);
 
 /**
- * DOC: implementing async commit
+ * DOC: implementing nonblocking commit
  *
- * For now the atomic helpers don't support async commit directly. If there is
- * real need it could be added though, using the dma-buf fence infrastructure
- * for generic synchronization with outstanding rendering.
+ * For now the atomic helpers don't support nonblocking commit directly. If
+ * there is real need it could be added though, using the dma-buf fence
+ * infrastructure for generic synchronization with outstanding rendering.
  *
- * For now drivers have to implement async commit themselves, with the following
- * sequence being the recommended one:
+ * For now drivers have to implement nonblocking commit themselves, with the
+ * following sequence being the recommended one:
  *
  * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
  * which commit needs to call which can fail, so we want to run it first and
  * synchronously.
  *
- * 2. Synchronize with any outstanding asynchronous commit worker threads which
+ * 2. Synchronize with any outstanding nonblocking commit worker threads which
  * might be affected the new state update. This can be done by either cancelling
  * or flushing the work items, depending upon whether the driver can deal with
  * cancelled updates. Note that it is important to ensure that the framebuffer
@@ -1209,9 +1224,9 @@
  * 3. The software state is updated synchronously with
  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
  * locks means concurrent callers never see inconsistent state. And doing this
- * while it's guaranteed that no relevant async worker runs means that async
- * workers do not need grab any locks. Actually they must not grab locks, for
- * otherwise the work flushing will deadlock.
+ * while it's guaranteed that no relevant nonblocking worker runs means that
+ * nonblocking workers do not need to grab any locks. Actually they must not grab
+ * locks, for otherwise the work flushing will deadlock.
  *
  * 4. Schedule a work item to do all subsequent steps, using the split-out
  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
@@ -2358,11 +2373,11 @@
 		goto fail;
 	}
 
-	ret = drm_atomic_async_commit(state);
+	ret = drm_atomic_nonblocking_commit(state);
 	if (ret != 0)
 		goto fail;
 
-	/* Driver takes ownership of state on successful async commit. */
+	/* Driver takes ownership of state on successful commit. */
 	return 0;
 fail:
 	if (ret == -EDEADLK)
@@ -2468,6 +2483,23 @@
 EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
 
 /**
+ * drm_atomic_helper_best_encoder - Helper for &drm_connector_helper_funcs
+ *                                  ->best_encoder callback
+ * @connector: Connector control structure
+ *
+ * This is a &drm_connector_helper_funcs ->best_encoder callback helper for
+ * connectors that support exactly 1 encoder, statically determined at driver
+ * init time.
+ */
+struct drm_encoder *
+drm_atomic_helper_best_encoder(struct drm_connector *connector)
+{
+	WARN_ON(connector->encoder_ids[1]);
+	return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+}
+EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
+
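For connectors with exactly one statically assigned encoder, a driver can plug
this helper straight into its connector helper vtable; a hedged sketch (the
foo_* names are made up, not from this patch):

	static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
		.get_modes	= foo_connector_get_modes,
		.best_encoder	= drm_atomic_helper_best_encoder,
	};
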
+/**
  * DOC: atomic state reset and initialization
  *
  * Both the drm core and the atomic helpers assume that there is always the full
@@ -2497,12 +2529,9 @@
  */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 {
-	if (crtc->state) {
-		drm_property_unreference_blob(crtc->state->mode_blob);
-		drm_property_unreference_blob(crtc->state->degamma_lut);
-		drm_property_unreference_blob(crtc->state->ctm);
-		drm_property_unreference_blob(crtc->state->gamma_lut);
-	}
+	if (crtc->state)
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+
 	kfree(crtc->state);
 	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
 
@@ -2566,15 +2595,13 @@
 
 /**
  * __drm_atomic_helper_crtc_destroy_state - release CRTC state
- * @crtc: CRTC object
  * @state: CRTC state object to release
  *
  * Releases all resources stored in the CRTC state without actually freeing
  * the memory of the CRTC state. This is useful for drivers that subclass the
  * CRTC state.
  */
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
-					    struct drm_crtc_state *state)
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
 {
 	drm_property_unreference_blob(state->mode_blob);
 	drm_property_unreference_blob(state->degamma_lut);
@@ -2594,7 +2621,7 @@
 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 					  struct drm_crtc_state *state)
 {
-	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+	__drm_atomic_helper_crtc_destroy_state(state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
@@ -2608,8 +2635,8 @@
  */
 void drm_atomic_helper_plane_reset(struct drm_plane *plane)
 {
-	if (plane->state && plane->state->fb)
-		drm_framebuffer_unreference(plane->state->fb);
+	if (plane->state)
+		__drm_atomic_helper_plane_destroy_state(plane->state);
 
 	kfree(plane->state);
 	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
@@ -2664,15 +2691,13 @@
 
 /**
  * __drm_atomic_helper_plane_destroy_state - release plane state
- * @plane: plane object
  * @state: plane state object to release
  *
  * Releases all resources stored in the plane state without actually freeing
  * the memory of the plane state. This is useful for drivers that subclass the
  * plane state.
  */
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
-					     struct drm_plane_state *state)
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
 {
 	if (state->fb)
 		drm_framebuffer_unreference(state->fb);
@@ -2690,7 +2715,7 @@
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 					   struct drm_plane_state *state)
 {
-	__drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2730,6 +2755,9 @@
 	struct drm_connector_state *conn_state =
 		kzalloc(sizeof(*conn_state), GFP_KERNEL);
 
+	if (connector->state)
+		__drm_atomic_helper_connector_destroy_state(connector->state);
+
 	kfree(connector->state);
 	__drm_atomic_helper_connector_reset(connector, conn_state);
 }
@@ -2748,6 +2776,8 @@
 					    struct drm_connector_state *state)
 {
 	memcpy(state, connector->state, sizeof(*state));
+	if (state->crtc)
+		drm_connector_reference(connector);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
 
@@ -2859,7 +2889,6 @@
 
 /**
  * __drm_atomic_helper_connector_destroy_state - release connector state
- * @connector: connector object
  * @state: connector state object to release
  *
  * Releases all resources stored in the connector state without actually
@@ -2867,14 +2896,15 @@
  * subclass the connector state.
  */
 void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
-					    struct drm_connector_state *state)
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
 {
 	/*
 	 * This is currently a placeholder so that drivers that subclass the
 	 * state will automatically do the right thing if code is ever added
 	 * to this function.
 	 */
+	if (state->crtc)
+		drm_connector_unreference(state->connector);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
 
@@ -2889,7 +2919,7 @@
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					  struct drm_connector_state *state)
 {
-	__drm_atomic_helper_connector_destroy_state(connector, state);
+	__drm_atomic_helper_connector_destroy_state(state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
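
Tying the renamed pieces together: the "implementing nonblocking commit" DOC
comment above describes a four-step recipe that drivers still have to implement
themselves. A simplified sketch of a driver ->atomic_commit() built on the
helpers touched here (struct foo_commit and the foo_* names are illustrative
only; step 2, flushing any worker that still touches the affected CRTCs, is
omitted for brevity):

	struct foo_commit {
		struct work_struct work;
		struct drm_atomic_state *state;
	};

	static void foo_commit_work(struct work_struct *work)
	{
		struct foo_commit *c = container_of(work, struct foo_commit, work);
		struct drm_device *dev = c->state->dev;

		/* step 4: run the split-out commit phases from the worker */
		drm_atomic_helper_wait_for_fences(dev, c->state);
		drm_atomic_helper_commit_modeset_disables(dev, c->state);
		drm_atomic_helper_commit_planes(dev, c->state, false);
		drm_atomic_helper_commit_modeset_enables(dev, c->state);
		drm_atomic_helper_wait_for_vblanks(dev, c->state);
		drm_atomic_helper_cleanup_planes(dev, c->state);
		drm_atomic_state_free(c->state);
		kfree(c);
	}

	static int foo_atomic_commit(struct drm_device *dev,
				     struct drm_atomic_state *state,
				     bool nonblock)
	{
		struct foo_commit *c;
		int ret;

		/* step 1: the only phase that may fail */
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			drm_atomic_helper_cleanup_planes(dev, state);
			return -ENOMEM;
		}

		/* step 3: publish the new state under the modeset locks */
		drm_atomic_helper_swap_state(dev, state);

		c->state = state;
		INIT_WORK(&c->work, foo_commit_work);
		if (nonblock)
			schedule_work(&c->work);
		else
			foo_commit_work(&c->work);

		return 0;
	}
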
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index f1a204d..9b34158c 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -396,6 +396,10 @@
 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EPERM;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
 			      map->flags, &maplist);
 
@@ -416,6 +420,62 @@
 	return 0;
 }
 
+/*
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping at the given index (passed in the offset field) and
+ * copies its information into userspace.
+ */
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *r_list = NULL;
+	struct list_head *list;
+	int idx;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	idx = map->offset;
+	if (idx < 0)
+		return -EINVAL;
+
+	i = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each(list, &dev->maplist) {
+		if (i == idx) {
+			r_list = list_entry(list, struct drm_map_list, head);
+			break;
+		}
+		i++;
+	}
+	if (!r_list || !r_list->map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	map->offset = r_list->map->offset;
+	map->size = r_list->map->size;
+	map->type = r_list->map->type;
+	map->flags = r_list->map->flags;
+	map->handle = (void *)(unsigned long) r_list->user_token;
+	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
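The index to look up is carried in the offset field on input; from userspace
the call could look roughly like this (assuming DRM_IOCTL_GET_MAP is the
legacy ioctl wired up to this handler):

	struct drm_map map = { .offset = idx };	/* idx: position in the map list */

	if (ioctl(fd, DRM_IOCTL_GET_MAP, &map) == 0)
		printf("map %d: type %d size %lu handle %p\n",
		       idx, map.type, map.size, map.handle);
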
 /**
  * Remove a map private from list and deallocate resources if the mapping
  * isn't in use.
@@ -482,18 +542,35 @@
 }
 EXPORT_SYMBOL(drm_legacy_rmmap_locked);
 
-int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
-	int ret;
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_legacy_rmmap_locked(dev, map);
+	drm_legacy_rmmap_locked(dev, map);
 	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
 }
 EXPORT_SYMBOL(drm_legacy_rmmap);
 
+void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_map_list *r_list, *list_temp;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+		if (r_list->master == master) {
+			drm_legacy_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
  * exit uncleanly.  Therefore, having userland manually remove mappings seems
@@ -517,6 +594,10 @@
 	struct drm_map_list *r_list;
 	int ret;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map &&
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e08f962..0e3cc66 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -168,6 +168,7 @@
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
 	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
+	{ DRM_MODE_CONNECTOR_DPI, "DPI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
@@ -179,6 +180,7 @@
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 	{ DRM_MODE_ENCODER_DSI, "DSI" },
 	{ DRM_MODE_ENCODER_DPMST, "DP MST" },
+	{ DRM_MODE_ENCODER_DPI, "DPI" },
 };
 
 static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
@@ -275,7 +277,8 @@
 static int drm_mode_object_get_reg(struct drm_device *dev,
 				   struct drm_mode_object *obj,
 				   uint32_t obj_type,
-				   bool register_obj)
+				   bool register_obj,
+				   void (*obj_free_cb)(struct kref *kref))
 {
 	int ret;
 
@@ -288,6 +291,10 @@
 		 */
 		obj->id = ret;
 		obj->type = obj_type;
+		if (obj_free_cb) {
+			obj->free_cb = obj_free_cb;
+			kref_init(&obj->refcount);
+		}
 	}
 	mutex_unlock(&dev->mode_config.idr_mutex);
 
@@ -311,7 +318,7 @@
 int drm_mode_object_get(struct drm_device *dev,
 			struct drm_mode_object *obj, uint32_t obj_type)
 {
-	return drm_mode_object_get_reg(dev, obj, obj_type, true);
+	return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
 }
 
 static void drm_mode_object_register(struct drm_device *dev,
@@ -323,19 +330,24 @@
 }
 
 /**
- * drm_mode_object_put - free a modeset identifer
+ * drm_mode_object_unregister - free a modeset identifier
  * @dev: DRM device
  * @object: object to free
  *
- * Free @id from @dev's unique identifier pool. Note that despite the _get
- * postfix modeset identifiers are _not_ reference counted. Hence don't use this
+ * Free @id from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted. Hence don't use this
  * for reference counted modeset objects like framebuffers.
  */
-void drm_mode_object_put(struct drm_device *dev,
+void drm_mode_object_unregister(struct drm_device *dev,
 			 struct drm_mode_object *object)
 {
 	mutex_lock(&dev->mode_config.idr_mutex);
-	idr_remove(&dev->mode_config.crtc_idr, object->id);
+	if (object->id) {
+		idr_remove(&dev->mode_config.crtc_idr, object->id);
+		object->id = 0;
+	}
 	mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
@@ -350,11 +362,11 @@
 		obj = NULL;
 	if (obj && obj->id != id)
 		obj = NULL;
-	/* don't leak out unref'd fb's */
-	if (obj &&
-	    (obj->type == DRM_MODE_OBJECT_FB ||
-	     obj->type == DRM_MODE_OBJECT_BLOB))
-		obj = NULL;
+
+	if (obj && obj->free_cb) {
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
 	mutex_unlock(&dev->mode_config.idr_mutex);
 
 	return obj;
@@ -366,25 +378,70 @@
  * @id: id of the mode object
  * @type: type of the mode object
  *
- * Note that framebuffers cannot be looked up with this functions - since those
- * are reference counted, they need special treatment.  Even with
- * DRM_MODE_OBJECT_ANY (although that will simply return NULL
- * rather than WARN_ON()).
+ * This function is used to look up a modeset object. It will acquire a
+ * reference for reference counted objects. This reference must be dropped again
+ * by calling drm_mode_object_unreference().
  */
 struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
 		uint32_t id, uint32_t type)
 {
 	struct drm_mode_object *obj = NULL;
 
-	/* Framebuffers are reference counted and need their own lookup
-	 * function.*/
-	WARN_ON(type == DRM_MODE_OBJECT_FB || type == DRM_MODE_OBJECT_BLOB);
 	obj = _object_find(dev, id, type);
 	return obj;
 }
 EXPORT_SYMBOL(drm_mode_object_find);
 
 /**
+ * drm_mode_object_unreference - decr the object refcnt
+ * @obj: mode_object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. This is used to drop references
+ * acquired with drm_mode_object_reference().
+ */
+void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+		kref_put(&obj->refcount, obj->free_cb);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_unreference);
+
+/**
+ * drm_mode_object_reference - incr the object refcnt
+ * @obj: mode_object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. References should be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+	if (obj->free_cb) {
+		DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+		kref_get(&obj->refcount);
+	}
+}
+EXPORT_SYMBOL(drm_mode_object_reference);
+
+static void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+			container_of(kref, struct drm_framebuffer, base.refcount);
+	struct drm_device *dev = fb->dev;
+
+	/*
+	 * The lookup idr holds a weak reference, which has not necessarily been
+	 * removed at this point. Check for that.
+	 */
+	drm_mode_object_unregister(dev, &fb->base);
+
+	fb->funcs->destroy(fb);
+}
+
+/**
  * drm_framebuffer_init - initialize a framebuffer
  * @dev: DRM device
  * @fb: framebuffer to be initialized
@@ -407,71 +464,26 @@
 {
 	int ret;
 
-	mutex_lock(&dev->mode_config.fb_lock);
-	kref_init(&fb->refcount);
 	INIT_LIST_HEAD(&fb->filp_head);
 	fb->dev = dev;
 	fb->funcs = funcs;
 
-	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+	ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
+				      false, drm_framebuffer_free);
 	if (ret)
 		goto out;
 
+	mutex_lock(&dev->mode_config.fb_lock);
 	dev->mode_config.num_fb++;
 	list_add(&fb->head, &dev->mode_config.fb_list);
-out:
 	mutex_unlock(&dev->mode_config.fb_lock);
 
+	drm_mode_object_register(dev, &fb->base);
+out:
 	return ret;
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
 
-/* dev->mode_config.fb_lock must be held! */
-static void __drm_framebuffer_unregister(struct drm_device *dev,
-					 struct drm_framebuffer *fb)
-{
-	drm_mode_object_put(dev, &fb->base);
-
-	fb->base.id = 0;
-}
-
-static void drm_framebuffer_free(struct kref *kref)
-{
-	struct drm_framebuffer *fb =
-			container_of(kref, struct drm_framebuffer, refcount);
-	struct drm_device *dev = fb->dev;
-
-	/*
-	 * The lookup idr holds a weak reference, which has not necessarily been
-	 * removed at this point. Check for that.
-	 */
-	mutex_lock(&dev->mode_config.fb_lock);
-	if (fb->base.id) {
-		/* Mark fb as reaped and drop idr ref. */
-		__drm_framebuffer_unregister(dev, fb);
-	}
-	mutex_unlock(&dev->mode_config.fb_lock);
-
-	fb->funcs->destroy(fb);
-}
-
-static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
-							uint32_t id)
-{
-	struct drm_mode_object *obj = NULL;
-	struct drm_framebuffer *fb;
-
-	mutex_lock(&dev->mode_config.idr_mutex);
-	obj = idr_find(&dev->mode_config.crtc_idr, id);
-	if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
-		fb = NULL;
-	else
-		fb = obj_to_fb(obj);
-	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return fb;
-}
-
 /**
  * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
  * @dev: drm device
@@ -484,47 +496,17 @@
 struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
 					       uint32_t id)
 {
-	struct drm_framebuffer *fb;
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb = NULL;
 
-	mutex_lock(&dev->mode_config.fb_lock);
-	fb = __drm_framebuffer_lookup(dev, id);
-	if (fb) {
-		if (!kref_get_unless_zero(&fb->refcount))
-			fb = NULL;
-	}
-	mutex_unlock(&dev->mode_config.fb_lock);
-
+	obj = _object_find(dev, id, DRM_MODE_OBJECT_FB);
+	if (obj)
+		fb = obj_to_fb(obj);
 	return fb;
 }
 EXPORT_SYMBOL(drm_framebuffer_lookup);
 
 /**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
- *
- * This functions decrements the fb's refcount and frees it if it drops to zero.
- */
-void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
-	kref_put(&fb->refcount, drm_framebuffer_free);
-}
-EXPORT_SYMBOL(drm_framebuffer_unreference);
-
-/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
- *
- * This functions increments the fb's refcount.
- */
-void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
-	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
-	kref_get(&fb->refcount);
-}
-EXPORT_SYMBOL(drm_framebuffer_reference);
-
-/**
  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
  * @fb: fb to unregister
  *
@@ -542,10 +524,8 @@
 
 	dev = fb->dev;
 
-	mutex_lock(&dev->mode_config.fb_lock);
 	/* Mark fb as reaped and drop idr ref. */
-	__drm_framebuffer_unregister(dev, fb);
-	mutex_unlock(&dev->mode_config.fb_lock);
+	drm_mode_object_unregister(dev, &fb->base);
 }
 EXPORT_SYMBOL(drm_framebuffer_unregister_private);
 
@@ -619,7 +599,7 @@
 	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
 	 * in this manner.
 	 */
-	if (atomic_read(&fb->refcount.refcount) > 1) {
+	if (drm_framebuffer_read_refcount(fb) > 1) {
 		drm_modeset_lock_all(dev);
 		/* remove from any CRTC */
 		drm_for_each_crtc(crtc, dev) {
@@ -705,7 +685,7 @@
 				       drm_num_crtcs(dev));
 	}
 	if (!crtc->name) {
-		drm_mode_object_put(dev, &crtc->base);
+		drm_mode_object_unregister(dev, &crtc->base);
 		return -ENOMEM;
 	}
 
@@ -747,7 +727,7 @@
 
 	drm_modeset_lock_fini(&crtc->mutex);
 
-	drm_mode_object_put(dev, &crtc->base);
+	drm_mode_object_unregister(dev, &crtc->base);
 	list_del(&crtc->head);
 	dev->mode_config.num_crtc--;
 
@@ -884,6 +864,16 @@
 		      mode->interlace ?  " interlaced" : "");
 }
 
+static void drm_connector_free(struct kref *kref)
+{
+	struct drm_connector *connector =
+		container_of(kref, struct drm_connector, base.refcount);
+	struct drm_device *dev = connector->dev;
+
+	drm_mode_object_unregister(dev, &connector->base);
+	connector->funcs->destroy(connector);
+}
+
 /**
  * drm_connector_init - Init a preallocated connector
  * @dev: DRM device
@@ -909,7 +899,9 @@
 
 	drm_modeset_lock_all(dev);
 
-	ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
+	ret = drm_mode_object_get_reg(dev, &connector->base,
+				      DRM_MODE_OBJECT_CONNECTOR,
+				      false, drm_connector_free);
 	if (ret)
 		goto out_unlock;
 
@@ -972,7 +964,7 @@
 		ida_remove(&config->connector_ida, connector->connector_id);
 out_put:
 	if (ret)
-		drm_mode_object_put(dev, &connector->base);
+		drm_mode_object_unregister(dev, &connector->base);
 
 out_unlock:
 	drm_modeset_unlock_all(dev);
@@ -1010,7 +1002,7 @@
 		   connector->connector_id);
 
 	kfree(connector->display_info.bus_formats);
-	drm_mode_object_put(dev, &connector->base);
+	drm_mode_object_unregister(dev, &connector->base);
 	kfree(connector->name);
 	connector->name = NULL;
 	list_del(&connector->head);
@@ -1038,8 +1030,6 @@
 {
 	int ret;
 
-	drm_mode_object_register(connector->dev, &connector->base);
-
 	ret = drm_sysfs_connector_add(connector);
 	if (ret)
 		return ret;
@@ -1050,6 +1040,8 @@
 		return ret;
 	}
 
+	drm_mode_object_register(connector->dev, &connector->base);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -1067,25 +1059,65 @@
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
-
 /**
- * drm_connector_unplug_all - unregister connector userspace interfaces
+ * drm_connector_register_all - register all connectors
  * @dev: drm device
  *
- * This function unregisters all connector userspace interfaces in sysfs. Should
- * be call when the device is disconnected, e.g. from an usb driver's
- * ->disconnect callback.
+ * This function registers all connectors in sysfs and other places so that
+ * userspace can start to access them. Drivers can call it after calling
+ * drm_dev_register() to complete the device registration, if they don't call
+ * drm_connector_register() on each connector individually.
+ *
+ * When a device is unplugged and should be removed from userspace access,
+ * call drm_connector_unregister_all(), which is the inverse of this
+ * function.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
  */
-void drm_connector_unplug_all(struct drm_device *dev)
+int drm_connector_register_all(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	int ret;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	drm_for_each_connector(connector, dev) {
+		ret = drm_connector_register(connector);
+		if (ret)
+			goto err;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+
+err:
+	mutex_unlock(&dev->mode_config.mutex);
+	drm_connector_unregister_all(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_connector_register_all);
+
+/**
+ * drm_connector_unregister_all - unregister connector userspace interfaces
+ * @dev: drm device
+ *
+ * This function unregisters all connectors from sysfs and other places so
+ * that userspace can no longer access them. Drivers should call this as the
+ * first step when tearing down the device instance, or when the underlying
+ * physical device disappeared (e.g. USB unplug), right before calling
+ * drm_dev_unregister().
+ */
+void drm_connector_unregister_all(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 
 	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		drm_connector_unregister(connector);
-
 }
-EXPORT_SYMBOL(drm_connector_unplug_all);
+EXPORT_SYMBOL(drm_connector_unregister_all);
 
 /**
  * drm_encoder_init - Init a preallocated encoder
@@ -1138,7 +1170,7 @@
 
 out_put:
 	if (ret)
-		drm_mode_object_put(dev, &encoder->base);
+		drm_mode_object_unregister(dev, &encoder->base);
 
 out_unlock:
 	drm_modeset_unlock_all(dev);
@@ -1181,7 +1213,7 @@
 	struct drm_device *dev = encoder->dev;
 
 	drm_modeset_lock_all(dev);
-	drm_mode_object_put(dev, &encoder->base);
+	drm_mode_object_unregister(dev, &encoder->base);
 	kfree(encoder->name);
 	list_del(&encoder->head);
 	dev->mode_config.num_encoder--;
@@ -1242,7 +1274,7 @@
 					    GFP_KERNEL);
 	if (!plane->format_types) {
 		DRM_DEBUG_KMS("out of memory when allocating plane\n");
-		drm_mode_object_put(dev, &plane->base);
+		drm_mode_object_unregister(dev, &plane->base);
 		return -ENOMEM;
 	}
 
@@ -1258,7 +1290,7 @@
 	}
 	if (!plane->name) {
 		kfree(plane->format_types);
-		drm_mode_object_put(dev, &plane->base);
+		drm_mode_object_unregister(dev, &plane->base);
 		return -ENOMEM;
 	}
 
@@ -1338,7 +1370,7 @@
 
 	drm_modeset_lock_all(dev);
 	kfree(plane->format_types);
-	drm_mode_object_put(dev, &plane->base);
+	drm_mode_object_unregister(dev, &plane->base);
 
 	BUG_ON(list_empty(&plane->head));
 
@@ -1918,8 +1950,6 @@
 		copied = 0;
 		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
 		drm_for_each_crtc(crtc, dev) {
-			DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
-				      crtc->base.id, crtc->name);
 			if (put_user(crtc->base.id, crtc_id + copied)) {
 				ret = -EFAULT;
 				goto out;
@@ -1934,8 +1964,6 @@
 		copied = 0;
 		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
 		drm_for_each_encoder(encoder, dev) {
-			DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-					encoder->name);
 			if (put_user(encoder->base.id, encoder_id +
 				     copied)) {
 				ret = -EFAULT;
@@ -1951,9 +1979,6 @@
 		copied = 0;
 		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
 		drm_for_each_connector(connector, dev) {
-			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-				connector->base.id,
-				connector->name);
 			if (put_user(connector->base.id,
 				     connector_id + copied)) {
 				ret = -EFAULT;
@@ -1964,9 +1989,6 @@
 	}
 	card_res->count_connectors = connector_count;
 
-	DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
-		  card_res->count_connectors, card_res->count_encoders);
-
 out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
@@ -2125,11 +2147,9 @@
 
 	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
-
 	mutex_lock(&dev->mode_config.mutex);
 
-	connector = drm_connector_find(dev, out_resp->connector_id);
+	connector = drm_connector_lookup(dev, out_resp->connector_id);
 	if (!connector) {
 		ret = -ENOENT;
 		goto out_unlock;
@@ -2213,6 +2233,7 @@
 out:
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
+	drm_connector_unreference(connector);
 out_unlock:
 	mutex_unlock(&dev->mode_config.mutex);
 
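drm_connector_lookup() now hands back a reference that the caller owns, which
is why the ioctl above drops it with drm_connector_unreference() before
returning. A minimal sketch of the expected pairing:

	connector = drm_connector_lookup(dev, out_resp->connector_id);
	if (!connector)
		return -ENOENT;

	/* ... report modes, encoders, properties ... */

	drm_connector_unreference(connector);
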
@@ -2800,8 +2821,6 @@
 			goto out;
 		}
 
-		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-
 		/*
 		 * Check whether the primary plane supports the fb pixel format.
 		 * Drivers not implementing the universal planes API use a
@@ -2857,13 +2876,14 @@
 		}
 
 		for (i = 0; i < crtc_req->count_connectors; i++) {
+			connector_set[i] = NULL;
 			set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
 			if (get_user(out_id, &set_connectors_ptr[i])) {
 				ret = -EFAULT;
 				goto out;
 			}
 
-			connector = drm_connector_find(dev, out_id);
+			connector = drm_connector_lookup(dev, out_id);
 			if (!connector) {
 				DRM_DEBUG_KMS("Connector id %d unknown\n",
 						out_id);
@@ -2891,6 +2911,12 @@
 	if (fb)
 		drm_framebuffer_unreference(fb);
 
+	if (connector_set) {
+		for (i = 0; i < crtc_req->count_connectors; i++) {
+			if (connector_set[i])
+				drm_connector_unreference(connector_set[i]);
+		}
+	}
 	kfree(connector_set);
 	drm_mode_destroy(dev, mode);
 	drm_modeset_unlock_all(dev);
@@ -3423,17 +3449,35 @@
 	if (IS_ERR(fb))
 		return PTR_ERR(fb);
 
-	/* Transfer ownership to the filp for reaping on close */
-
 	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-	mutex_lock(&file_priv->fbs_lock);
 	r->fb_id = fb->base.id;
+
+	/* Transfer ownership to the filp for reaping on close */
+	mutex_lock(&file_priv->fbs_lock);
 	list_add(&fb->filp_head, &file_priv->fbs);
 	mutex_unlock(&file_priv->fbs_lock);
 
 	return 0;
 }
 
+struct drm_mode_rmfb_work {
+	struct work_struct work;
+	struct list_head fbs;
+};
+
+static void drm_mode_rmfb_work_fn(struct work_struct *w)
+{
+	struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
+
+	while (!list_empty(&arg->fbs)) {
+		struct drm_framebuffer *fb =
+			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+		list_del_init(&fb->filp_head);
+		drm_framebuffer_remove(fb);
+	}
+}
+
 /**
  * drm_mode_rmfb - remove an FB from the configuration
  * @dev: drm device for the ioctl
@@ -3458,30 +3502,49 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	mutex_lock(&file_priv->fbs_lock);
-	mutex_lock(&dev->mode_config.fb_lock);
-	fb = __drm_framebuffer_lookup(dev, *id);
+	fb = drm_framebuffer_lookup(dev, *id);
 	if (!fb)
-		goto fail_lookup;
+		return -ENOENT;
 
+	mutex_lock(&file_priv->fbs_lock);
 	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
 		if (fb == fbl)
 			found = 1;
-	if (!found)
-		goto fail_lookup;
+	if (!found) {
+		mutex_unlock(&file_priv->fbs_lock);
+		goto fail_unref;
+	}
 
 	list_del_init(&fb->filp_head);
-	mutex_unlock(&dev->mode_config.fb_lock);
 	mutex_unlock(&file_priv->fbs_lock);
 
+	/* drop the reference we picked up in framebuffer lookup */
 	drm_framebuffer_unreference(fb);
 
+	/*
+	 * we now own the reference that was stored in the fbs list
+	 *
+	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
+	 * so run this in a separate stack as there's no way to correctly
+	 * handle this after the fb is already removed from the lookup table.
+	 */
+	if (drm_framebuffer_read_refcount(fb) > 1) {
+		struct drm_mode_rmfb_work arg;
+
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+		INIT_LIST_HEAD(&arg.fbs);
+		list_add_tail(&fb->filp_head, &arg.fbs);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
+	} else
+		drm_framebuffer_unreference(fb);
+
 	return 0;
 
-fail_lookup:
-	mutex_unlock(&dev->mode_config.fb_lock);
-	mutex_unlock(&file_priv->fbs_lock);
-
+fail_unref:
+	drm_framebuffer_unreference(fb);
 	return -ENOENT;
 }
 
@@ -3627,7 +3690,6 @@
 	return ret;
 }
 
-
 /**
  * drm_fb_release - remove and free the FBs on this file
  * @priv: drm file for the ioctl
@@ -3642,6 +3704,9 @@
 void drm_fb_release(struct drm_file *priv)
 {
 	struct drm_framebuffer *fb, *tfb;
+	struct drm_mode_rmfb_work arg;
+
+	INIT_LIST_HEAD(&arg.fbs);
 
 	/*
 	 * When the file gets released that means no one else can access the fb
@@ -3654,10 +3719,22 @@
 	 * at it any more.
 	 */
 	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		list_del_init(&fb->filp_head);
+		if (drm_framebuffer_read_refcount(fb) > 1) {
+			list_move_tail(&fb->filp_head, &arg.fbs);
+		} else {
+			list_del_init(&fb->filp_head);
 
-		/* This drops the fpriv->fbs reference. */
-		drm_framebuffer_unreference(fb);
+			/* This drops the fpriv->fbs reference. */
+			drm_framebuffer_unreference(fb);
+		}
+	}
+
+	if (!list_empty(&arg.fbs)) {
+		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+		schedule_work(&arg.work);
+		flush_work(&arg.work);
+		destroy_work_on_stack(&arg.work);
 	}
 }
 
@@ -4029,7 +4106,7 @@
 
 	if (property->num_values)
 		kfree(property->values);
-	drm_mode_object_put(dev, &property->base);
+	drm_mode_object_unregister(dev, &property->base);
 	list_del(&property->head);
 	kfree(property);
 }
@@ -4234,6 +4311,20 @@
 	return ret;
 }
 
+static void drm_property_free_blob(struct kref *kref)
+{
+	struct drm_property_blob *blob =
+		container_of(kref, struct drm_property_blob, base.refcount);
+
+	mutex_lock(&blob->dev->mode_config.blob_lock);
+	list_del(&blob->head_global);
+	mutex_unlock(&blob->dev->mode_config.blob_lock);
+
+	drm_mode_object_unregister(blob->dev, &blob->base);
+
+	kfree(blob);
+}
+
 /**
  * drm_property_create_blob - Create new blob property
  *
@@ -4271,20 +4362,16 @@
 	if (data)
 		memcpy(blob->data, data, length);
 
-	mutex_lock(&dev->mode_config.blob_lock);
-
-	ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+	ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
+				      true, drm_property_free_blob);
 	if (ret) {
 		kfree(blob);
-		mutex_unlock(&dev->mode_config.blob_lock);
 		return ERR_PTR(-EINVAL);
 	}
 
-	kref_init(&blob->refcount);
-
+	mutex_lock(&dev->mode_config.blob_lock);
 	list_add_tail(&blob->head_global,
 	              &dev->mode_config.property_blob_list);
-
 	mutex_unlock(&dev->mode_config.blob_lock);
 
 	return blob;
@@ -4292,27 +4379,6 @@
 EXPORT_SYMBOL(drm_property_create_blob);
 
 /**
- * drm_property_free_blob - Blob property destructor
- *
- * Internal free function for blob properties; must not be used directly.
- *
- * @kref: Reference
- */
-static void drm_property_free_blob(struct kref *kref)
-{
-	struct drm_property_blob *blob =
-		container_of(kref, struct drm_property_blob, refcount);
-
-	WARN_ON(!mutex_is_locked(&blob->dev->mode_config.blob_lock));
-
-	list_del(&blob->head_global);
-	list_del(&blob->head_file);
-	drm_mode_object_put(blob->dev, &blob->base);
-
-	kfree(blob);
-}
-
-/**
  * drm_property_unreference_blob - Unreference a blob property
  *
  * Drop a reference on a blob property. May free the object.
@@ -4321,42 +4387,14 @@
  */
 void drm_property_unreference_blob(struct drm_property_blob *blob)
 {
-	struct drm_device *dev;
-
 	if (!blob)
 		return;
 
-	dev = blob->dev;
-
-	DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
-
-	if (kref_put_mutex(&blob->refcount, drm_property_free_blob,
-			   &dev->mode_config.blob_lock))
-		mutex_unlock(&dev->mode_config.blob_lock);
-	else
-		might_lock(&dev->mode_config.blob_lock);
+	drm_mode_object_unreference(&blob->base);
 }
 EXPORT_SYMBOL(drm_property_unreference_blob);
 
 /**
- * drm_property_unreference_blob_locked - Unreference a blob property with blob_lock held
- *
- * Drop a reference on a blob property. May free the object. This must be
- * called with blob_lock held.
- *
- * @blob: Pointer to blob property
- */
-static void drm_property_unreference_blob_locked(struct drm_property_blob *blob)
-{
-	if (!blob)
-		return;
-
-	DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
-
-	kref_put(&blob->refcount, drm_property_free_blob);
-}
-
-/**
  * drm_property_destroy_user_blobs - destroy all blobs created by this client
  * @dev:       DRM device
  * @file_priv: destroy all blobs owned by this file handle
@@ -4366,14 +4404,14 @@
 {
 	struct drm_property_blob *blob, *bt;
 
-	mutex_lock(&dev->mode_config.blob_lock);
-
+	/*
+	 * When the file gets released that means no one else can access the
+	 * blob list any more, so no need to grab dev->mode_config.blob_lock.
+	 */
 	list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
 		list_del_init(&blob->head_file);
-		drm_property_unreference_blob_locked(blob);
+		drm_property_unreference_blob(blob);
 	}
-
-	mutex_unlock(&dev->mode_config.blob_lock);
 }
 
 /**
@@ -4385,35 +4423,11 @@
  */
 struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
 {
-	DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
-	kref_get(&blob->refcount);
+	drm_mode_object_reference(&blob->base);
 	return blob;
 }
 EXPORT_SYMBOL(drm_property_reference_blob);
 
-/*
- * Like drm_property_lookup_blob, but does not return an additional reference.
- * Must be called with blob_lock held.
- */
-static struct drm_property_blob *__drm_property_lookup_blob(struct drm_device *dev,
-							    uint32_t id)
-{
-	struct drm_mode_object *obj = NULL;
-	struct drm_property_blob *blob;
-
-	WARN_ON(!mutex_is_locked(&dev->mode_config.blob_lock));
-
-	mutex_lock(&dev->mode_config.idr_mutex);
-	obj = idr_find(&dev->mode_config.crtc_idr, id);
-	if (!obj || (obj->type != DRM_MODE_OBJECT_BLOB) || (obj->id != id))
-		blob = NULL;
-	else
-		blob = obj_to_blob(obj);
-	mutex_unlock(&dev->mode_config.idr_mutex);
-
-	return blob;
-}
-
 /**
  * drm_property_lookup_blob - look up a blob property and take a reference
  * @dev: drm device
@@ -4426,16 +4440,12 @@
 struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
 					           uint32_t id)
 {
-	struct drm_property_blob *blob;
+	struct drm_mode_object *obj;
+	struct drm_property_blob *blob = NULL;
 
-	mutex_lock(&dev->mode_config.blob_lock);
-	blob = __drm_property_lookup_blob(dev, id);
-	if (blob) {
-		if (!kref_get_unless_zero(&blob->refcount))
-			blob = NULL;
-	}
-	mutex_unlock(&dev->mode_config.blob_lock);
-
+	obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+	if (obj)
+		blob = obj_to_blob(obj);
 	return blob;
 }
 EXPORT_SYMBOL(drm_property_lookup_blob);
@@ -4540,26 +4550,21 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	drm_modeset_lock_all(dev);
-	mutex_lock(&dev->mode_config.blob_lock);
-	blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
-	if (!blob) {
-		ret = -ENOENT;
-		goto done;
-	}
+	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+	if (!blob)
+		return -ENOENT;
 
 	if (out_resp->length == blob->length) {
 		blob_ptr = (void __user *)(unsigned long)out_resp->data;
 		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
 			ret = -EFAULT;
-			goto done;
+			goto unref;
 		}
 	}
 	out_resp->length = blob->length;
+unref:
+	drm_property_unreference_blob(blob);
 
-done:
-	mutex_unlock(&dev->mode_config.blob_lock);
-	drm_modeset_unlock_all(dev);
 	return ret;
 }
 
@@ -4638,13 +4643,11 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	mutex_lock(&dev->mode_config.blob_lock);
-	blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
-	if (!blob) {
-		ret = -ENOENT;
-		goto err;
-	}
+	blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+	if (!blob)
+		return -ENOENT;
 
+	mutex_lock(&dev->mode_config.blob_lock);
 	/* Ensure the property was actually created by this user. */
 	list_for_each_entry(bt, &file_priv->blobs, head_file) {
 		if (bt == blob) {
@@ -4661,13 +4664,18 @@
 	/* We must drop head_file here, because we may not be the last
 	 * reference on the blob. */
 	list_del_init(&blob->head_file);
-	drm_property_unreference_blob_locked(blob);
 	mutex_unlock(&dev->mode_config.blob_lock);
 
+	/* One reference from lookup, and one from the filp. */
+	drm_property_unreference_blob(blob);
+	drm_property_unreference_blob(blob);
+
 	return 0;
 
 err:
 	mutex_unlock(&dev->mode_config.blob_lock);
+	drm_property_unreference_blob(blob);
+
 	return ret;
 }
 
@@ -4831,19 +4839,8 @@
 		if (value == 0)
 			return true;
 
-		/* handle refcnt'd objects specially: */
-		if (property->values[0] == DRM_MODE_OBJECT_FB) {
-			struct drm_framebuffer *fb;
-			fb = drm_framebuffer_lookup(property->dev, value);
-			if (fb) {
-				*ref = &fb->base;
-				return true;
-			} else {
-				return false;
-			}
-		} else {
-			return _object_find(property->dev, value, property->values[0]) != NULL;
-		}
+		*ref = _object_find(property->dev, value, property->values[0]);
+		return *ref != NULL;
 	}
 
 	for (i = 0; i < property->num_values; i++)
@@ -4859,8 +4856,7 @@
 		return;
 
 	if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
-		if (property->values[0] == DRM_MODE_OBJECT_FB)
-			drm_framebuffer_unreference(obj_to_fb(ref));
+		drm_mode_object_unreference(ref);
 	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
 		drm_property_unreference_blob(obj_to_blob(ref));
 }
@@ -4991,7 +4987,7 @@
 	}
 	if (!obj->properties) {
 		ret = -EINVAL;
-		goto out;
+		goto out_unref;
 	}
 
 	ret = get_properties(obj, file_priv->atomic,
@@ -4999,6 +4995,8 @@
 			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
 			&arg->count_props);
 
+out_unref:
+	drm_mode_object_unreference(obj);
 out:
 	drm_modeset_unlock_all(dev);
 	return ret;
@@ -5041,25 +5039,25 @@
 		goto out;
 	}
 	if (!arg_obj->properties)
-		goto out;
+		goto out_unref;
 
 	for (i = 0; i < arg_obj->properties->count; i++)
 		if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
 			break;
 
 	if (i == arg_obj->properties->count)
-		goto out;
+		goto out_unref;
 
 	prop_obj = drm_mode_object_find(dev, arg->prop_id,
 					DRM_MODE_OBJECT_PROPERTY);
 	if (!prop_obj) {
 		ret = -ENOENT;
-		goto out;
+		goto out_unref;
 	}
 	property = obj_to_property(prop_obj);
 
 	if (!drm_property_change_valid_get(property, arg->value, &ref))
-		goto out;
+		goto out_unref;
 
 	switch (arg_obj->type) {
 	case DRM_MODE_OBJECT_CONNECTOR:
@@ -5077,6 +5075,8 @@
 
 	drm_property_change_valid_put(property, ref);
 
+out_unref:
+	drm_mode_object_unreference(arg_obj);
 out:
 	drm_modeset_unlock_all(dev);
 	return ret;
@@ -5914,6 +5914,15 @@
 		drm_property_destroy(dev, property);
 	}
 
+	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+				 head) {
+		plane->funcs->destroy(plane);
+	}
+
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
 	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
 				 head_global) {
 		drm_property_unreference_blob(blob);
@@ -5929,16 +5938,7 @@
 	 */
 	WARN_ON(!list_empty(&dev->mode_config.fb_list));
 	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		drm_framebuffer_free(&fb->refcount);
-	}
-
-	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
-				 head) {
-		plane->funcs->destroy(plane);
-	}
-
-	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-		crtc->funcs->destroy(crtc);
+		drm_framebuffer_free(&fb->base.refcount);
 	}
 
 	ida_destroy(&dev->mode_config.connector_ida);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 79555d2..a6e4243 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -170,11 +170,14 @@
 {
 	const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
+	if (!encoder_funcs)
+		return;
+
 	drm_bridge_disable(encoder->bridge);
 
 	if (encoder_funcs->disable)
 		(*encoder_funcs->disable)(encoder);
-	else
+	else if (encoder_funcs->dpms)
 		(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
 
 	drm_bridge_post_disable(encoder->bridge);
@@ -248,6 +251,9 @@
 
 	drm_for_each_encoder(encoder, dev) {
 		encoder_funcs = encoder->helper_private;
+		if (!encoder_funcs)
+			continue;
+
 		/* Disable unused encoders */
 		if (encoder->crtc == NULL)
 			drm_encoder_disable(encoder);
@@ -326,6 +332,10 @@
 		if (encoder->crtc != crtc)
 			continue;
 
+		encoder_funcs = encoder->helper_private;
+		if (!encoder_funcs)
+			continue;
+
 		ret = drm_bridge_mode_fixup(encoder->bridge,
 			mode, adjusted_mode);
 		if (!ret) {
@@ -360,11 +370,15 @@
 		if (encoder->crtc != crtc)
 			continue;
 
+		encoder_funcs = encoder->helper_private;
+		if (!encoder_funcs)
+			continue;
+
 		drm_bridge_disable(encoder->bridge);
 
-		encoder_funcs = encoder->helper_private;
 		/* Disable the encoders as the first thing we do. */
-		encoder_funcs->prepare(encoder);
+		if (encoder_funcs->prepare)
+			encoder_funcs->prepare(encoder);
 
 		drm_bridge_post_disable(encoder->bridge);
 	}
@@ -385,11 +399,15 @@
 		if (encoder->crtc != crtc)
 			continue;
 
+		encoder_funcs = encoder->helper_private;
+		if (!encoder_funcs)
+			continue;
+
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
 			encoder->base.id, encoder->name,
 			mode->base.id, mode->name);
-		encoder_funcs = encoder->helper_private;
-		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+		if (encoder_funcs->mode_set)
+			encoder_funcs->mode_set(encoder, mode, adjusted_mode);
 
 		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
 	}
@@ -402,10 +420,14 @@
 		if (encoder->crtc != crtc)
 			continue;
 
+		encoder_funcs = encoder->helper_private;
+		if (!encoder_funcs)
+			continue;
+
 		drm_bridge_pre_enable(encoder->bridge);
 
-		encoder_funcs = encoder->helper_private;
-		encoder_funcs->commit(encoder);
+		if (encoder_funcs->commit)
+			encoder_funcs->commit(encoder);
 
 		drm_bridge_enable(encoder->bridge);
 	}
@@ -456,6 +478,9 @@
 			 * between them is henceforth no longer available.
 			 */
 			connector->dpms = DRM_MODE_DPMS_OFF;
+
+			/* we keep a reference while the encoder is bound */
+			drm_connector_unreference(connector);
 		}
 	}
 
@@ -606,6 +631,11 @@
 		mode_changed = true;
 	}
 
+	/* take a reference on all connectors in set */
+	for (ro = 0; ro < set->num_connectors; ro++) {
+		drm_connector_reference(set->connectors[ro]);
+	}
+
 	/* a) traverse passed in connector list and get encoders for them */
 	count = 0;
 	drm_for_each_connector(connector, dev) {
@@ -724,6 +754,12 @@
 		}
 	}
 
+	/* after fail drop reference on all connectors in save set */
+	count = 0;
+	drm_for_each_connector(connector, dev) {
+		drm_connector_unreference(&save_connectors[count++]);
+	}
+
 	kfree(save_connectors);
 	kfree(save_encoders);
 	return 0;
@@ -740,6 +776,11 @@
 		*connector = save_connectors[count++];
 	}
 
+	/* after fail drop reference on all connectors in set */
+	for (ro = 0; ro < set->num_connectors; ro++) {
+		drm_connector_unreference(set->connectors[ro]);
+	}
+
 	/* Try to restore the config */
 	if (mode_changed &&
 	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
@@ -771,12 +812,15 @@
 	struct drm_bridge *bridge = encoder->bridge;
 	const struct drm_encoder_helper_funcs *encoder_funcs;
 
+	encoder_funcs = encoder->helper_private;
+	if (!encoder_funcs)
+		return;
+
 	if (mode == DRM_MODE_DPMS_ON)
 		drm_bridge_pre_enable(bridge);
 	else
 		drm_bridge_disable(bridge);
 
-	encoder_funcs = encoder->helper_private;
 	if (encoder_funcs->dpms)
 		encoder_funcs->dpms(encoder, mode);
 
@@ -1053,10 +1097,12 @@
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 247dc8b..a78c138 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -33,8 +33,8 @@
 
 int drm_mode_object_get(struct drm_device *dev,
 			struct drm_mode_object *obj, uint32_t obj_type);
-void drm_mode_object_put(struct drm_device *dev,
-			 struct drm_mode_object *object);
+void drm_mode_object_unregister(struct drm_device *dev,
+				struct drm_mode_object *object);
 
 /* drm_atomic.c */
 int drm_atomic_get_property(struct drm_mode_object *obj,
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index f73b38b..3334baa 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -159,6 +159,12 @@
 		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
 		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
 
+		if (signal_pending(current)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -ERESTARTSYS;
+			goto out;
+		}
+
 		res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
 		if (res <= 0) {
 			res = num_bytes_processed ? num_bytes_processed : res;
@@ -202,6 +208,12 @@
 		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
 		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
 
+		if (signal_pending(current)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -ERESTARTSYS;
+			goto out;
+		}
+
 		if (__copy_from_user(localbuf,
 				     buf + num_bytes_processed, todo)) {
 			res = num_bytes_processed ?
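The two hunks above make the chunked AUX transfer loops interruptible: when a signal is pending they return the bytes already transferred, or -ERESTARTSYS if nothing has been moved yet, so the syscall can be transparently restarted. A minimal user-space sketch of the same partial-progress convention, assuming a plain read() and a 16-byte chunk size as illustrative stand-ins for the driver's I/O path:

#include <unistd.h>

/*
 * Copy up to 'count' bytes in small chunks; if interrupted by a signal,
 * report the bytes already transferred, and only fail (errno == EINTR)
 * when nothing has been transferred yet. The kernel path above uses
 * -ERESTARTSYS for the same purpose.
 */
static ssize_t chunked_read(int fd, void *buf, size_t count)
{
	size_t done = 0;
	size_t todo;
	ssize_t res;

	while (done < count) {
		todo = count - done;
		if (todo > 16)
			todo = 16;	/* mimic DP_AUX_MAX_PAYLOAD_BYTES-sized chunks */

		res = read(fd, (char *)buf + done, todo);
		if (res < 0)
			return done ? (ssize_t)done : res;
		if (res == 0)
			break;
		done += res;
	}
	return done;
}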
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
new file mode 100644
index 0000000..a7b2a75
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drmP.h>
+
+/**
+ * DOC: dp dual mode helpers
+ *
+ * Helper functions to deal with DP dual mode (aka. DP++) adaptors.
+ *
+ * Type 1:
+ * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
+ *
+ * Type 2:
+ * Adaptor registers and sink DDC bus can be accessed either via I2C or
+ * I2C-over-AUX. Source devices may choose to implement either of these
+ * access methods.
+ */
+
+#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
+
+/**
+ * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
+ * @adapter: I2C adapter for the DDC bus
+ * @offset: register offset
+ * @buffer: buffer for return data
+ * @size: size of the buffer
+ *
+ * Reads @size bytes from the DP dual mode adaptor registers
+ * starting at @offset.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
+			      u8 offset, void *buffer, size_t size)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+			.flags = 0,
+			.len = 1,
+			.buf = &offset,
+		},
+		{
+			.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+			.flags = I2C_M_RD,
+			.len = size,
+			.buf = buffer,
+		},
+	};
+	int ret;
+
+	ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+	if (ret < 0)
+		return ret;
+	if (ret != ARRAY_SIZE(msgs))
+		return -EPROTO;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_read);
+
+/**
+ * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
+ * @adapter: I2C adapter for the DDC bus
+ * @offset: register offset
+ * @buffer: buffer for write data
+ * @size: size of the buffer
+ *
+ * Writes @size bytes to the DP dual mode adaptor registers
+ * starting at @offset.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
+			       u8 offset, const void *buffer, size_t size)
+{
+	struct i2c_msg msg = {
+		.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+		.flags = 0,
+		.len = 1 + size,
+		.buf = NULL,
+	};
+	void *data;
+	int ret;
+
+	data = kmalloc(msg.len, GFP_TEMPORARY);
+	if (!data)
+		return -ENOMEM;
+
+	msg.buf = data;
+
+	memcpy(data, &offset, 1);
+	memcpy(data + 1, buffer, size);
+
+	ret = i2c_transfer(adapter, &msg, 1);
+
+	kfree(data);
+
+	if (ret < 0)
+		return ret;
+	if (ret != 1)
+		return -EPROTO;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_write);
+
+static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
+{
+	static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
+		"DP-HDMI ADAPTOR\x04";
+
+	return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
+		      sizeof(dp_dual_mode_hdmi_id)) == 0;
+}
+
+static bool is_type2_adaptor(uint8_t adaptor_id)
+{
+	return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+			      DP_DUAL_MODE_REV_TYPE2);
+}
+
+/**
+ * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Attempt to identify the type of the DP dual mode adaptor used.
+ *
+ * Note that when the answer is %DRM_DP_DUAL_MODE_UNKNOWN it's not
+ * certain whether we're dealing with a native HDMI port or
+ * a type 1 DVI dual mode adaptor. The driver will have to use
+ * some other hardware/driver specific mechanism to make that
+ * distinction.
+ *
+ * Returns:
+ * The type of the DP dual mode adaptor used
+ */
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
+{
+	char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
+	uint8_t adaptor_id = 0x00;
+	ssize_t ret;
+
+	/*
+	 * Let's see if the adaptor is there by reading the
+	 * HDMI ID registers.
+	 *
+	 * Note that type 1 DVI adaptors are not required to implement
+	 * any registers, and that presents a problem for detection.
+	 * If the i2c transfer is nacked, we may or may not be dealing
+	 * with a type 1 DVI adaptor. Some other mechanism of detecting
+	 * the presence of the adaptor is required. One way would be
+	 * to check the state of the CONFIG1 pin. Another method would
+	 * simply require the driver to know whether the port is a DP++
+	 * port or a native HDMI port. Both of these methods are entirely
+	 * hardware/driver specific so we can't deal with them here.
+	 */
+	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
+				    hdmi_id, sizeof(hdmi_id));
+	if (ret)
+		return DRM_DP_DUAL_MODE_UNKNOWN;
+
+	/*
+	 * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
+	 * the offset but ignore it, and instead they just always return
+	 * data from the start of the HDMI ID buffer. So for a broken
+	 * type 1 HDMI adaptor a single byte read will always give us
+	 * 0x44, and for a type 1 DVI adaptor it should give 0x00
+	 * (assuming it implements any registers). Fortunately neither
+	 * of those values will match the type 2 signature of the
+	 * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
+	 * the type 2 adaptor detection safely even in the presence
+	 * of broken type 1 adaptors.
+	 */
+	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
+				    &adaptor_id, sizeof(adaptor_id));
+	if (ret == 0) {
+		if (is_type2_adaptor(adaptor_id)) {
+			if (is_hdmi_adaptor(hdmi_id))
+				return DRM_DP_DUAL_MODE_TYPE2_HDMI;
+			else
+				return DRM_DP_DUAL_MODE_TYPE2_DVI;
+		}
+	}
+
+	if (is_hdmi_adaptor(hdmi_id))
+		return DRM_DP_DUAL_MODE_TYPE1_HDMI;
+	else
+		return DRM_DP_DUAL_MODE_TYPE1_DVI;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_detect);
+
+/**
+ * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Determine the max TMDS clock the adaptor supports based on the
+ * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
+ * register (on type2 adaptors). As some type 1 adaptors have
+ * problems with registers (see comments in drm_dp_dual_mode_detect())
+ * we don't read the register on those, instead we simply assume
+ * a 165 MHz limit based on the specification.
+ *
+ * Returns:
+ * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
+ */
+int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+				    struct i2c_adapter *adapter)
+{
+	uint8_t max_tmds_clock;
+	ssize_t ret;
+
+	/* native HDMI so no limit */
+	if (type == DRM_DP_DUAL_MODE_NONE)
+		return 0;
+
+	/*
+	 * Type 1 adaptors are limited to 165 MHz
+	 * Type 2 adaptors can tell us their limit
+	 */
+	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+		return 165000;
+
+	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
+				    &max_tmds_clock, sizeof(max_tmds_clock));
+	if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
+		DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
+		return 165000;
+	}
+
+	return max_tmds_clock * 5000 / 2;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
+
+/**
+ * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ * @enabled: current state of the TMDS output buffers
+ *
+ * Get the state of the TMDS output buffers in the adaptor. For
+ * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
+ * register. As some type 1 adaptors have problems with registers
+ * (see comments in drm_dp_dual_mode_detect()) we don't read the
+ * register on those, instead we simply assume that the buffers
+ * are always enabled.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+				     struct i2c_adapter *adapter,
+				     bool *enabled)
+{
+	uint8_t tmds_oen;
+	ssize_t ret;
+
+	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
+		*enabled = true;
+		return 0;
+	}
+
+	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+				    &tmds_oen, sizeof(tmds_oen));
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
+		return ret;
+	}
+
+	*enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
+
+/**
+ * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ * @enable: enable (as opposed to disable) the TMDS output buffers
+ *
+ * Set the state of the TMDS output buffers in the adaptor. For
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
+ * some type 1 adaptors have problems with registers (see comments
+ * in drm_dp_dual_mode_detect()) we avoid touching the register,
+ * making this function a no-op on type 1 adaptors.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+				     struct i2c_adapter *adapter, bool enable)
+{
+	uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
+	ssize_t ret;
+
+	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+		return 0;
+
+	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+				     &tmds_oen, sizeof(tmds_oen));
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
+			      enable ? "enable" : "disable");
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
+
+/**
+ * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
+ * @type: DP dual mode adaptor type
+ *
+ * Returns:
+ * String representation of the DP dual mode adaptor type
+ */
+const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
+{
+	switch (type) {
+	case DRM_DP_DUAL_MODE_NONE:
+		return "none";
+	case DRM_DP_DUAL_MODE_TYPE1_DVI:
+		return "type 1 DVI";
+	case DRM_DP_DUAL_MODE_TYPE1_HDMI:
+		return "type 1 HDMI";
+	case DRM_DP_DUAL_MODE_TYPE2_DVI:
+		return "type 2 DVI";
+	case DRM_DP_DUAL_MODE_TYPE2_HDMI:
+		return "type 2 HDMI";
+	default:
+		WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
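For reference, drm_dp_dual_mode_max_tmds_clock() above converts the DP_DUAL_MODE_MAX_TMDS_CLOCK register value, specified in 2.5 MHz units, into kHz via max_tmds_clock * 5000 / 2, and falls back to the 165 MHz type 1 limit when the register reads 0x00 or 0xff. A stand-alone sketch of just that conversion, with illustrative sample register values:

#include <stdio.h>

/* Register value is in 2.5 MHz units; return the limit in kHz,
 * falling back to the 165 MHz type 1 limit for 0x00/0xff. */
static int max_tmds_clock_khz(unsigned char reg)
{
	if (reg == 0x00 || reg == 0xff)
		return 165000;
	return reg * 5000 / 2;
}

int main(void)
{
	printf("%d kHz\n", max_tmds_clock_khz(0x42)); /* 66 * 2.5 MHz = 165000 kHz */
	printf("%d kHz\n", max_tmds_clock_khz(0x78)); /* 120 * 2.5 MHz = 300000 kHz */
	return 0;
}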
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index df64ed1..eeaf5a7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -178,8 +178,8 @@
 			      unsigned int offset, void *buffer, size_t size)
 {
 	struct drm_dp_aux_msg msg;
-	unsigned int retry;
-	int err = 0;
+	unsigned int retry, native_reply;
+	int err = 0, ret = 0;
 
 	memset(&msg, 0, sizeof(msg));
 	msg.address = offset;
@@ -196,38 +196,39 @@
 	 * sufficient, bump to 32 which makes Dell 4k monitors happier.
 	 */
 	for (retry = 0; retry < 32; retry++) {
-
-		err = aux->transfer(aux, &msg);
-		if (err < 0) {
-			if (err == -EBUSY)
-				continue;
-
-			goto unlock;
+		if (ret != 0 && ret != -ETIMEDOUT) {
+			usleep_range(AUX_RETRY_INTERVAL,
+				     AUX_RETRY_INTERVAL + 100);
 		}
 
+		ret = aux->transfer(aux, &msg);
 
-		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
-		case DP_AUX_NATIVE_REPLY_ACK:
-			if (err < size)
-				err = -EPROTO;
-			goto unlock;
+		if (ret > 0) {
+			native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
+			if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
+				if (ret == size)
+					goto unlock;
 
-		case DP_AUX_NATIVE_REPLY_NACK:
-			err = -EIO;
-			goto unlock;
-
-		case DP_AUX_NATIVE_REPLY_DEFER:
-			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
-			break;
+				ret = -EPROTO;
+			} else
+				ret = -EIO;
 		}
+
+		/*
+		 * We want the error we return to be the error we received on
+		 * the first transaction, since we may get a different error the
+		 * next time we retry
+		 */
+		if (!err)
+			err = ret;
 	}
 
 	DRM_DEBUG_KMS("too many retries, giving up\n");
-	err = -EIO;
+	ret = err;
 
 unlock:
 	mutex_unlock(&aux->hw_mutex);
-	return err;
+	return ret;
 }
 
 /**
@@ -247,6 +248,25 @@
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 			 void *buffer, size_t size)
 {
+	int ret;
+
+	/*
+	 * HP ZR24w corrupts the first DPCD access after entering power save
+	 * mode. Eg. on a read, the entire buffer will be filled with the same
+	 * byte. Do a throw away read to avoid corrupting anything we care
+	 * about. Afterwards things will work correctly until the monitor
+	 * gets woken up and subsequently re-enters power save mode.
+	 *
+	 * The user pressing any button on the monitor is enough to wake it
+	 * up, so there is no particularly good place to do the workaround.
+	 * We just have to do it before any DPCD access and hope that the
+	 * monitor doesn't power down exactly after the throw away read.
+	 */
+	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
+				 1);
+	if (ret != 1)
+		return ret;
+
 	return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
 				  size);
 }
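The reworked retry loop above deliberately reports the error from the first failed transaction rather than the last, since later retries may fail differently, and it only sleeps between attempts when the previous attempt did not time out. A compact user-space sketch of the same pattern; try_once(), the retry count, and the 500 µs back-off are illustrative assumptions, not values from the driver:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Retry an operation a fixed number of times; on total failure report the
 * error seen on the *first* attempt, which is usually the most meaningful. */
static int do_with_retries(int (*try_once)(void), int retries)
{
	int err = 0, ret = 0, i;

	for (i = 0; i < retries; i++) {
		if (ret != 0 && ret != -ETIMEDOUT)
			usleep(500);		/* back off unless the attempt timed out */

		ret = try_once();
		if (ret == 0)
			return 0;

		if (!err)
			err = ret;		/* remember only the first error */
	}
	return err;
}

static int attempts;
static int flaky(void)
{
	return ++attempts < 3 ? -EIO : 0;	/* fail twice, then succeed */
}

int main(void)
{
	printf("result: %d\n", do_with_retries(flaky, 5));
	return 0;
}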
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 71ea052..a13edf5 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2756,7 +2756,7 @@
 
 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
 	list_for_each_entry(port, &mstb->ports, next) {
-		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
+		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
 		if (port->mstb)
 			drm_dp_mst_dump_mstb(m, port->mstb);
 	}
@@ -2777,6 +2777,16 @@
 	return false;
 }
 
+static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
+			       struct drm_dp_mst_port *port, char *name,
+			       int namelen)
+{
+	struct edid *mst_edid;
+
+	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
+	drm_edid_get_monitor_name(mst_edid, name, namelen);
+}
+
 /**
  * drm_dp_mst_dump_topology(): dump topology to seq file.
  * @m: seq_file to dump output to
@@ -2789,6 +2799,7 @@
 {
 	int i;
 	struct drm_dp_mst_port *port;
+
 	mutex_lock(&mgr->lock);
 	if (mgr->mst_primary)
 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
@@ -2797,14 +2808,21 @@
 	mutex_unlock(&mgr->lock);
 
 	mutex_lock(&mgr->payload_lock);
-	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
+	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
+		mgr->max_payloads);
 
 	for (i = 0; i < mgr->max_payloads; i++) {
 		if (mgr->proposed_vcpis[i]) {
+			char name[14];
+
 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
+			fetch_monitor_name(mgr, port, name, sizeof(name));
+			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
+				   port->port_num, port->vcpi.vcpi,
+				   port->vcpi.num_slots,
+				   (*name != 0) ? name :  "Unknown");
 		} else
-			seq_printf(m, "vcpi %d:unsed\n", i);
+			seq_printf(m, "vcpi %d:unused\n", i);
 	}
 	for (i = 0; i < mgr->max_payloads; i++) {
 		seq_printf(m, "payload %d: %d, %d, %d\n",
@@ -2844,8 +2862,9 @@
 		for (i = 0; i < 0x3; i++)
 			seq_printf(m, "%02x", buf[i]);
 		seq_printf(m, " devid: ");
-		for (i = 0x3; i < 0x8; i++)
+		for (i = 0x3; i < 0x8 && buf[i]; i++)
 			seq_printf(m, "%c", buf[i]);
+
 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
 		seq_printf(m, "\n");
 		bret = dump_dp_payload_table(mgr, buf);
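fetch_monitor_name() above sizes its buffer as char name[14] because an EDID monitor-name descriptor carries at most 13 characters, terminated early by 0x0a, and drm_edid_get_monitor_name() NUL-terminates the result. A stand-alone sketch of that extraction rule; the sample descriptor bytes are made up:

#include <stdio.h>

/* Copy an EDID monitor name: at most 13 bytes, stopping at 0x0a,
 * into a buffer that must hold 14 bytes including the trailing NUL. */
static void copy_monitor_name(const unsigned char *desc, char name[14])
{
	int i;

	for (i = 0; i < 13; i++) {
		if (desc[i] == 0x0a)
			break;
		name[i] = desc[i];
	}
	name[i] = '\0';
}

int main(void)
{
	/* 10 name characters, then the 0x0a terminator and space padding */
	const unsigned char desc[13] = {
		'D', 'E', 'L', 'L', ' ', 'U', '2', '4', '1', '5', 0x0a, ' ', ' '
	};
	char name[14];

	copy_monitor_name(desc, name);
	printf("sink name: %s\n", name);
	return 0;
}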
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 167c8d3..bff8922 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -37,13 +37,23 @@
 #include "drm_legacy.h"
 #include "drm_internal.h"
 
-unsigned int drm_debug = 0;	/* bitmask of DRM_UT_x */
+/*
+ * drm_debug: Enable debug output.
+ * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
+ */
+unsigned int drm_debug = 0;
 EXPORT_SYMBOL(drm_debug);
 
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
 module_param_named(debug, drm_debug, int, 0600);
 
 static DEFINE_SPINLOCK(drm_minor_lock);
@@ -111,19 +121,11 @@
 {
 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
 	struct drm_device *dev = master->minor->dev;
-	struct drm_map_list *r_list, *list_temp;
 
-	mutex_lock(&dev->struct_mutex);
 	if (dev->driver->master_destroy)
 		dev->driver->master_destroy(dev, master);
 
-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
-		if (r_list->master == master) {
-			drm_legacy_rmmap_locked(dev, r_list->map);
-			r_list = NULL;
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
+	drm_legacy_master_rmmaps(dev, master);
 
 	idr_destroy(&master->magic_map);
 	kfree(master->unique);
@@ -588,6 +590,7 @@
 	spin_lock_init(&dev->buf_lock);
 	spin_lock_init(&dev->event_lock);
 	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->filelist_mutex);
 	mutex_init(&dev->ctxlist_mutex);
 	mutex_init(&dev->master_mutex);
 
@@ -715,7 +718,11 @@
  *
  * Register the DRM device @dev with the system, advertise device to user-space
  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously.
+ * previously. Right after drm_dev_register() the driver should call
+ * drm_connector_register_all() to register all connectors in sysfs. This is
+ * a separate call for backward compatibility with drivers still using
+ * the deprecated ->load() callback, where connectors are registered from within
+ * the ->load() callback.
  *
  * Never call this twice on any device!
  *
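The expanded parameter description above documents drm.debug as a bitmask, so categories are combined by OR-ing their bits; for example KMS plus ATOMIC output is 0x04 | 0x10 = 0x14. A tiny sketch of composing such a value, using a local enum that mirrors the documented bits rather than the kernel's own DRM_UT_* definitions:

#include <stdio.h>

/* Bit values as documented for the drm.debug module parameter. */
enum {
	UT_CORE   = 0x01,
	UT_DRIVER = 0x02,
	UT_KMS    = 0x04,
	UT_PRIME  = 0x08,
	UT_ATOMIC = 0x10,
	UT_VBL    = 0x20,
};

int main(void)
{
	unsigned int mask = UT_KMS | UT_ATOMIC;

	/* e.g. boot with drm.debug=0x14 or write 0x14 to
	 * /sys/module/drm/parameters/debug */
	printf("drm.debug=0x%02x\n", mask);
	return 0;
}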
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 558ef9f..7df26d4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3293,6 +3293,46 @@
 		*(u8 **)data = t->data.other_data.data.str.str;
 }
 
+static int get_monitor_name(struct edid *edid, char name[13])
+{
+	char *edid_name = NULL;
+	int mnl;
+
+	if (!edid || !name)
+		return 0;
+
+	drm_for_each_detailed_block((u8 *)edid, monitor_name, &edid_name);
+	for (mnl = 0; edid_name && mnl < 13; mnl++) {
+		if (edid_name[mnl] == 0x0a)
+			break;
+
+		name[mnl] = edid_name[mnl];
+	}
+
+	return mnl;
+}
+
+/**
+ * drm_edid_get_monitor_name - fetch the monitor name from the edid
+ * @edid: monitor EDID information
+ * @name: pointer to a character array to hold the name of the monitor
+ * @bufsize: The size of the name buffer (should be at least 14 chars)
+ *
+ */
+void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
+{
+	int name_length;
+	char buf[13];
+
+	if (bufsize <= 0)
+		return;
+
+	name_length = min(get_monitor_name(edid, buf), bufsize - 1);
+	memcpy(name, buf, name_length);
+	name[name_length] = '\0';
+}
+EXPORT_SYMBOL(drm_edid_get_monitor_name);
+
 /**
  * drm_edid_to_eld - build ELD from EDID
  * @connector: connector corresponding to the HDMI/DP sink
@@ -3306,7 +3346,6 @@
 {
 	uint8_t *eld = connector->eld;
 	u8 *cea;
-	u8 *name;
 	u8 *db;
 	int total_sad_count = 0;
 	int mnl;
@@ -3320,14 +3359,8 @@
 		return;
 	}
 
-	name = NULL;
-	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
-	/* max: 13 bytes EDID, 16 bytes ELD */
-	for (mnl = 0; name && mnl < 13; mnl++) {
-		if (name[mnl] == 0x0a)
-			break;
-		eld[20 + mnl] = name[mnl];
-	}
+	mnl = get_monitor_name(edid, eld + 20);
+
 	eld[4] = (cea[1] << 5) | mnl;
 	DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
 
@@ -3868,6 +3901,133 @@
 		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 }
 
+static int validate_displayid(u8 *displayid, int length, int idx)
+{
+	int i;
+	u8 csum = 0;
+	struct displayid_hdr *base;
+
+	base = (struct displayid_hdr *)&displayid[idx];
+
+	DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+		      base->rev, base->bytes, base->prod_id, base->ext_count);
+
+	if (base->bytes + 5 > length - idx)
+		return -EINVAL;
+	for (i = idx; i <= base->bytes + 5; i++) {
+		csum += displayid[i];
+	}
+	if (csum) {
+		DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
+							    struct displayid_detailed_timings_1 *timings)
+{
+	struct drm_display_mode *mode;
+	unsigned pixel_clock = (timings->pixel_clock[0] |
+				(timings->pixel_clock[1] << 8) |
+				(timings->pixel_clock[2] << 16));
+	unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
+	unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
+	unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
+	unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
+	unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
+	unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
+	unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
+	unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
+	bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
+	bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+	mode = drm_mode_create(dev);
+	if (!mode)
+		return NULL;
+
+	mode->clock = pixel_clock * 10;
+	mode->hdisplay = hactive;
+	mode->hsync_start = mode->hdisplay + hsync;
+	mode->hsync_end = mode->hsync_start + hsync_width;
+	mode->htotal = mode->hdisplay + hblank;
+
+	mode->vdisplay = vactive;
+	mode->vsync_start = mode->vdisplay + vsync;
+	mode->vsync_end = mode->vsync_start + vsync_width;
+	mode->vtotal = mode->vdisplay + vblank;
+
+	mode->flags = 0;
+	mode->flags |= hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+	mode->type = DRM_MODE_TYPE_DRIVER;
+
+	if (timings->flags & 0x80)
+		mode->type |= DRM_MODE_TYPE_PREFERRED;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
+
+	return mode;
+}
+
+static int add_displayid_detailed_1_modes(struct drm_connector *connector,
+					  struct displayid_block *block)
+{
+	struct displayid_detailed_timing_block *det = (struct displayid_detailed_timing_block *)block;
+	int i;
+	int num_timings;
+	struct drm_display_mode *newmode;
+	int num_modes = 0;
+	/* blocks must be multiple of 20 bytes length */
+	if (block->num_bytes % 20)
+		return 0;
+
+	num_timings = block->num_bytes / 20;
+	for (i = 0; i < num_timings; i++) {
+		struct displayid_detailed_timings_1 *timings = &det->timings[i];
+
+		newmode = drm_mode_displayid_detailed(connector->dev, timings);
+		if (!newmode)
+			continue;
+
+		drm_mode_probed_add(connector, newmode);
+		num_modes++;
+	}
+	return num_modes;
+}
+
+static int add_displayid_detailed_modes(struct drm_connector *connector,
+					struct edid *edid)
+{
+	u8 *displayid;
+	int ret;
+	int idx = 1;
+	int length = EDID_LENGTH;
+	struct displayid_block *block;
+	int num_modes = 0;
+
+	displayid = drm_find_displayid_extension(edid);
+	if (!displayid)
+		return 0;
+
+	ret = validate_displayid(displayid, length, idx);
+	if (ret)
+		return 0;
+
+	idx += sizeof(struct displayid_hdr);
+	while (block = (struct displayid_block *)&displayid[idx],
+	       idx + sizeof(struct displayid_block) <= length &&
+	       idx + sizeof(struct displayid_block) + block->num_bytes <= length &&
+	       block->num_bytes > 0) {
+		idx += block->num_bytes + sizeof(struct displayid_block);
+		switch (block->tag) {
+		case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+			num_modes += add_displayid_detailed_1_modes(connector, block);
+			break;
+		}
+	}
+	return num_modes;
+}
+
 /**
  * drm_add_edid_modes - add modes from EDID data, if available
  * @connector: connector we're probing
@@ -3913,6 +4073,7 @@
 	num_modes += add_established_modes(connector, edid);
 	num_modes += add_cea_modes(connector, edid);
 	num_modes += add_alternate_cea_modes(connector, edid);
+	num_modes += add_displayid_detailed_modes(connector, edid);
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 
@@ -4119,96 +4280,98 @@
 }
 EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
 
+static int drm_parse_tiled_block(struct drm_connector *connector,
+				 struct displayid_block *block)
+{
+	struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+	u16 w, h;
+	u8 tile_v_loc, tile_h_loc;
+	u8 num_v_tile, num_h_tile;
+	struct drm_tile_group *tg;
+
+	w = tile->tile_size[0] | tile->tile_size[1] << 8;
+	h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+	num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+	num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+	tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+	tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+	connector->has_tile = true;
+	if (tile->tile_cap & 0x80)
+		connector->tile_is_single_monitor = true;
+
+	connector->num_h_tile = num_h_tile + 1;
+	connector->num_v_tile = num_v_tile + 1;
+	connector->tile_h_loc = tile_h_loc;
+	connector->tile_v_loc = tile_v_loc;
+	connector->tile_h_size = w + 1;
+	connector->tile_v_size = h + 1;
+
+	DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+	DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+	DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+		      num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+	DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+
+	tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+	if (!tg) {
+		tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+	}
+	if (!tg)
+		return -ENOMEM;
+
+	if (connector->tile_group != tg) {
+		/* if we haven't got a pointer,
+		   take the reference, drop ref to old tile group */
+		if (connector->tile_group) {
+			drm_mode_put_tile_group(connector->dev, connector->tile_group);
+		}
+		connector->tile_group = tg;
+	} else
+		/* if same tile group, then release the ref we just took. */
+		drm_mode_put_tile_group(connector->dev, tg);
+	return 0;
+}
+
 static int drm_parse_display_id(struct drm_connector *connector,
 				u8 *displayid, int length,
 				bool is_edid_extension)
 {
 	/* if this is an EDID extension the first byte will be 0x70 */
 	int idx = 0;
-	struct displayid_hdr *base;
 	struct displayid_block *block;
-	u8 csum = 0;
-	int i;
+	int ret;
 
 	if (is_edid_extension)
 		idx = 1;
 
-	base = (struct displayid_hdr *)&displayid[idx];
+	ret = validate_displayid(displayid, length, idx);
+	if (ret)
+		return ret;
 
-	DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
-		      base->rev, base->bytes, base->prod_id, base->ext_count);
+	idx += sizeof(struct displayid_hdr);
+	while (block = (struct displayid_block *)&displayid[idx],
+	       idx + sizeof(struct displayid_block) <= length &&
+	       idx + sizeof(struct displayid_block) + block->num_bytes <= length &&
+	       block->num_bytes > 0) {
+		idx += block->num_bytes + sizeof(struct displayid_block);
+		DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
+			      block->tag, block->rev, block->num_bytes);
 
-	if (base->bytes + 5 > length - idx)
-		return -EINVAL;
-
-	for (i = idx; i <= base->bytes + 5; i++) {
-		csum += displayid[i];
-	}
-	if (csum) {
-		DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
-		return -EINVAL;
-	}
-
-	block = (struct displayid_block *)&displayid[idx + 4];
-	DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
-		      block->tag, block->rev, block->num_bytes);
-
-	switch (block->tag) {
-	case DATA_BLOCK_TILED_DISPLAY: {
-		struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
-
-		u16 w, h;
-		u8 tile_v_loc, tile_h_loc;
-		u8 num_v_tile, num_h_tile;
-		struct drm_tile_group *tg;
-
-		w = tile->tile_size[0] | tile->tile_size[1] << 8;
-		h = tile->tile_size[2] | tile->tile_size[3] << 8;
-
-		num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
-		num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
-		tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
-		tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
-
-		connector->has_tile = true;
-		if (tile->tile_cap & 0x80)
-			connector->tile_is_single_monitor = true;
-
-		connector->num_h_tile = num_h_tile + 1;
-		connector->num_v_tile = num_v_tile + 1;
-		connector->tile_h_loc = tile_h_loc;
-		connector->tile_v_loc = tile_v_loc;
-		connector->tile_h_size = w + 1;
-		connector->tile_v_size = h + 1;
-
-		DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
-		DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
-		DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
-		       num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
-		DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
-
-		tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
-		if (!tg) {
-			tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+		switch (block->tag) {
+		case DATA_BLOCK_TILED_DISPLAY:
+			ret = drm_parse_tiled_block(connector, block);
+			if (ret)
+				return ret;
+			break;
+		case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+			/* handled in mode gathering code. */
+			break;
+		default:
+			DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
+			break;
 		}
-		if (!tg)
-			return -ENOMEM;
-
-		if (connector->tile_group != tg) {
-			/* if we haven't got a pointer,
-			   take the reference, drop ref to old tile group */
-			if (connector->tile_group) {
-				drm_mode_put_tile_group(connector->dev, connector->tile_group);
-			}
-			connector->tile_group = tg;
-		} else
-			/* if same tile group, then release the ref we just took. */
-			drm_mode_put_tile_group(connector->dev, tg);
-	}
-		break;
-	default:
-		printk("unknown displayid tag %d\n", block->tag);
-		break;
 	}
 	return 0;
 }
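drm_mode_displayid_detailed() above assembles each timing field from little-endian bytes: the 3-byte pixel clock is in 10 kHz units, and the horizontal/vertical fields are stored as the value minus one, so 1 is added back. A stand-alone sketch of that decoding for the pixel clock and hactive; the sample bytes are made up and happen to describe a 1920-wide mode with a 148.5 MHz clock:

#include <stdio.h>

/*
 * DisplayID type 1 detailed timings: the pixel clock is 3 little-endian
 * bytes in 10 kHz units; the h/v fields are stored as (value - 1).
 */
static unsigned int displayid_clock_khz(const unsigned char pc[3])
{
	return (pc[0] | pc[1] << 8 | pc[2] << 16) * 10;
}

static unsigned int displayid_field(const unsigned char f[2])
{
	return (f[0] | f[1] << 8) + 1;
}

int main(void)
{
	const unsigned char pixel_clock[3] = { 0x02, 0x3a, 0x00 };	/* 14850 -> 148500 kHz */
	const unsigned char hactive[2] = { 0x7f, 0x07 };		/* 1919 + 1 = 1920 */

	printf("clock %u kHz, hactive %u\n",
	       displayid_clock_khz(pixel_clock), displayid_field(hactive));
	return 0;
}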
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index bb88e3d..5075fae 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -25,6 +25,8 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <linux/module.h>
 
+#define DEFAULT_FBDEFIO_DELAY_MS 50
+
 struct drm_fb_cma {
 	struct drm_framebuffer		fb;
 	struct drm_gem_cma_object	*obj[4];
@@ -35,6 +37,59 @@
 	struct drm_fb_cma	*fb;
 };
 
+/**
+ * DOC: framebuffer cma helper functions
+ *
+ * Provides helper functions for creating a cma (contiguous memory allocator)
+ * backed framebuffer.
+ *
+ * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
+ * callback function to create a cma backed framebuffer.
+ *
+ * An fbdev framebuffer backed by cma is also available by calling
+ * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
+ * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
+ * will be set up automatically. dirty() is called by
+ * drm_fb_helper_deferred_io() in process context (struct delayed_work).
+ *
+ * Example fbdev deferred io code:
+ *
+ *     static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
+ *                                      struct drm_file *file_priv,
+ *                                      unsigned flags, unsigned color,
+ *                                      struct drm_clip_rect *clips,
+ *                                      unsigned num_clips)
+ *     {
+ *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
+ *         ... push changes ...
+ *         return 0;
+ *     }
+ *
+ *     static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
+ *         .destroy       = drm_fb_cma_destroy,
+ *         .create_handle = drm_fb_cma_create_handle,
+ *         .dirty         = driver_fbdev_fb_dirty,
+ *     };
+ *
+ *     static int driver_fbdev_create(struct drm_fb_helper *helper,
+ *             struct drm_fb_helper_surface_size *sizes)
+ *     {
+ *         return drm_fbdev_cma_create_with_funcs(helper, sizes,
+ *                                                &driver_fbdev_fb_funcs);
+ *     }
+ *
+ *     static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
+ *         .fb_probe = driver_fbdev_create,
+ *     };
+ *
+ *     Initialize:
+ *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
+ *                                           dev->mode_config.num_crtc,
+ *                                           dev->mode_config.num_connector,
+ *                                           &driver_fb_helper_funcs);
+ *
+ */
+
 static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
 {
 	return container_of(helper, struct drm_fbdev_cma, fb_helper);
@@ -45,7 +100,7 @@
 	return container_of(fb, struct drm_fb_cma, fb);
 }
 
-static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+void drm_fb_cma_destroy(struct drm_framebuffer *fb)
 {
 	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
 	int i;
@@ -58,8 +113,9 @@
 	drm_framebuffer_cleanup(fb);
 	kfree(fb_cma);
 }
+EXPORT_SYMBOL(drm_fb_cma_destroy);
 
-static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
 	struct drm_file *file_priv, unsigned int *handle)
 {
 	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -67,6 +123,7 @@
 	return drm_gem_handle_create(file_priv,
 			&fb_cma->obj[0]->base, handle);
 }
+EXPORT_SYMBOL(drm_fb_cma_create_handle);
 
 static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
 	.destroy	= drm_fb_cma_destroy,
@@ -76,7 +133,7 @@
 static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
 	const struct drm_mode_fb_cmd2 *mode_cmd,
 	struct drm_gem_cma_object **obj,
-	unsigned int num_planes)
+	unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
 {
 	struct drm_fb_cma *fb_cma;
 	int ret;
@@ -91,7 +148,7 @@
 	for (i = 0; i < num_planes; i++)
 		fb_cma->obj[i] = obj[i];
 
-	ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+	ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
 	if (ret) {
 		dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
 		kfree(fb_cma);
@@ -102,13 +159,17 @@
 }
 
 /**
- * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ * drm_fb_cma_create_with_funcs() - helper function for the
+ *                                  &drm_mode_config_funcs ->fb_create
+ *                                  callback function
  *
- * If your hardware has special alignment or pitch requirements these should be
- * checked before calling this function.
+ * This can be used to set &drm_framebuffer_funcs for drivers that need the
+ * dirty() callback. Use drm_fb_cma_create() if you don't need to change
+ * &drm_framebuffer_funcs.
  */
-struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
-	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
+struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
+	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+	const struct drm_framebuffer_funcs *funcs)
 {
 	struct drm_fb_cma *fb_cma;
 	struct drm_gem_cma_object *objs[4];
@@ -126,7 +187,7 @@
 		unsigned int height = mode_cmd->height / (i ? vsub : 1);
 		unsigned int min_size;
 
-		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			dev_err(dev->dev, "Failed to lookup GEM object\n");
 			ret = -ENXIO;
@@ -145,7 +206,7 @@
 		objs[i] = to_drm_gem_cma_obj(obj);
 	}
 
-	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
 	if (IS_ERR(fb_cma)) {
 		ret = PTR_ERR(fb_cma);
 		goto err_gem_object_unreference;
@@ -158,6 +219,21 @@
 		drm_gem_object_unreference_unlocked(&objs[i]->base);
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
+
+/**
+ * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
+ * you need to set &drm_framebuffer_funcs ->dirty.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
+					    &drm_fb_cma_funcs);
+}
 EXPORT_SYMBOL_GPL(drm_fb_cma_create);
 
 /**
@@ -233,8 +309,67 @@
 	.fb_setcmap	= drm_fb_helper_setcmap,
 };
 
-static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
-	struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
+					  struct vm_area_struct *vma)
+{
+	fb_deferred_io_mmap(info, vma);
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return 0;
+}
+
+static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
+				    struct drm_gem_cma_object *cma_obj)
+{
+	struct fb_deferred_io *fbdefio;
+	struct fb_ops *fbops;
+
+	/*
+	 * Per device structures are needed because:
+	 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
+	 * fbdefio: individual delays
+	 */
+	fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
+	fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+	if (!fbdefio || !fbops) {
+		kfree(fbdefio);
+		return -ENOMEM;
+	}
+
+	/* can't be offset from vaddr since dirty() uses cma_obj */
+	fbi->screen_buffer = cma_obj->vaddr;
+	/* fb_deferred_io_fault() needs a physical address */
+	fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
+
+	*fbops = *fbi->fbops;
+	fbi->fbops = fbops;
+
+	fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
+	fbdefio->deferred_io = drm_fb_helper_deferred_io;
+	fbi->fbdefio = fbdefio;
+	fb_deferred_io_init(fbi);
+	fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
+
+	return 0;
+}
+
+static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
+{
+	if (!fbi->fbdefio)
+		return;
+
+	fb_deferred_io_cleanup(fbi);
+	kfree(fbi->fbdefio);
+	kfree(fbi->fbops);
+}
+
+/*
+ * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
+ * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
+ */
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes,
+	const struct drm_framebuffer_funcs *funcs)
 {
 	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -270,7 +405,7 @@
 		goto err_gem_free_object;
 	}
 
-	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
 	if (IS_ERR(fbdev_cma->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(fbdev_cma->fb);
@@ -296,31 +431,48 @@
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
+	if (funcs->dirty) {
+		ret = drm_fbdev_cma_defio_init(fbi, obj);
+		if (ret)
+			goto err_cma_destroy;
+	}
+
 	return 0;
 
+err_cma_destroy:
+	drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+	drm_fb_cma_destroy(&fbdev_cma->fb->fb);
 err_fb_info_destroy:
 	drm_fb_helper_release_fbi(helper);
 err_gem_free_object:
-	dev->driver->gem_free_object(&obj->base);
+	drm_gem_object_unreference_unlocked(&obj->base);
 	return ret;
 }
+EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
+}
 
 static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
 	.fb_probe = drm_fbdev_cma_create,
 };
 
 /**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device
  * @num_crtc: Number of CRTCs
  * @max_conn_count: Maximum number of connectors
+ * @funcs: fb helper functions, in particular fb_probe()
  *
  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
  */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
 	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count)
+	unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
 {
 	struct drm_fbdev_cma *fbdev_cma;
 	struct drm_fb_helper *helper;
@@ -334,7 +486,7 @@
 
 	helper = &fbdev_cma->fb_helper;
 
-	drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+	drm_fb_helper_prepare(dev, helper, funcs);
 
 	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
 	if (ret < 0) {
@@ -364,6 +516,24 @@
 
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+
+/**
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count)
+{
+	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
+				max_conn_count, &drm_fb_cma_helper_funcs);
+}
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 
 /**
@@ -373,6 +543,7 @@
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
 	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+	drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
 	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
 	if (fbdev_cma->fb) {
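
A minimal sketch (not part of this patch; the foo_* names are hypothetical, and it assumes the drm_fb_cma_destroy()/drm_fb_cma_create_handle() helpers are exported) of how a driver could route fbdev damage into its own dirty() flush via the new *_with_funcs entry points:

static int foo_fb_dirty(struct drm_framebuffer *fb, struct drm_file *file,
			unsigned int flags, unsigned int color,
			struct drm_clip_rect *clips, unsigned int num_clips)
{
	/* flush the damaged scanlines out to the device, e.g. over SPI */
	return 0;
}

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy	= drm_fb_cma_destroy,
	.create_handle	= drm_fb_cma_create_handle,
	.dirty		= foo_fb_dirty,
};

static int foo_fbdev_probe(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	/* a non-NULL dirty() makes the helper set up deferred_io for fbdev */
	return drm_fbdev_cma_create_with_funcs(helper, sizes, &foo_fb_funcs);
}

static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
	.fb_probe = foo_fbdev_probe,
};

	/* at driver load: preferred_bpp = 16, 1 crtc, 1 connector */
	struct drm_fbdev_cma *fbdev;

	fbdev = drm_fbdev_cma_init_with_funcs(drm, 16, 1, 1,
					      &foo_fb_helper_funcs);
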
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 855108e..7c2eb75 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -84,6 +84,15 @@
  * and set up an initial configuration using the detected hardware, drivers
  * should call drm_fb_helper_single_add_all_connectors() followed by
  * drm_fb_helper_initial_config().
+ *
+ * If &drm_framebuffer_funcs ->dirty is set, the
+ * drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will
+ * accumulate changes and schedule &drm_fb_helper ->dirty_work to run right
+ * away. This worker then calls the dirty() function, ensuring that it always
+ * runs in process context, since the fb_*() functions could be running in
+ * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
+ * callback, it will also schedule dirty_work with the damage collected from the
+ * mmap page writes.
  */
 
 /**
@@ -153,40 +162,13 @@
 	if (!fb_helper_connector)
 		return -ENOMEM;
 
+	drm_connector_reference(connector);
 	fb_helper_connector->connector = connector;
 	fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
-static void remove_from_modeset(struct drm_mode_set *set,
-		struct drm_connector *connector)
-{
-	int i, j;
-
-	for (i = 0; i < set->num_connectors; i++) {
-		if (set->connectors[i] == connector)
-			break;
-	}
-
-	if (i == set->num_connectors)
-		return;
-
-	for (j = i + 1; j < set->num_connectors; j++) {
-		set->connectors[j - 1] = set->connectors[j];
-	}
-	set->num_connectors--;
-
-	/*
-	 * TODO maybe need to makes sure we set it back to !=NULL somewhere?
-	 */
-	if (set->num_connectors == 0) {
-		set->fb = NULL;
-		drm_mode_destroy(connector->dev, set->mode);
-		set->mode = NULL;
-	}
-}
-
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector)
 {
@@ -206,6 +188,7 @@
 	if (i == fb_helper->connector_count)
 		return -EINVAL;
 	fb_helper_connector = fb_helper->connector_info[i];
+	drm_connector_unreference(fb_helper_connector->connector);
 
 	for (j = i + 1; j < fb_helper->connector_count; j++) {
 		fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
@@ -213,10 +196,6 @@
 	fb_helper->connector_count--;
 	kfree(fb_helper_connector);
 
-	/* also cleanup dangling references to the connector: */
-	for (i = 0; i < fb_helper->crtc_count; i++)
-		remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -626,8 +605,10 @@
 {
 	int i;
 
-	for (i = 0; i < helper->connector_count; i++)
+	for (i = 0; i < helper->connector_count; i++) {
+		drm_connector_unreference(helper->connector_info[i]->connector);
 		kfree(helper->connector_info[i]);
+	}
 	kfree(helper->connector_info);
 	for (i = 0; i < helper->crtc_count; i++) {
 		kfree(helper->crtc_info[i].mode_set.connectors);
@@ -637,6 +618,23 @@
 	kfree(helper->crtc_info);
 }
 
+static void drm_fb_helper_dirty_work(struct work_struct *work)
+{
+	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+						    dirty_work);
+	struct drm_clip_rect *clip = &helper->dirty_clip;
+	struct drm_clip_rect clip_copy;
+	unsigned long flags;
+
+	spin_lock_irqsave(&helper->dirty_lock, flags);
+	clip_copy = *clip;
+	clip->x1 = clip->y1 = ~0;
+	clip->x2 = clip->y2 = 0;
+	spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+	helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+}
+
 /**
  * drm_fb_helper_prepare - setup a drm_fb_helper structure
  * @dev: DRM device
@@ -650,6 +648,9 @@
 			   const struct drm_fb_helper_funcs *funcs)
 {
 	INIT_LIST_HEAD(&helper->kernel_fb_list);
+	spin_lock_init(&helper->dirty_lock);
+	INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
+	helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
 	helper->funcs = funcs;
 	helper->dev = dev;
 }
@@ -834,6 +835,59 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
 
+static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
+				u32 width, u32 height)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct drm_clip_rect *clip = &helper->dirty_clip;
+	unsigned long flags;
+
+	if (!helper->fb->funcs->dirty)
+		return;
+
+	spin_lock_irqsave(&helper->dirty_lock, flags);
+	clip->x1 = min_t(u32, clip->x1, x);
+	clip->y1 = min_t(u32, clip->y1, y);
+	clip->x2 = max_t(u32, clip->x2, x + width);
+	clip->y2 = max_t(u32, clip->y2, y + height);
+	spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+	schedule_work(&helper->dirty_work);
+}
+
+/**
+ * drm_fb_helper_deferred_io() - fbdev deferred_io callback function
+ * @info: fb_info struct pointer
+ * @pagelist: list of dirty mmap framebuffer pages
+ *
+ * This function is used as the &fb_deferred_io ->deferred_io
+ * callback function for flushing the fbdev mmap writes.
+ */
+void drm_fb_helper_deferred_io(struct fb_info *info,
+			       struct list_head *pagelist)
+{
+	unsigned long start, end, min, max;
+	struct page *page;
+	u32 y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
+			   info->var.yres);
+		drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
+	}
+}
+EXPORT_SYMBOL(drm_fb_helper_deferred_io);
+
 /**
  * drm_fb_helper_sys_read - wrapper around fb_sys_read
  * @info: fb_info struct pointer
@@ -862,7 +916,14 @@
 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	return fb_sys_write(info, buf, count, ppos);
+	ssize_t ret;
+
+	ret = fb_sys_write(info, buf, count, ppos);
+	if (ret > 0)
+		drm_fb_helper_dirty(info, 0, 0, info->var.xres,
+				    info->var.yres);
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_write);
 
@@ -877,6 +938,8 @@
 				const struct fb_fillrect *rect)
 {
 	sys_fillrect(info, rect);
+	drm_fb_helper_dirty(info, rect->dx, rect->dy,
+			    rect->width, rect->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
 
@@ -891,6 +954,8 @@
 				const struct fb_copyarea *area)
 {
 	sys_copyarea(info, area);
+	drm_fb_helper_dirty(info, area->dx, area->dy,
+			    area->width, area->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
 
@@ -905,6 +970,8 @@
 				 const struct fb_image *image)
 {
 	sys_imageblit(info, image);
+	drm_fb_helper_dirty(info, image->dx, image->dy,
+			    image->width, image->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
 
@@ -919,6 +986,8 @@
 				const struct fb_fillrect *rect)
 {
 	cfb_fillrect(info, rect);
+	drm_fb_helper_dirty(info, rect->dx, rect->dy,
+			    rect->width, rect->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
 
@@ -933,6 +1002,8 @@
 				const struct fb_copyarea *area)
 {
 	cfb_copyarea(info, area);
+	drm_fb_helper_dirty(info, area->dx, area->dy,
+			    area->width, area->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
 
@@ -947,6 +1018,8 @@
 				 const struct fb_image *image)
 {
 	cfb_imageblit(info, image);
+	drm_fb_helper_dirty(info, image->dx, image->dy,
+			    image->width, image->height);
 }
 EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
 
@@ -1895,7 +1968,6 @@
 			  int n, int width, int height)
 {
 	int c, o;
-	struct drm_device *dev = fb_helper->dev;
 	struct drm_connector *connector;
 	const struct drm_connector_helper_funcs *connector_funcs;
 	struct drm_encoder *encoder;
@@ -1914,7 +1986,7 @@
 	if (modes[n] == NULL)
 		return best_score;
 
-	crtcs = kzalloc(dev->mode_config.num_connector *
+	crtcs = kzalloc(fb_helper->connector_count *
 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
 	if (!crtcs)
 		return best_score;
@@ -1960,7 +2032,7 @@
 		if (score > best_score) {
 			best_score = score;
 			memcpy(best_crtcs, crtcs,
-			       dev->mode_config.num_connector *
+			       fb_helper->connector_count *
 			       sizeof(struct drm_fb_helper_crtc *));
 		}
 	}
@@ -2104,8 +2176,8 @@
  * cmdline option.
  *
  * The other option is to just disable fbdev emulation since very likely the
- * first modest from userspace will crash in the same way, and is even easier to
- * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
+ * first modeset from userspace will crash in the same way, and is even easier
+ * to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
  * kernel cmdline option.
  *
  * RETURNS:
@@ -2150,7 +2222,7 @@
  * hotplug interrupt).
  *
  * Note that drivers may call this even before calling
- * drm_fb_helper_initial_config but only aftert drm_fb_helper_init. This allows
+ * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
  * for a race-free fbcon setup and will make sure that the fbdev emulation will
  * not miss any hotplug events.
  *
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index aeef58e..7af7f8b 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -297,9 +297,9 @@
 	}
 	mutex_unlock(&dev->master_mutex);
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_add(&priv->lhead, &dev->filelist);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 
 #ifdef __alpha__
 	/*
@@ -381,14 +381,26 @@
  */
 static void drm_legacy_dev_reinit(struct drm_device *dev)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_legacy_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+	drm_legacy_vma_flush(dev);
+	drm_legacy_dma_takedown(dev);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	dev->sigdata.lock = NULL;
 
 	dev->context_flag = 0;
 	dev->last_context = 0;
 	dev->if_version = 0;
+
+	DRM_DEBUG("lastclose completed\n");
 }
 
 /*
@@ -400,7 +412,7 @@
  *
  * \sa drm_device
  */
-int drm_lastclose(struct drm_device * dev)
+void drm_lastclose(struct drm_device * dev)
 {
 	DRM_DEBUG("\n");
 
@@ -408,23 +420,8 @@
 		dev->driver->lastclose(dev);
 	DRM_DEBUG("driver lastclose completed\n");
 
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-	drm_legacy_vma_flush(dev);
-	drm_legacy_dma_takedown(dev);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_legacy_dev_reinit(dev);
 }
 
 /**
@@ -445,14 +442,16 @@
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_minor *minor = file_priv->minor;
 	struct drm_device *dev = minor->dev;
-	int retcode = 0;
 
 	mutex_lock(&drm_global_mutex);
 
 	DRM_DEBUG("open_count = %d\n", dev->open_count);
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_del(&file_priv->lhead);
+	mutex_unlock(&dev->filelist_mutex);
+
+	mutex_lock(&dev->struct_mutex);
 	if (file_priv->magic)
 		idr_remove(&file_priv->master->magic_map, file_priv->magic);
 	mutex_unlock(&dev->struct_mutex);
@@ -538,7 +537,7 @@
 	 */
 
 	if (!--dev->open_count) {
-		retcode = drm_lastclose(dev);
+		drm_lastclose(dev);
 		if (drm_device_is_unplugged(dev))
 			drm_put_dev(dev);
 	}
@@ -546,7 +545,7 @@
 
 	drm_minor_release(minor);
 
-	return retcode;
+	return 0;
 }
 EXPORT_SYMBOL(drm_release);
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index da0c532..3215606 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -279,7 +279,6 @@
 int
 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 {
-	struct drm_device *dev;
 	struct drm_gem_object *obj;
 
 	/* This is gross. The idr system doesn't let us try a delete and
@@ -294,18 +293,19 @@
 	spin_lock(&filp->table_lock);
 
 	/* Check if we currently have a reference on the object */
-	obj = idr_find(&filp->object_idr, handle);
-	if (obj == NULL) {
-		spin_unlock(&filp->table_lock);
+	obj = idr_replace(&filp->object_idr, NULL, handle);
+	spin_unlock(&filp->table_lock);
+	if (IS_ERR_OR_NULL(obj))
 		return -EINVAL;
-	}
-	dev = obj->dev;
 
-	/* Release reference and decrement refcount. */
+	/* Release driver's reference and decrement refcount. */
+	drm_gem_object_release_handle(handle, obj, filp);
+
+	/* And finally make the handle available for future allocations. */
+	spin_lock(&filp->table_lock);
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	drm_gem_object_release_handle(handle, obj, filp);
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_delete);
@@ -422,6 +422,10 @@
  * @obj: obj in question
  *
  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ *
+ * Note that drm_gem_object_release() already calls this function, so drivers
+ * don't have to take care of releasing the mmap offset themselves when freeing
+ * the GEM object.
  */
 void
 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
@@ -445,6 +449,9 @@
  * This routine allocates and attaches a fake offset for @obj, in cases where
  * the virtual size differs from the physical size (ie. obj->size).  Otherwise
  * just use drm_gem_create_mmap_offset().
+ *
+ * This function is idempotent and handles an already allocated mmap offset
+ * transparently. Drivers do not need to check for this case.
  */
 int
 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
@@ -466,6 +473,9 @@
  * structures.
  *
  * This routine allocates and attaches a fake offset for @obj.
+ *
+ * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
+ * the fake offset again.
  */
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
@@ -578,7 +588,6 @@
 
 /**
  * drm_gem_object_lookup - look up a GEM object from its handle
- * @dev: DRM device
  * @filp: DRM file private data
  * @handle: userspace handle
  *
@@ -588,8 +597,7 @@
  * otherwise.
  */
 struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
-		      u32 handle)
+drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 {
 	struct drm_gem_object *obj;
 
@@ -597,12 +605,8 @@
 
 	/* Check if we currently have a reference on the object */
 	obj = idr_find(&filp->object_idr, handle);
-	if (obj == NULL) {
-		spin_unlock(&filp->table_lock);
-		return NULL;
-	}
-
-	drm_gem_object_reference(obj);
+	if (obj)
+		drm_gem_object_reference(obj);
 
 	spin_unlock(&filp->table_lock);
 
@@ -655,7 +659,7 @@
 	if (!drm_core_check_feature(dev, DRIVER_GEM))
 		return -ENODEV;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 
@@ -759,6 +763,13 @@
 	idr_destroy(&file_private->object_idr);
 }
 
+/**
+ * drm_gem_object_release - release GEM buffer object resources
+ * @obj: GEM buffer object
+ *
+ * This releases any structures and resources used by @obj and is the inverse of
+ * drm_gem_object_init().
+ */
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
@@ -787,14 +798,67 @@
 		container_of(kref, struct drm_gem_object, refcount);
 	struct drm_device *dev = obj->dev;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	if (dev->driver->gem_free_object_unlocked) {
+		dev->driver->gem_free_object_unlocked(obj);
+	} else if (dev->driver->gem_free_object) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
+	}
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
 /**
+ * drm_gem_object_unreference_unlocked - release a GEM BO reference
+ * @obj: GEM buffer object
+ *
+ * This releases a reference to @obj. Callers must not hold the
+ * dev->struct_mutex lock when calling this function.
+ *
+ * See also __drm_gem_object_unreference().
+ */
+void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev;
+
+	if (!obj)
+		return;
+
+	dev = obj->dev;
+	might_lock(&dev->struct_mutex);
+
+	if (dev->driver->gem_free_object_unlocked)
+		kref_put(&obj->refcount, drm_gem_object_free);
+	else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
+				&dev->struct_mutex))
+		mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
+
+/**
+ * drm_gem_object_unreference - release a GEM BO reference
+ * @obj: GEM buffer object
+ *
+ * This releases a reference to @obj. Callers must hold the dev->struct_mutex
+ * lock when calling this function, even when the driver doesn't use
+ * dev->struct_mutex for anything.
+ *
+ * For drivers not encumbered with legacy locking use
+ * drm_gem_object_unreference_unlocked() instead.
+ */
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+	if (obj) {
+		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+		kref_put(&obj->refcount, drm_gem_object_free);
+	}
+}
+EXPORT_SYMBOL(drm_gem_object_unreference);
+
+/**
  * drm_gem_vm_open - vma->ops->open implementation for GEM
  * @vma: VM area structure
  *
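
A sketch (hypothetical foo_ driver and ioctl payload, not from this patch) of how the new unlocked free hook, the device-less lookup and drm_gem_object_unreference_unlocked() fit together:

static struct drm_driver foo_driver = {
	/* ... */
	.gem_free_object_unlocked = foo_gem_free_object,
	/* ... */
};

static int foo_some_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_foo_args *args = data;	/* hypothetical ioctl args */
	struct drm_gem_object *obj;

	/* the lookup no longer takes a drm_device argument */
	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	/* ... operate on obj ... */

	/* no dev->struct_mutex needed once the _unlocked hook is set */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}
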
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1f500a1..1d6c335 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -121,7 +121,7 @@
 	return cma_obj;
 
 error:
-	drm->driver->gem_free_object(&cma_obj->base);
+	drm_gem_object_unreference_unlocked(&cma_obj->base);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -162,18 +162,12 @@
 	 * and handle has the id what user can see.
 	 */
 	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
-	if (ret)
-		goto err_handle_create;
-
 	/* drop reference from allocate - handle holds it now. */
 	drm_gem_object_unreference_unlocked(gem_obj);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return cma_obj;
-
-err_handle_create:
-	drm->driver->gem_free_object(gem_obj);
-
-	return ERR_PTR(ret);
 }
 
 /**
@@ -291,7 +285,7 @@
 {
 	struct drm_gem_object *gem_obj;
 
-	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+	gem_obj = drm_gem_object_lookup(file_priv, handle);
 	if (!gem_obj) {
 		dev_err(drm->dev, "failed to lookup GEM object\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index cbb4fc0..5d469b2 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -174,7 +174,7 @@
 	/* dev->filelist is sorted youngest first, but we want to present
 	 * oldest first (i.e. kernel, servers, clients), so walk backwards.
 	 */
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
 		struct task_struct *task;
 
@@ -190,7 +190,7 @@
 			   priv->magic);
 		rcu_read_unlock();
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 43cbda3..902cf6a 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -26,7 +26,7 @@
 
 /* drm_fops.c */
 extern struct mutex drm_global_mutex;
-int drm_lastclose(struct drm_device *dev);
+void drm_lastclose(struct drm_device *dev);
 
 /* drm_pci.c */
 int drm_pci_set_unique(struct drm_device *dev,
@@ -37,8 +37,6 @@
 
 /* drm_vm.c */
 int drm_vma_info(struct seq_file *m, void *data);
-void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
-void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
 
 /* drm_prime.c */
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8ce2a0c..b7a39771c 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -150,58 +150,6 @@
 }
 
 /*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-static int drm_getmap(struct drm_device *dev, void *data,
-	       struct drm_file *file_priv)
-{
-	struct drm_map *map = data;
-	struct drm_map_list *r_list = NULL;
-	struct list_head *list;
-	int idx;
-	int i;
-
-	idx = map->offset;
-	if (idx < 0)
-		return -EINVAL;
-
-	i = 0;
-	mutex_lock(&dev->struct_mutex);
-	list_for_each(list, &dev->maplist) {
-		if (i == idx) {
-			r_list = list_entry(list, struct drm_map_list, head);
-			break;
-		}
-		i++;
-	}
-	if (!r_list || !r_list->map) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	map->offset = r_list->map->offset;
-	map->size = r_list->map->size;
-	map->type = r_list->map->type;
-	map->flags = r_list->map->flags;
-	map->handle = (void *)(unsigned long) r_list->user_token;
-	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/*
  * Get client information.
  *
  * \param inode device inode.
@@ -558,7 +506,7 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 881c5a6..0fac801 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -348,9 +348,6 @@
 	unsigned int pipe = vblank->pipe;
 	unsigned long irqflags;
 
-	if (!dev->vblank_disable_allowed)
-		return;
-
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
 		DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
@@ -437,8 +434,6 @@
 			 "get_vblank_timestamp == NULL\n");
 	}
 
-	dev->vblank_disable_allowed = false;
-
 	return 0;
 
 err:
@@ -863,10 +858,7 @@
 	/* Subtract time delta from raw timestamp to get final
 	 * vblank_time timestamp for end of vblank.
 	 */
-	if (delta_ns < 0)
-		etime = ktime_add_ns(etime, -delta_ns);
-	else
-		etime = ktime_sub_ns(etime, delta_ns);
+	etime = ktime_sub_ns(etime, delta_ns);
 	*vblank_time = ktime_to_timeval(etime);
 
 	DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
@@ -1588,7 +1580,6 @@
 
 	if (vblank->inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		dev->vblank_disable_allowed = true;
 		drm_reset_vblank_timestamp(dev, pipe);
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 9b73178..d3b6ee3 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -63,6 +63,8 @@
 
 #define DRM_MAP_HASH_OFFSET 0x10000000
 
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f7448a5..e5e6f50 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -98,7 +98,7 @@
 	if (!mode)
 		return;
 
-	drm_mode_object_put(dev, &mode->base);
+	drm_mode_object_unregister(dev, &mode->base);
 
 	kfree(mode);
 }
@@ -1518,6 +1518,8 @@
 	if (out->status != MODE_OK)
 		goto out;
 
+	drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
+
 	ret = 0;
 
 out:
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 2ef988e..3dfe3c8 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -30,12 +30,36 @@
 static DEFINE_MUTEX(panel_lock);
 static LIST_HEAD(panel_list);
 
+/**
+ * DOC: drm panel
+ *
+ * The DRM panel helpers allow drivers to register panel objects with a
+ * central registry and provide functions to retrieve those panels in display
+ * drivers.
+ */
+
+/**
+ * drm_panel_init - initialize a panel
+ * @panel: DRM panel
+ *
+ * Sets up internal fields of the panel so that it can subsequently be added
+ * to the registry.
+ */
 void drm_panel_init(struct drm_panel *panel)
 {
 	INIT_LIST_HEAD(&panel->list);
 }
 EXPORT_SYMBOL(drm_panel_init);
 
+/**
+ * drm_panel_add - add a panel to the global registry
+ * @panel: panel to add
+ *
+ * Add a panel to the global registry so that it can be looked up by display
+ * drivers.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 int drm_panel_add(struct drm_panel *panel)
 {
 	mutex_lock(&panel_lock);
@@ -46,6 +70,12 @@
 }
 EXPORT_SYMBOL(drm_panel_add);
 
+/**
+ * drm_panel_remove - remove a panel from the global registry
+ * @panel: DRM panel
+ *
+ * Removes a panel from the global registry.
+ */
 void drm_panel_remove(struct drm_panel *panel)
 {
 	mutex_lock(&panel_lock);
@@ -54,6 +84,18 @@
 }
 EXPORT_SYMBOL(drm_panel_remove);
 
+/**
+ * drm_panel_attach - attach a panel to a connector
+ * @panel: DRM panel
+ * @connector: DRM connector
+ *
+ * After obtaining a pointer to a DRM panel a display driver calls this
+ * function to attach a panel to a connector.
+ *
+ * An error is returned if the panel is already attached to another connector.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
 {
 	if (panel->connector)
@@ -66,6 +108,15 @@
 }
 EXPORT_SYMBOL(drm_panel_attach);
 
+/**
+ * drm_panel_detach - detach a panel from a connector
+ * @panel: DRM panel
+ *
+ * Detaches a panel from the connector it is attached to. If a panel is not
+ * attached to any connector this is effectively a no-op.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 int drm_panel_detach(struct drm_panel *panel)
 {
 	panel->connector = NULL;
@@ -76,6 +127,16 @@
 EXPORT_SYMBOL(drm_panel_detach);
 
 #ifdef CONFIG_OF
+/**
+ * of_drm_find_panel - look up a panel using a device tree node
+ * @np: device tree node of the panel
+ *
+ * Searches the set of registered panels for one that matches the given device
+ * tree node. If a matching panel is found, return a pointer to it.
+ *
+ * Return: A pointer to the panel registered for the specified device tree
+ * node or NULL if no panel matching the device tree node can be found.
+ */
 struct drm_panel *of_drm_find_panel(struct device_node *np)
 {
 	struct drm_panel *panel;
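
A short sketch (hypothetical foo_ names, not from this patch) of the registration and lookup flow documented above:

	/* panel driver: register with the global registry */
	drm_panel_init(&foo->panel);
	foo->panel.dev = dev;
	foo->panel.funcs = &foo_panel_funcs;
	err = drm_panel_add(&foo->panel);
	if (err < 0)
		return err;

	/* display driver: resolve the panel via its device tree node */
	panel = of_drm_find_panel(panel_node);
	if (!panel)
		return -EPROBE_DEFER;	/* the panel may simply not be bound yet */

	err = drm_panel_attach(panel, &foo->connector);
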
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a1fff11..29d5a54 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -250,7 +250,7 @@
 {
 	if (dev->agp) {
 		arch_phys_wc_del(dev->agp->agp_mtrr);
-		drm_agp_clear(dev);
+		drm_legacy_agp_clear(dev);
 		kfree(dev->agp);
 		dev->agp = NULL;
 	}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index df6cdc7..aab0f3f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -407,7 +407,7 @@
 	struct dma_buf *dmabuf;
 
 	mutex_lock(&file_priv->prime.lock);
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj)  {
 		ret = -ENOENT;
 		goto out_unlock;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index e714b5a..0329080 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -264,10 +264,8 @@
 		count = drm_add_edid_modes(connector, edid);
 		drm_edid_to_eld(connector, edid);
 	} else {
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
 		count = drm_load_edid_firmware(connector);
 		if (count == 0)
-#endif
 			count = (*connector_funcs->get_modes)(connector);
 	}
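
Dropping the #ifdef relies on drm_load_edid_firmware() having a no-op fallback when CONFIG_DRM_LOAD_EDID_FIRMWARE is disabled; a sketch of the assumed header arrangement (include/drm/drm_edid.h):

#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
#else
static inline int drm_load_edid_firmware(struct drm_connector *connector)
{
	return 0;
}
#endif
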
 
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index d503f8e..fa7fadc 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -208,9 +208,12 @@
 			   char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
+	enum drm_connector_status status;
+
+	status = READ_ONCE(connector->status);
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			drm_get_connector_status_name(connector->status));
+			drm_get_connector_status_name(status));
 }
 
 static ssize_t dpms_show(struct device *device,
@@ -231,9 +234,11 @@
 			   char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
+	bool enabled;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
-			"disabled");
+	enabled = READ_ONCE(connector->encoder);
+
+	return snprintf(buf, PAGE_SIZE, enabled ? "enabled\n" : "disabled\n");
 }
 
 static ssize_t edid_show(struct file *filp, struct kobject *kobj,
@@ -287,102 +292,6 @@
 	return written;
 }
 
-static ssize_t tv_subconnector_show(struct device *device,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	struct drm_connector *connector = to_drm_connector(device);
-	struct drm_device *dev = connector->dev;
-	struct drm_property *prop;
-	uint64_t subconnector;
-	int ret;
-
-	prop = dev->mode_config.tv_subconnector_property;
-	if (!prop) {
-		DRM_ERROR("Unable to find subconnector property\n");
-		return 0;
-	}
-
-	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
-	if (ret)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%s",
-			drm_get_tv_subconnector_name((int)subconnector));
-}
-
-static ssize_t tv_select_subconnector_show(struct device *device,
-					   struct device_attribute *attr,
-					   char *buf)
-{
-	struct drm_connector *connector = to_drm_connector(device);
-	struct drm_device *dev = connector->dev;
-	struct drm_property *prop;
-	uint64_t subconnector;
-	int ret;
-
-	prop = dev->mode_config.tv_select_subconnector_property;
-	if (!prop) {
-		DRM_ERROR("Unable to find select subconnector property\n");
-		return 0;
-	}
-
-	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
-	if (ret)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%s",
-			drm_get_tv_select_name((int)subconnector));
-}
-
-static ssize_t dvii_subconnector_show(struct device *device,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	struct drm_connector *connector = to_drm_connector(device);
-	struct drm_device *dev = connector->dev;
-	struct drm_property *prop;
-	uint64_t subconnector;
-	int ret;
-
-	prop = dev->mode_config.dvi_i_subconnector_property;
-	if (!prop) {
-		DRM_ERROR("Unable to find subconnector property\n");
-		return 0;
-	}
-
-	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
-	if (ret)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%s",
-			drm_get_dvi_i_subconnector_name((int)subconnector));
-}
-
-static ssize_t dvii_select_subconnector_show(struct device *device,
-					     struct device_attribute *attr,
-					     char *buf)
-{
-	struct drm_connector *connector = to_drm_connector(device);
-	struct drm_device *dev = connector->dev;
-	struct drm_property *prop;
-	uint64_t subconnector;
-	int ret;
-
-	prop = dev->mode_config.dvi_i_select_subconnector_property;
-	if (!prop) {
-		DRM_ERROR("Unable to find select subconnector property\n");
-		return 0;
-	}
-
-	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
-	if (ret)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%s",
-			drm_get_dvi_i_select_name((int)subconnector));
-}
-
 static DEVICE_ATTR_RW(status);
 static DEVICE_ATTR_RO(enabled);
 static DEVICE_ATTR_RO(dpms);
@@ -396,54 +305,6 @@
 	NULL
 };
 
-static DEVICE_ATTR_RO(tv_subconnector);
-static DEVICE_ATTR_RO(tv_select_subconnector);
-
-static struct attribute *connector_tv_dev_attrs[] = {
-	&dev_attr_tv_subconnector.attr,
-	&dev_attr_tv_select_subconnector.attr,
-	NULL
-};
-
-static DEVICE_ATTR_RO(dvii_subconnector);
-static DEVICE_ATTR_RO(dvii_select_subconnector);
-
-static struct attribute *connector_dvii_dev_attrs[] = {
-	&dev_attr_dvii_subconnector.attr,
-	&dev_attr_dvii_select_subconnector.attr,
-	NULL
-};
-
-/* Connector type related helpers */
-static int kobj_connector_type(struct kobject *kobj)
-{
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_connector *connector = to_drm_connector(dev);
-
-	return connector->connector_type;
-}
-
-static umode_t connector_is_dvii(struct kobject *kobj,
-				 struct attribute *attr, int idx)
-{
-	return kobj_connector_type(kobj) == DRM_MODE_CONNECTOR_DVII ?
-		attr->mode : 0;
-}
-
-static umode_t connector_is_tv(struct kobject *kobj,
-			       struct attribute *attr, int idx)
-{
-	switch (kobj_connector_type(kobj)) {
-	case DRM_MODE_CONNECTOR_Composite:
-	case DRM_MODE_CONNECTOR_SVIDEO:
-	case DRM_MODE_CONNECTOR_Component:
-	case DRM_MODE_CONNECTOR_TV:
-		return attr->mode;
-	}
-
-	return 0;
-}
-
 static struct bin_attribute edid_attr = {
 	.attr.name = "edid",
 	.attr.mode = 0444,
@@ -461,20 +322,8 @@
 	.bin_attrs = connector_bin_attrs,
 };
 
-static const struct attribute_group connector_tv_dev_group = {
-	.attrs = connector_tv_dev_attrs,
-	.is_visible = connector_is_tv,
-};
-
-static const struct attribute_group connector_dvii_dev_group = {
-	.attrs = connector_dvii_dev_attrs,
-	.is_visible = connector_is_dvii,
-};
-
 static const struct attribute_group *connector_dev_groups[] = {
 	&connector_dev_group,
-	&connector_tv_dev_group,
-	&connector_dvii_dev_group,
 	NULL
 };
 
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index f90bd5f..ac9f4b3 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -395,16 +395,8 @@
 	.close = drm_vm_close,
 };
 
-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct drm_device *dev,
-		struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct drm_device *dev,
+			       struct vm_area_struct *vma)
 {
 	struct drm_vma_entry *vma_entry;
 
@@ -429,8 +421,8 @@
 	mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_vm_close_locked(struct drm_device *dev,
-		struct vm_area_struct *vma)
+static void drm_vm_close_locked(struct drm_device *dev,
+				struct vm_area_struct *vma)
 {
 	struct drm_vma_entry *pt, *temp;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e885898..3d4f56d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -314,7 +314,7 @@
 	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
 		return -EINVAL;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -335,7 +335,7 @@
 	if (args->flags)
 		return -EINVAL;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -356,7 +356,7 @@
 	if (args->pad)
 		return -EINVAL;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -441,7 +441,7 @@
 	if (!gpu)
 		return -ENXIO;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -497,7 +497,7 @@
 	.open               = etnaviv_open,
 	.preclose           = etnaviv_preclose,
 	.set_busid          = drm_platform_set_busid,
-	.gem_free_object    = etnaviv_gem_free_object,
+	.gem_free_object_unlocked = etnaviv_gem_free_object,
 	.gem_vm_ops         = &vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 281c6ec..df9bcba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -129,10 +129,9 @@
 	/* when we start tracking the pin count, then do something here */
 }
 
-static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
+static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
 		struct vm_area_struct *vma)
 {
-	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	pgprot_t vm_page_prot;
 
 	vma->vm_flags &= ~VM_PFNMAP;
@@ -151,9 +150,9 @@
 		 * in particular in the case of mmap'd dmabufs)
 		 */
 		fput(vma->vm_file);
-		get_file(obj->filp);
+		get_file(etnaviv_obj->base.filp);
 		vma->vm_pgoff = 0;
-		vma->vm_file  = obj->filp;
+		vma->vm_file  = etnaviv_obj->base.filp;
 
 		vma->vm_page_prot = vm_page_prot;
 	}
@@ -173,7 +172,7 @@
 	}
 
 	obj = to_etnaviv_bo(vma->vm_private_data);
-	return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
+	return obj->ops->mmap(obj, vma);
 }
 
 int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -545,6 +544,7 @@
 	.get_pages = etnaviv_gem_shmem_get_pages,
 	.release = etnaviv_gem_shmem_release,
 	.vmap = etnaviv_gem_vmap_impl,
+	.mmap = etnaviv_gem_mmap_obj,
 };
 
 void etnaviv_gem_free_object(struct drm_gem_object *obj)
@@ -886,10 +886,17 @@
 	put_task_struct(etnaviv_obj->userptr.task);
 }
 
+static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+		struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
 	.get_pages = etnaviv_gem_userptr_get_pages,
 	.release = etnaviv_gem_userptr_release,
 	.vmap = etnaviv_gem_vmap_impl,
+	.mmap = etnaviv_gem_userptr_mmap_obj,
 };
 
 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 02665d8..e63ff11 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -79,6 +79,7 @@
 	int (*get_pages)(struct etnaviv_gem_object *);
 	void (*release)(struct etnaviv_gem_object *);
 	void *(*vmap)(struct etnaviv_gem_object *);
+	int (*mmap)(struct etnaviv_gem_object *, struct vm_area_struct *);
 };
 
 static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 4e67395..b93618c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -84,10 +84,17 @@
 	return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
 }
 
+static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+		struct vm_area_struct *vma)
+{
+	return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+}
+
 static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
 	/* .get_pages should never be called */
 	.release = etnaviv_gem_prime_release,
 	.vmap = etnaviv_gem_prime_vmap_impl,
+	.mmap = etnaviv_gem_prime_mmap_obj,
 };
 
 struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d8a9a9c..ff6aa5d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1528,8 +1528,8 @@
 	INIT_WORK(&gpu->recover_work, recover_worker);
 	init_waitqueue_head(&gpu->fence_event);
 
-	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
-			(unsigned long)gpu);
+	setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
+			       (unsigned long)gpu);
 
 	priv->gpu[priv->num_gpus++] = gpu;
 
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index baddf33..d814b30 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -71,8 +71,9 @@
 	  This enables support for Exynos MIPI-DSI device.
 
 config DRM_EXYNOS_DP
-	bool "Display Port"
+	bool "EXYNOS specific extensions for Analogix DP driver"
 	depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
+	select DRM_ANALOGIX_DP
 	default DRM_EXYNOS
 	select DRM_PANEL
 	help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 23d2f95..f663490 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -12,7 +12,7 @@
 exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON)	+= exynos7_drm_decon.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DPI)	+= exynos_drm_dpi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DSI)	+= exynos_drm_dsi.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_DP)	+= exynos_dp_core.o exynos_dp_reg.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DP)	+= exynos_dp.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER)	+= exynos_mixer.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5245bc5..ac21b40 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -28,6 +28,10 @@
 #define WINDOWS_NR	3
 #define MIN_FB_WIDTH_FOR_16WORD_BURST	128
 
+#define IFTYPE_I80	(1 << 0)
+#define I80_HW_TRG	(1 << 1)
+#define IFTYPE_HDMI	(1 << 2)
+
 static const char * const decon_clks_name[] = {
 	"pclk",
 	"aclk_decon",
@@ -38,12 +42,6 @@
 	"sclk_decon_eclk",
 };
 
-enum decon_iftype {
-	IFTYPE_RGB,
-	IFTYPE_I80,
-	IFTYPE_HDMI
-};
-
 enum decon_flag_bits {
 	BIT_CLKS_ENABLED,
 	BIT_IRQS_ENABLED,
@@ -61,7 +59,7 @@
 	struct clk			*clks[ARRAY_SIZE(decon_clks_name)];
 	int				pipe;
 	unsigned long			flags;
-	enum decon_iftype		out_type;
+	unsigned long			out_type;
 	int				first_win;
 };
 
@@ -95,7 +93,7 @@
 
 	if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
 		val = VIDINTCON0_INTEN;
-		if (ctx->out_type == IFTYPE_I80)
+		if (ctx->out_type & IFTYPE_I80)
 			val |= VIDINTCON0_FRAMEDONE;
 		else
 			val |= VIDINTCON0_INTFRMEN;
@@ -119,11 +117,11 @@
 
 static void decon_setup_trigger(struct decon_context *ctx)
 {
-	u32 val = (ctx->out_type != IFTYPE_HDMI)
+	u32 val = !(ctx->out_type & I80_HW_TRG)
 		? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
 		  TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
 		: TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
-		  TRIGCON_HWTRIGMASK_I80_RGB | TRIGCON_HWTRIGEN_I80_RGB;
+		  TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
 	writel(val, ctx->addr + DECON_TRIGCON);
 }
 
@@ -136,7 +134,7 @@
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 		return;
 
-	if (ctx->out_type == IFTYPE_HDMI) {
+	if (ctx->out_type & IFTYPE_HDMI) {
 		m->crtc_hsync_start = m->crtc_hdisplay + 10;
 		m->crtc_hsync_end = m->crtc_htotal - 92;
 		m->crtc_vsync_start = m->crtc_vdisplay + 1;
@@ -149,19 +147,24 @@
 	val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
 	writel(val, ctx->addr + DECON_CMU);
 
+	if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
+		decon_setup_trigger(ctx);
+
 	/* lcd on and use command if */
 	val = VIDOUT_LCD_ON;
-	if (ctx->out_type == IFTYPE_I80)
+	if (ctx->out_type & IFTYPE_I80) {
 		val |= VIDOUT_COMMAND_IF;
-	else
+	} else {
 		val |= VIDOUT_RGB_IF;
+	}
+
 	writel(val, ctx->addr + DECON_VIDOUTCON0);
 
 	val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
 		VIDTCON2_HOZVAL(m->hdisplay - 1);
 	writel(val, ctx->addr + DECON_VIDTCON2);
 
-	if (ctx->out_type != IFTYPE_I80) {
+	if (!(ctx->out_type & IFTYPE_I80)) {
 		val = VIDTCON00_VBPD_F(
 				m->crtc_vtotal - m->crtc_vsync_end - 1) |
 			VIDTCON00_VFPD_F(
@@ -183,10 +186,10 @@
 		writel(val, ctx->addr + DECON_VIDTCON11);
 	}
 
-	decon_setup_trigger(ctx);
-
 	/* enable output and display signal */
 	decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
+
+	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
@@ -300,7 +303,7 @@
 	val = dma_addr + pitch * state->src.h;
 	writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
 
-	if (ctx->out_type != IFTYPE_HDMI)
+	if (!(ctx->out_type & IFTYPE_HDMI))
 		val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
 			| BIT_VAL(state->crtc.w * bpp, 13, 0);
 	else
@@ -312,9 +315,6 @@
 
 	/* window enable */
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
-
-	/* standalone update */
-	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -326,15 +326,7 @@
 	if (test_bit(BIT_SUSPENDED, &ctx->flags))
 		return;
 
-	decon_shadow_protect_win(ctx, win, true);
-
-	/* window disable */
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
-
-	decon_shadow_protect_win(ctx, win, false);
-
-	/* standalone update */
-	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -348,7 +340,10 @@
 	for (i = ctx->first_win; i < WINDOWS_NR; i++)
 		decon_shadow_protect_win(ctx, i, false);
 
-	if (ctx->out_type == IFTYPE_I80)
+	/* standalone update */
+	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
+	if (ctx->out_type & IFTYPE_I80)
 		set_bit(BIT_WIN_UPDATED, &ctx->flags);
 }
 
@@ -374,7 +369,7 @@
 
 	WARN(tries == 0, "failed to software reset DECON\n");
 
-	if (ctx->out_type != IFTYPE_HDMI)
+	if (!(ctx->out_type & IFTYPE_HDMI))
 		return;
 
 	writel(VIDCON0_CLKVALUP | VIDCON0_VLCKFREE, ctx->addr + DECON_VIDCON0);
@@ -383,7 +378,6 @@
 	writel(VIDCON1_VCLK_RUN_VDEN_DISABLE, ctx->addr + DECON_VIDCON1);
 	writel(CRCCTRL_CRCEN | CRCCTRL_CRCSTART_F | CRCCTRL_CRCCLKEN,
 	       ctx->addr + DECON_CRCCTRL);
-	decon_setup_trigger(ctx);
 }
 
 static void decon_enable(struct exynos_drm_crtc *crtc)
@@ -395,8 +389,12 @@
 
 	pm_runtime_get_sync(ctx->dev);
 
+	exynos_drm_pipe_clk_enable(crtc, true);
+
 	set_bit(BIT_CLKS_ENABLED, &ctx->flags);
 
+	decon_swreset(ctx);
+
 	/* if vblank was enabled status, enable it again. */
 	if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
 		decon_enable_vblank(ctx->crtc);
@@ -424,6 +422,8 @@
 
 	clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
 
+	exynos_drm_pipe_clk_enable(crtc, false);
+
 	pm_runtime_put_sync(ctx->dev);
 
 	set_bit(BIT_SUSPENDED, &ctx->flags);
@@ -433,13 +433,12 @@
 {
 	struct decon_context *ctx = crtc->ctx;
 
-	if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
+	if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags) ||
+	    (ctx->out_type & I80_HW_TRG))
 		return;
 
 	if (test_and_clear_bit(BIT_WIN_UPDATED, &ctx->flags))
 		decon_set_bits(ctx, DECON_TRIGCON, TRIGCON_SWTRIGCMD, ~0);
-
-	drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -459,8 +458,10 @@
 		decon_shadow_protect_win(ctx, win, true);
 		decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
 		decon_shadow_protect_win(ctx, win, false);
-		decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 	}
+
+	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
 	/* TODO: wait for possible vsync */
 	msleep(50);
 
@@ -509,7 +510,7 @@
 	}
 
 	exynos_plane = &ctx->planes[ctx->first_win];
-	out_type = (ctx->out_type == IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
+	out_type = (ctx->out_type & IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
 						  : EXYNOS_DISPLAY_TYPE_LCD;
 	ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
 					ctx->pipe, out_type,
@@ -570,6 +571,7 @@
 
 		/* clear */
 		writel(val, ctx->addr + DECON_VIDINTCON1);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 	}
 
 out:
@@ -617,11 +619,11 @@
 static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
 	{
 		.compatible = "samsung,exynos5433-decon",
-		.data = (void *)IFTYPE_RGB
+		.data = (void *)I80_HW_TRG
 	},
 	{
 		.compatible = "samsung,exynos5433-decon-tv",
-		.data = (void *)IFTYPE_HDMI
+		.data = (void *)(I80_HW_TRG | IFTYPE_HDMI)
 	},
 	{},
 };
@@ -629,7 +631,6 @@
 
 static int exynos5433_decon_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id;
 	struct device *dev = &pdev->dev;
 	struct decon_context *ctx;
 	struct resource *res;
@@ -642,14 +643,13 @@
 
 	__set_bit(BIT_SUSPENDED, &ctx->flags);
 	ctx->dev = dev;
+	ctx->out_type = (unsigned long)of_device_get_match_data(dev);
 
-	of_id = of_match_device(exynos5433_decon_driver_dt_match, &pdev->dev);
-	ctx->out_type = (enum decon_iftype)of_id->data;
-
-	if (ctx->out_type == IFTYPE_HDMI)
+	if (ctx->out_type & IFTYPE_HDMI) {
 		ctx->first_win = 1;
-	else if (of_get_child_by_name(dev->of_node, "i80-if-timings"))
-		ctx->out_type = IFTYPE_I80;
+	} else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
+		ctx->out_type |= IFTYPE_I80;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
 		struct clk *clk;
@@ -674,7 +674,7 @@
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-			(ctx->out_type == IFTYPE_I80) ? "lcd_sys" : "vsync");
+			(ctx->out_type & IFTYPE_I80) ? "lcd_sys" : "vsync");
 	if (!res) {
 		dev_err(dev, "cannot find IRQ resource\n");
 		return -ENXIO;
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 9336107..f6223f9 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -593,7 +593,6 @@
 	.commit = decon_commit,
 	.enable_vblank = decon_enable_vblank,
 	.disable_vblank = decon_disable_vblank,
-	.wait_for_vblank = decon_wait_for_vblank,
 	.atomic_begin = decon_atomic_begin,
 	.update_plane = decon_update_plane,
 	.disable_plane = decon_disable_plane,
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
new file mode 100644
index 0000000..468498e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -0,0 +1,311 @@
+/*
+ * Samsung SoC DP (Display Port) interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <drm/bridge/analogix_dp.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_crtc.h"
+
+#define to_dp(nm)	container_of(nm, struct exynos_dp_device, nm)
+
+struct exynos_dp_device {
+	struct drm_encoder         encoder;
+	struct drm_connector       connector;
+	struct drm_bridge          *ptn_bridge;
+	struct drm_device          *drm_dev;
+	struct device              *dev;
+
+	struct videomode           vm;
+	struct analogix_dp_plat_data plat_data;
+};
+
+int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
+				bool enable)
+{
+	struct exynos_dp_device *dp = to_dp(plat_data);
+	struct drm_encoder *encoder = &dp->encoder;
+
+	if (!encoder->crtc)
+		return -EPERM;
+
+	exynos_drm_pipe_clk_enable(to_exynos_crtc(encoder->crtc), enable);
+
+	return 0;
+}
+
+static int exynos_dp_poweron(struct analogix_dp_plat_data *plat_data)
+{
+	return exynos_dp_crtc_clock_enable(plat_data, true);
+}
+
+static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
+{
+	return exynos_dp_crtc_clock_enable(plat_data, false);
+}
+
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+{
+	struct exynos_dp_device *dp = to_dp(plat_data);
+	struct drm_connector *connector = &dp->connector;
+	struct drm_display_mode *mode;
+	int num_modes = 0;
+
+	if (dp->plat_data.panel)
+		return num_modes;
+
+	mode = drm_mode_create(connector->dev);
+	if (!mode) {
+		DRM_ERROR("failed to create a new display mode.\n");
+		return num_modes;
+	}
+
+	drm_display_mode_from_videomode(&dp->vm, mode);
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	return num_modes + 1;
+}
+
+static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
+				   struct drm_bridge *bridge,
+				   struct drm_connector *connector)
+{
+	struct exynos_dp_device *dp = to_dp(plat_data);
+	struct drm_encoder *encoder = &dp->encoder;
+	int ret;
+
+	drm_connector_register(connector);
+
+	/* Pre-empt DP connector creation if there's a bridge */
+	if (dp->ptn_bridge) {
+		bridge->next = dp->ptn_bridge;
+		dp->ptn_bridge->encoder = encoder;
+		ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
+		if (ret) {
+			DRM_ERROR("Failed to attach bridge to drm\n");
+			bridge->next = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_dp_nop(struct drm_encoder *encoder)
+{
+	/* do nothing */
+}
+
+static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+	.mode_set = exynos_dp_mode_set,
+	.enable = exynos_dp_nop,
+	.disable = exynos_dp_nop,
+};
+
+static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
+{
+	int ret;
+
+	ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
+	if (ret) {
+		DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
+{
+	struct exynos_dp_device *dp = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &dp->encoder;
+	struct drm_device *drm_dev = data;
+	int pipe, ret;
+
+	/*
+	 * As noted in the probe function, we no longer need the device
+	 * drvdata here; hand it over to the analogix dp driver by setting
+	 * the device drvdata to NULL.
+	 */
+	dev_set_drvdata(dev, NULL);
+
+	dp->dev = dev;
+	dp->drm_dev = drm_dev;
+
+	dp->plat_data.dev_type = EXYNOS_DP;
+	dp->plat_data.power_on = exynos_dp_poweron;
+	dp->plat_data.power_off = exynos_dp_poweroff;
+	dp->plat_data.attach = exynos_dp_bridge_attach;
+	dp->plat_data.get_modes = exynos_dp_get_modes;
+
+	if (!dp->plat_data.panel && !dp->ptn_bridge) {
+		ret = exynos_dp_dt_parse_panel(dp);
+		if (ret)
+			return ret;
+	}
+
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS, NULL);
+
+	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+	dp->plat_data.encoder = encoder;
+
+	return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+}
+
+static void exynos_dp_unbind(struct device *dev, struct device *master,
+			     void *data)
+{
+	return analogix_dp_unbind(dev, master, data);
+}
+
+static const struct component_ops exynos_dp_ops = {
+	.bind	= exynos_dp_bind,
+	.unbind	= exynos_dp_unbind,
+};
+
+static int exynos_dp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = NULL, *endpoint = NULL;
+	struct exynos_dp_device *dp;
+
+	dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+			  GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	/*
+	 * We only use the drvdata until the driver reaches the component
+	 * bind function; there it is set back to NULL so that the analogix
+	 * dp driver can take charge of the drvdata.
+	 */
+	platform_set_drvdata(pdev, dp);
+
+	/* This is for backward compatibility. */
+	np = of_parse_phandle(dev->of_node, "panel", 0);
+	if (np) {
+		dp->plat_data.panel = of_drm_find_panel(np);
+		of_node_put(np);
+		if (!dp->plat_data.panel)
+			return -EPROBE_DEFER;
+		goto out;
+	}
+
+	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (endpoint) {
+		np = of_graph_get_remote_port_parent(endpoint);
+		if (np) {
+			/* The remote port can be either a panel or a bridge */
+			dp->plat_data.panel = of_drm_find_panel(np);
+			if (!dp->plat_data.panel) {
+				dp->ptn_bridge = of_drm_find_bridge(np);
+				if (!dp->ptn_bridge) {
+					of_node_put(np);
+					return -EPROBE_DEFER;
+				}
+			}
+			of_node_put(np);
+		} else {
+			DRM_ERROR("no remote endpoint device node found.\n");
+			return -EINVAL;
+		}
+	} else {
+		DRM_ERROR("no port endpoint subnode found.\n");
+		return -EINVAL;
+	}
+
+out:
+	return component_add(&pdev->dev, &exynos_dp_ops);
+}
+
+static int exynos_dp_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &exynos_dp_ops);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos_dp_suspend(struct device *dev)
+{
+	return analogix_dp_suspend(dev);
+}
+
+static int exynos_dp_resume(struct device *dev)
+{
+	return analogix_dp_resume(dev);
+}
+#endif
+
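+/*
+ * The suspend/resume helpers above are wired up as runtime PM callbacks,
+ * deferring to the common analogix_dp suspend/resume paths.
+ */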
+static const struct dev_pm_ops exynos_dp_pm_ops = {
+	SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+};
+
+static const struct of_device_id exynos_dp_match[] = {
+	{ .compatible = "samsung,exynos5-dp" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
+
+struct platform_driver dp_driver = {
+	.probe		= exynos_dp_probe,
+	.remove		= exynos_dp_remove,
+	.driver		= {
+		.name	= "exynos-dp",
+		.owner	= THIS_MODULE,
+		.pm	= &exynos_dp_pm_ops,
+		.of_match_table = exynos_dp_match,
+	},
+};
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung Specific Analogix-DP Driver Extension");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
deleted file mode 100644
index cff8dc7..0000000
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ /dev/null
@@ -1,1499 +0,0 @@
-/*
- * Samsung SoC DP (Display Port) interface driver.
- *
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_graph.h>
-#include <linux/gpio.h>
-#include <linux/component.h>
-#include <linux/phy/phy.h>
-#include <video/of_display_timing.h>
-#include <video/of_videomode.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_panel.h>
-
-#include "exynos_dp_core.h"
-#include "exynos_drm_crtc.h"
-
-#define ctx_from_connector(c)	container_of(c, struct exynos_dp_device, \
-					connector)
-
-static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
-{
-	return to_exynos_crtc(dp->encoder.crtc);
-}
-
-static inline struct exynos_dp_device *encoder_to_dp(
-						struct drm_encoder *e)
-{
-	return container_of(e, struct exynos_dp_device, encoder);
-}
-
-struct bridge_init {
-	struct i2c_client *client;
-	struct device_node *node;
-};
-
-static void exynos_dp_init_dp(struct exynos_dp_device *dp)
-{
-	exynos_dp_reset(dp);
-
-	exynos_dp_swreset(dp);
-
-	exynos_dp_init_analog_param(dp);
-	exynos_dp_init_interrupt(dp);
-
-	/* SW defined function Normal operation */
-	exynos_dp_enable_sw_function(dp);
-
-	exynos_dp_config_interrupt(dp);
-	exynos_dp_init_analog_func(dp);
-
-	exynos_dp_init_hpd(dp);
-	exynos_dp_init_aux(dp);
-}
-
-static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
-{
-	int timeout_loop = 0;
-
-	while (exynos_dp_get_plug_in_status(dp) != 0) {
-		timeout_loop++;
-		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
-			dev_err(dp->dev, "failed to get hpd plug status\n");
-			return -ETIMEDOUT;
-		}
-		usleep_range(10, 11);
-	}
-
-	return 0;
-}
-
-static unsigned char exynos_dp_calc_edid_check_sum(unsigned char *edid_data)
-{
-	int i;
-	unsigned char sum = 0;
-
-	for (i = 0; i < EDID_BLOCK_LENGTH; i++)
-		sum = sum + edid_data[i];
-
-	return sum;
-}
-
-static int exynos_dp_read_edid(struct exynos_dp_device *dp)
-{
-	unsigned char edid[EDID_BLOCK_LENGTH * 2];
-	unsigned int extend_block = 0;
-	unsigned char sum;
-	unsigned char test_vector;
-	int retval;
-
-	/*
-	 * EDID device address is 0x50.
-	 * However, if necessary, you must have set upper address
-	 * into E-EDID in I2C device, 0x30.
-	 */
-
-	/* Read Extension Flag, Number of 128-byte EDID extension blocks */
-	retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
-				EDID_EXTENSION_FLAG,
-				&extend_block);
-	if (retval)
-		return retval;
-
-	if (extend_block > 0) {
-		dev_dbg(dp->dev, "EDID data includes a single extension!\n");
-
-		/* Read EDID data */
-		retval = exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
-						EDID_HEADER_PATTERN,
-						EDID_BLOCK_LENGTH,
-						&edid[EDID_HEADER_PATTERN]);
-		if (retval != 0) {
-			dev_err(dp->dev, "EDID Read failed!\n");
-			return -EIO;
-		}
-		sum = exynos_dp_calc_edid_check_sum(edid);
-		if (sum != 0) {
-			dev_err(dp->dev, "EDID bad checksum!\n");
-			return -EIO;
-		}
-
-		/* Read additional EDID data */
-		retval = exynos_dp_read_bytes_from_i2c(dp,
-				I2C_EDID_DEVICE_ADDR,
-				EDID_BLOCK_LENGTH,
-				EDID_BLOCK_LENGTH,
-				&edid[EDID_BLOCK_LENGTH]);
-		if (retval != 0) {
-			dev_err(dp->dev, "EDID Read failed!\n");
-			return -EIO;
-		}
-		sum = exynos_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
-		if (sum != 0) {
-			dev_err(dp->dev, "EDID bad checksum!\n");
-			return -EIO;
-		}
-
-		exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-					&test_vector);
-		if (test_vector & DP_TEST_LINK_EDID_READ) {
-			exynos_dp_write_byte_to_dpcd(dp,
-				DP_TEST_EDID_CHECKSUM,
-				edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
-			exynos_dp_write_byte_to_dpcd(dp,
-				DP_TEST_RESPONSE,
-				DP_TEST_EDID_CHECKSUM_WRITE);
-		}
-	} else {
-		dev_info(dp->dev, "EDID data does not include any extensions.\n");
-
-		/* Read EDID data */
-		retval = exynos_dp_read_bytes_from_i2c(dp,
-				I2C_EDID_DEVICE_ADDR,
-				EDID_HEADER_PATTERN,
-				EDID_BLOCK_LENGTH,
-				&edid[EDID_HEADER_PATTERN]);
-		if (retval != 0) {
-			dev_err(dp->dev, "EDID Read failed!\n");
-			return -EIO;
-		}
-		sum = exynos_dp_calc_edid_check_sum(edid);
-		if (sum != 0) {
-			dev_err(dp->dev, "EDID bad checksum!\n");
-			return -EIO;
-		}
-
-		exynos_dp_read_byte_from_dpcd(dp,
-			DP_TEST_REQUEST,
-			&test_vector);
-		if (test_vector & DP_TEST_LINK_EDID_READ) {
-			exynos_dp_write_byte_to_dpcd(dp,
-				DP_TEST_EDID_CHECKSUM,
-				edid[EDID_CHECKSUM]);
-			exynos_dp_write_byte_to_dpcd(dp,
-				DP_TEST_RESPONSE,
-				DP_TEST_EDID_CHECKSUM_WRITE);
-		}
-	}
-
-	dev_dbg(dp->dev, "EDID Read success!\n");
-	return 0;
-}
-
-static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
-{
-	u8 buf[12];
-	int i;
-	int retval;
-
-	/* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
-	retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV,
-				12, buf);
-	if (retval)
-		return retval;
-
-	/* Read EDID */
-	for (i = 0; i < 3; i++) {
-		retval = exynos_dp_read_edid(dp);
-		if (!retval)
-			break;
-	}
-
-	return retval;
-}
-
-static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp,
-						bool enable)
-{
-	u8 data;
-
-	exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
-
-	if (enable)
-		exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-			DP_LANE_COUNT_ENHANCED_FRAME_EN |
-			DPCD_LANE_COUNT_SET(data));
-	else
-		exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-			DPCD_LANE_COUNT_SET(data));
-}
-
-static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp)
-{
-	u8 data;
-	int retval;
-
-	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
-	retval = DPCD_ENHANCED_FRAME_CAP(data);
-
-	return retval;
-}
-
-static void exynos_dp_set_enhanced_mode(struct exynos_dp_device *dp)
-{
-	u8 data;
-
-	data = exynos_dp_is_enhanced_mode_available(dp);
-	exynos_dp_enable_rx_to_enhanced_mode(dp, data);
-	exynos_dp_enable_enhanced_mode(dp, data);
-}
-
-static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp)
-{
-	exynos_dp_set_training_pattern(dp, DP_NONE);
-
-	exynos_dp_write_byte_to_dpcd(dp,
-		DP_TRAINING_PATTERN_SET,
-		DP_TRAINING_PATTERN_DISABLE);
-}
-
-static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
-					int pre_emphasis, int lane)
-{
-	switch (lane) {
-	case 0:
-		exynos_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
-		break;
-	case 1:
-		exynos_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
-		break;
-
-	case 2:
-		exynos_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
-		break;
-
-	case 3:
-		exynos_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
-		break;
-	}
-}
-
-static int exynos_dp_link_start(struct exynos_dp_device *dp)
-{
-	u8 buf[4];
-	int lane, lane_count, pll_tries, retval;
-
-	lane_count = dp->link_train.lane_count;
-
-	dp->link_train.lt_state = CLOCK_RECOVERY;
-	dp->link_train.eq_loop = 0;
-
-	for (lane = 0; lane < lane_count; lane++)
-		dp->link_train.cr_loop[lane] = 0;
-
-	/* Set the link rate and lane count to be established */
-	exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
-	exynos_dp_set_lane_count(dp, dp->link_train.lane_count);
-
-	/* Setup RX configuration */
-	buf[0] = dp->link_train.link_rate;
-	buf[1] = dp->link_train.lane_count;
-	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET,
-				2, buf);
-	if (retval)
-		return retval;
-
-	/* Set TX pre-emphasis to minimum */
-	for (lane = 0; lane < lane_count; lane++)
-		exynos_dp_set_lane_lane_pre_emphasis(dp,
-			PRE_EMPHASIS_LEVEL_0, lane);
-
-	/* Wait for PLL lock */
-	pll_tries = 0;
-	while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
-		if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
-			dev_err(dp->dev, "Wait for PLL lock timed out\n");
-			return -ETIMEDOUT;
-		}
-
-		pll_tries++;
-		usleep_range(90, 120);
-	}
-
-	/* Set training pattern 1 */
-	exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
-
-	/* Set RX training pattern */
-	retval = exynos_dp_write_byte_to_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
-	if (retval)
-		return retval;
-
-	for (lane = 0; lane < lane_count; lane++)
-		buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
-			    DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
-	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-			lane_count, buf);
-
-	return retval;
-}
-
-static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane)
-{
-	int shift = (lane & 1) * 4;
-	u8 link_value = link_status[lane>>1];
-
-	return (link_value >> shift) & 0xf;
-}
-
-static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
-{
-	int lane;
-	u8 lane_status;
-
-	for (lane = 0; lane < lane_count; lane++) {
-		lane_status = exynos_dp_get_lane_status(link_status, lane);
-		if ((lane_status & DP_LANE_CR_DONE) == 0)
-			return -EINVAL;
-	}
-	return 0;
-}
-
-static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
-				int lane_count)
-{
-	int lane;
-	u8 lane_status;
-
-	if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
-		return -EINVAL;
-
-	for (lane = 0; lane < lane_count; lane++) {
-		lane_status = exynos_dp_get_lane_status(link_status, lane);
-		lane_status &= DP_CHANNEL_EQ_BITS;
-		if (lane_status != DP_CHANNEL_EQ_BITS)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static unsigned char exynos_dp_get_adjust_request_voltage(u8 adjust_request[2],
-							int lane)
-{
-	int shift = (lane & 1) * 4;
-	u8 link_value = adjust_request[lane>>1];
-
-	return (link_value >> shift) & 0x3;
-}
-
-static unsigned char exynos_dp_get_adjust_request_pre_emphasis(
-					u8 adjust_request[2],
-					int lane)
-{
-	int shift = (lane & 1) * 4;
-	u8 link_value = adjust_request[lane>>1];
-
-	return ((link_value >> shift) & 0xc) >> 2;
-}
-
-static void exynos_dp_set_lane_link_training(struct exynos_dp_device *dp,
-					u8 training_lane_set, int lane)
-{
-	switch (lane) {
-	case 0:
-		exynos_dp_set_lane0_link_training(dp, training_lane_set);
-		break;
-	case 1:
-		exynos_dp_set_lane1_link_training(dp, training_lane_set);
-		break;
-
-	case 2:
-		exynos_dp_set_lane2_link_training(dp, training_lane_set);
-		break;
-
-	case 3:
-		exynos_dp_set_lane3_link_training(dp, training_lane_set);
-		break;
-	}
-}
-
-static unsigned int exynos_dp_get_lane_link_training(
-				struct exynos_dp_device *dp,
-				int lane)
-{
-	u32 reg;
-
-	switch (lane) {
-	case 0:
-		reg = exynos_dp_get_lane0_link_training(dp);
-		break;
-	case 1:
-		reg = exynos_dp_get_lane1_link_training(dp);
-		break;
-	case 2:
-		reg = exynos_dp_get_lane2_link_training(dp);
-		break;
-	case 3:
-		reg = exynos_dp_get_lane3_link_training(dp);
-		break;
-	default:
-		WARN_ON(1);
-		return 0;
-	}
-
-	return reg;
-}
-
-static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp)
-{
-	exynos_dp_training_pattern_dis(dp);
-	exynos_dp_set_enhanced_mode(dp);
-
-	dp->link_train.lt_state = FAILED;
-}
-
-static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
-					u8 adjust_request[2])
-{
-	int lane, lane_count;
-	u8 voltage_swing, pre_emphasis, training_lane;
-
-	lane_count = dp->link_train.lane_count;
-	for (lane = 0; lane < lane_count; lane++) {
-		voltage_swing = exynos_dp_get_adjust_request_voltage(
-						adjust_request, lane);
-		pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
-						adjust_request, lane);
-		training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
-				DPCD_PRE_EMPHASIS_SET(pre_emphasis);
-
-		if (voltage_swing == VOLTAGE_LEVEL_3)
-			training_lane |= DP_TRAIN_MAX_SWING_REACHED;
-		if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
-			training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
-		dp->link_train.training_lane[lane] = training_lane;
-	}
-}
-
-static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
-{
-	int lane, lane_count, retval;
-	u8 voltage_swing, pre_emphasis, training_lane;
-	u8 link_status[2], adjust_request[2];
-
-	usleep_range(100, 101);
-
-	lane_count = dp->link_train.lane_count;
-
-	retval =  exynos_dp_read_bytes_from_dpcd(dp,
-			DP_LANE0_1_STATUS, 2, link_status);
-	if (retval)
-		return retval;
-
-	retval =  exynos_dp_read_bytes_from_dpcd(dp,
-			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-	if (retval)
-		return retval;
-
-	if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
-		/* set training pattern 2 for EQ */
-		exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
-
-		retval = exynos_dp_write_byte_to_dpcd(dp,
-				DP_TRAINING_PATTERN_SET,
-				DP_LINK_SCRAMBLING_DISABLE |
-				DP_TRAINING_PATTERN_2);
-		if (retval)
-			return retval;
-
-		dev_info(dp->dev, "Link Training Clock Recovery success\n");
-		dp->link_train.lt_state = EQUALIZER_TRAINING;
-	} else {
-		for (lane = 0; lane < lane_count; lane++) {
-			training_lane = exynos_dp_get_lane_link_training(
-							dp, lane);
-			voltage_swing = exynos_dp_get_adjust_request_voltage(
-							adjust_request, lane);
-			pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
-							adjust_request, lane);
-
-			if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
-					voltage_swing &&
-			    DPCD_PRE_EMPHASIS_GET(training_lane) ==
-					pre_emphasis)
-				dp->link_train.cr_loop[lane]++;
-
-			if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
-			    voltage_swing == VOLTAGE_LEVEL_3 ||
-			    pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
-				dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
-					dp->link_train.cr_loop[lane],
-					voltage_swing, pre_emphasis);
-				exynos_dp_reduce_link_rate(dp);
-				return -EIO;
-			}
-		}
-	}
-
-	exynos_dp_get_adjust_training_lane(dp, adjust_request);
-
-	for (lane = 0; lane < lane_count; lane++)
-		exynos_dp_set_lane_link_training(dp,
-			dp->link_train.training_lane[lane], lane);
-
-	retval = exynos_dp_write_bytes_to_dpcd(dp,
-			DP_TRAINING_LANE0_SET, lane_count,
-			dp->link_train.training_lane);
-	if (retval)
-		return retval;
-
-	return retval;
-}
-
-static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
-{
-	int lane, lane_count, retval;
-	u32 reg;
-	u8 link_align, link_status[2], adjust_request[2];
-
-	usleep_range(400, 401);
-
-	lane_count = dp->link_train.lane_count;
-
-	retval = exynos_dp_read_bytes_from_dpcd(dp,
-			DP_LANE0_1_STATUS, 2, link_status);
-	if (retval)
-		return retval;
-
-	if (exynos_dp_clock_recovery_ok(link_status, lane_count)) {
-		exynos_dp_reduce_link_rate(dp);
-		return -EIO;
-	}
-
-	retval = exynos_dp_read_bytes_from_dpcd(dp,
-			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-	if (retval)
-		return retval;
-
-	retval = exynos_dp_read_byte_from_dpcd(dp,
-			DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
-	if (retval)
-		return retval;
-
-	exynos_dp_get_adjust_training_lane(dp, adjust_request);
-
-	if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) {
-		/* training pattern set to Normal */
-		exynos_dp_training_pattern_dis(dp);
-
-		dev_info(dp->dev, "Link Training success!\n");
-
-		exynos_dp_get_link_bandwidth(dp, &reg);
-		dp->link_train.link_rate = reg;
-		dev_dbg(dp->dev, "final bandwidth = %.2x\n",
-			dp->link_train.link_rate);
-
-		exynos_dp_get_lane_count(dp, &reg);
-		dp->link_train.lane_count = reg;
-		dev_dbg(dp->dev, "final lane count = %.2x\n",
-			dp->link_train.lane_count);
-
-		/* set enhanced mode if available */
-		exynos_dp_set_enhanced_mode(dp);
-		dp->link_train.lt_state = FINISHED;
-
-		return 0;
-	}
-
-	/* not all locked */
-	dp->link_train.eq_loop++;
-
-	if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
-		dev_err(dp->dev, "EQ Max loop\n");
-		exynos_dp_reduce_link_rate(dp);
-		return -EIO;
-	}
-
-	for (lane = 0; lane < lane_count; lane++)
-		exynos_dp_set_lane_link_training(dp,
-			dp->link_train.training_lane[lane], lane);
-
-	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-			lane_count, dp->link_train.training_lane);
-
-	return retval;
-}
-
-static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
-					u8 *bandwidth)
-{
-	u8 data;
-
-	/*
-	 * For DP rev.1.1, Maximum link rate of Main Link lanes
-	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
-	 */
-	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
-	*bandwidth = data;
-}
-
-static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
-					u8 *lane_count)
-{
-	u8 data;
-
-	/*
-	 * For DP rev.1.1, Maximum number of Main Link lanes
-	 * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
-	 */
-	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
-	*lane_count = DPCD_MAX_LANE_COUNT(data);
-}
-
-static void exynos_dp_init_training(struct exynos_dp_device *dp,
-			enum link_lane_count_type max_lane,
-			enum link_rate_type max_rate)
-{
-	/*
-	 * MACRO_RST must be applied after the PLL_LOCK to avoid
-	 * the DP inter pair skew issue for at least 10 us
-	 */
-	exynos_dp_reset_macro(dp);
-
-	/* Initialize by reading RX's DPCD */
-	exynos_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate);
-	exynos_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count);
-
-	if ((dp->link_train.link_rate != LINK_RATE_1_62GBPS) &&
-	   (dp->link_train.link_rate != LINK_RATE_2_70GBPS)) {
-		dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n",
-			dp->link_train.link_rate);
-		dp->link_train.link_rate = LINK_RATE_1_62GBPS;
-	}
-
-	if (dp->link_train.lane_count == 0) {
-		dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n",
-			dp->link_train.lane_count);
-		dp->link_train.lane_count = (u8)LANE_COUNT1;
-	}
-
-	/* Setup TX lane count & rate */
-	if (dp->link_train.lane_count > max_lane)
-		dp->link_train.lane_count = max_lane;
-	if (dp->link_train.link_rate > max_rate)
-		dp->link_train.link_rate = max_rate;
-
-	/* All DP analog module power up */
-	exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
-}
-
-static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
-{
-	int retval = 0, training_finished = 0;
-
-	dp->link_train.lt_state = START;
-
-	/* Process here */
-	while (!retval && !training_finished) {
-		switch (dp->link_train.lt_state) {
-		case START:
-			retval = exynos_dp_link_start(dp);
-			if (retval)
-				dev_err(dp->dev, "LT link start failed!\n");
-			break;
-		case CLOCK_RECOVERY:
-			retval = exynos_dp_process_clock_recovery(dp);
-			if (retval)
-				dev_err(dp->dev, "LT CR failed!\n");
-			break;
-		case EQUALIZER_TRAINING:
-			retval = exynos_dp_process_equalizer_training(dp);
-			if (retval)
-				dev_err(dp->dev, "LT EQ failed!\n");
-			break;
-		case FINISHED:
-			training_finished = 1;
-			break;
-		case FAILED:
-			return -EREMOTEIO;
-		}
-	}
-	if (retval)
-		dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
-
-	return retval;
-}
-
-static int exynos_dp_set_link_train(struct exynos_dp_device *dp,
-				u32 count,
-				u32 bwtype)
-{
-	int i;
-	int retval;
-
-	for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
-		exynos_dp_init_training(dp, count, bwtype);
-		retval = exynos_dp_sw_link_training(dp);
-		if (retval == 0)
-			break;
-
-		usleep_range(100, 110);
-	}
-
-	return retval;
-}
-
-static int exynos_dp_config_video(struct exynos_dp_device *dp)
-{
-	int retval = 0;
-	int timeout_loop = 0;
-	int done_count = 0;
-
-	exynos_dp_config_video_slave_mode(dp);
-
-	exynos_dp_set_video_color_format(dp);
-
-	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
-		dev_err(dp->dev, "PLL is not locked yet.\n");
-		return -EINVAL;
-	}
-
-	for (;;) {
-		timeout_loop++;
-		if (exynos_dp_is_slave_video_stream_clock_on(dp) == 0)
-			break;
-		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
-			dev_err(dp->dev, "Timeout of video streamclk ok\n");
-			return -ETIMEDOUT;
-		}
-
-		usleep_range(1, 2);
-	}
-
-	/* Set to use the register calculated M/N video */
-	exynos_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0);
-
-	/* For video bist, Video timing must be generated by register */
-	exynos_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE);
-
-	/* Disable video mute */
-	exynos_dp_enable_video_mute(dp, 0);
-
-	/* Configure video slave mode */
-	exynos_dp_enable_video_master(dp, 0);
-
-	timeout_loop = 0;
-
-	for (;;) {
-		timeout_loop++;
-		if (exynos_dp_is_video_stream_on(dp) == 0) {
-			done_count++;
-			if (done_count > 10)
-				break;
-		} else if (done_count) {
-			done_count = 0;
-		}
-		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
-			dev_err(dp->dev, "Timeout of video streamclk ok\n");
-			return -ETIMEDOUT;
-		}
-
-		usleep_range(1000, 1001);
-	}
-
-	if (retval != 0)
-		dev_err(dp->dev, "Video stream is not detected!\n");
-
-	return retval;
-}
-
-static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable)
-{
-	u8 data;
-
-	if (enable) {
-		exynos_dp_enable_scrambling(dp);
-
-		exynos_dp_read_byte_from_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			&data);
-		exynos_dp_write_byte_to_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			(u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
-	} else {
-		exynos_dp_disable_scrambling(dp);
-
-		exynos_dp_read_byte_from_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			&data);
-		exynos_dp_write_byte_to_dpcd(dp,
-			DP_TRAINING_PATTERN_SET,
-			(u8)(data | DP_LINK_SCRAMBLING_DISABLE));
-	}
-}
-
-static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
-{
-	struct exynos_dp_device *dp = arg;
-
-	enum dp_irq_type irq_type;
-
-	irq_type = exynos_dp_get_irq_type(dp);
-	switch (irq_type) {
-	case DP_IRQ_TYPE_HP_CABLE_IN:
-		dev_dbg(dp->dev, "Received irq - cable in\n");
-		schedule_work(&dp->hotplug_work);
-		exynos_dp_clear_hotplug_interrupts(dp);
-		break;
-	case DP_IRQ_TYPE_HP_CABLE_OUT:
-		dev_dbg(dp->dev, "Received irq - cable out\n");
-		exynos_dp_clear_hotplug_interrupts(dp);
-		break;
-	case DP_IRQ_TYPE_HP_CHANGE:
-		/*
-		 * We get these change notifications once in a while, but there
-		 * is nothing we can do with them. Just ignore it for now and
-		 * only handle cable changes.
-		 */
-		dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n");
-		exynos_dp_clear_hotplug_interrupts(dp);
-		break;
-	default:
-		dev_err(dp->dev, "Received irq - unknown type!\n");
-		break;
-	}
-	return IRQ_HANDLED;
-}
-
-static void exynos_dp_hotplug(struct work_struct *work)
-{
-	struct exynos_dp_device *dp;
-
-	dp = container_of(work, struct exynos_dp_device, hotplug_work);
-
-	if (dp->drm_dev)
-		drm_helper_hpd_irq_event(dp->drm_dev);
-}
-
-static void exynos_dp_commit(struct drm_encoder *encoder)
-{
-	struct exynos_dp_device *dp = encoder_to_dp(encoder);
-	int ret;
-
-	/* Keep the panel disabled while we configure video */
-	if (dp->panel) {
-		if (drm_panel_disable(dp->panel))
-			DRM_ERROR("failed to disable the panel\n");
-	}
-
-	ret = exynos_dp_detect_hpd(dp);
-	if (ret) {
-		/* Cable has been disconnected, we're done */
-		return;
-	}
-
-	ret = exynos_dp_handle_edid(dp);
-	if (ret) {
-		dev_err(dp->dev, "unable to handle edid\n");
-		return;
-	}
-
-	ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
-					dp->video_info->link_rate);
-	if (ret) {
-		dev_err(dp->dev, "unable to do link train\n");
-		return;
-	}
-
-	exynos_dp_enable_scramble(dp, 1);
-	exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
-	exynos_dp_enable_enhanced_mode(dp, 1);
-
-	exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
-	exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
-
-	exynos_dp_init_video(dp);
-	ret = exynos_dp_config_video(dp);
-	if (ret)
-		dev_err(dp->dev, "unable to config video\n");
-
-	/* Safe to enable the panel now */
-	if (dp->panel) {
-		if (drm_panel_enable(dp->panel))
-			DRM_ERROR("failed to enable the panel\n");
-	}
-
-	/* Enable video */
-	exynos_dp_start_video(dp);
-}
-
-static enum drm_connector_status exynos_dp_detect(
-				struct drm_connector *connector, bool force)
-{
-	return connector_status_connected;
-}
-
-static void exynos_dp_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs exynos_dp_connector_funcs = {
-	.dpms = drm_atomic_helper_connector_dpms,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.detect = exynos_dp_detect,
-	.destroy = exynos_dp_connector_destroy,
-	.reset = drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int exynos_dp_get_modes(struct drm_connector *connector)
-{
-	struct exynos_dp_device *dp = ctx_from_connector(connector);
-	struct drm_display_mode *mode;
-
-	if (dp->panel)
-		return drm_panel_get_modes(dp->panel);
-
-	mode = drm_mode_create(connector->dev);
-	if (!mode) {
-		DRM_ERROR("failed to create a new display mode.\n");
-		return 0;
-	}
-
-	drm_display_mode_from_videomode(&dp->vm, mode);
-	connector->display_info.width_mm = mode->width_mm;
-	connector->display_info.height_mm = mode->height_mm;
-
-	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_set_name(mode);
-	drm_mode_probed_add(connector, mode);
-
-	return 1;
-}
-
-static struct drm_encoder *exynos_dp_best_encoder(
-			struct drm_connector *connector)
-{
-	struct exynos_dp_device *dp = ctx_from_connector(connector);
-
-	return &dp->encoder;
-}
-
-static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
-	.get_modes = exynos_dp_get_modes,
-	.best_encoder = exynos_dp_best_encoder,
-};
-
-/* returns the number of bridges attached */
-static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
-		struct drm_encoder *encoder)
-{
-	int ret;
-
-	encoder->bridge->next = dp->ptn_bridge;
-	dp->ptn_bridge->encoder = encoder;
-	ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
-	if (ret) {
-		DRM_ERROR("Failed to attach bridge to drm\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int exynos_dp_bridge_attach(struct drm_bridge *bridge)
-{
-	struct exynos_dp_device *dp = bridge->driver_private;
-	struct drm_encoder *encoder = &dp->encoder;
-	struct drm_connector *connector = &dp->connector;
-	int ret;
-
-	/* Pre-empt DP connector creation if there's a bridge */
-	if (dp->ptn_bridge) {
-		ret = exynos_drm_attach_lcd_bridge(dp, encoder);
-		if (!ret)
-			return 0;
-	}
-
-	connector->polled = DRM_CONNECTOR_POLL_HPD;
-
-	ret = drm_connector_init(dp->drm_dev, connector,
-			&exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP);
-	if (ret) {
-		DRM_ERROR("Failed to initialize connector with drm\n");
-		return ret;
-	}
-
-	drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
-	drm_connector_register(connector);
-	drm_mode_connector_attach_encoder(connector, encoder);
-
-	if (dp->panel)
-		ret = drm_panel_attach(dp->panel, &dp->connector);
-
-	return ret;
-}
-
-static void exynos_dp_bridge_enable(struct drm_bridge *bridge)
-{
-	struct exynos_dp_device *dp = bridge->driver_private;
-	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
-
-	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
-		return;
-
-	pm_runtime_get_sync(dp->dev);
-
-	if (dp->panel) {
-		if (drm_panel_prepare(dp->panel)) {
-			DRM_ERROR("failed to setup the panel\n");
-			return;
-		}
-	}
-
-	if (crtc->ops->clock_enable)
-		crtc->ops->clock_enable(dp_to_crtc(dp), true);
-
-	phy_power_on(dp->phy);
-	exynos_dp_init_dp(dp);
-	enable_irq(dp->irq);
-	exynos_dp_commit(&dp->encoder);
-
-	dp->dpms_mode = DRM_MODE_DPMS_ON;
-}
-
-static void exynos_dp_bridge_disable(struct drm_bridge *bridge)
-{
-	struct exynos_dp_device *dp = bridge->driver_private;
-	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
-
-	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
-		return;
-
-	if (dp->panel) {
-		if (drm_panel_disable(dp->panel)) {
-			DRM_ERROR("failed to disable the panel\n");
-			return;
-		}
-	}
-
-	disable_irq(dp->irq);
-	flush_work(&dp->hotplug_work);
-	phy_power_off(dp->phy);
-
-	if (crtc->ops->clock_enable)
-		crtc->ops->clock_enable(dp_to_crtc(dp), false);
-
-	if (dp->panel) {
-		if (drm_panel_unprepare(dp->panel))
-			DRM_ERROR("failed to turnoff the panel\n");
-	}
-
-	pm_runtime_put_sync(dp->dev);
-
-	dp->dpms_mode = DRM_MODE_DPMS_OFF;
-}
-
-static void exynos_dp_bridge_nop(struct drm_bridge *bridge)
-{
-	/* do nothing */
-}
-
-static const struct drm_bridge_funcs exynos_dp_bridge_funcs = {
-	.enable = exynos_dp_bridge_enable,
-	.disable = exynos_dp_bridge_disable,
-	.pre_enable = exynos_dp_bridge_nop,
-	.post_disable = exynos_dp_bridge_nop,
-	.attach = exynos_dp_bridge_attach,
-};
-
-static int exynos_dp_create_connector(struct drm_encoder *encoder)
-{
-	struct exynos_dp_device *dp = encoder_to_dp(encoder);
-	struct drm_device *drm_dev = dp->drm_dev;
-	struct drm_bridge *bridge;
-	int ret;
-
-	bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
-	if (!bridge) {
-		DRM_ERROR("failed to allocate for drm bridge\n");
-		return -ENOMEM;
-	}
-
-	dp->bridge = bridge;
-
-	encoder->bridge = bridge;
-	bridge->driver_private = dp;
-	bridge->encoder = encoder;
-	bridge->funcs = &exynos_dp_bridge_funcs;
-
-	ret = drm_bridge_attach(drm_dev, bridge);
-	if (ret) {
-		DRM_ERROR("failed to attach drm bridge\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void exynos_dp_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void exynos_dp_enable(struct drm_encoder *encoder)
-{
-}
-
-static void exynos_dp_disable(struct drm_encoder *encoder)
-{
-}
-
-static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
-	.mode_set = exynos_dp_mode_set,
-	.enable = exynos_dp_enable,
-	.disable = exynos_dp_disable,
-};
-
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
-	.destroy = drm_encoder_cleanup,
-};
-
-static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
-{
-	struct device_node *dp_node = dev->of_node;
-	struct video_info *dp_video_config;
-
-	dp_video_config = devm_kzalloc(dev,
-				sizeof(*dp_video_config), GFP_KERNEL);
-	if (!dp_video_config)
-		return ERR_PTR(-ENOMEM);
-
-	dp_video_config->h_sync_polarity =
-		of_property_read_bool(dp_node, "hsync-active-high");
-
-	dp_video_config->v_sync_polarity =
-		of_property_read_bool(dp_node, "vsync-active-high");
-
-	dp_video_config->interlaced =
-		of_property_read_bool(dp_node, "interlaced");
-
-	if (of_property_read_u32(dp_node, "samsung,color-space",
-				&dp_video_config->color_space)) {
-		dev_err(dev, "failed to get color-space\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (of_property_read_u32(dp_node, "samsung,dynamic-range",
-				&dp_video_config->dynamic_range)) {
-		dev_err(dev, "failed to get dynamic-range\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
-				&dp_video_config->ycbcr_coeff)) {
-		dev_err(dev, "failed to get ycbcr-coeff\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (of_property_read_u32(dp_node, "samsung,color-depth",
-				&dp_video_config->color_depth)) {
-		dev_err(dev, "failed to get color-depth\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (of_property_read_u32(dp_node, "samsung,link-rate",
-				&dp_video_config->link_rate)) {
-		dev_err(dev, "failed to get link-rate\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (of_property_read_u32(dp_node, "samsung,lane-count",
-				&dp_video_config->lane_count)) {
-		dev_err(dev, "failed to get lane-count\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	return dp_video_config;
-}
-
-static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
-{
-	int ret;
-
-	ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
-	if (ret) {
-		DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
-		return ret;
-	}
-	return 0;
-}
-
-static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
-{
-	struct exynos_dp_device *dp = dev_get_drvdata(dev);
-	struct platform_device *pdev = to_platform_device(dev);
-	struct drm_device *drm_dev = data;
-	struct drm_encoder *encoder = &dp->encoder;
-	struct resource *res;
-	unsigned int irq_flags;
-	int pipe, ret = 0;
-
-	dp->dev = &pdev->dev;
-	dp->dpms_mode = DRM_MODE_DPMS_OFF;
-
-	dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev);
-	if (IS_ERR(dp->video_info))
-		return PTR_ERR(dp->video_info);
-
-	dp->phy = devm_phy_get(dp->dev, "dp");
-	if (IS_ERR(dp->phy)) {
-		dev_err(dp->dev, "no DP phy configured\n");
-		ret = PTR_ERR(dp->phy);
-		if (ret) {
-			/*
-			 * phy itself is not enabled, so we can move forward
-			 * assigning NULL to phy pointer.
-			 */
-			if (ret == -ENOSYS || ret == -ENODEV)
-				dp->phy = NULL;
-			else
-				return ret;
-		}
-	}
-
-	if (!dp->panel && !dp->ptn_bridge) {
-		ret = exynos_dp_dt_parse_panel(dp);
-		if (ret)
-			return ret;
-	}
-
-	dp->clock = devm_clk_get(&pdev->dev, "dp");
-	if (IS_ERR(dp->clock)) {
-		dev_err(&pdev->dev, "failed to get clock\n");
-		return PTR_ERR(dp->clock);
-	}
-
-	clk_prepare_enable(dp->clock);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dp->reg_base))
-		return PTR_ERR(dp->reg_base);
-
-	dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
-
-	if (gpio_is_valid(dp->hpd_gpio)) {
-		/*
-		 * Set up the hotplug GPIO from the device tree as an interrupt.
-		 * Simply specifying a different interrupt in the device tree
-		 * doesn't work since we handle hotplug rather differently when
-		 * using a GPIO.  We also need the actual GPIO specifier so
-		 * that we can get the current state of the GPIO.
-		 */
-		ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
-					    "hpd_gpio");
-		if (ret) {
-			dev_err(&pdev->dev, "failed to get hpd gpio\n");
-			return ret;
-		}
-		dp->irq = gpio_to_irq(dp->hpd_gpio);
-		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
-	} else {
-		dp->hpd_gpio = -ENODEV;
-		dp->irq = platform_get_irq(pdev, 0);
-		irq_flags = 0;
-	}
-
-	if (dp->irq == -ENXIO) {
-		dev_err(&pdev->dev, "failed to get irq\n");
-		return -ENODEV;
-	}
-
-	INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
-
-	ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
-			irq_flags, "exynos-dp", dp);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to request irq\n");
-		return ret;
-	}
-	disable_irq(dp->irq);
-
-	dp->drm_dev = drm_dev;
-
-	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
-						  EXYNOS_DISPLAY_TYPE_LCD);
-	if (pipe < 0)
-		return pipe;
-
-	encoder->possible_crtcs = 1 << pipe;
-
-	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
-	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS, NULL);
-
-	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
-
-	ret = exynos_dp_create_connector(encoder);
-	if (ret) {
-		DRM_ERROR("failed to create connector ret = %d\n", ret);
-		drm_encoder_cleanup(encoder);
-		return ret;
-	}
-
-	return 0;
-}
-
-static void exynos_dp_unbind(struct device *dev, struct device *master,
-				void *data)
-{
-	struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-	exynos_dp_disable(&dp->encoder);
-}
-
-static const struct component_ops exynos_dp_ops = {
-	.bind	= exynos_dp_bind,
-	.unbind	= exynos_dp_unbind,
-};
-
-static int exynos_dp_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *np = NULL, *endpoint = NULL;
-	struct exynos_dp_device *dp;
-	int ret;
-
-	dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
-				GFP_KERNEL);
-	if (!dp)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, dp);
-
-	/* This is for the backward compatibility. */
-	np = of_parse_phandle(dev->of_node, "panel", 0);
-	if (np) {
-		dp->panel = of_drm_find_panel(np);
-		of_node_put(np);
-		if (!dp->panel)
-			return -EPROBE_DEFER;
-		goto out;
-	}
-
-	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
-	if (endpoint) {
-		np = of_graph_get_remote_port_parent(endpoint);
-		if (np) {
-			/* The remote port can be either a panel or a bridge */
-			dp->panel = of_drm_find_panel(np);
-			if (!dp->panel) {
-				dp->ptn_bridge = of_drm_find_bridge(np);
-				if (!dp->ptn_bridge) {
-					of_node_put(np);
-					return -EPROBE_DEFER;
-				}
-			}
-			of_node_put(np);
-		} else {
-			DRM_ERROR("no remote endpoint device node found.\n");
-			return -EINVAL;
-		}
-	} else {
-		DRM_ERROR("no port endpoint subnode found.\n");
-		return -EINVAL;
-	}
-
-out:
-	pm_runtime_enable(dev);
-
-	ret = component_add(&pdev->dev, &exynos_dp_ops);
-	if (ret)
-		goto err_disable_pm_runtime;
-
-	return ret;
-
-err_disable_pm_runtime:
-	pm_runtime_disable(dev);
-
-	return ret;
-}
-
-static int exynos_dp_remove(struct platform_device *pdev)
-{
-	pm_runtime_disable(&pdev->dev);
-	component_del(&pdev->dev, &exynos_dp_ops);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int exynos_dp_suspend(struct device *dev)
-{
-	struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-	clk_disable_unprepare(dp->clock);
-
-	return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
-	struct exynos_dp_device *dp = dev_get_drvdata(dev);
-	int ret;
-
-	ret = clk_prepare_enable(dp->clock);
-	if (ret < 0) {
-		DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
-	SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
-};
-
-static const struct of_device_id exynos_dp_match[] = {
-	{ .compatible = "samsung,exynos5-dp" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, exynos_dp_match);
-
-struct platform_driver dp_driver = {
-	.probe		= exynos_dp_probe,
-	.remove		= exynos_dp_remove,
-	.driver		= {
-		.name	= "exynos-dp",
-		.owner	= THIS_MODULE,
-		.pm	= &exynos_dp_pm_ops,
-		.of_match_table = exynos_dp_match,
-	},
-};
-
-MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DP Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
deleted file mode 100644
index b5c2d8f..0000000
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Header file for Samsung DP (Display Port) interface driver.
- *
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DP_CORE_H
-#define _EXYNOS_DP_CORE_H
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/exynos_drm.h>
-#include <video/videomode.h>
-
-#include "exynos_drm_drv.h"
-
-#define DP_TIMEOUT_LOOP_COUNT 100
-#define MAX_CR_LOOP 5
-#define MAX_EQ_LOOP 5
-
-enum link_rate_type {
-	LINK_RATE_1_62GBPS = 0x06,
-	LINK_RATE_2_70GBPS = 0x0a
-};
-
-enum link_lane_count_type {
-	LANE_COUNT1 = 1,
-	LANE_COUNT2 = 2,
-	LANE_COUNT4 = 4
-};
-
-enum link_training_state {
-	START,
-	CLOCK_RECOVERY,
-	EQUALIZER_TRAINING,
-	FINISHED,
-	FAILED
-};
-
-enum voltage_swing_level {
-	VOLTAGE_LEVEL_0,
-	VOLTAGE_LEVEL_1,
-	VOLTAGE_LEVEL_2,
-	VOLTAGE_LEVEL_3,
-};
-
-enum pre_emphasis_level {
-	PRE_EMPHASIS_LEVEL_0,
-	PRE_EMPHASIS_LEVEL_1,
-	PRE_EMPHASIS_LEVEL_2,
-	PRE_EMPHASIS_LEVEL_3,
-};
-
-enum pattern_set {
-	PRBS7,
-	D10_2,
-	TRAINING_PTN1,
-	TRAINING_PTN2,
-	DP_NONE
-};
-
-enum color_space {
-	COLOR_RGB,
-	COLOR_YCBCR422,
-	COLOR_YCBCR444
-};
-
-enum color_depth {
-	COLOR_6,
-	COLOR_8,
-	COLOR_10,
-	COLOR_12
-};
-
-enum color_coefficient {
-	COLOR_YCBCR601,
-	COLOR_YCBCR709
-};
-
-enum dynamic_range {
-	VESA,
-	CEA
-};
-
-enum pll_status {
-	PLL_UNLOCKED,
-	PLL_LOCKED
-};
-
-enum clock_recovery_m_value_type {
-	CALCULATED_M,
-	REGISTER_M
-};
-
-enum video_timing_recognition_type {
-	VIDEO_TIMING_FROM_CAPTURE,
-	VIDEO_TIMING_FROM_REGISTER
-};
-
-enum analog_power_block {
-	AUX_BLOCK,
-	CH0_BLOCK,
-	CH1_BLOCK,
-	CH2_BLOCK,
-	CH3_BLOCK,
-	ANALOG_TOTAL,
-	POWER_ALL
-};
-
-enum dp_irq_type {
-	DP_IRQ_TYPE_HP_CABLE_IN,
-	DP_IRQ_TYPE_HP_CABLE_OUT,
-	DP_IRQ_TYPE_HP_CHANGE,
-	DP_IRQ_TYPE_UNKNOWN,
-};
-
-struct video_info {
-	char *name;
-
-	bool h_sync_polarity;
-	bool v_sync_polarity;
-	bool interlaced;
-
-	enum color_space color_space;
-	enum dynamic_range dynamic_range;
-	enum color_coefficient ycbcr_coeff;
-	enum color_depth color_depth;
-
-	enum link_rate_type link_rate;
-	enum link_lane_count_type lane_count;
-};
-
-struct link_train {
-	int eq_loop;
-	int cr_loop[4];
-
-	u8 link_rate;
-	u8 lane_count;
-	u8 training_lane[4];
-
-	enum link_training_state lt_state;
-};
-
-struct exynos_dp_device {
-	struct drm_encoder	encoder;
-	struct device		*dev;
-	struct drm_device	*drm_dev;
-	struct drm_connector	connector;
-	struct drm_panel	*panel;
-	struct drm_bridge	*bridge;
-	struct drm_bridge	*ptn_bridge;
-	struct clk		*clock;
-	unsigned int		irq;
-	void __iomem		*reg_base;
-
-	struct video_info	*video_info;
-	struct link_train	link_train;
-	struct work_struct	hotplug_work;
-	struct phy		*phy;
-	int			dpms_mode;
-	int			hpd_gpio;
-	struct videomode	vm;
-};
-
-/* exynos_dp_reg.c */
-void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_stop_video(struct exynos_dp_device *dp);
-void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
-void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
-void exynos_dp_reset(struct exynos_dp_device *dp);
-void exynos_dp_swreset(struct exynos_dp_device *dp);
-void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
-enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
-void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
-				enum analog_power_block block,
-				bool enable);
-void exynos_dp_init_analog_func(struct exynos_dp_device *dp);
-void exynos_dp_init_hpd(struct exynos_dp_device *dp);
-enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp);
-void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp);
-void exynos_dp_reset_aux(struct exynos_dp_device *dp);
-void exynos_dp_init_aux(struct exynos_dp_device *dp);
-int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp);
-void exynos_dp_enable_sw_function(struct exynos_dp_device *dp);
-int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp);
-int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned char data);
-int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned char *data);
-int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned int count,
-				unsigned char data[]);
-int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned int count,
-				unsigned char data[]);
-int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
-				unsigned int device_addr,
-				unsigned int reg_addr);
-int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
-				unsigned int device_addr,
-				unsigned int reg_addr,
-				unsigned int *data);
-int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
-				unsigned int device_addr,
-				unsigned int reg_addr,
-				unsigned int count,
-				unsigned char edid[]);
-void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype);
-void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype);
-void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);
-void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count);
-void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
-				 enum pattern_set pattern);
-void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
-				u32 training_lane);
-void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
-				u32 training_lane);
-void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
-				u32 training_lane);
-void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
-				u32 training_lane);
-u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp);
-u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp);
-u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp);
-u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp);
-void exynos_dp_reset_macro(struct exynos_dp_device *dp);
-void exynos_dp_init_video(struct exynos_dp_device *dp);
-
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp);
-int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp);
-void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
-			enum clock_recovery_m_value_type type,
-			u32 m_value,
-			u32 n_value);
-void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type);
-void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_start_video(struct exynos_dp_device *dp);
-int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp);
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp);
-void exynos_dp_enable_scrambling(struct exynos_dp_device *dp);
-void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
-
-/* I2C EDID Chip ID, Slave Address */
-#define I2C_EDID_DEVICE_ADDR			0x50
-#define I2C_E_EDID_DEVICE_ADDR			0x30
-
-#define EDID_BLOCK_LENGTH			0x80
-#define EDID_HEADER_PATTERN			0x00
-#define EDID_EXTENSION_FLAG			0x7e
-#define EDID_CHECKSUM				0x7f
-
-/* DP_MAX_LANE_COUNT */
-#define DPCD_ENHANCED_FRAME_CAP(x)		(((x) >> 7) & 0x1)
-#define DPCD_MAX_LANE_COUNT(x)			((x) & 0x1f)
-
-/* DP_LANE_COUNT_SET */
-#define DPCD_LANE_COUNT_SET(x)			((x) & 0x1f)
-
-/* DP_TRAINING_LANE0_SET */
-#define DPCD_PRE_EMPHASIS_SET(x)		(((x) & 0x3) << 3)
-#define DPCD_PRE_EMPHASIS_GET(x)		(((x) >> 3) & 0x3)
-#define DPCD_VOLTAGE_SWING_SET(x)		(((x) & 0x3) << 0)
-#define DPCD_VOLTAGE_SWING_GET(x)		(((x) >> 0) & 0x3)
-
-#endif /* _EXYNOS_DP_CORE_H */
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
deleted file mode 100644
index c1f87a2..0000000
--- a/drivers/gpu/drm/exynos/exynos_dp_reg.c
+++ /dev/null
@@ -1,1263 +0,0 @@
-/*
- * Samsung DP (Display port) register interface driver.
- *
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-
-#include "exynos_dp_core.h"
-#include "exynos_dp_reg.h"
-
-#define COMMON_INT_MASK_1	0
-#define COMMON_INT_MASK_2	0
-#define COMMON_INT_MASK_3	0
-#define COMMON_INT_MASK_4	(HOTPLUG_CHG | HPD_LOST | PLUG)
-#define INT_STA_MASK		INT_HPD
-
-void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable)
-{
-	u32 reg;
-
-	if (enable) {
-		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-		reg |= HDCP_VIDEO_MUTE;
-		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-	} else {
-		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-		reg &= ~HDCP_VIDEO_MUTE;
-		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-	}
-}
-
-void exynos_dp_stop_video(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-	reg &= ~VIDEO_EN;
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-}
-
-void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
-{
-	u32 reg;
-
-	if (enable)
-		reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 |
-			LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3;
-	else
-		reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
-			LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;
-
-	writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
-}
-
-void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = TX_TERMINAL_CTRL_50_OHM;
-	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
-
-	reg = SEL_24M | TX_DVDD_BIT_1_0625V;
-	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
-
-	reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
-	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
-
-	reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
-		TX_CUR1_2X | TX_CUR_16_MA;
-	writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
-
-	reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
-		CH1_AMP_400_MV | CH0_AMP_400_MV;
-	writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
-}
-
-void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
-{
-	/* Set interrupt pin assertion polarity as high */
-	writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL);
-
-	/* Clear pending registers */
-	writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
-	writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2);
-	writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3);
-	writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
-	writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA);
-
-	/* 0:mask,1: unmask */
-	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
-	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
-	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
-	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
-	writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
-}
-
-void exynos_dp_reset(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	exynos_dp_stop_video(dp);
-	exynos_dp_enable_video_mute(dp, 0);
-
-	reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
-		AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
-		HDCP_FUNC_EN_N | SW_FUNC_EN_N;
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-
-	reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
-		SERDES_FIFO_FUNC_EN_N |
-		LS_CLK_DOMAIN_FUNC_EN_N;
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-
-	usleep_range(20, 30);
-
-	exynos_dp_lane_swap(dp, 0);
-
-	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-	writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-
-	writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL);
-	writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL);
-
-	writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L);
-	writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H);
-
-	writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL);
-
-	writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST);
-
-	writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD);
-	writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN);
-
-	writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH);
-	writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH);
-
-	writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-}
-
-void exynos_dp_swreset(struct exynos_dp_device *dp)
-{
-	writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-}
-
-void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	/* 0: mask, 1: unmask */
-	reg = COMMON_INT_MASK_1;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
-
-	reg = COMMON_INT_MASK_2;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
-
-	reg = COMMON_INT_MASK_3;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
-
-	reg = COMMON_INT_MASK_4;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
-
-	reg = INT_STA_MASK;
-	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
-}
-
-enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
-	if (reg & PLL_LOCK)
-		return PLL_LOCKED;
-	else
-		return PLL_UNLOCKED;
-}
-
-void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable)
-{
-	u32 reg;
-
-	if (enable) {
-		reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
-		reg |= DP_PLL_PD;
-		writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
-	} else {
-		reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
-		reg &= ~DP_PLL_PD;
-		writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
-	}
-}
-
-void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
-				enum analog_power_block block,
-				bool enable)
-{
-	u32 reg;
-
-	switch (block) {
-	case AUX_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg |= AUX_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg &= ~AUX_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	case CH0_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg |= CH0_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg &= ~CH0_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	case CH1_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg |= CH1_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg &= ~CH1_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	case CH2_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg |= CH2_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg &= ~CH2_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	case CH3_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg |= CH3_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg &= ~CH3_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	case ANALOG_TOTAL:
-		if (enable) {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg |= DP_PHY_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
-			reg &= ~DP_PHY_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	case POWER_ALL:
-		if (enable) {
-			reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
-				CH1_PD | CH0_PD;
-			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
-		} else {
-			writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD);
-		}
-		break;
-	default:
-		break;
-	}
-}
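
Each case above repeats the same read-modify-write sequence on EXYNOS_DP_PHY_PD with a different mask bit. A minimal sketch of that pattern folded into one helper (illustration only, not part of this patch):

static void exynos_dp_phy_pd_update(struct exynos_dp_device *dp,
				    u32 mask, bool powerdown)
{
	u32 reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);

	if (powerdown)
		reg |= mask;		/* power the block down */
	else
		reg &= ~mask;		/* power the block back up */
	writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
}
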
-
-void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
-{
-	u32 reg;
-	int timeout_loop = 0;
-
-	exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
-
-	reg = PLL_LOCK_CHG;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
-	reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
-	writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
-
-	/* Power up PLL */
-	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
-		exynos_dp_set_pll_power_down(dp, 0);
-
-		while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
-			timeout_loop++;
-			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
-				dev_err(dp->dev, "failed to get pll lock status\n");
-				return;
-			}
-			usleep_range(10, 20);
-		}
-	}
-
-	/* Enable Serdes FIFO function and Link symbol clock domain module */
-	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
-		| AUX_FUNC_EN_N);
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-}
-
-void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	if (gpio_is_valid(dp->hpd_gpio))
-		return;
-
-	reg = HOTPLUG_CHG | HPD_LOST | PLUG;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
-
-	reg = INT_HPD;
-	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
-}
-
-void exynos_dp_init_hpd(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	if (gpio_is_valid(dp->hpd_gpio))
-		return;
-
-	exynos_dp_clear_hotplug_interrupts(dp);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-	reg &= ~(F_HPD | HPD_CTRL);
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-}
-
-enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	if (gpio_is_valid(dp->hpd_gpio)) {
-		reg = gpio_get_value(dp->hpd_gpio);
-		if (reg)
-			return DP_IRQ_TYPE_HP_CABLE_IN;
-		else
-			return DP_IRQ_TYPE_HP_CABLE_OUT;
-	} else {
-		/* Parse hotplug interrupt status register */
-		reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
-
-		if (reg & PLUG)
-			return DP_IRQ_TYPE_HP_CABLE_IN;
-
-		if (reg & HPD_LOST)
-			return DP_IRQ_TYPE_HP_CABLE_OUT;
-
-		if (reg & HOTPLUG_CHG)
-			return DP_IRQ_TYPE_HP_CHANGE;
-
-		return DP_IRQ_TYPE_UNKNOWN;
-	}
-}
-
-void exynos_dp_reset_aux(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	/* Disable AUX channel module */
-	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-	reg |= AUX_FUNC_EN_N;
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-}
-
-void exynos_dp_init_aux(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	/* Clear interrupts related to AUX channel */
-	reg = RPLY_RECEIV | AUX_ERR;
-	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
-
-	exynos_dp_reset_aux(dp);
-
-	/* Disable AUX transaction H/W retry */
-	reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)|
-		AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
-	writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);
-
-	/* Receive AUX Channel DEFER commands equal to DEFER_COUNT*64 */
-	reg = DEFER_CTRL_EN | DEFER_COUNT(1);
-	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL);
-
-	/* Enable AUX channel module */
-	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-	reg &= ~AUX_FUNC_EN_N;
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-}
-
-int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	if (gpio_is_valid(dp->hpd_gpio)) {
-		if (gpio_get_value(dp->hpd_gpio))
-			return 0;
-	} else {
-		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-		if (reg & HPD_STATUS)
-			return 0;
-	}
-
-	return -EINVAL;
-}
-
-void exynos_dp_enable_sw_function(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-	reg &= ~SW_FUNC_EN_N;
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-}
-
-int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp)
-{
-	int reg;
-	int retval = 0;
-	int timeout_loop = 0;
-
-	/* Enable AUX CH operation */
-	reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
-	reg |= AUX_EN;
-	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
-
-	/* Is AUX CH command reply received? */
-	reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
-	while (!(reg & RPLY_RECEIV)) {
-		timeout_loop++;
-		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
-			dev_err(dp->dev, "AUX CH command reply failed!\n");
-			return -ETIMEDOUT;
-		}
-		reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
-		usleep_range(10, 11);
-	}
-
-	/* Clear interrupt source for AUX CH command reply */
-	writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA);
-
-	/* Clear interrupt source for AUX CH access error */
-	reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
-	if (reg & AUX_ERR) {
-		writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA);
-		return -EREMOTEIO;
-	}
-
-	/* Check AUX CH error access status */
-	reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA);
-	if ((reg & AUX_STATUS_MASK) != 0) {
-		dev_err(dp->dev, "AUX CH error happens: %d\n\n",
-			reg & AUX_STATUS_MASK);
-		return -EREMOTEIO;
-	}
-
-	return retval;
-}
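
The reply-wait loop above is the usual poll-until-timeout idiom. For reference, a sketch of an equivalent built on the generic readl_poll_timeout() helper from <linux/iopoll.h>, assuming the same ~10 us poll interval and DP_TIMEOUT_LOOP_COUNT iterations (the driver keeps its open-coded loop):

	u32 status;
	int ret;

	ret = readl_poll_timeout(dp->reg_base + EXYNOS_DP_INT_STA, status,
				 status & RPLY_RECEIV, 10,
				 10 * DP_TIMEOUT_LOOP_COUNT);
	if (ret)
		dev_err(dp->dev, "AUX CH command reply failed!\n");
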
-
-int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned char data)
-{
-	u32 reg;
-	int i;
-	int retval;
-
-	for (i = 0; i < 3; i++) {
-		/* Clear AUX CH data buffer */
-		reg = BUF_CLR;
-		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
-		/* Select DPCD device address */
-		reg = AUX_ADDR_7_0(reg_addr);
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
-		reg = AUX_ADDR_15_8(reg_addr);
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
-		reg = AUX_ADDR_19_16(reg_addr);
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
-		/* Write data buffer */
-		reg = (unsigned int)data;
-		writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-
-		/*
-		 * Set DisplayPort transaction and write 1 byte
-		 * If bit 3 is 1, DisplayPort transaction.
-		 * If Bit 3 is 0, I2C transaction.
-		 */
-		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
-		/* Start AUX transaction */
-		retval = exynos_dp_start_aux_transaction(dp);
-		if (retval == 0)
-			break;
-		else
-			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-				__func__);
-	}
-
-	return retval;
-}
-
-int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned char *data)
-{
-	u32 reg;
-	int i;
-	int retval;
-
-	for (i = 0; i < 3; i++) {
-		/* Clear AUX CH data buffer */
-		reg = BUF_CLR;
-		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
-		/* Select DPCD device address */
-		reg = AUX_ADDR_7_0(reg_addr);
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
-		reg = AUX_ADDR_15_8(reg_addr);
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
-		reg = AUX_ADDR_19_16(reg_addr);
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
-		/*
-		 * Set DisplayPort transaction and read 1 byte
-		 * If bit 3 is 1, DisplayPort transaction.
-		 * If Bit 3 is 0, I2C transaction.
-		 */
-		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
-		/* Start AUX transaction */
-		retval = exynos_dp_start_aux_transaction(dp);
-		if (retval == 0)
-			break;
-		else
-			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-				__func__);
-	}
-
-	/* Read data buffer */
-	reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-	*data = (unsigned char)(reg & 0xff);
-
-	return retval;
-}
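
For comparison only: once a struct drm_dp_aux is registered, the generic helpers from <drm/drm_dp_helper.h> provide the same single-byte DPCD accesses. A sketch assuming a hypothetical aux member on the device struct (this driver does not use drm_dp_aux here):

	u8 val;

	/* read one DPCD byte, e.g. the current link rate */
	if (drm_dp_dpcd_readb(&dp->aux, DP_LINK_BW_SET, &val) < 0)
		return -EIO;

	/* write it back */
	if (drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, val) < 0)
		return -EIO;
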
-
-int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned int count,
-				unsigned char data[])
-{
-	u32 reg;
-	unsigned int start_offset;
-	unsigned int cur_data_count;
-	unsigned int cur_data_idx;
-	int i;
-	int retval = 0;
-
-	/* Clear AUX CH data buffer */
-	reg = BUF_CLR;
-	writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
-	start_offset = 0;
-	while (start_offset < count) {
-		/* Buffer size of AUX CH is 16 * 4bytes */
-		if ((count - start_offset) > 16)
-			cur_data_count = 16;
-		else
-			cur_data_count = count - start_offset;
-
-		for (i = 0; i < 3; i++) {
-			/* Select DPCD device address */
-			reg = AUX_ADDR_7_0(reg_addr + start_offset);
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
-			reg = AUX_ADDR_15_8(reg_addr + start_offset);
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
-			reg = AUX_ADDR_19_16(reg_addr + start_offset);
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
-			for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-			     cur_data_idx++) {
-				reg = data[start_offset + cur_data_idx];
-				writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0
-							  + 4 * cur_data_idx);
-			}
-
-			/*
-			 * Set DisplayPort transaction and write
-			 * If bit 3 is 1, DisplayPort transaction.
-			 * If Bit 3 is 0, I2C transaction.
-			 */
-			reg = AUX_LENGTH(cur_data_count) |
-				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
-			/* Start AUX transaction */
-			retval = exynos_dp_start_aux_transaction(dp);
-			if (retval == 0)
-				break;
-			else
-				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-					__func__);
-		}
-
-		start_offset += cur_data_count;
-	}
-
-	return retval;
-}
-
-int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
-				unsigned int reg_addr,
-				unsigned int count,
-				unsigned char data[])
-{
-	u32 reg;
-	unsigned int start_offset;
-	unsigned int cur_data_count;
-	unsigned int cur_data_idx;
-	int i;
-	int retval = 0;
-
-	/* Clear AUX CH data buffer */
-	reg = BUF_CLR;
-	writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
-	start_offset = 0;
-	while (start_offset < count) {
-		/* Buffer size of AUX CH is 16 * 4bytes */
-		if ((count - start_offset) > 16)
-			cur_data_count = 16;
-		else
-			cur_data_count = count - start_offset;
-
-		/* AUX CH Request Transaction process */
-		for (i = 0; i < 3; i++) {
-			/* Select DPCD device address */
-			reg = AUX_ADDR_7_0(reg_addr + start_offset);
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
-			reg = AUX_ADDR_15_8(reg_addr + start_offset);
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
-			reg = AUX_ADDR_19_16(reg_addr + start_offset);
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
-			/*
-			 * Set DisplayPort transaction and read
-			 * If bit 3 is 1, DisplayPort transaction.
-			 * If Bit 3 is 0, I2C transaction.
-			 */
-			reg = AUX_LENGTH(cur_data_count) |
-				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
-			/* Start AUX transaction */
-			retval = exynos_dp_start_aux_transaction(dp);
-			if (retval == 0)
-				break;
-			else
-				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-					__func__);
-		}
-
-		for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-		    cur_data_idx++) {
-			reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
-						 + 4 * cur_data_idx);
-			data[start_offset + cur_data_idx] =
-				(unsigned char)reg;
-		}
-
-		start_offset += cur_data_count;
-	}
-
-	return retval;
-}
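
The bulk DPCD helpers above split transfers because the AUX channel data buffer holds at most 16 bytes per transaction; the chunking arithmetic on its own, as a sketch:

	unsigned int offset = 0;

	while (offset < count) {
		/* at most 16 bytes fit into the AUX data buffer */
		unsigned int chunk = min_t(unsigned int, count - offset, 16);

		/* program AUX_ADDR_* with reg_addr + offset, move 'chunk'
		 * bytes through EXYNOS_DP_BUF_DATA_0, start the transaction
		 */
		offset += chunk;
	}
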
-
-int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
-				unsigned int device_addr,
-				unsigned int reg_addr)
-{
-	u32 reg;
-	int retval;
-
-	/* Set EDID device address */
-	reg = device_addr;
-	writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
-	writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
-	writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
-	/* Set offset from base address of EDID device */
-	writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-
-	/*
-	 * Set I2C transaction and write address
-	 * If bit 3 is 1, DisplayPort transaction.
-	 * If Bit 3 is 0, I2C transaction.
-	 */
-	reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
-		AUX_TX_COMM_WRITE;
-	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
-	/* Start AUX transaction */
-	retval = exynos_dp_start_aux_transaction(dp);
-	if (retval != 0)
-		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-
-	return retval;
-}
-
-int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
-				unsigned int device_addr,
-				unsigned int reg_addr,
-				unsigned int *data)
-{
-	u32 reg;
-	int i;
-	int retval;
-
-	for (i = 0; i < 3; i++) {
-		/* Clear AUX CH data buffer */
-		reg = BUF_CLR;
-		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
-		/* Select EDID device */
-		retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr);
-		if (retval != 0)
-			continue;
-
-		/*
-		 * Set I2C transaction and read data
-		 * If bit 3 is 1, DisplayPort transaction.
-		 * If Bit 3 is 0, I2C transaction.
-		 */
-		reg = AUX_TX_COMM_I2C_TRANSACTION |
-			AUX_TX_COMM_READ;
-		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
-		/* Start AUX transaction */
-		retval = exynos_dp_start_aux_transaction(dp);
-		if (retval == 0)
-			break;
-		else
-			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-				__func__);
-	}
-
-	/* Read data */
-	if (retval == 0)
-		*data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-
-	return retval;
-}
-
-int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
-				unsigned int device_addr,
-				unsigned int reg_addr,
-				unsigned int count,
-				unsigned char edid[])
-{
-	u32 reg;
-	unsigned int i, j;
-	unsigned int cur_data_idx;
-	unsigned int defer = 0;
-	int retval = 0;
-
-	for (i = 0; i < count; i += 16) {
-		for (j = 0; j < 3; j++) {
-			/* Clear AUX CH data buffer */
-			reg = BUF_CLR;
-			writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
-			/* Set normal AUX CH command */
-			reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
-			reg &= ~ADDR_ONLY;
-			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
-
-			/*
-			 * If Rx sends defer, Tx sends only reads
-			 * request without sending address
-			 */
-			if (!defer)
-				retval = exynos_dp_select_i2c_device(dp,
-						device_addr, reg_addr + i);
-			else
-				defer = 0;
-
-			if (retval == 0) {
-				/*
-				 * Set I2C transaction and write data
-				 * If bit 3 is 1, DisplayPort transaction.
-				 * If Bit 3 is 0, I2C transaction.
-				 */
-				reg = AUX_LENGTH(16) |
-					AUX_TX_COMM_I2C_TRANSACTION |
-					AUX_TX_COMM_READ;
-				writel(reg, dp->reg_base +
-					EXYNOS_DP_AUX_CH_CTL_1);
-
-				/* Start AUX transaction */
-				retval = exynos_dp_start_aux_transaction(dp);
-				if (retval == 0)
-					break;
-				else
-					dev_dbg(dp->dev,
-						"%s: Aux Transaction fail!\n",
-						__func__);
-			}
-			/* Check if Rx sends defer */
-			reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM);
-			if (reg == AUX_RX_COMM_AUX_DEFER ||
-				reg == AUX_RX_COMM_I2C_DEFER) {
-				dev_err(dp->dev, "Defer: %d\n\n", reg);
-				defer = 1;
-			}
-		}
-
-		for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
-			reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
-						 + 4 * cur_data_idx);
-			edid[i + cur_data_idx] = (unsigned char)reg;
-		}
-	}
-
-	return retval;
-}
-
-void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype)
-{
-	u32 reg;
-
-	reg = bwtype;
-	if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS))
-		writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET);
-}
-
-void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET);
-	*bwtype = reg;
-}
-
-void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count)
-{
-	u32 reg;
-
-	reg = count;
-	writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
-}
-
-void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
-	*count = reg;
-}
-
-void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable)
-{
-	u32 reg;
-
-	if (enable) {
-		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-		reg |= ENHANCED;
-		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-	} else {
-		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-		reg &= ~ENHANCED;
-		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-	}
-}
-
-void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
-				 enum pattern_set pattern)
-{
-	u32 reg;
-
-	switch (pattern) {
-	case PRBS7:
-		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
-		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-		break;
-	case D10_2:
-		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
-		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-		break;
-	case TRAINING_PTN1:
-		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
-		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-		break;
-	case TRAINING_PTN2:
-		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
-		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-		break;
-	case DP_NONE:
-		reg = SCRAMBLING_ENABLE |
-			LINK_QUAL_PATTERN_SET_DISABLE |
-			SW_TRAINING_PATTERN_SET_NORMAL;
-		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-		break;
-	default:
-		break;
-	}
-}
-
-void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
-	reg &= ~PRE_EMPHASIS_SET_MASK;
-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
-	reg &= ~PRE_EMPHASIS_SET_MASK;
-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
-	reg &= ~PRE_EMPHASIS_SET_MASK;
-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
-	reg &= ~PRE_EMPHASIS_SET_MASK;
-	reg |= level << PRE_EMPHASIS_SET_SHIFT;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
-					u32 training_lane)
-{
-	u32 reg;
-
-	reg = training_lane;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
-					u32 training_lane)
-{
-	u32 reg;
-
-	reg = training_lane;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
-					u32 training_lane)
-{
-	u32 reg;
-
-	reg = training_lane;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
-					u32 training_lane)
-{
-	u32 reg;
-
-	reg = training_lane;
-	writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
-}
-
-u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
-	return reg;
-}
-
-u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
-	return reg;
-}
-
-u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
-	return reg;
-}
-
-u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
-	return reg;
-}
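
The eight per-lane setters and getters above differ only in which LNx_LINK_TRAINING_CTL register they touch. A table-driven sketch of the same idea (illustration only):

static const u32 exynos_dp_lane_ctl_reg[] = {
	EXYNOS_DP_LN0_LINK_TRAINING_CTL,
	EXYNOS_DP_LN1_LINK_TRAINING_CTL,
	EXYNOS_DP_LN2_LINK_TRAINING_CTL,
	EXYNOS_DP_LN3_LINK_TRAINING_CTL,
};

static void exynos_dp_set_lane_link_training(struct exynos_dp_device *dp,
					     unsigned int lane, u32 val)
{
	writel(val, dp->reg_base + exynos_dp_lane_ctl_reg[lane]);
}

static u32 exynos_dp_get_lane_link_training(struct exynos_dp_device *dp,
					    unsigned int lane)
{
	return readl(dp->reg_base + exynos_dp_lane_ctl_reg[lane]);
}
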
-
-void exynos_dp_reset_macro(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST);
-	reg |= MACRO_RST;
-	writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
-
-	/* 10 us is the minimum reset time. */
-	usleep_range(10, 20);
-
-	reg &= ~MACRO_RST;
-	writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
-}
-
-void exynos_dp_init_video(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG;
-	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
-
-	reg = 0x0;
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-
-	reg = CHA_CRI(4) | CHA_CTRL;
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-
-	reg = 0x0;
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-
-	reg = VID_HRES_TH(2) | VID_VRES_TH(0);
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
-}
-
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	/* Configure the input color depth, color space, dynamic range */
-	reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) |
-		(dp->video_info->color_depth << IN_BPC_SHIFT) |
-		(dp->video_info->color_space << IN_COLOR_F_SHIFT);
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2);
-
-	/* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
-	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
-	reg &= ~IN_YC_COEFFI_MASK;
-	if (dp->video_info->ycbcr_coeff)
-		reg |= IN_YC_COEFFI_ITU709;
-	else
-		reg |= IN_YC_COEFFI_ITU601;
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
-}
-
-int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-
-	if (!(reg & DET_STA)) {
-		dev_dbg(dp->dev, "Input stream clock not detected.\n");
-		return -EINVAL;
-	}
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-	dev_dbg(dp->dev, "wait SYS_CTL_2.\n");
-
-	if (reg & CHA_STA) {
-		dev_dbg(dp->dev, "Input stream clk is changing\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
-		enum clock_recovery_m_value_type type,
-		u32 m_value,
-		u32 n_value)
-{
-	u32 reg;
-
-	if (type == REGISTER_M) {
-		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-		reg |= FIX_M_VID;
-		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-		reg = m_value & 0xff;
-		writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0);
-		reg = (m_value >> 8) & 0xff;
-		writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1);
-		reg = (m_value >> 16) & 0xff;
-		writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2);
-
-		reg = n_value & 0xff;
-		writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0);
-		reg = (n_value >> 8) & 0xff;
-		writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1);
-		reg = (n_value >> 16) & 0xff;
-		writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2);
-	} else  {
-		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-		reg &= ~FIX_M_VID;
-		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-
-		writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0);
-		writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1);
-		writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2);
-	}
-}
-
-void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type)
-{
-	u32 reg;
-
-	if (type == VIDEO_TIMING_FROM_CAPTURE) {
-		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-		reg &= ~FORMAT_SEL;
-		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-	} else {
-		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-		reg |= FORMAT_SEL;
-		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-	}
-}
-
-void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable)
-{
-	u32 reg;
-
-	if (enable) {
-		reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-		reg &= ~VIDEO_MODE_MASK;
-		reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
-		writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-	} else {
-		reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-		reg &= ~VIDEO_MODE_MASK;
-		reg |= VIDEO_MODE_SLAVE_MODE;
-		writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-	}
-}
-
-void exynos_dp_start_video(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-	reg |= VIDEO_EN;
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-}
-
-int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-	if (!(reg & STRM_VALID)) {
-		dev_dbg(dp->dev, "Input video stream is not detected.\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-	reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N);
-	reg |= MASTER_VID_FUNC_EN_N;
-	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-	reg &= ~INTERACE_SCAN_CFG;
-	reg |= (dp->video_info->interlaced << 2);
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-	reg &= ~VSYNC_POLARITY_CFG;
-	reg |= (dp->video_info->v_sync_polarity << 1);
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-
-	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-	reg &= ~HSYNC_POLARITY_CFG;
-	reg |= (dp->video_info->h_sync_polarity << 0);
-	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-
-	reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
-	writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-}
-
-void exynos_dp_enable_scrambling(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-	reg &= ~SCRAMBLING_DISABLE;
-	writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-}
-
-void exynos_dp_disable_scrambling(struct exynos_dp_device *dp)
-{
-	u32 reg;
-
-	reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-	reg |= SCRAMBLING_DISABLE;
-	writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e36579c..785ffa6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -157,9 +157,8 @@
 
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	struct exynos_drm_private *private = dev->dev_private;
-	struct exynos_drm_crtc *exynos_crtc =
-		to_exynos_crtc(private->crtc[pipe]);
+	struct exynos_drm_crtc *exynos_crtc = exynos_drm_crtc_from_pipe(dev,
+									pipe);
 
 	if (exynos_crtc->ops->enable_vblank)
 		return exynos_crtc->ops->enable_vblank(exynos_crtc);
@@ -169,9 +168,8 @@
 
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	struct exynos_drm_private *private = dev->dev_private;
-	struct exynos_drm_crtc *exynos_crtc =
-		to_exynos_crtc(private->crtc[pipe]);
+	struct exynos_drm_crtc *exynos_crtc = exynos_drm_crtc_from_pipe(dev,
+									pipe);
 
 	if (exynos_crtc->ops->disable_vblank)
 		exynos_crtc->ops->disable_vblank(exynos_crtc);
@@ -235,20 +233,15 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
 	e = exynos_crtc->event;
 	if (e && e->base.file_priv == file) {
 		exynos_crtc->event = NULL;
-		/*
-		 * event will be destroyed by core part
-		 * so below line should be removed later with core changes
-		 */
-		e->base.destroy(&e->base);
-		/*
-		 * event_space will be increased by core part
-		 * so below line should be removed later with core changes.
-		 */
-		file->event_space += sizeof(e->event);
 		atomic_dec(&exynos_crtc->pending_update);
 	}
+
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	if (e && e->base.file_priv == file)
+		drm_event_cancel_free(crtc->dev, &e->base);
 }
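
The hunk above relies on drm_event_cancel_free(): the pending event is detached while dev->event_lock is held, then handed back to the core outside the lock. A condensed sketch of that pattern (pending_update bookkeeping omitted):

	spin_lock_irqsave(&dev->event_lock, flags);
	e = exynos_crtc->event;
	if (e && e->base.file_priv == file)
		exynos_crtc->event = NULL;	/* detach under the lock */
	else
		e = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (e)
		drm_event_cancel_free(dev, &e->base);	/* free outside the lock */
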
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 75e570f..5e38e74 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -15,6 +15,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_atomic_helper.h>
 
+#include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 
 #include <video/of_videomode.h>
@@ -164,67 +165,6 @@
 	.destroy = drm_encoder_cleanup,
 };
 
-/* of_* functions will be removed after merge of of_graph patches */
-static struct device_node *
-of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
-{
-	struct device_node *np;
-
-	for_each_child_of_node(parent, np) {
-		u32 r;
-
-		if (!np->name || of_node_cmp(np->name, name))
-			continue;
-
-		if (of_property_read_u32(np, "reg", &r) < 0)
-			r = 0;
-
-		if (reg == r)
-			break;
-	}
-
-	return np;
-}
-
-static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
-						    u32 reg)
-{
-	struct device_node *ports, *port;
-
-	ports = of_get_child_by_name(parent, "ports");
-	if (ports)
-		parent = ports;
-
-	port = of_get_child_by_name_reg(parent, "port", reg);
-
-	of_node_put(ports);
-
-	return port;
-}
-
-static struct device_node *
-of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
-{
-	return of_get_child_by_name_reg(port, "endpoint", reg);
-}
-
-static struct device_node *
-of_graph_get_remote_port_parent(const struct device_node *node)
-{
-	struct device_node *np;
-	unsigned int depth;
-
-	np = of_parse_phandle(node, "remote-endpoint", 0);
-
-	/* Walk 3 levels up only if there is 'ports' node. */
-	for (depth = 3; depth && np; depth--) {
-		np = of_get_next_parent(np);
-		if (depth == 2 && of_node_cmp(np->name, "ports"))
-			break;
-	}
-	return np;
-}
-
 enum {
 	FIMD_PORT_IN0,
 	FIMD_PORT_IN1,
@@ -237,12 +177,7 @@
 {
 	struct device_node *np, *ep;
 
-	np = of_graph_get_port_by_reg(dev->of_node, FIMD_PORT_RGB);
-	if (!np)
-		return NULL;
-
-	ep = of_graph_get_endpoint_by_reg(np, 0);
-	of_node_put(np);
+	ep = of_graph_get_endpoint_by_regs(dev->of_node, FIMD_PORT_RGB, 0);
 	if (!ep)
 		return NULL;
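
of_graph_get_endpoint_by_regs() walks the standard ports/port@<reg>/endpoint@<reg> graph and returns the endpoint with a reference held. A sketch of a typical caller pattern (the remaining lines of this function are not shown in the hunk):

	struct device_node *ep, *remote;

	ep = of_graph_get_endpoint_by_regs(dev->of_node, FIMD_PORT_RGB, 0);
	if (!ep)
		return NULL;

	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);	/* drop the endpoint reference once resolved */

	return remote;
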
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 5344940..2dd820e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -212,13 +212,6 @@
 	 */
 	dev->irq_enabled = true;
 
-	/*
-	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
-	 * by drm timer once a current process gives up ownership of
-	 * vblank event.(after drm_vblank_put function is called)
-	 */
-	dev->vblank_disable_allowed = true;
-
 	/* init kms poll for handling hpd */
 	drm_kms_helper_poll_init(dev);
 
@@ -270,7 +263,7 @@
 }
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
-			 bool async)
+			 bool nonblock)
 {
 	struct exynos_drm_private *priv = dev->dev_private;
 	struct exynos_atomic_commit *commit;
@@ -308,7 +301,7 @@
 
 	drm_atomic_helper_swap_state(dev, state);
 
-	if (async)
+	if (nonblock)
 		schedule_work(&commit->work);
 	else
 		exynos_atomic_commit_complete(commit);
@@ -418,7 +411,7 @@
 	.get_vblank_counter	= drm_vblank_no_hw_counter,
 	.enable_vblank		= exynos_drm_crtc_enable_vblank,
 	.disable_vblank		= exynos_drm_crtc_disable_vblank,
-	.gem_free_object	= exynos_drm_gem_free_object,
+	.gem_free_object_unlocked = exynos_drm_gem_free_object,
 	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
 	.dumb_create		= exynos_drm_gem_dumb_create,
 	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
@@ -431,6 +424,7 @@
 	.gem_prime_import_sg_table	= exynos_drm_gem_prime_import_sg_table,
 	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
 	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
+	.gem_prime_mmap		= exynos_drm_gem_prime_mmap,
 	.ioctls			= exynos_ioctls,
 	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
 	.fops			= &exynos_drm_driver_fops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 502f750..cc33ec9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -120,8 +120,6 @@
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *	hardware overlay is updated.
  * @atomic_check: validate state
  * @atomic_begin: prepare device to receive an update
  * @atomic_flush: mark the end of device update
@@ -129,10 +127,6 @@
  * @disable_plane: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *	synchronization signal if there is a page flip request.
- * @clock_enable: optional function enabling/disabling display domain clock,
- *	called from exynos-dp driver before powering up (with
- *	'enable' argument as true) and after powering down (with
- *	'enable' as false).
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
@@ -141,7 +135,6 @@
 	void (*commit)(struct exynos_drm_crtc *crtc);
 	int (*enable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*disable_vblank)(struct exynos_drm_crtc *crtc);
-	void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
 	int (*atomic_check)(struct exynos_drm_crtc *crtc,
 			    struct drm_crtc_state *state);
 	void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -151,7 +144,10 @@
 			      struct exynos_drm_plane *plane);
 	void (*atomic_flush)(struct exynos_drm_crtc *crtc);
 	void (*te_handler)(struct exynos_drm_crtc *crtc);
-	void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
+};
+
+struct exynos_drm_clk {
+	void (*enable)(struct exynos_drm_clk *clk, bool enable);
 };
 
 /*
@@ -182,8 +178,16 @@
 	atomic_t			pending_update;
 	const struct exynos_drm_crtc_ops	*ops;
 	void				*ctx;
+	struct exynos_drm_clk		*pipe_clk;
 };
 
+static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
+					      bool enable)
+{
+	if (crtc->pipe_clk)
+		crtc->pipe_clk->enable(crtc->pipe_clk, enable);
+}
+
 struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
@@ -232,6 +236,14 @@
 	wait_queue_head_t	wait;
 };
 
+static inline struct exynos_drm_crtc *
+exynos_drm_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+
+	return to_exynos_crtc(private->crtc[pipe]);
+}
+
 static inline struct device *to_dma_dev(struct drm_device *dev)
 {
 	struct exynos_drm_private *priv = dev->dev_private;
@@ -296,7 +308,7 @@
 #endif
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
-			 bool async);
+			 bool nonblock);
 
 
 extern struct platform_driver fimd_driver;
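
The new exynos_drm_clk hook lets an encoder ask its CRTC driver to gate the pipeline clock without a dedicated callback in exynos_drm_crtc_ops. A sketch of the wiring, mirroring the fimd changes later in this patch (foo_ctx and its fields are illustrative):

static void foo_pipe_clk_enable(struct exynos_drm_clk *clk, bool enable)
{
	struct foo_ctx *ctx = container_of(clk, struct foo_ctx, dp_clk);

	/* gate or ungate the display-path clock for this pipeline */
	writel(enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE,
	       ctx->regs + DP_MIE_CLKCON);
}

	/* at bind time */
	ctx->dp_clk.enable = foo_pipe_clk_enable;
	ctx->crtc->pipe_clk = &ctx->dp_clk;

	/* consumers then simply call */
	exynos_drm_pipe_clk_enable(crtc, true);
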
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 63c84a1..601ecf8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -280,7 +280,7 @@
 	spinlock_t transfer_lock; /* protects transfer_list */
 	struct list_head transfer_list;
 
-	struct exynos_dsi_driver_data *driver_data;
+	const struct exynos_dsi_driver_data *driver_data;
 	struct device_node *bridge_node;
 };
 
@@ -532,15 +532,6 @@
 	{ }
 };
 
-static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
-						struct platform_device *pdev)
-{
-	const struct of_device_id *of_id =
-			of_match_device(exynos_dsi_of_match, &pdev->dev);
-
-	return (struct exynos_dsi_driver_data *)of_id->data;
-}
-
 static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
 {
 	if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -564,7 +555,7 @@
 static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
 		unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
 {
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	unsigned long best_freq = 0;
 	u32 min_delta = 0xffffffff;
 	u8 p_min, p_max;
@@ -618,7 +609,7 @@
 static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
 					unsigned long freq)
 {
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	unsigned long fin, fout;
 	int timeout;
 	u8 p, s;
@@ -712,7 +703,7 @@
 
 static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
 {
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	const unsigned int *reg_values = driver_data->reg_values;
 	u32 reg;
 
@@ -790,7 +781,7 @@
 
 static int exynos_dsi_init_link(struct exynos_dsi *dsi)
 {
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	int timeout;
 	u32 reg;
 	u32 lanes_mask;
@@ -1334,7 +1325,7 @@
 
 static int exynos_dsi_init(struct exynos_dsi *dsi)
 {
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 
 	exynos_dsi_reset(dsi);
 	exynos_dsi_enable_irq(dsi);
@@ -1641,50 +1632,6 @@
 
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
 
-/* of_* functions will be removed after merge of of_graph patches */
-static struct device_node *
-of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
-{
-	struct device_node *np;
-
-	for_each_child_of_node(parent, np) {
-		u32 r;
-
-		if (!np->name || of_node_cmp(np->name, name))
-			continue;
-
-		if (of_property_read_u32(np, "reg", &r) < 0)
-			r = 0;
-
-		if (reg == r)
-			break;
-	}
-
-	return np;
-}
-
-static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
-						    u32 reg)
-{
-	struct device_node *ports, *port;
-
-	ports = of_get_child_by_name(parent, "ports");
-	if (ports)
-		parent = ports;
-
-	port = of_get_child_by_name_reg(parent, "port", reg);
-
-	of_node_put(ports);
-
-	return port;
-}
-
-static struct device_node *
-of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
-{
-	return of_get_child_by_name_reg(port, "endpoint", reg);
-}
-
 static int exynos_dsi_of_read_u32(const struct device_node *np,
 				  const char *propname, u32 *out_value)
 {
@@ -1706,7 +1653,7 @@
 {
 	struct device *dev = dsi->dev;
 	struct device_node *node = dev->of_node;
-	struct device_node *port, *ep;
+	struct device_node *ep;
 	int ret;
 
 	ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1714,16 +1661,9 @@
 	if (ret < 0)
 		return ret;
 
-	port = of_graph_get_port_by_reg(node, DSI_PORT_OUT);
-	if (!port) {
-		dev_err(dev, "no output port specified\n");
-		return -EINVAL;
-	}
-
-	ep = of_graph_get_endpoint_by_reg(port, 0);
-	of_node_put(port);
+	ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
 	if (!ep) {
-		dev_err(dev, "no endpoint specified in output port\n");
+		dev_err(dev, "no output port with endpoint specified\n");
 		return -EINVAL;
 	}
 
@@ -1833,7 +1773,7 @@
 	dsi->dsi_host.dev = dev;
 
 	dsi->dev = dev;
-	dsi->driver_data = exynos_dsi_get_driver_data(pdev);
+	dsi->driver_data = of_device_get_match_data(dev);
 
 	ret = exynos_dsi_parse_dt(dsi);
 	if (ret)
@@ -1917,7 +1857,7 @@
 {
 	struct drm_encoder *encoder = dev_get_drvdata(dev);
 	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	int ret, i;
 
 	usleep_range(10000, 20000);
@@ -1948,7 +1888,7 @@
 {
 	struct drm_encoder *encoder = dev_get_drvdata(dev);
 	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
 	int ret, i;
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 81cc553..e016640 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -97,20 +97,9 @@
 				     &exynos_fb->exynos_gem[0]->base, handle);
 }
 
-static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
-				struct drm_file *file_priv, unsigned flags,
-				unsigned color, struct drm_clip_rect *clips,
-				unsigned num_clips)
-{
-	/* TODO */
-
-	return 0;
-}
-
 static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
 	.destroy	= exynos_drm_fb_destroy,
 	.create_handle	= exynos_drm_fb_create_handle,
-	.dirty		= exynos_drm_fb_dirty,
 };
 
 struct drm_framebuffer *
@@ -163,8 +152,7 @@
 	int ret;
 
 	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
-		obj = drm_gem_object_lookup(dev, file_priv,
-					    mode_cmd->handles[i]);
+		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
 			ret = -ENOENT;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 72d7c0b..67dcd68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -138,8 +138,6 @@
 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
 							  sizes->surface_depth);
 
-	mutex_lock(&dev->struct_mutex);
-
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
 	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
@@ -154,10 +152,8 @@
 						   size);
 	}
 
-	if (IS_ERR(exynos_gem)) {
-		ret = PTR_ERR(exynos_gem);
-		goto out;
-	}
+	if (IS_ERR(exynos_gem))
+		return PTR_ERR(exynos_gem);
 
 	exynos_fbdev->exynos_gem = exynos_gem;
 
@@ -173,7 +169,6 @@
 	if (ret < 0)
 		goto err_destroy_framebuffer;
 
-	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
 err_destroy_framebuffer:
@@ -181,13 +176,12 @@
 err_destroy_gem:
 	exynos_drm_gem_destroy(exynos_gem);
 
-/*
- * if failed, all resources allocated above would be released by
- * drm_mode_config_cleanup() when drm_load() had been called prior
- * to any specific driver such as fimd or hdmi driver.
- */
-out:
-	mutex_unlock(&dev->struct_mutex);
+	/*
+	 * If this failed, all resources allocated above would be released by
+	 * drm_mode_config_cleanup(), since drm_load() had been called prior
+	 * to any specific driver such as fimd or hdmi.
+	 */
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 018449f..3efe1aa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -68,10 +68,15 @@
 /* color key value register for hardware window 1 ~ 4. */
 #define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + ((x - 1) * 8))
 
-/* I80 / RGB trigger control register */
+/* I80 trigger control register */
 #define TRIGCON				0x1A4
-#define TRGMODE_I80_RGB_ENABLE_I80	(1 << 0)
-#define SWTRGCMD_I80_RGB_ENABLE		(1 << 1)
+#define TRGMODE_ENABLE			(1 << 0)
+#define SWTRGCMD_ENABLE			(1 << 1)
+/* Supported only on Exynos3250, 3472, 4415, 5260, 5410, 5420 and 5422. */
+#define HWTRGEN_ENABLE			(1 << 3)
+#define HWTRGMASK_ENABLE		(1 << 4)
+/* Supported only on Exynos3250, 3472, 4415, 5260, 5420 and 5422. */
+#define HWTRIGEN_PER_ENABLE		(1 << 31)
 
 /* display mode change control register except exynos4 */
 #define VIDOUT_CON			0x000
@@ -89,12 +94,16 @@
 /* FIMD has totally five hardware windows. */
 #define WINDOWS_NR	5
 
+/* HW trigger flag on i80 panel. */
+#define I80_HW_TRG     (1 << 1)
+
 struct fimd_driver_data {
 	unsigned int timing_base;
 	unsigned int lcdblk_offset;
 	unsigned int lcdblk_vt_shift;
 	unsigned int lcdblk_bypass_shift;
 	unsigned int lcdblk_mic_bypass_shift;
+	unsigned int trg_type;
 
 	unsigned int has_shadowcon:1;
 	unsigned int has_clksel:1;
@@ -102,20 +111,26 @@
 	unsigned int has_vidoutcon:1;
 	unsigned int has_vtsel:1;
 	unsigned int has_mic_bypass:1;
+	unsigned int has_dp_clk:1;
+	unsigned int has_hw_trigger:1;
+	unsigned int has_trigger_per_te:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
 	.timing_base = 0x0,
 	.has_clksel = 1,
 	.has_limited_fmt = 1,
+	.has_hw_trigger = 1,
 };
 
 static struct fimd_driver_data exynos3_fimd_driver_data = {
 	.timing_base = 0x20000,
 	.lcdblk_offset = 0x210,
 	.lcdblk_bypass_shift = 1,
+	.trg_type = I80_HW_TRG,
 	.has_shadowcon = 1,
 	.has_vidoutcon = 1,
+	.has_trigger_per_te = 1,
 };
 
 static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -132,9 +147,11 @@
 	.lcdblk_offset = 0x210,
 	.lcdblk_vt_shift = 10,
 	.lcdblk_bypass_shift = 1,
+	.trg_type = I80_HW_TRG,
 	.has_shadowcon = 1,
 	.has_vidoutcon = 1,
 	.has_vtsel = 1,
+	.has_trigger_per_te = 1,
 };
 
 static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -145,6 +162,7 @@
 	.has_shadowcon = 1,
 	.has_vidoutcon = 1,
 	.has_vtsel = 1,
+	.has_dp_clk = 1,
 };
 
 static struct fimd_driver_data exynos5420_fimd_driver_data = {
@@ -153,10 +171,14 @@
 	.lcdblk_vt_shift = 24,
 	.lcdblk_bypass_shift = 15,
 	.lcdblk_mic_bypass_shift = 11,
+	.trg_type = I80_HW_TRG,
 	.has_shadowcon = 1,
 	.has_vidoutcon = 1,
 	.has_vtsel = 1,
 	.has_mic_bypass = 1,
+	.has_dp_clk = 1,
+	.has_hw_trigger = 1,
+	.has_trigger_per_te = 1,
 };
 
 struct fimd_context {
@@ -182,8 +204,9 @@
 	atomic_t			win_updated;
 	atomic_t			triggering;
 
-	struct fimd_driver_data *driver_data;
+	const struct fimd_driver_data *driver_data;
 	struct drm_encoder *encoder;
+	struct exynos_drm_clk		dp_clk;
 };
 
 static const struct of_device_id fimd_driver_dt_match[] = {
@@ -219,15 +242,6 @@
 	DRM_FORMAT_ARGB8888,
 };
 
-static inline struct fimd_driver_data *drm_fimd_get_driver_data(
-	struct platform_device *pdev)
-{
-	const struct of_device_id *of_id =
-			of_match_device(fimd_driver_dt_match, &pdev->dev);
-
-	return (struct fimd_driver_data *)of_id->data;
-}
-
 static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
@@ -383,9 +397,16 @@
 static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
 		const struct drm_display_mode *mode)
 {
-	unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+	unsigned long ideal_clk;
 	u32 clkdiv;
 
+	if (mode->clock == 0) {
+		DRM_ERROR("Mode has zero clock value.\n");
+		return 0xff;
+	}
+
+	ideal_clk = mode->clock * 1000;
+
 	if (ctx->i80_if) {
 		/*
 		 * The frame done interrupt should be occurred prior to the
@@ -400,11 +421,31 @@
 	return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
+static void fimd_setup_trigger(struct fimd_context *ctx)
+{
+	void __iomem *timing_base = ctx->regs + ctx->driver_data->timing_base;
+	u32 trg_type = ctx->driver_data->trg_type;
+	u32 val = readl(timing_base + TRIGCON);
+
+	val &= ~(TRGMODE_ENABLE);
+
+	if (trg_type == I80_HW_TRG) {
+		if (ctx->driver_data->has_hw_trigger)
+			val |= HWTRGEN_ENABLE | HWTRGMASK_ENABLE;
+		if (ctx->driver_data->has_trigger_per_te)
+			val |= HWTRIGEN_PER_ENABLE;
+	} else {
+		val |= TRGMODE_ENABLE;
+	}
+
+	writel(val, timing_base + TRIGCON);
+}
+
 static void fimd_commit(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
 	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
-	struct fimd_driver_data *driver_data = ctx->driver_data;
+	const struct fimd_driver_data *driver_data = ctx->driver_data;
 	void *timing_base = ctx->regs + driver_data->timing_base;
 	u32 val, clkdiv;
 
@@ -495,6 +536,8 @@
 	       VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
+	fimd_setup_trigger(ctx);
+
 	/*
 	 * fields of register with prefix '_F' would be updated
 	 * at vsync(same as dma start)
@@ -827,7 +870,7 @@
 static void fimd_trigger(struct device *dev)
 {
 	struct fimd_context *ctx = dev_get_drvdata(dev);
-	struct fimd_driver_data *driver_data = ctx->driver_data;
+	const struct fimd_driver_data *driver_data = ctx->driver_data;
 	void *timing_base = ctx->regs + driver_data->timing_base;
 	u32 reg;
 
@@ -842,7 +885,7 @@
 	atomic_set(&ctx->triggering, 1);
 
 	reg = readl(timing_base + TRIGCON);
-	reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+	reg |= (TRGMODE_ENABLE | SWTRGCMD_ENABLE);
 	writel(reg, timing_base + TRIGCON);
 
 	/*
@@ -856,11 +899,15 @@
 static void fimd_te_handler(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
+	u32 trg_type = ctx->driver_data->trg_type;
 
 	/* Checks the crtc is detached already from encoder */
 	if (ctx->pipe < 0 || !ctx->drm_dev)
 		return;
 
+	if (trg_type == I80_HW_TRG)
+		goto out;
+
 	/*
 	 * If there is a page flip request, triggers and handles the page flip
 	 * event so that current fb can be updated into panel GRAM.
@@ -868,6 +915,7 @@
 	if (atomic_add_unless(&ctx->win_updated, -1, 0))
 		fimd_trigger(ctx->dev);
 
+out:
 	/* Wakes up vsync event queue */
 	if (atomic_read(&ctx->wait_vsync_event)) {
 		atomic_set(&ctx->wait_vsync_event, 0);
@@ -878,21 +926,11 @@
 		drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
-static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
 {
-	struct fimd_context *ctx = crtc->ctx;
-	u32 val;
-
-	/*
-	 * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
-	 * clock. On these SoCs the bootloader may enable it but any
-	 * power domain off/on will reset it to disable state.
-	 */
-	if (ctx->driver_data != &exynos5_fimd_driver_data &&
-	    ctx->driver_data != &exynos5420_fimd_driver_data)
-		return;
-
-	val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+	struct fimd_context *ctx = container_of(clk, struct fimd_context,
+						dp_clk);
+	u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
 	writel(val, ctx->regs + DP_MIE_CLKCON);
 }
 
@@ -902,13 +940,11 @@
 	.commit = fimd_commit,
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
-	.wait_for_vblank = fimd_wait_for_vblank,
 	.atomic_begin = fimd_atomic_begin,
 	.update_plane = fimd_update_plane,
 	.disable_plane = fimd_disable_plane,
 	.atomic_flush = fimd_atomic_flush,
 	.te_handler = fimd_te_handler,
-	.clock_enable = fimd_dp_clock_enable,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -987,6 +1023,11 @@
 	if (IS_ERR(ctx->crtc))
 		return PTR_ERR(ctx->crtc);
 
+	if (ctx->driver_data->has_dp_clk) {
+		ctx->dp_clk.enable = fimd_dp_clock_enable;
+		ctx->crtc->pipe_clk = &ctx->dp_clk;
+	}
+
 	if (ctx->encoder)
 		exynos_dpi_bind(drm_dev, ctx->encoder);
 
@@ -1035,7 +1076,7 @@
 
 	ctx->dev = dev;
 	ctx->suspended = true;
-	ctx->driver_data = drm_fimd_get_driver_data(pdev);
+	ctx->driver_data = of_device_get_match_data(dev);
 
 	if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
 		ctx->vidcon1 |= VIDCON1_INV_VDEN;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 193d360..4935523 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -383,8 +383,8 @@
 		return;
 
 out:
-	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
-					DMA_BIDIRECTIONAL);
+	dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+			g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
 
 	pages = frame_vector_pages(g2d_userptr->vec);
 	if (!IS_ERR(pages)) {
@@ -501,10 +501,10 @@
 
 	g2d_userptr->sgt = sgt;
 
-	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
-						DMA_BIDIRECTIONAL);
-	if (ret < 0) {
+	if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+				DMA_BIDIRECTIONAL)) {
 		DRM_ERROR("failed to map sgt with dma region.\n");
+		ret = -ENOMEM;
 		goto err_sg_free_table;
 	}
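
dma_map_sg() returns the number of mapped entries and 0 on failure, which is why the error path above converts a zero return into -ENOMEM; the unmap must use the same scatterlist and direction. A minimal sketch of the pairing:

	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
	if (!nents)
		return -ENOMEM;

	/* ... hardware accesses the buffer ... */

	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
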
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 2914d62..cdf9f1a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -177,7 +177,7 @@
 	struct exynos_drm_gem *exynos_gem;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(file_priv, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return 0;
@@ -296,7 +296,7 @@
 	struct exynos_drm_gem *exynos_gem;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, filp, gem_handle);
+	obj = drm_gem_object_lookup(filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return ERR_PTR(-EINVAL);
@@ -313,7 +313,7 @@
 {
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, filp, gem_handle);
+	obj = drm_gem_object_lookup(filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return;
@@ -362,12 +362,9 @@
 	struct drm_exynos_gem_info *args = data;
 	struct drm_gem_object *obj;
 
-	mutex_lock(&dev->struct_mutex);
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
-		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
@@ -376,38 +373,11 @@
 	args->flags = exynos_gem->flags;
 	args->size = exynos_gem->size;
 
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 
 	return 0;
 }
 
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
-				struct sg_table *sgt,
-				enum dma_data_direction dir)
-{
-	int nents;
-
-	mutex_lock(&drm_dev->struct_mutex);
-
-	nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
-	if (!nents) {
-		DRM_ERROR("failed to map sgl with dma.\n");
-		mutex_unlock(&drm_dev->struct_mutex);
-		return nents;
-	}
-
-	mutex_unlock(&drm_dev->struct_mutex);
-	return 0;
-}
-
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
-				struct sg_table *sgt,
-				enum dma_data_direction dir)
-{
-	dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
-}
-
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
 	exynos_drm_gem_destroy(to_exynos_gem(obj));
@@ -458,27 +428,22 @@
 	struct drm_gem_object *obj;
 	int ret = 0;
 
-	mutex_lock(&dev->struct_mutex);
-
 	/*
 	 * get offset of memory allocated for drm framebuffer.
 	 * - this callback would be called by user application
 	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
 	 */
 
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
-	drm_gem_object_unreference(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
@@ -516,22 +481,12 @@
 	}
 }
 
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
+				   struct vm_area_struct *vma)
 {
-	struct exynos_drm_gem *exynos_gem;
-	struct drm_gem_object *obj;
+	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
 	int ret;
 
-	/* set vm_area_struct. */
-	ret = drm_gem_mmap(filp, vma);
-	if (ret < 0) {
-		DRM_ERROR("failed to mmap.\n");
-		return ret;
-	}
-
-	obj = vma->vm_private_data;
-	exynos_gem = to_exynos_gem(obj);
-
 	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
 
 	/* non-cachable as default. */
@@ -556,6 +511,26 @@
 	return ret;
 }
 
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	/* set vm_area_struct. */
+	ret = drm_gem_mmap(filp, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	obj = vma->vm_private_data;
+
+	if (obj->import_attach)
+		return dma_buf_mmap(obj->dma_buf, vma, 0);
+
+	return exynos_drm_gem_mmap_obj(obj, vma);
+}
+
 /* low-level interface prime helpers */
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -630,3 +605,15 @@
 {
 	/* Nothing to do */
 }
+
+int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
+			      struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap_obj(obj, obj->size, vma);
+	if (ret < 0)
+		return ret;
+
+	return exynos_drm_gem_mmap_obj(obj, vma);
+}
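
For orientation, the resulting mmap paths in this file, sketched as a call-flow comment (derived only from the code above):

/*
 * mmap() on the DRM device fd:
 *   exynos_drm_gem_mmap()
 *     -> drm_gem_mmap()                   vma setup, offset lookup
 *     -> dma_buf_mmap()                   imported (PRIME) buffers only
 *     -> exynos_drm_gem_mmap_obj()        cache attributes + backing pages
 *
 * mmap() forwarded from a dma-buf exported by this driver:
 *   exynos_drm_gem_prime_mmap()
 *     -> drm_gem_mmap_obj()
 *     -> exynos_drm_gem_mmap_obj()
 */
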
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 0022305..7810074 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -121,16 +121,6 @@
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
-/* map sgt with dma region. */
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
-				struct sg_table *sgt,
-				enum dma_data_direction dir);
-
-/* unmap sgt from dma region. */
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
-				struct sg_table *sgt,
-				enum dma_data_direction dir);
-
 /* low-level interface prime helpers */
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
@@ -139,5 +129,7 @@
 				     struct sg_table *sgt);
 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
 void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
+			      struct vm_area_struct *vma);
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 50185ac..55f1d37 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -166,7 +166,7 @@
 {
 	struct exynos_drm_plane_state *old_exynos_state =
 					to_exynos_plane_state(old_state);
-	__drm_atomic_helper_plane_destroy_state(plane, old_state);
+	__drm_atomic_helper_plane_destroy_state(old_state);
 	kfree(old_exynos_state);
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f18fbe4..404367a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
@@ -696,7 +697,6 @@
 	struct device *dev = &pdev->dev;
 	struct rot_context *rot;
 	struct exynos_drm_ippdrv *ippdrv;
-	const struct of_device_id *match;
 	int ret;
 
 	if (!dev->of_node) {
@@ -708,13 +708,8 @@
 	if (!rot)
 		return -ENOMEM;
 
-	match = of_match_node(exynos_rotator_match, dev->of_node);
-	if (!match) {
-		dev_err(dev, "failed to match node\n");
-		return -ENODEV;
-	}
-	rot->limit_tbl = (struct rot_limit_table *)match->data;
-
+	rot->limit_tbl = (struct rot_limit_table *)
+				of_device_get_match_data(dev);
 	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	rot->regs = devm_ioremap_resource(dev, rot->regs_res);
 	if (IS_ERR(rot->regs))
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e148d72..58de5a4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -7,9 +7,9 @@
  *
  * Based on drivers/media/video/s5p-tv/hdmi_drv.c
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
  * option) any later version.
  *
  */
@@ -49,14 +49,16 @@
 
 /* AVI header and aspect ratio */
 #define HDMI_AVI_VERSION		0x02
-#define HDMI_AVI_LENGTH		0x0D
+#define HDMI_AVI_LENGTH			0x0d
 
 /* AUI header info */
-#define HDMI_AUI_VERSION	0x01
-#define HDMI_AUI_LENGTH	0x0A
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8
-#define AVI_4_3_CENTER_RATIO	0x9
-#define AVI_16_9_CENTER_RATIO	0xa
+#define HDMI_AUI_VERSION		0x01
+#define HDMI_AUI_LENGTH			0x0a
+
+/* AVI active format aspect ratio */
+#define AVI_SAME_AS_PIC_ASPECT_RATIO	0x08
+#define AVI_4_3_CENTER_RATIO		0x09
+#define AVI_16_9_CENTER_RATIO		0x0a
 
 enum hdmi_type {
 	HDMI_TYPE13,
@@ -90,11 +92,34 @@
 	"vdd_pll",
 };
 
+struct hdmiphy_config {
+	int pixel_clock;
+	u8 conf[32];
+};
+
+struct hdmiphy_configs {
+	int count;
+	const struct hdmiphy_config *data;
+};
+
+struct string_array_spec {
+	int count;
+	const char * const *data;
+};
+
+#define INIT_ARRAY_SPEC(a) { .count = ARRAY_SIZE(a), .data = a }
+
 struct hdmi_driver_data {
 	unsigned int type;
-	const struct hdmiphy_config *phy_confs;
-	unsigned int phy_conf_count;
 	unsigned int is_apb_phy:1;
+	unsigned int has_sysreg:1;
+	struct hdmiphy_configs phy_confs;
+	struct string_array_spec clk_gates;
+	/*
+	 * Array of triplets (p_off, p_on, clock), where p_off and p_on are
+	 * required parents of clock when HDMI-PHY is respectively off or on.
+	 */
+	struct string_array_spec clk_muxes;
 };
 
 struct hdmi_context {
@@ -116,13 +141,12 @@
 	struct gpio_desc		*hpd_gpio;
 	int				irq;
 	struct regmap			*pmureg;
-	struct clk			*hdmi;
-	struct clk			*sclk_hdmi;
-	struct clk			*sclk_pixel;
-	struct clk			*sclk_hdmiphy;
-	struct clk			*mout_hdmi;
+	struct regmap			*sysreg;
+	struct clk			**clk_gates;
+	struct clk			**clk_muxes;
 	struct regulator_bulk_data	regul_bulk[ARRAY_SIZE(supply)];
 	struct regulator		*reg_hdmi_en;
+	struct exynos_drm_clk		phy_clk;
 };
 
 static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -135,12 +159,6 @@
 	return container_of(c, struct hdmi_context, connector);
 }
 
-struct hdmiphy_config {
-	int pixel_clock;
-	u8 conf[32];
-};
-
-/* list of phy config settings */
 static const struct hdmiphy_config hdmiphy_v13_configs[] = {
 	{
 		.pixel_clock = 27000000,
@@ -501,25 +519,136 @@
 	},
 };
 
-static struct hdmi_driver_data exynos5420_hdmi_driver_data = {
-	.type		= HDMI_TYPE14,
-	.phy_confs	= hdmiphy_5420_configs,
-	.phy_conf_count	= ARRAY_SIZE(hdmiphy_5420_configs),
-	.is_apb_phy	= 1,
+static const struct hdmiphy_config hdmiphy_5433_configs[] = {
+	{
+		.pixel_clock = 27000000,
+		.conf = {
+			0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
+			0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
+			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 27027000,
+		.conf = {
+			0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
+			0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
+			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 40000000,
+		.conf = {
+			0x01, 0x51, 0x32, 0x55, 0x01, 0x00, 0x88, 0x02,
+			0x4d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 50000000,
+		.conf = {
+			0x01, 0x51, 0x34, 0x40, 0x64, 0x09, 0x88, 0xc3,
+			0x3d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 65000000,
+		.conf = {
+			0x01, 0x51, 0x36, 0x31, 0x40, 0x10, 0x04, 0xc6,
+			0x2e, 0xe8, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 74176000,
+		.conf = {
+			0x01, 0x51, 0x3E, 0x35, 0x5B, 0xDE, 0x88, 0x42,
+			0x53, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 74250000,
+		.conf = {
+			0x01, 0x51, 0x3E, 0x35, 0x40, 0xF0, 0x88, 0xC2,
+			0x52, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 108000000,
+		.conf = {
+			0x01, 0x51, 0x2d, 0x15, 0x01, 0x00, 0x88, 0x02,
+			0x72, 0x52, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+			0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+		},
+	},
+	{
+		.pixel_clock = 148500000,
+		.conf = {
+			0x01, 0x51, 0x1f, 0x00, 0x40, 0xf8, 0x88, 0xc1,
+			0x52, 0x52, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
+			0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+			0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
+		},
+	},
 };
 
-static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
-	.type		= HDMI_TYPE14,
-	.phy_confs	= hdmiphy_v14_configs,
-	.phy_conf_count	= ARRAY_SIZE(hdmiphy_v14_configs),
-	.is_apb_phy	= 0,
+static const char * const hdmi_clk_gates4[] = {
+	"hdmi", "sclk_hdmi"
 };
 
-static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+static const char * const hdmi_clk_muxes4[] = {
+	"sclk_pixel", "sclk_hdmiphy", "mout_hdmi"
+};
+
+static const char * const hdmi_clk_gates5433[] = {
+	"hdmi_pclk", "hdmi_i_pclk", "i_tmds_clk", "i_pixel_clk", "i_spdif_clk"
+};
+
+static const char * const hdmi_clk_muxes5433[] = {
+	"oscclk", "tmds_clko", "tmds_clko_user",
+	"oscclk", "pixel_clko", "pixel_clko_user"
+};
+
+static const struct hdmi_driver_data exynos4210_hdmi_driver_data = {
 	.type		= HDMI_TYPE13,
-	.phy_confs	= hdmiphy_v13_configs,
-	.phy_conf_count	= ARRAY_SIZE(hdmiphy_v13_configs),
-	.is_apb_phy	= 0,
+	.phy_confs	= INIT_ARRAY_SPEC(hdmiphy_v13_configs),
+	.clk_gates	= INIT_ARRAY_SPEC(hdmi_clk_gates4),
+	.clk_muxes	= INIT_ARRAY_SPEC(hdmi_clk_muxes4),
+};
+
+static const struct hdmi_driver_data exynos4212_hdmi_driver_data = {
+	.type		= HDMI_TYPE14,
+	.phy_confs	= INIT_ARRAY_SPEC(hdmiphy_v14_configs),
+	.clk_gates	= INIT_ARRAY_SPEC(hdmi_clk_gates4),
+	.clk_muxes	= INIT_ARRAY_SPEC(hdmi_clk_muxes4),
+};
+
+static const struct hdmi_driver_data exynos5420_hdmi_driver_data = {
+	.type		= HDMI_TYPE14,
+	.is_apb_phy	= 1,
+	.phy_confs	= INIT_ARRAY_SPEC(hdmiphy_5420_configs),
+	.clk_gates	= INIT_ARRAY_SPEC(hdmi_clk_gates4),
+	.clk_muxes	= INIT_ARRAY_SPEC(hdmi_clk_muxes4),
+};
+
+static const struct hdmi_driver_data exynos5433_hdmi_driver_data = {
+	.type		= HDMI_TYPE14,
+	.is_apb_phy	= 1,
+	.has_sysreg     = 1,
+	.phy_confs	= INIT_ARRAY_SPEC(hdmiphy_5433_configs),
+	.clk_gates	= INIT_ARRAY_SPEC(hdmi_clk_gates5433),
+	.clk_muxes	= INIT_ARRAY_SPEC(hdmi_clk_muxes5433),
 };
 
 static inline u32 hdmi_map_reg(struct hdmi_context *hdata, u32 reg_id)
@@ -585,266 +714,52 @@
 	}
 }
 
-static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
+static int hdmi_clk_enable_gates(struct hdmi_context *hdata)
 {
-#define DUMPREG(reg_id) \
-	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
-	readl(hdata->regs + reg_id))
-	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_INTC_FLAG);
-	DUMPREG(HDMI_INTC_CON);
-	DUMPREG(HDMI_HPD_STATUS);
-	DUMPREG(HDMI_V13_PHY_RSTOUT);
-	DUMPREG(HDMI_V13_PHY_VPLL);
-	DUMPREG(HDMI_V13_PHY_CMU);
-	DUMPREG(HDMI_V13_CORE_RSTOUT);
+	int i, ret;
 
-	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_CON_0);
-	DUMPREG(HDMI_CON_1);
-	DUMPREG(HDMI_CON_2);
-	DUMPREG(HDMI_SYS_STATUS);
-	DUMPREG(HDMI_V13_PHY_STATUS);
-	DUMPREG(HDMI_STATUS_EN);
-	DUMPREG(HDMI_HPD);
-	DUMPREG(HDMI_MODE_SEL);
-	DUMPREG(HDMI_V13_HPD_GEN);
-	DUMPREG(HDMI_V13_DC_CONTROL);
-	DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
+	for (i = 0; i < hdata->drv_data->clk_gates.count; ++i) {
+		ret = clk_prepare_enable(hdata->clk_gates[i]);
+		if (!ret)
+			continue;
 
-	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_H_BLANK_0);
-	DUMPREG(HDMI_H_BLANK_1);
-	DUMPREG(HDMI_V13_V_BLANK_0);
-	DUMPREG(HDMI_V13_V_BLANK_1);
-	DUMPREG(HDMI_V13_V_BLANK_2);
-	DUMPREG(HDMI_V13_H_V_LINE_0);
-	DUMPREG(HDMI_V13_H_V_LINE_1);
-	DUMPREG(HDMI_V13_H_V_LINE_2);
-	DUMPREG(HDMI_VSYNC_POL);
-	DUMPREG(HDMI_INT_PRO_MODE);
-	DUMPREG(HDMI_V13_V_BLANK_F_0);
-	DUMPREG(HDMI_V13_V_BLANK_F_1);
-	DUMPREG(HDMI_V13_V_BLANK_F_2);
-	DUMPREG(HDMI_V13_H_SYNC_GEN_0);
-	DUMPREG(HDMI_V13_H_SYNC_GEN_1);
-	DUMPREG(HDMI_V13_H_SYNC_GEN_2);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
-	DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
+		dev_err(hdata->dev, "Cannot enable clock '%s', %d\n",
+			hdata->drv_data->clk_gates.data[i], ret);
+		while (i--)
+			clk_disable_unprepare(hdata->clk_gates[i]);
+		return ret;
+	}
 
-	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_TG_CMD);
-	DUMPREG(HDMI_TG_H_FSZ_L);
-	DUMPREG(HDMI_TG_H_FSZ_H);
-	DUMPREG(HDMI_TG_HACT_ST_L);
-	DUMPREG(HDMI_TG_HACT_ST_H);
-	DUMPREG(HDMI_TG_HACT_SZ_L);
-	DUMPREG(HDMI_TG_HACT_SZ_H);
-	DUMPREG(HDMI_TG_V_FSZ_L);
-	DUMPREG(HDMI_TG_V_FSZ_H);
-	DUMPREG(HDMI_TG_VSYNC_L);
-	DUMPREG(HDMI_TG_VSYNC_H);
-	DUMPREG(HDMI_TG_VSYNC2_L);
-	DUMPREG(HDMI_TG_VSYNC2_H);
-	DUMPREG(HDMI_TG_VACT_ST_L);
-	DUMPREG(HDMI_TG_VACT_ST_H);
-	DUMPREG(HDMI_TG_VACT_SZ_L);
-	DUMPREG(HDMI_TG_VACT_SZ_H);
-	DUMPREG(HDMI_TG_FIELD_CHG_L);
-	DUMPREG(HDMI_TG_FIELD_CHG_H);
-	DUMPREG(HDMI_TG_VACT_ST2_L);
-	DUMPREG(HDMI_TG_VACT_ST2_H);
-	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
-	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
-	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
-	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
-	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
-	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
-	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
-	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-#undef DUMPREG
+	return 0;
 }
 
-static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
+static void hdmi_clk_disable_gates(struct hdmi_context *hdata)
 {
+	int i = hdata->drv_data->clk_gates.count;
+
+	while (i--)
+		clk_disable_unprepare(hdata->clk_gates[i]);
+}
+
+static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
+{
+	struct device *dev = hdata->dev;
+	int ret = 0;
 	int i;
 
-#define DUMPREG(reg_id) \
-	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
-	readl(hdata->regs + reg_id))
+	for (i = 0; i < hdata->drv_data->clk_muxes.count; i += 3) {
+		struct clk **c = &hdata->clk_muxes[i];
 
-	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_INTC_CON);
-	DUMPREG(HDMI_INTC_FLAG);
-	DUMPREG(HDMI_HPD_STATUS);
-	DUMPREG(HDMI_INTC_CON_1);
-	DUMPREG(HDMI_INTC_FLAG_1);
-	DUMPREG(HDMI_PHY_STATUS_0);
-	DUMPREG(HDMI_PHY_STATUS_PLL);
-	DUMPREG(HDMI_PHY_CON_0);
-	DUMPREG(HDMI_V14_PHY_RSTOUT);
-	DUMPREG(HDMI_PHY_VPLL);
-	DUMPREG(HDMI_PHY_CMU);
-	DUMPREG(HDMI_CORE_RSTOUT);
+		ret = clk_set_parent(c[2], c[to_phy]);
+		if (!ret)
+			continue;
 
-	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_CON_0);
-	DUMPREG(HDMI_CON_1);
-	DUMPREG(HDMI_CON_2);
-	DUMPREG(HDMI_SYS_STATUS);
-	DUMPREG(HDMI_PHY_STATUS_0);
-	DUMPREG(HDMI_STATUS_EN);
-	DUMPREG(HDMI_HPD);
-	DUMPREG(HDMI_MODE_SEL);
-	DUMPREG(HDMI_ENC_EN);
-	DUMPREG(HDMI_DC_CONTROL);
-	DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+		dev_err(dev, "Cannot set clock parent of '%s' to '%s', %d\n",
+			hdata->drv_data->clk_muxes.data[i + 2],
+			hdata->drv_data->clk_muxes.data[i + to_phy], ret);
+	}
 
-	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_H_BLANK_0);
-	DUMPREG(HDMI_H_BLANK_1);
-	DUMPREG(HDMI_V2_BLANK_0);
-	DUMPREG(HDMI_V2_BLANK_1);
-	DUMPREG(HDMI_V1_BLANK_0);
-	DUMPREG(HDMI_V1_BLANK_1);
-	DUMPREG(HDMI_V_LINE_0);
-	DUMPREG(HDMI_V_LINE_1);
-	DUMPREG(HDMI_H_LINE_0);
-	DUMPREG(HDMI_H_LINE_1);
-	DUMPREG(HDMI_HSYNC_POL);
-
-	DUMPREG(HDMI_VSYNC_POL);
-	DUMPREG(HDMI_INT_PRO_MODE);
-	DUMPREG(HDMI_V_BLANK_F0_0);
-	DUMPREG(HDMI_V_BLANK_F0_1);
-	DUMPREG(HDMI_V_BLANK_F1_0);
-	DUMPREG(HDMI_V_BLANK_F1_1);
-
-	DUMPREG(HDMI_H_SYNC_START_0);
-	DUMPREG(HDMI_H_SYNC_START_1);
-	DUMPREG(HDMI_H_SYNC_END_0);
-	DUMPREG(HDMI_H_SYNC_END_1);
-
-	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
-	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
-	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
-	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
-
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
-
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
-
-	DUMPREG(HDMI_V_BLANK_F2_0);
-	DUMPREG(HDMI_V_BLANK_F2_1);
-	DUMPREG(HDMI_V_BLANK_F3_0);
-	DUMPREG(HDMI_V_BLANK_F3_1);
-	DUMPREG(HDMI_V_BLANK_F4_0);
-	DUMPREG(HDMI_V_BLANK_F4_1);
-	DUMPREG(HDMI_V_BLANK_F5_0);
-	DUMPREG(HDMI_V_BLANK_F5_1);
-
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
-
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
-	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
-
-	DUMPREG(HDMI_VACT_SPACE_1_0);
-	DUMPREG(HDMI_VACT_SPACE_1_1);
-	DUMPREG(HDMI_VACT_SPACE_2_0);
-	DUMPREG(HDMI_VACT_SPACE_2_1);
-	DUMPREG(HDMI_VACT_SPACE_3_0);
-	DUMPREG(HDMI_VACT_SPACE_3_1);
-	DUMPREG(HDMI_VACT_SPACE_4_0);
-	DUMPREG(HDMI_VACT_SPACE_4_1);
-	DUMPREG(HDMI_VACT_SPACE_5_0);
-	DUMPREG(HDMI_VACT_SPACE_5_1);
-	DUMPREG(HDMI_VACT_SPACE_6_0);
-	DUMPREG(HDMI_VACT_SPACE_6_1);
-
-	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_TG_CMD);
-	DUMPREG(HDMI_TG_H_FSZ_L);
-	DUMPREG(HDMI_TG_H_FSZ_H);
-	DUMPREG(HDMI_TG_HACT_ST_L);
-	DUMPREG(HDMI_TG_HACT_ST_H);
-	DUMPREG(HDMI_TG_HACT_SZ_L);
-	DUMPREG(HDMI_TG_HACT_SZ_H);
-	DUMPREG(HDMI_TG_V_FSZ_L);
-	DUMPREG(HDMI_TG_V_FSZ_H);
-	DUMPREG(HDMI_TG_VSYNC_L);
-	DUMPREG(HDMI_TG_VSYNC_H);
-	DUMPREG(HDMI_TG_VSYNC2_L);
-	DUMPREG(HDMI_TG_VSYNC2_H);
-	DUMPREG(HDMI_TG_VACT_ST_L);
-	DUMPREG(HDMI_TG_VACT_ST_H);
-	DUMPREG(HDMI_TG_VACT_SZ_L);
-	DUMPREG(HDMI_TG_VACT_SZ_H);
-	DUMPREG(HDMI_TG_FIELD_CHG_L);
-	DUMPREG(HDMI_TG_FIELD_CHG_H);
-	DUMPREG(HDMI_TG_VACT_ST2_L);
-	DUMPREG(HDMI_TG_VACT_ST2_H);
-	DUMPREG(HDMI_TG_VACT_ST3_L);
-	DUMPREG(HDMI_TG_VACT_ST3_H);
-	DUMPREG(HDMI_TG_VACT_ST4_L);
-	DUMPREG(HDMI_TG_VACT_ST4_H);
-	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
-	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
-	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
-	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
-	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
-	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
-	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
-	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-	DUMPREG(HDMI_TG_3D);
-
-	DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
-	DUMPREG(HDMI_AVI_CON);
-	DUMPREG(HDMI_AVI_HEADER0);
-	DUMPREG(HDMI_AVI_HEADER1);
-	DUMPREG(HDMI_AVI_HEADER2);
-	DUMPREG(HDMI_AVI_CHECK_SUM);
-	DUMPREG(HDMI_VSI_CON);
-	DUMPREG(HDMI_VSI_HEADER0);
-	DUMPREG(HDMI_VSI_HEADER1);
-	DUMPREG(HDMI_VSI_HEADER2);
-	for (i = 0; i < 7; ++i)
-		DUMPREG(HDMI_VSI_DATA(i));
-
-#undef DUMPREG
-}
-
-static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
-{
-	if (hdata->drv_data->type == HDMI_TYPE13)
-		hdmi_v13_regs_dump(hdata, prefix);
-	else
-		hdmi_v14_regs_dump(hdata, prefix);
+	return ret;
 }
 
 static u8 hdmi_chksum(struct hdmi_context *hdata,
@@ -993,10 +908,11 @@
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
 {
+	const struct hdmiphy_configs *confs = &hdata->drv_data->phy_confs;
 	int i;
 
-	for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
-		if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
+	for (i = 0; i < confs->count; i++)
+		if (confs->data[i].pixel_clock == pixel_clock)
 			return i;
 
 	DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1078,13 +994,11 @@
 
 	mode_ok = hdmi_mode_valid(connector, adjusted_mode);
 
-	/* just return if user desired mode exists. */
 	if (mode_ok == MODE_OK)
 		return true;
 
 	/*
-	 * otherwise, find the most suitable mode among modes and change it
-	 * to adjusted_mode.
+	 * Find the most suitable mode and copy it to adjusted_mode.
 	 */
 	list_for_each_entry(m, &connector->modes, head) {
 		mode_ok = hdmi_mode_valid(connector, m);
@@ -1129,15 +1043,15 @@
 	switch (bits_per_sample) {
 	case 20:
 		data_num = 2;
-		bit_ch  = 1;
+		bit_ch = 1;
 		break;
 	case 24:
 		data_num = 3;
-		bit_ch  = 1;
+		bit_ch = 1;
 		break;
 	default:
 		data_num = 1;
-		bit_ch  = 0;
+		bit_ch = 0;
 		break;
 	}
 
@@ -1230,13 +1144,12 @@
 	/* choose HDMI mode */
 	hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
 		HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
-	/* Apply Video preable and Guard band in HDMI mode only */
+	/* apply video preamble and guard band in HDMI mode only */
 	hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
 	/* disable bluescreen */
 	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
 
 	if (hdata->dvi_mode) {
-		/* choose DVI mode */
 		hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
 				HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
 		hdmi_reg_writeb(hdata, HDMI_CON_2,
@@ -1308,7 +1221,7 @@
 
 	val = (m->hsync_start - m->hdisplay - 2);
 	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
-	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20;
+	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
 	hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
 
 	/*
@@ -1319,7 +1232,6 @@
 
 	/* Following values & calculations differ for different type of modes */
 	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
 		val = ((m->vsync_end - m->vdisplay) / 2);
 		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
 		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
@@ -1348,8 +1260,6 @@
 
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
 	} else {
-		/* Progressive Mode */
-
 		val = m->vtotal;
 		val |= (m->vtotal - m->vdisplay) << 11;
 		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
@@ -1365,21 +1275,12 @@
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
 				m->vtotal - m->vdisplay);
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
-		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
 	}
 
-	/* Timing generator registers */
 	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
 	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
 	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
 	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
-	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
-	hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
-	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
-	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
-	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
-	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
-	hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
 }
 
 static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
@@ -1390,7 +1291,7 @@
 	hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
 	hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
 	hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
-			(m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0);
+			(m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
 	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
 			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
 	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
@@ -1404,7 +1305,6 @@
 
 	/* Following values & calculations differ for different type of modes */
 	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
 		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
 			(m->vsync_end - m->vdisplay) / 2);
 		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
@@ -1437,7 +1337,6 @@
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
 	} else {
-		/* Progressive Mode */
 		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
 			m->vsync_end - m->vdisplay);
 		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
@@ -1454,15 +1353,8 @@
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
 				m->vtotal - m->vdisplay);
 		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
-		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
-		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
-		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
-		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
-		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
-		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
 	}
 
-	/* Following values & calculations are same irrespective of mode type */
 	hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
 			m->hsync_start - m->hdisplay - 2);
 	hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
@@ -1486,16 +1378,12 @@
 	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
 	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
 
-	/* Timing generator registers */
 	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
 	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
 	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
 	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
-	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
-	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
-	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
-	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
-	hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
+	if (hdata->drv_data == &exynos5433_hdmi_driver_data)
+		hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1);
 }
 
 static void hdmi_mode_apply(struct hdmi_context *hdata)
@@ -1505,62 +1393,64 @@
 	else
 		hdmi_v14_mode_apply(hdata);
 
-	hdmiphy_wait_for_pll(hdata);
-
-	clk_set_parent(hdata->mout_hdmi, hdata->sclk_hdmiphy);
-
-	/* enable HDMI and timing generator */
 	hdmi_start(hdata, true);
 }
 
 static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 {
-	clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
-
-	/* reset hdmiphy */
+	hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, 1);
+	usleep_range(10000, 12000);
+	hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, 1);
+	usleep_range(10000, 12000);
 	hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
 	usleep_range(10000, 12000);
-	hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT,  0, HDMI_PHY_SW_RSTOUT);
+	hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
 	usleep_range(10000, 12000);
 }
 
+static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable)
+{
+	u8 v = enable ? HDMI_PHY_ENABLE_MODE_SET : HDMI_PHY_DISABLE_MODE_SET;
+
+	if (hdata->drv_data == &exynos5433_hdmi_driver_data)
+		writel(v, hdata->regs_hdmiphy + HDMIPHY5433_MODE_SET_DONE);
+}
+
 static void hdmiphy_conf_apply(struct hdmi_context *hdata)
 {
 	int ret;
-	int i;
+	const u8 *phy_conf;
 
-	/* pixel clock */
-	i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
-	if (i < 0) {
+	ret = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
+	if (ret < 0) {
 		DRM_ERROR("failed to find hdmiphy conf\n");
 		return;
 	}
+	phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
 
-	ret = hdmiphy_reg_write_buf(hdata, 0,
-			hdata->drv_data->phy_confs[i].conf, 32);
+	hdmi_clk_set_parents(hdata, false);
+
+	hdmiphy_conf_reset(hdata);
+
+	hdmiphy_enable_mode_set(hdata, true);
+	ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
 	if (ret) {
 		DRM_ERROR("failed to configure hdmiphy\n");
 		return;
 	}
-
+	hdmiphy_enable_mode_set(hdata, false);
+	hdmi_clk_set_parents(hdata, true);
 	usleep_range(10000, 12000);
+	hdmiphy_wait_for_pll(hdata);
 }
 
 static void hdmi_conf_apply(struct hdmi_context *hdata)
 {
-	hdmiphy_conf_reset(hdata);
-	hdmiphy_conf_apply(hdata);
-
 	hdmi_start(hdata, false);
 	hdmi_conf_init(hdata);
-
 	hdmi_audio_init(hdata);
-
-	/* setting core registers */
 	hdmi_mode_apply(hdata);
 	hdmi_audio_control(hdata, true);
-
-	hdmi_regs_dump(hdata, "start");
 }
 
 static void hdmi_mode_set(struct drm_encoder *encoder,
@@ -1579,10 +1469,17 @@
 	hdata->cea_video_id = drm_match_cea_mode(mode);
 }
 
-static void hdmi_enable(struct drm_encoder *encoder)
+static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
 {
-	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+	if (!hdata->sysreg)
+		return;
 
+	regmap_update_bits(hdata->sysreg, EXYNOS5433_SYSREG_DISP_HDMI_PHY,
+			   SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0);
+}
+
+static void hdmiphy_enable(struct hdmi_context *hdata)
+{
 	if (hdata->powered)
 		return;
 
@@ -1591,15 +1488,47 @@
 	if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
 		DRM_DEBUG_KMS("failed to enable regulator bulk\n");
 
-	/* set pmu hdmiphy control bit to enable hdmiphy */
 	regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
 			PMU_HDMI_PHY_ENABLE_BIT, 1);
 
-	hdmi_conf_apply(hdata);
+	hdmi_set_refclk(hdata, true);
+
+	hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, HDMI_PHY_POWER_OFF_EN);
+
+	hdmiphy_conf_apply(hdata);
 
 	hdata->powered = true;
 }
 
+static void hdmiphy_disable(struct hdmi_context *hdata)
+{
+	if (!hdata->powered)
+		return;
+
+	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
+
+	hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, HDMI_PHY_POWER_OFF_EN);
+
+	hdmi_set_refclk(hdata, false);
+
+	regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
+			PMU_HDMI_PHY_ENABLE_BIT, 0);
+
+	regulator_bulk_disable(ARRAY_SIZE(supply), hdata->regul_bulk);
+
+	pm_runtime_put_sync(hdata->dev);
+
+	hdata->powered = false;
+}
+
+static void hdmi_enable(struct drm_encoder *encoder)
+{
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+
+	hdmiphy_enable(hdata);
+	hdmi_conf_apply(hdata);
+}
+
 static void hdmi_disable(struct drm_encoder *encoder)
 {
 	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
@@ -1623,20 +1552,9 @@
 	if (funcs && funcs->disable)
 		(*funcs->disable)(crtc);
 
-	/* HDMI System Disable */
-	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
-
 	cancel_delayed_work(&hdata->hotplug_work);
 
-	/* reset pmu hdmiphy control bit to disable hdmiphy */
-	regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
-			PMU_HDMI_PHY_ENABLE_BIT, 0);
-
-	regulator_bulk_disable(ARRAY_SIZE(supply), hdata->regul_bulk);
-
-	pm_runtime_put_sync(hdata->dev);
-
-	hdata->powered = false;
+	hdmiphy_disable(hdata);
 }
 
 static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1670,6 +1588,68 @@
 	return IRQ_HANDLED;
 }
 
+static int hdmi_clks_get(struct hdmi_context *hdata,
+			 const struct string_array_spec *names,
+			 struct clk **clks)
+{
+	struct device *dev = hdata->dev;
+	int i;
+
+	for (i = 0; i < names->count; ++i) {
+		struct clk *clk = devm_clk_get(dev, names->data[i]);
+
+		if (IS_ERR(clk)) {
+			int ret = PTR_ERR(clk);
+
+			dev_err(dev, "Cannot get clock %s, %d\n",
+				names->data[i], ret);
+
+			return ret;
+		}
+
+		clks[i] = clk;
+	}
+
+	return 0;
+}
+
+static int hdmi_clk_init(struct hdmi_context *hdata)
+{
+	const struct hdmi_driver_data *drv_data = hdata->drv_data;
+	int count = drv_data->clk_gates.count + drv_data->clk_muxes.count;
+	struct device *dev = hdata->dev;
+	struct clk **clks;
+	int ret;
+
+	if (!count)
+		return 0;
+
+	clks = devm_kzalloc(dev, sizeof(*clks) * count, GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	hdata->clk_gates = clks;
+	hdata->clk_muxes = clks + drv_data->clk_gates.count;
+
+	ret = hdmi_clks_get(hdata, &drv_data->clk_gates, hdata->clk_gates);
+	if (ret)
+		return ret;
+
+	return hdmi_clks_get(hdata, &drv_data->clk_muxes, hdata->clk_muxes);
+}
+
+
+static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
+{
+	struct hdmi_context *hdata = container_of(clk, struct hdmi_context,
+						  phy_clk);
+
+	if (enable)
+		hdmiphy_enable(hdata);
+	else
+		hdmiphy_disable(hdata);
+}
+
 static int hdmi_resources_init(struct hdmi_context *hdata)
 {
 	struct device *dev = hdata->dev;
@@ -1688,39 +1668,14 @@
 		DRM_ERROR("failed to get GPIO irq\n");
 		return  hdata->irq;
 	}
-	/* get clocks, power */
-	hdata->hdmi = devm_clk_get(dev, "hdmi");
-	if (IS_ERR(hdata->hdmi)) {
-		DRM_ERROR("failed to get clock 'hdmi'\n");
-		ret = PTR_ERR(hdata->hdmi);
-		goto fail;
-	}
-	hdata->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
-	if (IS_ERR(hdata->sclk_hdmi)) {
-		DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
-		ret = PTR_ERR(hdata->sclk_hdmi);
-		goto fail;
-	}
-	hdata->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
-	if (IS_ERR(hdata->sclk_pixel)) {
-		DRM_ERROR("failed to get clock 'sclk_pixel'\n");
-		ret = PTR_ERR(hdata->sclk_pixel);
-		goto fail;
-	}
-	hdata->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
-	if (IS_ERR(hdata->sclk_hdmiphy)) {
-		DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
-		ret = PTR_ERR(hdata->sclk_hdmiphy);
-		goto fail;
-	}
-	hdata->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
-	if (IS_ERR(hdata->mout_hdmi)) {
-		DRM_ERROR("failed to get clock 'mout_hdmi'\n");
-		ret = PTR_ERR(hdata->mout_hdmi);
-		goto fail;
-	}
 
-	clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
+	ret = hdmi_clk_init(hdata);
+	if (ret)
+		return ret;
+
+	ret = hdmi_clk_set_parents(hdata, false);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
 		hdata->regul_bulk[i].supply = supply[i];
@@ -1728,7 +1683,8 @@
 	}
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
 	if (ret) {
-		DRM_ERROR("failed to get regulators\n");
+		if (ret != -EPROBE_DEFER)
+			DRM_ERROR("failed to get regulators\n");
 		return ret;
 	}
 
@@ -1745,9 +1701,6 @@
 		DRM_ERROR("failed to enable hdmi-en regulator\n");
 
 	return ret;
-fail:
-	DRM_ERROR("HDMI resource init - failed\n");
-	return ret;
 }
 
 static struct of_device_id hdmi_match_types[] = {
@@ -1761,6 +1714,9 @@
 		.compatible = "samsung,exynos5420-hdmi",
 		.data = &exynos5420_hdmi_driver_data,
 	}, {
+		.compatible = "samsung,exynos5433-hdmi",
+		.data = &exynos5433_hdmi_driver_data,
+	}, {
 		/* end node */
 	}
 };
@@ -1780,6 +1736,10 @@
 	if (pipe < 0)
 		return pipe;
 
+	hdata->phy_clk.enable = hdmiphy_clk_enable;
+
+	exynos_drm_crtc_from_pipe(drm_dev, pipe)->pipe_clk = &hdata->phy_clk;
+
 	encoder->possible_crtcs = 1 << pipe;
 
 	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
@@ -1830,7 +1790,6 @@
 static int hdmi_probe(struct platform_device *pdev)
 {
 	struct device_node *ddc_node, *phy_node;
-	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	struct hdmi_context *hdata;
 	struct resource *res;
@@ -1840,11 +1799,7 @@
 	if (!hdata)
 		return -ENOMEM;
 
-	match = of_match_device(hdmi_match_types, dev);
-	if (!match)
-		return -ENODEV;
-
-	hdata->drv_data = match->data;
+	hdata->drv_data = of_device_get_match_data(dev);
 
 	platform_set_drvdata(pdev, hdata);
 
@@ -1852,7 +1807,8 @@
 
 	ret = hdmi_resources_init(hdata);
 	if (ret) {
-		DRM_ERROR("hdmi_resources_init failed\n");
+		if (ret != -EPROBE_DEFER)
+			DRM_ERROR("hdmi_resources_init failed\n");
 		return ret;
 	}
 
@@ -1867,7 +1823,6 @@
 	if (ddc_node)
 		goto out_get_ddc_adpt;
 
-	/* DDC i2c driver */
 	ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
 	if (!ddc_node) {
 		DRM_ERROR("Failed to find ddc node in device tree\n");
@@ -1885,7 +1840,6 @@
 	if (phy_node)
 		goto out_get_phy_port;
 
-	/* hdmiphy i2c driver */
 	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
 	if (!phy_node) {
 		DRM_ERROR("Failed to find hdmiphy node in device tree\n");
@@ -1929,6 +1883,16 @@
 		goto err_hdmiphy;
 	}
 
+	if (hdata->drv_data->has_sysreg) {
+		hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+				"samsung,sysreg-phandle");
+		if (IS_ERR(hdata->sysreg)) {
+			DRM_ERROR("sysreg regmap lookup failed.\n");
+			ret = -EPROBE_DEFER;
+			goto err_hdmiphy;
+		}
+	}
+
 	pm_runtime_enable(dev);
 
 	ret = component_add(&pdev->dev, &hdmi_component_ops);
@@ -1975,8 +1939,7 @@
 {
 	struct hdmi_context *hdata = dev_get_drvdata(dev);
 
-	clk_disable_unprepare(hdata->sclk_hdmi);
-	clk_disable_unprepare(hdata->hdmi);
+	hdmi_clk_disable_gates(hdata);
 
 	return 0;
 }
@@ -1986,17 +1949,9 @@
 	struct hdmi_context *hdata = dev_get_drvdata(dev);
 	int ret;
 
-	ret = clk_prepare_enable(hdata->hdmi);
-	if (ret < 0) {
-		DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+	ret = hdmi_clk_enable_gates(hdata);
+	if (ret < 0)
 		return ret;
-	}
-	ret = clk_prepare_enable(hdata->sclk_hdmi);
-	if (ret < 0) {
-		DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n",
-			  ret);
-		return ret;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 0a5a600..74a4269 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/component.h>
 
 #include <drm/exynos_drm.h>
@@ -103,8 +104,6 @@
 
 	struct mixer_resources	mixer_res;
 	enum mixer_version_id	mxr_ver;
-	wait_queue_head_t	wait_vsync_queue;
-	atomic_t		wait_vsync_event;
 };
 
 struct mixer_drv_data {
@@ -787,12 +786,6 @@
 
 			exynos_drm_crtc_finish_update(ctx->crtc, plane);
 		}
-
-		/* set wait vsync event to zero and wake up queue. */
-		if (atomic_read(&ctx->wait_vsync_event)) {
-			atomic_set(&ctx->wait_vsync_event, 0);
-			wake_up(&ctx->wait_vsync_queue);
-		}
 	}
 
 out:
@@ -1027,34 +1020,6 @@
 	mixer_vsync_set_update(mixer_ctx, true);
 }
 
-static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
-{
-	struct mixer_context *mixer_ctx = crtc->ctx;
-	int err;
-
-	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
-		return;
-
-	err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
-	if (err < 0) {
-		DRM_DEBUG_KMS("failed to acquire vblank counter\n");
-		return;
-	}
-
-	atomic_set(&mixer_ctx->wait_vsync_event, 1);
-
-	/*
-	 * wait for MIXER to signal VSYNC interrupt or return after
-	 * timeout which is set to 50ms (refresh rate of 20).
-	 */
-	if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
-				!atomic_read(&mixer_ctx->wait_vsync_event),
-				HZ/20))
-		DRM_DEBUG_KMS("vblank wait timed out.\n");
-
-	drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe);
-}
-
 static void mixer_enable(struct exynos_drm_crtc *crtc)
 {
 	struct mixer_context *ctx = crtc->ctx;
@@ -1065,6 +1030,8 @@
 
 	pm_runtime_get_sync(ctx->dev);
 
+	exynos_drm_pipe_clk_enable(crtc, true);
+
 	mixer_vsync_set_update(ctx, false);
 
 	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
@@ -1094,6 +1061,8 @@
 	for (i = 0; i < MIXER_WIN_NR; i++)
 		mixer_disable_plane(crtc, &ctx->planes[i]);
 
+	exynos_drm_pipe_clk_enable(crtc, false);
+
 	pm_runtime_put(ctx->dev);
 
 	clear_bit(MXR_BIT_POWERED, &ctx->flags);
@@ -1126,7 +1095,6 @@
 	.disable		= mixer_disable,
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
-	.wait_for_vblank	= mixer_wait_for_vblank,
 	.atomic_begin		= mixer_atomic_begin,
 	.update_plane		= mixer_update_plane,
 	.disable_plane		= mixer_disable_plane,
@@ -1155,18 +1123,6 @@
 	.has_sclk = 1,
 };
 
-static const struct platform_device_id mixer_driver_types[] = {
-	{
-		.name		= "s5p-mixer",
-		.driver_data	= (unsigned long)&exynos4210_mxr_drv_data,
-	}, {
-		.name		= "exynos5-mixer",
-		.driver_data	= (unsigned long)&exynos5250_mxr_drv_data,
-	}, {
-		/* end node */
-	}
-};
-
 static struct of_device_id mixer_match_types[] = {
 	{
 		.compatible = "samsung,exynos4210-mixer",
@@ -1243,7 +1199,7 @@
 static int mixer_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct mixer_drv_data *drv;
+	const struct mixer_drv_data *drv;
 	struct mixer_context *ctx;
 	int ret;
 
@@ -1253,23 +1209,13 @@
 		return -ENOMEM;
 	}
 
-	if (dev->of_node) {
-		const struct of_device_id *match;
-
-		match = of_match_node(mixer_match_types, dev->of_node);
-		drv = (struct mixer_drv_data *)match->data;
-	} else {
-		drv = (struct mixer_drv_data *)
-			platform_get_device_id(pdev)->driver_data;
-	}
+	drv = of_device_get_match_data(dev);
 
 	ctx->pdev = pdev;
 	ctx->dev = dev;
 	ctx->vp_enabled = drv->is_vp_enabled;
 	ctx->has_sclk = drv->has_sclk;
 	ctx->mxr_ver = drv->version;
-	init_waitqueue_head(&ctx->wait_vsync_queue);
-	atomic_set(&ctx->wait_vsync_event, 0);
 
 	platform_set_drvdata(pdev, ctx);
 
@@ -1355,5 +1301,4 @@
 	},
 	.probe = mixer_probe,
 	.remove = mixer_remove,
-	.id_table	= mixer_driver_types,
 };
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 8c891e5..169667a 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -586,10 +586,12 @@
 #define HDMI_TG_VACT_ST4_L		HDMI_TG_BASE(0x0070)
 #define HDMI_TG_VACT_ST4_H		HDMI_TG_BASE(0x0074)
 #define HDMI_TG_3D			HDMI_TG_BASE(0x00F0)
+#define HDMI_TG_DECON_EN		HDMI_TG_BASE(0x01e0)
 
 /* HDMI PHY Registers Offsets*/
-#define HDMIPHY_POWER		(0x74 >> 2)
-#define HDMIPHY_MODE_SET_DONE		(0x7c >> 2)
+#define HDMIPHY_POWER			0x74
+#define HDMIPHY_MODE_SET_DONE		0x7c
+#define HDMIPHY5433_MODE_SET_DONE	0x84
 
 /* HDMI PHY Values */
 #define HDMI_PHY_POWER_ON              0x80
@@ -603,4 +605,7 @@
 #define PMU_HDMI_PHY_CONTROL		0x700
 #define PMU_HDMI_PHY_ENABLE_BIT		BIT(0)
 
+#define EXYNOS5433_SYSREG_DISP_HDMI_PHY	0x1008
+#define SYSREG_HDMI_REFCLK_INT_CLK	1
+
 #endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index c78cf3f..b9c714d 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -1,6 +1,6 @@
 config DRM_FSL_DCU
 	tristate "DRM Support for Freescale DCU"
-	depends on DRM && OF && ARM
+	depends on DRM && OF && ARM && COMMON_CLK
 	select BACKLIGHT_CLASS_DEVICE
 	select BACKLIGHT_LCD_SUPPORT
 	select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
index 6ea1523..b35a292 100644
--- a/drivers/gpu/drm/fsl-dcu/Makefile
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -3,5 +3,6 @@
 		 fsl_dcu_drm_rgb.o \
 		 fsl_dcu_drm_plane.o \
 		 fsl_dcu_drm_crtc.o \
-		 fsl_dcu_drm_fbdev.o
+		 fsl_dcu_drm_fbdev.o \
+		 fsl_tcon.o
 obj-$(CONFIG_DRM_FSL_DCU)	+= fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 4ed7798..89c0084 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -66,13 +66,12 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	struct drm_connector *con = &fsl_dev->connector.base;
 	struct drm_display_mode *mode = &crtc->state->mode;
-	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index, pol = 0;
-	unsigned long dcuclk;
+	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, index, pol = 0;
 
 	index = drm_crtc_index(crtc);
-	dcuclk = clk_get_rate(fsl_dev->clk);
-	div = dcuclk / mode->clock / 1000;
+	clk_set_rate(fsl_dev->pix_clk, mode->clock * 1000);
 
 	/* Configure timings: */
 	hbp = mode->htotal - mode->hsync_end;
@@ -82,6 +81,10 @@
 	vfp = mode->vsync_start - mode->vdisplay;
 	vsw = mode->vsync_end - mode->vsync_start;
 
+	/* INV_PXCK as default (most displays sample data on the rising edge) */
+	if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
+		pol |= DCU_SYN_POL_INV_PXCK;
+
 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 		pol |= DCU_SYN_POL_INV_HS_LOW;
 
@@ -99,7 +102,6 @@
 	regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
 		     DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
 		     DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
-	regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
 	regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
 	regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
 		     DCU_BGND_G(0) | DCU_BGND_B(0));
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e8d9337..dc723f7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -23,10 +23,12 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
 #include "fsl_dcu_drm_crtc.h"
 #include "fsl_dcu_drm_drv.h"
+#include "fsl_tcon.h"
 
 static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg)
 {
@@ -40,9 +42,10 @@
 	.reg_bits = 32,
 	.reg_stride = 4,
 	.val_bits = 32,
-	.cache_type = REGCACHE_RBTREE,
+	.cache_type = REGCACHE_FLAT,
 
 	.volatile_reg = fsl_dcu_drm_is_volatile_reg,
+	.max_register = 0x11fc,
 };
 
 static int fsl_dcu_drm_irq_init(struct drm_device *dev)
@@ -62,46 +65,54 @@
 	return ret;
 }
 
-static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
+static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
 {
-	struct device *dev = drm->dev;
-	struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 	int ret;
 
 	ret = fsl_dcu_drm_modeset_init(fsl_dev);
 	if (ret < 0) {
-		dev_err(dev, "failed to initialize mode setting\n");
+		dev_err(dev->dev, "failed to initialize mode setting\n");
 		return ret;
 	}
 
-	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 	if (ret < 0) {
-		dev_err(dev, "failed to initialize vblank\n");
+		dev_err(dev->dev, "failed to initialize vblank\n");
 		goto done;
 	}
-	drm->vblank_disable_allowed = true;
 
-	ret = fsl_dcu_drm_irq_init(drm);
+	ret = fsl_dcu_drm_irq_init(dev);
 	if (ret < 0)
 		goto done;
-	drm->irq_enabled = true;
+	dev->irq_enabled = true;
 
-	fsl_dcu_fbdev_init(drm);
+	fsl_dcu_fbdev_init(dev);
 
 	return 0;
 done:
-	if (ret) {
-		drm_mode_config_cleanup(drm);
-		drm_vblank_cleanup(drm);
-		drm_irq_uninstall(drm);
-		drm->dev_private = NULL;
-	}
+	drm_kms_helper_poll_fini(dev);
+
+	if (fsl_dev->fbdev)
+		drm_fbdev_cma_fini(fsl_dev->fbdev);
+
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+	dev->dev_private = NULL;
 
 	return ret;
 }
 
 static int fsl_dcu_unload(struct drm_device *dev)
 {
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+
+	if (fsl_dev->fbdev)
+		drm_fbdev_cma_fini(fsl_dev->fbdev);
+
 	drm_mode_config_cleanup(dev);
 	drm_vblank_cleanup(dev);
 	drm_irq_uninstall(dev);
@@ -157,6 +168,13 @@
 	regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
 }
 
+static void fsl_dcu_drm_lastclose(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+	drm_fbdev_cma_restore_mode(fsl_dev->fbdev);
+}
+
 static const struct file_operations fsl_dcu_drm_fops = {
 	.owner		= THIS_MODULE,
 	.open		= drm_open,
@@ -174,6 +192,7 @@
 static struct drm_driver fsl_dcu_drm_driver = {
 	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
 				| DRIVER_PRIME | DRIVER_ATOMIC,
+	.lastclose		= fsl_dcu_drm_lastclose,
 	.load			= fsl_dcu_load,
 	.unload			= fsl_dcu_unload,
 	.irq_handler		= fsl_dcu_drm_irq,
@@ -197,9 +216,9 @@
 	.fops			= &fsl_dcu_drm_fops,
 	.name			= "fsl-dcu-drm",
 	.desc			= "Freescale DCU DRM",
-	.date			= "20150213",
+	.date			= "20160425",
 	.major			= 1,
-	.minor			= 0,
+	.minor			= 1,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -283,6 +302,9 @@
 	struct resource *res;
 	void __iomem *base;
 	struct drm_driver *driver = &fsl_dcu_drm_driver;
+	struct clk *pix_clk_in;
+	char pix_clk_name[32];
+	const char *pix_clk_in_name;
 	const struct of_device_id *id;
 	int ret;
 
@@ -290,6 +312,11 @@
 	if (!fsl_dev)
 		return -ENOMEM;
 
+	id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+	fsl_dev->soc = id->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(dev, "could not get memory IO resource\n");
@@ -308,24 +335,6 @@
 		return -ENXIO;
 	}
 
-	fsl_dev->clk = devm_clk_get(dev, "dcu");
-	if (IS_ERR(fsl_dev->clk)) {
-		ret = PTR_ERR(fsl_dev->clk);
-		dev_err(dev, "failed to get dcu clock\n");
-		return ret;
-	}
-	ret = clk_prepare(fsl_dev->clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to prepare dcu clk\n");
-		return ret;
-	}
-	ret = clk_enable(fsl_dev->clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable dcu clk\n");
-		clk_unprepare(fsl_dev->clk);
-		return ret;
-	}
-
 	fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
 			&fsl_dcu_regmap_config);
 	if (IS_ERR(fsl_dev->regmap)) {
@@ -333,14 +342,47 @@
 		return PTR_ERR(fsl_dev->regmap);
 	}
 
-	id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
-	if (!id)
-		return -ENODEV;
-	fsl_dev->soc = id->data;
+	fsl_dev->clk = devm_clk_get(dev, "dcu");
+	if (IS_ERR(fsl_dev->clk)) {
+		dev_err(dev, "failed to get dcu clock\n");
+		return PTR_ERR(fsl_dev->clk);
+	}
+	ret = clk_prepare_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		return ret;
+	}
+
+	pix_clk_in = devm_clk_get(dev, "pix");
+	if (IS_ERR(pix_clk_in)) {
+		/* legacy binding, use dcu clock as pixel clock input */
+		pix_clk_in = fsl_dev->clk;
+	}
+
+	pix_clk_in_name = __clk_get_name(pix_clk_in);
+	snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
+	fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
+			pix_clk_in_name, 0, base + DCU_DIV_RATIO,
+			0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+	if (IS_ERR(fsl_dev->pix_clk)) {
+		dev_err(dev, "failed to register pix clk\n");
+		ret = PTR_ERR(fsl_dev->pix_clk);
+		goto disable_clk;
+	}
+
+	ret = clk_prepare_enable(fsl_dev->pix_clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable pix clk\n");
+		goto unregister_pix_clk;
+	}
+
+	fsl_dev->tcon = fsl_tcon_init(dev);
 
 	drm = drm_dev_alloc(driver, dev);
-	if (!drm)
-		return -ENOMEM;
+	if (!drm) {
+		ret = -ENOMEM;
+		goto disable_pix_clk;
+	}
 
 	fsl_dev->dev = dev;
 	fsl_dev->drm = drm;
@@ -360,6 +402,12 @@
 
 unref:
 	drm_dev_unref(drm);
+disable_pix_clk:
+	clk_disable_unprepare(fsl_dev->pix_clk);
+unregister_pix_clk:
+	clk_unregister(fsl_dev->pix_clk);
+disable_clk:
+	clk_disable_unprepare(fsl_dev->clk);
 	return ret;
 }
 
@@ -367,6 +415,9 @@
 {
 	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
 
+	clk_disable_unprepare(fsl_dev->clk);
+	clk_disable_unprepare(fsl_dev->pix_clk);
+	clk_unregister(fsl_dev->pix_clk);
 	drm_put_dev(fsl_dev->drm);
 
 	return 0;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 6413ac9..c275f90 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -47,8 +47,8 @@
 #define DCU_VSYN_PARA_FP(x)		(x)
 
 #define DCU_SYN_POL			0x0024
-#define DCU_SYN_POL_INV_PXCK_FALL	(0 << 6)
-#define DCU_SYN_POL_NEG_REMAIN		(0 << 5)
+#define DCU_SYN_POL_INV_PXCK		BIT(6)
+#define DCU_SYN_POL_NEG			BIT(5)
 #define DCU_SYN_POL_INV_VS_LOW		BIT(1)
 #define DCU_SYN_POL_INV_HS_LOW		BIT(0)
 
@@ -183,6 +183,8 @@
 	struct regmap *regmap;
 	int irq;
 	struct clk *clk;
+	struct clk *pix_clk;
+	struct fsl_tcon *tcon;
 	/*protects hardware register*/
 	spinlock_t irq_lock;
 	struct drm_device *drm;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 8780deb..98c998d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -17,6 +17,7 @@
 #include <drm/drm_panel.h>
 
 #include "fsl_dcu_drm_drv.h"
+#include "fsl_tcon.h"
 
 static int
 fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
@@ -28,10 +29,20 @@
 
 static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_disable(fsl_dev->tcon);
 }
 
 static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
 }
 
 static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
@@ -68,7 +79,10 @@
 
 static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
 {
+	struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
+
 	drm_connector_unregister(connector);
+	drm_panel_detach(fsl_con->panel);
 	drm_connector_cleanup(connector);
 }
 
@@ -131,7 +145,7 @@
 				 struct drm_encoder *encoder)
 {
 	struct drm_connector *connector = &fsl_dev->connector.base;
-	struct drm_mode_config mode_config = fsl_dev->drm->mode_config;
+	struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
 	struct device_node *panel_node;
 	int ret;
 
@@ -153,19 +167,23 @@
 		goto err_sysfs;
 
 	drm_object_property_set_value(&connector->base,
-				      mode_config.dpms_property,
+				      mode_config->dpms_property,
 				      DRM_MODE_DPMS_OFF);
 
 	panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
-	if (panel_node) {
-		fsl_dev->connector.panel = of_drm_find_panel(panel_node);
-		if (!fsl_dev->connector.panel) {
-			ret = -EPROBE_DEFER;
-			goto err_sysfs;
-		}
-	of_node_put(panel_node);
+	if (!panel_node) {
+		dev_err(fsl_dev->dev, "fsl,panel property not found\n");
+		ret = -ENODEV;
+		goto err_sysfs;
 	}
 
+	fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+	if (!fsl_dev->connector.panel) {
+		ret = -EPROBE_DEFER;
+		goto err_panel;
+	}
+	of_node_put(panel_node);
+
 	ret = drm_panel_attach(fsl_dev->connector.panel, connector);
 	if (ret) {
 		dev_err(fsl_dev->dev, "failed to attach panel\n");
@@ -174,6 +192,8 @@
 
 	return 0;
 
+err_panel:
+	of_node_put(panel_node);
 err_sysfs:
 	drm_connector_unregister(connector);
 err_cleanup:
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
new file mode 100644
index 0000000..bbe34f1
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Toradex AG
+ *
+ * Stefan Agner <stefan@agner.ch>
+ *
+ * Freescale TCON device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "fsl_tcon.h"
+
+void fsl_tcon_bypass_disable(struct fsl_tcon *tcon)
+{
+	regmap_update_bits(tcon->regs, FSL_TCON_CTRL1,
+			   FSL_TCON_CTRL1_TCON_BYPASS, 0);
+}
+
+void fsl_tcon_bypass_enable(struct fsl_tcon *tcon)
+{
+	regmap_update_bits(tcon->regs, FSL_TCON_CTRL1,
+			   FSL_TCON_CTRL1_TCON_BYPASS,
+			   FSL_TCON_CTRL1_TCON_BYPASS);
+}
+
+static struct regmap_config fsl_tcon_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+
+	.name = "tcon",
+};
+
+static int fsl_tcon_init_regmap(struct device *dev,
+				struct fsl_tcon *tcon,
+				struct device_node *np)
+{
+	struct resource res;
+	void __iomem *regs;
+
+	if (of_address_to_resource(np, 0, &res))
+		return -EINVAL;
+
+	regs = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	tcon->regs = devm_regmap_init_mmio(dev, regs,
+					   &fsl_tcon_regmap_config);
+	if (IS_ERR(tcon->regs))
+		return PTR_ERR(tcon->regs);
+
+	return 0;
+}
+
+struct fsl_tcon *fsl_tcon_init(struct device *dev)
+{
+	struct fsl_tcon *tcon;
+	struct device_node *np;
+	int ret;
+
+	/* TCON node is not mandatory, some devices do not provide TCON */
+	np = of_parse_phandle(dev->of_node, "fsl,tcon", 0);
+	if (!np)
+		return NULL;
+
+	tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
+	if (!tcon) {
+		ret = -ENOMEM;
+		goto err_node_put;
+	}
+
+	ret = fsl_tcon_init_regmap(dev, tcon, np);
+	if (ret) {
+		dev_err(dev, "Couldn't create the TCON regmap\n");
+		goto err_node_put;
+	}
+
+	tcon->ipg_clk = of_clk_get_by_name(np, "ipg");
+	if (IS_ERR(tcon->ipg_clk)) {
+		dev_err(dev, "Couldn't get the TCON bus clock\n");
+		goto err_node_put;
+	}
+
+	clk_prepare_enable(tcon->ipg_clk);
+
+	dev_info(dev, "Using TCON in bypass mode\n");
+
+	return tcon;
+
+err_node_put:
+	of_node_put(np);
+	return NULL;
+}
+
+void fsl_tcon_free(struct fsl_tcon *tcon)
+{
+	clk_disable_unprepare(tcon->ipg_clk);
+	clk_put(tcon->ipg_clk);
+}
+
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.h b/drivers/gpu/drm/fsl-dcu/fsl_tcon.h
new file mode 100644
index 0000000..80a7617
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Toradex AG
+ *
+ * Stefan Agner <stefan@agner.ch>
+ *
+ * Freescale TCON device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_TCON_H__
+#define __FSL_TCON_H__
+
+#include <linux/bitops.h>
+
+#define FSL_TCON_CTRL1			0x0
+#define FSL_TCON_CTRL1_TCON_BYPASS	BIT(29)
+
+struct fsl_tcon {
+	struct regmap		*regs;
+	struct clk		*ipg_clk;
+};
+
+struct fsl_tcon *fsl_tcon_init(struct device *dev);
+void fsl_tcon_free(struct fsl_tcon *tcon);
+
+void fsl_tcon_bypass_disable(struct fsl_tcon *tcon);
+void fsl_tcon_bypass_enable(struct fsl_tcon *tcon);
+
+#endif /* __FSL_TCON_H__ */
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 033d894..7440bf9 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -411,7 +411,7 @@
 	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
 	if (IS_ERR(info)) {
 		ret = PTR_ERR(info);
-		goto out_err1;
+		goto err_free_range;
 	}
 	info->par = fbdev;
 
@@ -419,7 +419,7 @@
 
 	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
 	if (ret)
-		goto out_unref;
+		goto err_release;
 
 	fb = &psbfb->base;
 	psbfb->fbdev = info;
@@ -464,14 +464,9 @@
 					psbfb->base.width, psbfb->base.height);
 
 	return 0;
-out_unref:
-	if (backing->stolen)
-		psb_gtt_free_range(dev, backing);
-	else
-		drm_gem_object_unreference_unlocked(&backing->gem);
-
+err_release:
 	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
-out_err1:
+err_free_range:
 	psb_gtt_free_range(dev, backing);
 	return ret;
 }
@@ -495,7 +490,7 @@
 	 *	Find the GEM object and thus the gtt range object that is
 	 *	to back this space
 	 */
-	obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 506224b..6d1cb6b 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -63,7 +63,7 @@
 	struct drm_gem_object *obj;
 
 	/* GEM does all our handle to object mapping */
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 5bf765d..c95406e 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -372,7 +372,7 @@
 		return -EINVAL;
 	}
 
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		ret = -ENOENT;
 		goto unlock;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index 7cd87a0..a05c0206 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -979,11 +979,7 @@
 		return NULL;
 	}
 
-	if (dsi_connector->pipe)
-		dpi_output->panel_on = 0;
-	else
-		dpi_output->panel_on = 0;
-
+	dpi_output->panel_on = 0;
 	dpi_output->dev = dev;
 	if (mdfld_get_panel_type(dev, pipe) != TC35876X)
 		dpi_output->p_funcs = p_funcs;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 6b43ae3..1616af2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -72,7 +72,7 @@
 	"RX Prot Violation",
 	"HS Generic Write FIFO Full",
 	"LP Generic Write FIFO Full",
-	"Generic Read Data Avail"
+	"Generic Read Data Avail",
 	"Special Packet Sent",
 	"Tearing Effect",
 };
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 4e1c685..82b8ce4 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -374,7 +374,6 @@
 
 	drm_irq_install(dev, dev->pdev->irq);
 
-	dev->vblank_disable_allowed = true;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	dev->driver->get_vblank_counter = psb_get_vblank_counter;
 
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
new file mode 100644
index 0000000..558c61b
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -0,0 +1,5 @@
+#
+# HiSilicon DRM device configuration.
+# Please keep this list sorted alphabetically
+
+source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
new file mode 100644
index 0000000..e3f6d49
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for HiSilicon DRM drivers.
+# Please keep this list sorted alphabetically
+
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
new file mode 100644
index 0000000..ea0df61
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -0,0 +1,18 @@
+config DRM_HISI_KIRIN
+	tristate "DRM support for HiSilicon Kirin series SoCs"
+	depends on DRM && OF && ARM64
+	select DRM_KMS_HELPER
+	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_CMA_HELPER
+	help
+	  Choose this option if you have a HiSilicon Kirin chipset (hi6220).
+	  If M is selected the module will be called kirin-drm.
+
+config HISI_KIRIN_DW_DSI
+	tristate "HiSilicon Kirin specific extensions for Synopsys DW MIPI DSI"
+	depends on DRM_HISI_KIRIN
+	select DRM_MIPI_DSI
+	help
+	  This selects support for HiSilicon Kirin SoC specific extensions for
+	  the Synopsys DesignWare DSI driver. If you want to enable MIPI DSI on
+	  a hi6220-based SoC, you should select this option.
diff --git a/drivers/gpu/drm/hisilicon/kirin/Makefile b/drivers/gpu/drm/hisilicon/kirin/Makefile
new file mode 100644
index 0000000..cdf6158
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/Makefile
@@ -0,0 +1,6 @@
+kirin-drm-y := kirin_drm_drv.o \
+	       kirin_drm_ade.o
+
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o
+
+obj-$(CONFIG_HISI_KIRIN_DW_DSI) += dw_drm_dsi.o
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
new file mode 100644
index 0000000..998452a
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -0,0 +1,858 @@
+/*
+ * DesignWare MIPI DSI Host Controller v1.02 driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ *	Xinliang Liu <z.liuxinliang@hisilicon.com>
+ *	Xinliang Liu <xinliang.liu@linaro.org>
+ *	Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_of.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "dw_dsi_reg.h"
+
+#define MAX_TX_ESC_CLK		10
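+/* ROUND(x, y): x / y rounded to the nearest integer, e.g. ROUND(7, 2) == 4 */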
+#define ROUND(x, y)		((x) / (y) + \
+				((x) % (y) * 10 / (y) >= 5 ? 1 : 0))
+#define PHY_REF_CLK_RATE	19200000
+#define PHY_REF_CLK_PERIOD_PS	(1000000000 / (PHY_REF_CLK_RATE / 1000))
+
+#define encoder_to_dsi(encoder) \
+	container_of(encoder, struct dw_dsi, encoder)
+#define host_to_dsi(host) \
+	container_of(host, struct dw_dsi, host)
+
+struct mipi_phy_params {
+	u32 clk_t_lpx;
+	u32 clk_t_hs_prepare;
+	u32 clk_t_hs_zero;
+	u32 clk_t_hs_trial;
+	u32 clk_t_wakeup;
+	u32 data_t_lpx;
+	u32 data_t_hs_prepare;
+	u32 data_t_hs_zero;
+	u32 data_t_hs_trial;
+	u32 data_t_ta_go;
+	u32 data_t_ta_get;
+	u32 data_t_wakeup;
+	u32 hstx_ckg_sel;
+	u32 pll_fbd_div5f;
+	u32 pll_fbd_div1f;
+	u32 pll_fbd_2p;
+	u32 pll_enbwt;
+	u32 pll_fbd_p;
+	u32 pll_fbd_s;
+	u32 pll_pre_div1p;
+	u32 pll_pre_p;
+	u32 pll_vco_750M;
+	u32 pll_lpf_rs;
+	u32 pll_lpf_cs;
+	u32 clklp2hs_time;
+	u32 clkhs2lp_time;
+	u32 lp2hs_time;
+	u32 hs2lp_time;
+	u32 clk_to_data_delay;
+	u32 data_to_clk_delay;
+	u32 lane_byte_clk_kHz;
+	u32 clk_division;
+};
+
+struct dsi_hw_ctx {
+	void __iomem *base;
+	struct clk *pclk;
+};
+
+struct dw_dsi {
+	struct drm_encoder encoder;
+	struct drm_bridge *bridge;
+	struct mipi_dsi_host host;
+	struct drm_display_mode cur_mode;
+	struct dsi_hw_ctx *ctx;
+	struct mipi_phy_params phy;
+
+	u32 lanes;
+	enum mipi_dsi_pixel_format format;
+	unsigned long mode_flags;
+	bool enable;
+};
+
+struct dsi_data {
+	struct dw_dsi dsi;
+	struct dsi_hw_ctx ctx;
+};
+
+struct dsi_phy_range {
+	u32 min_range_kHz;
+	u32 max_range_kHz;
+	u32 pll_vco_750M;
+	u32 hstx_ckg_sel;
+};
+
+static const struct dsi_phy_range dphy_range_info[] = {
+	{   46875,    62500,   1,    7 },
+	{   62500,    93750,   0,    7 },
+	{   93750,   125000,   1,    6 },
+	{  125000,   187500,   0,    6 },
+	{  187500,   250000,   1,    5 },
+	{  250000,   375000,   0,    5 },
+	{  375000,   500000,   1,    4 },
+	{  500000,   750000,   0,    4 },
+	{  750000,  1000000,   1,    0 },
+	{ 1000000,  1500000,   0,    0 }
+};
+
+static u32 dsi_calc_phy_rate(u32 req_kHz, struct mipi_phy_params *phy)
+{
+	u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
+	u32 tmp_kHz = req_kHz;
+	u32 i = 0;
+	u32 q_pll = 1;
+	u32 m_pll = 0;
+	u32 n_pll = 0;
+	u32 r_pll = 1;
+	u32 m_n = 0;
+	u32 m_n_int = 0;
+	u32 f_kHz = 0;
+	u64 temp;
+
+	/*
+	 * Find a rate >= req_kHz.
+	 */
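+	/*
+	 * For each candidate rate: pick the PLL dividers, recompute the
+	 * actual output frequency, and bump the candidate by 10 kHz until
+	 * the result is at least req_kHz.
+	 */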
+	do {
+		f_kHz = tmp_kHz;
+
+		for (i = 0; i < ARRAY_SIZE(dphy_range_info); i++)
+			if (f_kHz >= dphy_range_info[i].min_range_kHz &&
+			    f_kHz <= dphy_range_info[i].max_range_kHz)
+				break;
+
+		if (i == ARRAY_SIZE(dphy_range_info)) {
+			DRM_ERROR("%dkHz out of range\n", f_kHz);
+			return 0;
+		}
+
+		phy->pll_vco_750M = dphy_range_info[i].pll_vco_750M;
+		phy->hstx_ckg_sel = dphy_range_info[i].hstx_ckg_sel;
+
+		if (phy->hstx_ckg_sel <= 7 &&
+		    phy->hstx_ckg_sel >= 4)
+			q_pll = 0x10 >> (7 - phy->hstx_ckg_sel);
+
+		temp = f_kHz * (u64)q_pll * (u64)ref_clk_ps;
+		m_n_int = temp / (u64)1000000000;
+		m_n = (temp % (u64)1000000000) / (u64)100000000;
+
+		if (m_n_int % 2 == 0) {
+			if (m_n * 6 >= 50) {
+				n_pll = 2;
+				m_pll = (m_n_int + 1) * n_pll;
+			} else if (m_n * 6 >= 30) {
+				n_pll = 3;
+				m_pll = m_n_int * n_pll + 2;
+			} else {
+				n_pll = 1;
+				m_pll = m_n_int * n_pll;
+			}
+		} else {
+			if (m_n * 6 >= 50) {
+				n_pll = 1;
+				m_pll = (m_n_int + 1) * n_pll;
+			} else if (m_n * 6 >= 30) {
+				n_pll = 1;
+				m_pll = (m_n_int + 1) * n_pll;
+			} else if (m_n * 6 >= 10) {
+				n_pll = 3;
+				m_pll = m_n_int * n_pll + 1;
+			} else {
+				n_pll = 2;
+				m_pll = m_n_int * n_pll;
+			}
+		}
+
+		if (n_pll == 1) {
+			phy->pll_fbd_p = 0;
+			phy->pll_pre_div1p = 1;
+		} else {
+			phy->pll_fbd_p = n_pll;
+			phy->pll_pre_div1p = 0;
+		}
+
+		if (phy->pll_fbd_2p <= 7 && phy->pll_fbd_2p >= 4)
+			r_pll = 0x10 >> (7 - phy->pll_fbd_2p);
+
+		if (m_pll == 2) {
+			phy->pll_pre_p = 0;
+			phy->pll_fbd_s = 0;
+			phy->pll_fbd_div1f = 0;
+			phy->pll_fbd_div5f = 1;
+		} else if (m_pll >= 2 * 2 * r_pll && m_pll <= 2 * 4 * r_pll) {
+			phy->pll_pre_p = m_pll / (2 * r_pll);
+			phy->pll_fbd_s = 0;
+			phy->pll_fbd_div1f = 1;
+			phy->pll_fbd_div5f = 0;
+		} else if (m_pll >= 2 * 5 * r_pll && m_pll <= 2 * 150 * r_pll) {
+			if (((m_pll / (2 * r_pll)) % 2) == 0) {
+				phy->pll_pre_p =
+					(m_pll / (2 * r_pll)) / 2 - 1;
+				phy->pll_fbd_s =
+					(m_pll / (2 * r_pll)) % 2 + 2;
+			} else {
+				phy->pll_pre_p =
+					(m_pll / (2 * r_pll)) / 2;
+				phy->pll_fbd_s =
+					(m_pll / (2 * r_pll)) % 2;
+			}
+			phy->pll_fbd_div1f = 0;
+			phy->pll_fbd_div5f = 0;
+		} else {
+			phy->pll_pre_p = 0;
+			phy->pll_fbd_s = 0;
+			phy->pll_fbd_div1f = 0;
+			phy->pll_fbd_div5f = 1;
+		}
+
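+		/*
+		 * Resulting PLL output frequency:
+		 * f_out = f_ref * M / (N * Q), computed from the
+		 * reference clock period in ps.
+		 */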
+		f_kHz = (u64)1000000000 * (u64)m_pll /
+			((u64)ref_clk_ps * (u64)n_pll * (u64)q_pll);
+
+		if (f_kHz >= req_kHz)
+			break;
+
+		tmp_kHz += 10;
+
+	} while (true);
+
+	return f_kHz;
+}
+
+static void dsi_get_phy_params(u32 phy_req_kHz,
+			       struct mipi_phy_params *phy)
+{
+	u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
+	u32 phy_rate_kHz;
+	u32 ui;
+
+	memset(phy, 0, sizeof(*phy));
+
+	phy_rate_kHz = dsi_calc_phy_rate(phy_req_kHz, phy);
+	if (!phy_rate_kHz)
+		return;
+
+	ui = 1000000 / phy_rate_kHz;
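+	/* ui: high-speed bit period in ns (1000000 / rate in kHz) */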
+
+	phy->clk_t_lpx = ROUND(50, 8 * ui);
+	phy->clk_t_hs_prepare = ROUND(133, 16 * ui) - 1;
+
+	phy->clk_t_hs_zero = ROUND(262, 8 * ui);
+	phy->clk_t_hs_trial = 2 * (ROUND(60, 8 * ui) - 1);
+	phy->clk_t_wakeup = ROUND(1000000, (ref_clk_ps / 1000) - 1);
+	if (phy->clk_t_wakeup > 0xff)
+		phy->clk_t_wakeup = 0xff;
+	phy->data_t_wakeup = phy->clk_t_wakeup;
+	phy->data_t_lpx = phy->clk_t_lpx;
+	phy->data_t_hs_prepare = ROUND(125 + 10 * ui, 16 * ui) - 1;
+	phy->data_t_hs_zero = ROUND(105 + 6 * ui, 8 * ui);
+	phy->data_t_hs_trial = 2 * (ROUND(60 + 4 * ui, 8 * ui) - 1);
+	phy->data_t_ta_go = 3;
+	phy->data_t_ta_get = 4;
+
+	phy->pll_enbwt = 1;
+	phy->clklp2hs_time = ROUND(407, 8 * ui) + 12;
+	phy->clkhs2lp_time = ROUND(105 + 12 * ui, 8 * ui);
+	phy->lp2hs_time = ROUND(240 + 12 * ui, 8 * ui) + 1;
+	phy->hs2lp_time = phy->clkhs2lp_time;
+	phy->clk_to_data_delay = 1 + phy->clklp2hs_time;
+	phy->data_to_clk_delay = ROUND(60 + 52 * ui, 8 * ui) +
+				phy->clkhs2lp_time;
+
+	phy->lane_byte_clk_kHz = phy_rate_kHz / 8;
+	phy->clk_division =
+		DIV_ROUND_UP(phy->lane_byte_clk_kHz, MAX_TX_ESC_CLK);
+}
+
+static u32 dsi_get_dpi_color_coding(enum mipi_dsi_pixel_format format)
+{
+	u32 val;
+
+	/*
+	 * TODO: only RGB888 is supported for now; add more formats later.
+	 */
+	switch (format) {
+	case MIPI_DSI_FMT_RGB888:
+		val = DSI_24BITS_1;
+		break;
+	default:
+		val = DSI_24BITS_1;
+		break;
+	}
+
+	return val;
+}
+
+/*
+ * dsi phy reg write function
+ */
+static void dsi_phy_tst_set(void __iomem *base, u32 reg, u32 val)
+{
+	u32 reg_write = 0x10000 + reg;
+
+	/*
+	 * latch reg first
+	 */
+	writel(reg_write, base + PHY_TST_CTRL1);
+	writel(0x02, base + PHY_TST_CTRL0);
+	writel(0x00, base + PHY_TST_CTRL0);
+
+	/*
+	 * then latch value
+	 */
+	writel(val, base + PHY_TST_CTRL1);
+	writel(0x02, base + PHY_TST_CTRL0);
+	writel(0x00, base + PHY_TST_CTRL0);
+}
+
+static void dsi_set_phy_timer(void __iomem *base,
+			      struct mipi_phy_params *phy,
+			      u32 lanes)
+{
+	u32 val;
+
+	/*
+	 * Set lane value and phy stop wait time.
+	 */
+	val = (lanes - 1) | (PHY_STOP_WAIT_TIME << 8);
+	writel(val, base + PHY_IF_CFG);
+
+	/*
+	 * Set phy clk division.
+	 */
+	val = readl(base + CLKMGR_CFG) | phy->clk_division;
+	writel(val, base + CLKMGR_CFG);
+
+	/*
+	 * Set lp and hs switching params.
+	 */
+	dw_update_bits(base + PHY_TMR_CFG, 24, MASK(8), phy->hs2lp_time);
+	dw_update_bits(base + PHY_TMR_CFG, 16, MASK(8), phy->lp2hs_time);
+	dw_update_bits(base + PHY_TMR_LPCLK_CFG, 16, MASK(10),
+		       phy->clkhs2lp_time);
+	dw_update_bits(base + PHY_TMR_LPCLK_CFG, 0, MASK(10),
+		       phy->clklp2hs_time);
+	dw_update_bits(base + CLK_DATA_TMR_CFG, 8, MASK(8),
+		       phy->data_to_clk_delay);
+	dw_update_bits(base + CLK_DATA_TMR_CFG, 0, MASK(8),
+		       phy->clk_to_data_delay);
+}
+
+static void dsi_set_mipi_phy(void __iomem *base,
+			     struct mipi_phy_params *phy,
+			     u32 lanes)
+{
+	u32 delay_count;
+	u32 val;
+	u32 i;
+
+	/* phy timer setting */
+	dsi_set_phy_timer(base, phy, lanes);
+
+	/*
+	 * Reset to clean up phy tst params.
+	 */
+	writel(0, base + PHY_RSTZ);
+	writel(0, base + PHY_TST_CTRL0);
+	writel(1, base + PHY_TST_CTRL0);
+	writel(0, base + PHY_TST_CTRL0);
+
+	/*
+	 * Clock lane timing control setting: TLPX, THS-PREPARE,
+	 * THS-ZERO, THS-TRAIL, TWAKEUP.
+	 */
+	dsi_phy_tst_set(base, CLK_TLPX, phy->clk_t_lpx);
+	dsi_phy_tst_set(base, CLK_THS_PREPARE, phy->clk_t_hs_prepare);
+	dsi_phy_tst_set(base, CLK_THS_ZERO, phy->clk_t_hs_zero);
+	dsi_phy_tst_set(base, CLK_THS_TRAIL, phy->clk_t_hs_trial);
+	dsi_phy_tst_set(base, CLK_TWAKEUP, phy->clk_t_wakeup);
+
+	/*
+	 * Data lane timing control setting: TLPX, THS-PREPARE,
+	 * THS-ZERO, THS-TRAIL, TTA-GO, TTA-GET, TWAKEUP.
+	 */
+	for (i = 0; i < lanes; i++) {
+		dsi_phy_tst_set(base, DATA_TLPX(i), phy->data_t_lpx);
+		dsi_phy_tst_set(base, DATA_THS_PREPARE(i),
+				phy->data_t_hs_prepare);
+		dsi_phy_tst_set(base, DATA_THS_ZERO(i), phy->data_t_hs_zero);
+		dsi_phy_tst_set(base, DATA_THS_TRAIL(i), phy->data_t_hs_trial);
+		dsi_phy_tst_set(base, DATA_TTA_GO(i), phy->data_t_ta_go);
+		dsi_phy_tst_set(base, DATA_TTA_GET(i), phy->data_t_ta_get);
+		dsi_phy_tst_set(base, DATA_TWAKEUP(i), phy->data_t_wakeup);
+	}
+
+	/*
+	 * physical configuration: I, pll I, pll II, pll III,
+	 * pll IV, pll V.
+	 */
+	dsi_phy_tst_set(base, PHY_CFG_I, phy->hstx_ckg_sel);
+	val = (phy->pll_fbd_div5f << 5) + (phy->pll_fbd_div1f << 4) +
+				(phy->pll_fbd_2p << 1) + phy->pll_enbwt;
+	dsi_phy_tst_set(base, PHY_CFG_PLL_I, val);
+	dsi_phy_tst_set(base, PHY_CFG_PLL_II, phy->pll_fbd_p);
+	dsi_phy_tst_set(base, PHY_CFG_PLL_III, phy->pll_fbd_s);
+	val = (phy->pll_pre_div1p << 7) + phy->pll_pre_p;
+	dsi_phy_tst_set(base, PHY_CFG_PLL_IV, val);
+	val = (5 << 5) + (phy->pll_vco_750M << 4) + (phy->pll_lpf_rs << 2) +
+		phy->pll_lpf_cs;
+	dsi_phy_tst_set(base, PHY_CFG_PLL_V, val);
+
+	writel(PHY_ENABLECLK, base + PHY_RSTZ);
+	udelay(1);
+	writel(PHY_ENABLECLK | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
+	udelay(1);
+	writel(PHY_ENABLECLK | PHY_UNRSTZ | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
+	usleep_range(1000, 1500);
+
+	/*
+	 * wait for phy's clock ready
+	 */
+	delay_count = 100;
+	while (delay_count) {
+		val = readl(base +  PHY_STATUS);
+		if ((BIT(0) | BIT(2)) & val)
+			break;
+
+		udelay(1);
+		delay_count--;
+	}
+
+	if (!delay_count)
+		DRM_INFO("phylock and phystopstateclklane are not ready.\n");
+}
+
+static void dsi_set_mode_timing(void __iomem *base,
+				u32 lane_byte_clk_kHz,
+				struct drm_display_mode *mode,
+				enum mipi_dsi_pixel_format format)
+{
+	u32 hfp, hbp, hsw, vfp, vbp, vsw;
+	u32 hline_time;
+	u32 hsa_time;
+	u32 hbp_time;
+	u32 pixel_clk_kHz;
+	int htot, vtot;
+	u32 val;
+	u64 tmp;
+
+	val = dsi_get_dpi_color_coding(format);
+	writel(val, base + DPI_COLOR_CODING);
+
+	val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? 1 : 0) << 2;
+	val |= (mode->flags & DRM_MODE_FLAG_NVSYNC ? 1 : 0) << 1;
+	writel(val, base +  DPI_CFG_POL);
+
+	/*
+	 * The DSI IP accepts vertical timing using lines as normal,
+	 * but horizontal timing is a mixture of pixel-clocks for the
+	 * active region and byte-lane clocks for the blanking-related
+	 * timings.  hfp is specified as the total hline_time in byte-
+	 * lane clocks minus hsa, hbp and active.
+	 */
+	pixel_clk_kHz = mode->clock;
+	htot = mode->htotal;
+	vtot = mode->vtotal;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hbp = mode->htotal - mode->hsync_end;
+	hsw = mode->hsync_end - mode->hsync_start;
+	vfp = mode->vsync_start - mode->vdisplay;
+	vbp = mode->vtotal - mode->vsync_end;
+	vsw = mode->vsync_end - mode->vsync_start;
+	if (vsw > 15) {
+		DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+		vsw = 15;
+	}
+
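+	/* convert horizontal timings from pixel clocks to byte-lane clocks */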
+	hsa_time = (hsw * lane_byte_clk_kHz) / pixel_clk_kHz;
+	hbp_time = (hbp * lane_byte_clk_kHz) / pixel_clk_kHz;
+	tmp = (u64)htot * (u64)lane_byte_clk_kHz;
+	hline_time = DIV_ROUND_UP(tmp, pixel_clk_kHz);
+
+	/* all specified in byte-lane clocks */
+	writel(hsa_time, base + VID_HSA_TIME);
+	writel(hbp_time, base + VID_HBP_TIME);
+	writel(hline_time, base + VID_HLINE_TIME);
+
+	writel(vsw, base + VID_VSA_LINES);
+	writel(vbp, base + VID_VBP_LINES);
+	writel(vfp, base + VID_VFP_LINES);
+	writel(mode->vdisplay, base + VID_VACTIVE_LINES);
+	writel(mode->hdisplay, base + VID_PKT_SIZE);
+
+	DRM_DEBUG_DRIVER("htot=%d, hfp=%d, hbp=%d, hsw=%d\n",
+			 htot, hfp, hbp, hsw);
+	DRM_DEBUG_DRIVER("vtot=%d, vfp=%d, vbp=%d, vsw=%d\n",
+			 vtot, vfp, vbp, vsw);
+	DRM_DEBUG_DRIVER("hsa_time=%d, hbp_time=%d, hline_time=%d\n",
+			 hsa_time, hbp_time, hline_time);
+}
+
+static void dsi_set_video_mode(void __iomem *base, unsigned long flags)
+{
+	u32 val;
+	u32 mode_mask = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+		MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+	u32 non_burst_sync_pulse = MIPI_DSI_MODE_VIDEO |
+		MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+	u32 non_burst_sync_event = MIPI_DSI_MODE_VIDEO;
+
+	/*
+	 * choose video mode type
+	 */
+	if ((flags & mode_mask) == non_burst_sync_pulse)
+		val = DSI_NON_BURST_SYNC_PULSES;
+	else if ((flags & mode_mask) == non_burst_sync_event)
+		val = DSI_NON_BURST_SYNC_EVENTS;
+	else
+		val = DSI_BURST_SYNC_PULSES_1;
+	writel(val, base + VID_MODE_CFG);
+
+	writel(PHY_TXREQUESTCLKHS, base + LPCLK_CTRL);
+	writel(DSI_VIDEO_MODE, base + MODE_CFG);
+}
+
+static void dsi_mipi_init(struct dw_dsi *dsi)
+{
+	struct dsi_hw_ctx *ctx = dsi->ctx;
+	struct mipi_phy_params *phy = &dsi->phy;
+	struct drm_display_mode *mode = &dsi->cur_mode;
+	u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+	void __iomem *base = ctx->base;
+	u32 dphy_req_kHz;
+
+	/*
+	 * calculate the phy params
+	 */
+	dphy_req_kHz = mode->clock * bpp / dsi->lanes;
+	dsi_get_phy_params(dphy_req_kHz, phy);
+
+	/* reset Core */
+	writel(RESET, base + PWR_UP);
+
+	/* set dsi phy params */
+	dsi_set_mipi_phy(base, phy, dsi->lanes);
+
+	/* set dsi mode timing */
+	dsi_set_mode_timing(base, phy->lane_byte_clk_kHz, mode, dsi->format);
+
+	/* set dsi video mode */
+	dsi_set_video_mode(base, dsi->mode_flags);
+
+	/* dsi wake up */
+	writel(POWERUP, base + PWR_UP);
+
+	DRM_DEBUG_DRIVER("lanes=%d, pixel_clk=%d kHz, bytes_freq=%d kHz\n",
+			 dsi->lanes, mode->clock, phy->lane_byte_clk_kHz);
+}
+
+static void dsi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct dw_dsi *dsi = encoder_to_dsi(encoder);
+	struct dsi_hw_ctx *ctx = dsi->ctx;
+	void __iomem *base = ctx->base;
+
+	if (!dsi->enable)
+		return;
+
+	writel(0, base + PWR_UP);
+	writel(0, base + LPCLK_CTRL);
+	writel(0, base + PHY_RSTZ);
+	clk_disable_unprepare(ctx->pclk);
+
+	dsi->enable = false;
+}
+
+static void dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct dw_dsi *dsi = encoder_to_dsi(encoder);
+	struct dsi_hw_ctx *ctx = dsi->ctx;
+	int ret;
+
+	if (dsi->enable)
+		return;
+
+	ret = clk_prepare_enable(ctx->pclk);
+	if (ret) {
+		DRM_ERROR("fail to enable pclk: %d\n", ret);
+		return;
+	}
+
+	dsi_mipi_init(dsi);
+
+	dsi->enable = true;
+}
+
+static void dsi_encoder_mode_set(struct drm_encoder *encoder,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adj_mode)
+{
+	struct dw_dsi *dsi = encoder_to_dsi(encoder);
+
+	drm_mode_copy(&dsi->cur_mode, adj_mode);
+}
+
+static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
+				    struct drm_crtc_state *crtc_state,
+				    struct drm_connector_state *conn_state)
+{
+	/* do nothing */
+	return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
+	.atomic_check	= dsi_encoder_atomic_check,
+	.mode_set	= dsi_encoder_mode_set,
+	.enable		= dsi_encoder_enable,
+	.disable	= dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs dw_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int dw_drm_encoder_init(struct device *dev,
+			       struct drm_device *drm_dev,
+			       struct drm_encoder *encoder)
+{
+	int ret;
+	u32 crtc_mask = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+
+	if (!crtc_mask) {
+		DRM_ERROR("failed to find crtc mask\n");
+		return -EINVAL;
+	}
+
+	encoder->possible_crtcs = crtc_mask;
+	ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
+			       DRM_MODE_ENCODER_DSI, NULL);
+	if (ret) {
+		DRM_ERROR("failed to init dsi encoder\n");
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &dw_encoder_helper_funcs);
+
+	return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *mdsi)
+{
+	struct dw_dsi *dsi = host_to_dsi(host);
+
+	if (mdsi->lanes < 1 || mdsi->lanes > 4) {
+		DRM_ERROR("dsi device params invalid\n");
+		return -EINVAL;
+	}
+
+	dsi->lanes = mdsi->lanes;
+	dsi->format = mdsi->format;
+	dsi->mode_flags = mdsi->mode_flags;
+
+	return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *mdsi)
+{
+	/* do nothing */
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+	.attach = dsi_host_attach,
+	.detach = dsi_host_detach,
+};
+
+static int dsi_host_init(struct device *dev, struct dw_dsi *dsi)
+{
+	struct mipi_dsi_host *host = &dsi->host;
+	int ret;
+
+	host->dev = dev;
+	host->ops = &dsi_host_ops;
+	ret = mipi_dsi_host_register(host);
+	if (ret) {
+		DRM_ERROR("failed to register dsi host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
+{
+	struct drm_encoder *encoder = &dsi->encoder;
+	struct drm_bridge *bridge = dsi->bridge;
+	int ret;
+
+	/* associate the bridge to dsi encoder */
+	encoder->bridge = bridge;
+	bridge->encoder = encoder;
+
+	ret = drm_bridge_attach(dev, bridge);
+	if (ret) {
+		DRM_ERROR("failed to attach external bridge\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct dsi_data *ddata = dev_get_drvdata(dev);
+	struct dw_dsi *dsi = &ddata->dsi;
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	ret = dw_drm_encoder_init(dev, drm_dev, &dsi->encoder);
+	if (ret)
+		return ret;
+
+	ret = dsi_host_init(dev, dsi);
+	if (ret)
+		return ret;
+
+	ret = dsi_bridge_init(drm_dev, dsi);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+	/* do nothing */
+}
+
+static const struct component_ops dsi_ops = {
+	.bind	= dsi_bind,
+	.unbind	= dsi_unbind,
+};
+
+static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
+{
+	struct dsi_hw_ctx *ctx = dsi->ctx;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *endpoint, *bridge_node;
+	struct drm_bridge *bridge;
+	struct resource *res;
+
+	/*
+	 * Get the endpoint node. In our case, the DSI device has one output
+	 * port (port 1) to which the external HDMI bridge is connected.
+	 */
+	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
+	if (!endpoint) {
+		DRM_ERROR("no valid endpoint node\n");
+		return -ENODEV;
+	}
+	of_node_put(endpoint);
+
+	bridge_node = of_graph_get_remote_port_parent(endpoint);
+	if (!bridge_node) {
+		DRM_ERROR("no valid bridge node\n");
+		return -ENODEV;
+	}
+	of_node_put(bridge_node);
+
+	bridge = of_drm_find_bridge(bridge_node);
+	if (!bridge) {
+		DRM_INFO("waiting for the external HDMI bridge driver\n");
+		return -EPROBE_DEFER;
+	}
+	dsi->bridge = bridge;
+
+	ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(ctx->pclk)) {
+		DRM_ERROR("failed to get pclk clock\n");
+		return PTR_ERR(ctx->pclk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ctx->base)) {
+		DRM_ERROR("failed to remap dsi io region\n");
+		return PTR_ERR(ctx->base);
+	}
+
+	return 0;
+}
+
+static int dsi_probe(struct platform_device *pdev)
+{
+	struct dsi_data *data;
+	struct dw_dsi *dsi;
+	struct dsi_hw_ctx *ctx;
+	int ret;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		DRM_ERROR("failed to allocate dsi data.\n");
+		return -ENOMEM;
+	}
+	dsi = &data->dsi;
+	ctx = &data->ctx;
+	dsi->ctx = ctx;
+
+	ret = dsi_parse_dt(pdev, dsi);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, data);
+
+	return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dsi_ops);
+
+	return 0;
+}
+
+static const struct of_device_id dsi_of_match[] = {
+	{.compatible = "hisilicon,hi6220-dsi"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+	.probe = dsi_probe,
+	.remove = dsi_remove,
+	.driver = {
+		.name = "dw-dsi",
+		.of_match_table = dsi_of_match,
+	},
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("DesignWare MIPI DSI Host Controller v1.02 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
new file mode 100644
index 0000000..18808fc
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DW_DSI_REG_H__
+#define __DW_DSI_REG_H__
+
+#define MASK(x)				(BIT(x) - 1)
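+/* MASK(x): the x lowest bits set, e.g. MASK(8) == 0xff */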
+
+/*
+ * regs
+ */
+#define PWR_UP                  0x04  /* Core power-up */
+#define RESET                   0
+#define POWERUP                 BIT(0)
+#define PHY_IF_CFG              0xA4  /* D-PHY interface configuration */
+#define CLKMGR_CFG              0x08  /* the internal clock dividers */
+#define PHY_RSTZ                0xA0  /* D-PHY reset control */
+#define PHY_ENABLECLK           BIT(2)
+#define PHY_UNRSTZ              BIT(1)
+#define PHY_UNSHUTDOWNZ         BIT(0)
+#define PHY_TST_CTRL0           0xB4  /* D-PHY test interface control 0 */
+#define PHY_TST_CTRL1           0xB8  /* D-PHY test interface control 1 */
+#define CLK_TLPX                0x10
+#define CLK_THS_PREPARE         0x11
+#define CLK_THS_ZERO            0x12
+#define CLK_THS_TRAIL           0x13
+#define CLK_TWAKEUP             0x14
+#define DATA_TLPX(x)            (0x20 + ((x) << 4))
+#define DATA_THS_PREPARE(x)     (0x21 + ((x) << 4))
+#define DATA_THS_ZERO(x)        (0x22 + ((x) << 4))
+#define DATA_THS_TRAIL(x)       (0x23 + ((x) << 4))
+#define DATA_TTA_GO(x)          (0x24 + ((x) << 4))
+#define DATA_TTA_GET(x)         (0x25 + ((x) << 4))
+#define DATA_TWAKEUP(x)         (0x26 + ((x) << 4))
+#define PHY_CFG_I               0x60
+#define PHY_CFG_PLL_I           0x63
+#define PHY_CFG_PLL_II          0x64
+#define PHY_CFG_PLL_III         0x65
+#define PHY_CFG_PLL_IV          0x66
+#define PHY_CFG_PLL_V           0x67
+#define DPI_COLOR_CODING        0x10  /* DPI color coding */
+#define DPI_CFG_POL             0x14  /* DPI polarity configuration */
+#define VID_HSA_TIME            0x48  /* Horizontal Sync Active time */
+#define VID_HBP_TIME            0x4C  /* Horizontal Back Porch time */
+#define VID_HLINE_TIME          0x50  /* Line time */
+#define VID_VSA_LINES           0x54  /* Vertical Sync Active period */
+#define VID_VBP_LINES           0x58  /* Vertical Back Porch period */
+#define VID_VFP_LINES           0x5C  /* Vertical Front Porch period */
+#define VID_VACTIVE_LINES       0x60  /* Vertical resolution */
+#define VID_PKT_SIZE            0x3C  /* Video packet size */
+#define VID_MODE_CFG            0x38  /* Video mode configuration */
+#define PHY_TMR_CFG             0x9C  /* Data lanes timing configuration */
+#define BTA_TO_CNT              0x8C  /* Response timeout definition */
+#define PHY_TMR_LPCLK_CFG       0x98  /* clock lane timing configuration */
+#define CLK_DATA_TMR_CFG        0xCC
+#define LPCLK_CTRL              0x94  /* Low-power in clock lane */
+#define PHY_TXREQUESTCLKHS      BIT(0)
+#define MODE_CFG                0x34  /* Video or Command mode selection */
+#define PHY_STATUS              0xB0  /* D-PHY PPI status interface */
+
+#define	PHY_STOP_WAIT_TIME      0x30
+
+/*
+ * regs relevant enum
+ */
+enum dpi_color_coding {
+	DSI_24BITS_1 = 5,
+};
+
+enum dsi_video_mode_type {
+	DSI_NON_BURST_SYNC_PULSES = 0,
+	DSI_NON_BURST_SYNC_EVENTS,
+	DSI_BURST_SYNC_PULSES_1,
+	DSI_BURST_SYNC_PULSES_2
+};
+
+enum dsi_work_mode {
+	DSI_VIDEO_MODE = 0,
+	DSI_COMMAND_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
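+/*
+ * Read-modify-write helper: replace the mask-wide field that starts at
+ * bit_start with val, e.g. dw_update_bits(base + PHY_TMR_CFG, 24, MASK(8), t)
+ * programs bits [31:24] of PHY_TMR_CFG.
+ */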
+static inline void dw_update_bits(void __iomem *addr, u32 bit_start,
+				  u32 mask, u32 val)
+{
+	u32 tmp, orig;
+
+	orig = readl(addr);
+	tmp = orig & ~(mask << bit_start);
+	tmp |= (val & mask) << bit_start;
+	writel(tmp, addr);
+}
+
+#endif /* __DW_DSI_REG_H__ */
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
new file mode 100644
index 0000000..4cf281b7
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_ADE_REG_H__
+#define __KIRIN_ADE_REG_H__
+
+/*
+ * ADE Registers
+ */
+#define MASK(x)				(BIT(x) - 1)
+
+#define ADE_CTRL			0x0004
+#define FRM_END_START_OFST		0
+#define FRM_END_START_MASK		MASK(2)
+#define AUTO_CLK_GATE_EN_OFST		0
+#define AUTO_CLK_GATE_EN		BIT(0)
+#define ADE_DISP_SRC_CFG		0x0018
+#define ADE_CTRL1			0x008C
+#define ADE_EN				0x0100
+#define ADE_DISABLE			0
+#define ADE_ENABLE			1
+/* reset and reload regs */
+#define ADE_SOFT_RST_SEL(x)		(0x0078 + (x) * 0x4)
+#define ADE_RELOAD_DIS(x)		(0x00AC + (x) * 0x4)
+#define RDMA_OFST			0
+#define CLIP_OFST			15
+#define SCL_OFST			21
+#define CTRAN_OFST			24
+#define OVLY_OFST			37 /* 32+5 */
+/* channel regs */
+#define RD_CH_CTRL(x)			(0x1004 + (x) * 0x80)
+#define RD_CH_ADDR(x)			(0x1008 + (x) * 0x80)
+#define RD_CH_SIZE(x)			(0x100C + (x) * 0x80)
+#define RD_CH_STRIDE(x)			(0x1010 + (x) * 0x80)
+#define RD_CH_SPACE(x)			(0x1014 + (x) * 0x80)
+#define RD_CH_EN(x)			(0x1020 + (x) * 0x80)
+/* overlay regs */
+#define ADE_OVLY1_TRANS_CFG		0x002C
+#define ADE_OVLY_CTL			0x0098
+#define ADE_OVLY_CH_XY0(x)		(0x2004 + (x) * 4)
+#define ADE_OVLY_CH_XY1(x)		(0x2024 + (x) * 4)
+#define ADE_OVLY_CH_CTL(x)		(0x204C + (x) * 4)
+#define ADE_OVLY_OUTPUT_SIZE(x)		(0x2070 + (x) * 8)
+#define OUTPUT_XSIZE_OFST		16
+#define ADE_OVLYX_CTL(x)		(0x209C + (x) * 4)
+#define CH_OVLY_SEL_OFST(x)		((x) * 4)
+#define CH_OVLY_SEL_MASK		MASK(2)
+#define CH_OVLY_SEL_VAL(x)		((x) + 1)
+#define CH_ALP_MODE_OFST		0
+#define CH_ALP_SEL_OFST			2
+#define CH_UNDER_ALP_SEL_OFST		4
+#define CH_EN_OFST			6
+#define CH_ALP_GBL_OFST			15
+#define CH_SEL_OFST			28
+/* ctran regs */
+#define ADE_CTRAN_DIS(x)		(0x5004 + (x) * 0x100)
+#define CTRAN_BYPASS_ON			1
+#define CTRAN_BYPASS_OFF		0
+#define ADE_CTRAN_IMAGE_SIZE(x)		(0x503C + (x) * 0x100)
+/* clip regs */
+#define ADE_CLIP_DISABLE(x)		(0x6800 + (x) * 0x100)
+#define ADE_CLIP_SIZE0(x)		(0x6804 + (x) * 0x100)
+#define ADE_CLIP_SIZE1(x)		(0x6808 + (x) * 0x100)
+
+/*
+ * LDI Registers
+ */
+#define LDI_HRZ_CTRL0			0x7400
+#define HBP_OFST			20
+#define LDI_HRZ_CTRL1			0x7404
+#define LDI_VRT_CTRL0			0x7408
+#define VBP_OFST			20
+#define LDI_VRT_CTRL1			0x740C
+#define LDI_PLR_CTRL			0x7410
+#define FLAG_NVSYNC			BIT(0)
+#define FLAG_NHSYNC			BIT(1)
+#define FLAG_NPIXCLK			BIT(2)
+#define FLAG_NDE			BIT(3)
+#define LDI_DSP_SIZE			0x7414
+#define VSIZE_OFST			20
+#define LDI_INT_EN			0x741C
+#define FRAME_END_INT_EN_OFST		1
+#define LDI_CTRL			0x7420
+#define BPP_OFST			3
+#define DATA_GATE_EN			BIT(2)
+#define LDI_EN				BIT(0)
+#define LDI_MSK_INT			0x7428
+#define LDI_INT_CLR			0x742C
+#define LDI_WORK_MODE			0x7430
+#define LDI_HDMI_DSI_GT			0x7434
+
+/*
+ * ADE media bus service regs
+ */
+#define ADE0_QOSGENERATOR_MODE		0x010C
+#define QOSGENERATOR_MODE_MASK		MASK(2)
+#define ADE0_QOSGENERATOR_EXTCONTROL	0x0118
+#define SOCKET_QOS_EN			BIT(0)
+#define ADE1_QOSGENERATOR_MODE		0x020C
+#define ADE1_QOSGENERATOR_EXTCONTROL	0x0218
+
+/*
+ * ADE regs relevant enums
+ */
+enum frame_end_start {
+	/* regs take effect in every vsync */
+	REG_EFFECTIVE_IN_VSYNC = 0,
+	/* regs take effect in first ade en and every frame end */
+	REG_EFFECTIVE_IN_ADEEN_FRMEND,
+	/* regs take effect in ade en immediately */
+	REG_EFFECTIVE_IN_ADEEN,
+	/* regs take effect in first vsync and every frame end */
+	REG_EFFECTIVE_IN_VSYNC_FRMEND
+};
+
+enum ade_fb_format {
+	ADE_RGB_565 = 0,
+	ADE_BGR_565,
+	ADE_XRGB_8888,
+	ADE_XBGR_8888,
+	ADE_ARGB_8888,
+	ADE_ABGR_8888,
+	ADE_RGBA_8888,
+	ADE_BGRA_8888,
+	ADE_RGB_888,
+	ADE_BGR_888 = 9,
+	ADE_FORMAT_UNSUPPORT = 800
+};
+
+enum ade_channel {
+	ADE_CH1 = 0,	/* channel 1 for primary plane */
+	ADE_CH_NUM
+};
+
+enum ade_scale {
+	ADE_SCL1 = 0,
+	ADE_SCL2,
+	ADE_SCL3,
+	ADE_SCL_NUM
+};
+
+enum ade_ctran {
+	ADE_CTRAN1 = 0,
+	ADE_CTRAN2,
+	ADE_CTRAN3,
+	ADE_CTRAN4,
+	ADE_CTRAN5,
+	ADE_CTRAN6,
+	ADE_CTRAN_NUM
+};
+
+enum ade_overlay {
+	ADE_OVLY1 = 0,
+	ADE_OVLY2,
+	ADE_OVLY3,
+	ADE_OVLY_NUM
+};
+
+enum ade_alpha_mode {
+	ADE_ALP_GLOBAL = 0,
+	ADE_ALP_PIXEL,
+	ADE_ALP_PIXEL_AND_GLB
+};
+
+enum ade_alpha_blending_mode {
+	ADE_ALP_MUL_COEFF_0 = 0,	/* alpha */
+	ADE_ALP_MUL_COEFF_1,		/* 1-alpha */
+	ADE_ALP_MUL_COEFF_2,		/* 0 */
+	ADE_ALP_MUL_COEFF_3		/* 1 */
+};
+
+/*
+ * LDI regs relevant enums
+ */
+enum dsi_pclk_en {
+	DSI_PCLK_ON = 0,
+	DSI_PCLK_OFF
+};
+
+enum ldi_output_format {
+	LDI_OUT_RGB_565 = 0,
+	LDI_OUT_RGB_666,
+	LDI_OUT_RGB_888
+};
+
+enum ldi_work_mode {
+	TEST_MODE = 0,
+	NORMAL_MODE
+};
+
+enum ldi_input_source {
+	DISP_SRC_NONE = 0,
+	DISP_SRC_OVLY2,
+	DISP_SRC_DISP,
+	DISP_SRC_ROT,
+	DISP_SRC_SCL2
+};
+
+/*
+ * ADE media bus service relevant enums
+ */
+enum qos_generator_mode {
+	FIXED_MODE = 0,
+	LIMITER_MODE,
+	BYPASS_MODE,
+	REGULATOR_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
+static inline void ade_update_bits(void __iomem *addr, u32 bit_start,
+				   u32 mask, u32 val)
+{
+	u32 tmp, orig;
+
+	orig = readl(addr);
+	tmp = orig & ~(mask << bit_start);
+	tmp |= (val & mask) << bit_start;
+	writel(tmp, addr);
+}
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
new file mode 100644
index 0000000..fba6372
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -0,0 +1,1057 @@
+/*
+ * Hisilicon Hi6220 SoC ADE(Advanced Display Engine)'s crtc&plane driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ *	Xinliang Liu <z.liuxinliang@hisilicon.com>
+ *	Xinliang Liu <xinliang.liu@linaro.org>
+ *	Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <video/display_timing.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "kirin_drm_drv.h"
+#include "kirin_ade_reg.h"
+
+#define PRIMARY_CH	ADE_CH1 /* primary plane */
+#define OUT_OVLY	ADE_OVLY2 /* output overlay compositor */
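+/* set to 0 to compile out the register-dump helpers below */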
+#define ADE_DEBUG	1
+
+#define to_ade_crtc(crtc) \
+	container_of(crtc, struct ade_crtc, base)
+
+#define to_ade_plane(plane) \
+	container_of(plane, struct ade_plane, base)
+
+struct ade_hw_ctx {
+	void __iomem  *base;
+	struct regmap *noc_regmap;
+	struct clk *ade_core_clk;
+	struct clk *media_noc_clk;
+	struct clk *ade_pix_clk;
+	struct reset_control *reset;
+	bool power_on;
+	int irq;
+};
+
+struct ade_crtc {
+	struct drm_crtc base;
+	struct ade_hw_ctx *ctx;
+	bool enable;
+	u32 out_format;
+};
+
+struct ade_plane {
+	struct drm_plane base;
+	void *ctx;
+	u8 ch; /* channel */
+};
+
+struct ade_data {
+	struct ade_crtc acrtc;
+	struct ade_plane aplane[ADE_CH_NUM];
+	struct ade_hw_ctx ctx;
+};
+
+/* ade-format info: */
+struct ade_format {
+	u32 pixel_format;
+	enum ade_fb_format ade_format;
+};
+
+static const struct ade_format ade_formats[] = {
+	/* 16bpp RGB: */
+	{ DRM_FORMAT_RGB565, ADE_RGB_565 },
+	{ DRM_FORMAT_BGR565, ADE_BGR_565 },
+	/* 24bpp RGB: */
+	{ DRM_FORMAT_RGB888, ADE_RGB_888 },
+	{ DRM_FORMAT_BGR888, ADE_BGR_888 },
+	/* 32bpp [A]RGB: */
+	{ DRM_FORMAT_XRGB8888, ADE_XRGB_8888 },
+	{ DRM_FORMAT_XBGR8888, ADE_XBGR_8888 },
+	{ DRM_FORMAT_RGBA8888, ADE_RGBA_8888 },
+	{ DRM_FORMAT_BGRA8888, ADE_BGRA_8888 },
+	{ DRM_FORMAT_ARGB8888, ADE_ARGB_8888 },
+	{ DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
+};
+
+static const u32 channel_formats1[] = {
+	/* channel 1,2,3,4 */
+	DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888
+};
+
+u32 ade_get_channel_formats(u8 ch, const u32 **formats)
+{
+	switch (ch) {
+	case ADE_CH1:
+		*formats = channel_formats1;
+		return ARRAY_SIZE(channel_formats1);
+	default:
+		DRM_ERROR("unsupported channel %d\n", ch);
+		*formats = NULL;
+		return 0;
+	}
+}
+
+/* convert from fourcc format to ade format */
+static u32 ade_get_format(u32 pixel_format)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ade_formats); i++)
+		if (ade_formats[i].pixel_format == pixel_format)
+			return ade_formats[i].ade_format;
+
+	/* not found */
+	DRM_ERROR("unsupported pixel format, fourcc_format=%d\n",
+		  pixel_format);
+	return ADE_FORMAT_UNSUPPORT;
+}
+
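+/*
+ * The per-module reload-disable bits span two consecutive 32-bit
+ * ADE_RELOAD_DIS registers, so a bit number is split into a register
+ * index (bit_num / 32) and a bit offset (bit_num % 32).
+ */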
+static void ade_update_reload_bit(void __iomem *base, u32 bit_num, u32 val)
+{
+	u32 bit_ofst, reg_num;
+
+	bit_ofst = bit_num % 32;
+	reg_num = bit_num / 32;
+
+	ade_update_bits(base + ADE_RELOAD_DIS(reg_num), bit_ofst,
+			MASK(1), !!val);
+}
+
+static u32 ade_read_reload_bit(void __iomem *base, u32 bit_num)
+{
+	u32 tmp, bit_ofst, reg_num;
+
+	bit_ofst = bit_num % 32;
+	reg_num = bit_num / 32;
+
+	tmp = readl(base + ADE_RELOAD_DIS(reg_num));
+	return !!(BIT(bit_ofst) & tmp);
+}
+
+static void ade_init(struct ade_hw_ctx *ctx)
+{
+	void __iomem *base = ctx->base;
+
+	/* enable clk gate */
+	ade_update_bits(base + ADE_CTRL1, AUTO_CLK_GATE_EN_OFST,
+			AUTO_CLK_GATE_EN, ADE_ENABLE);
+	/* clear overlay */
+	writel(0, base + ADE_OVLY1_TRANS_CFG);
+	writel(0, base + ADE_OVLY_CTL);
+	writel(0, base + ADE_OVLYX_CTL(OUT_OVLY));
+	/* clear reset and reload regs */
+	writel(MASK(32), base + ADE_SOFT_RST_SEL(0));
+	writel(MASK(32), base + ADE_SOFT_RST_SEL(1));
+	writel(MASK(32), base + ADE_RELOAD_DIS(0));
+	writel(MASK(32), base + ADE_RELOAD_DIS(1));
+	/*
+	 * for video mode, all the ade registers should
+	 * become effective at frame end.
+	 */
+	ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
+			FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
+}
+
+static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adj_mode)
+{
+	u32 clk_Hz = mode->clock * 1000;
+	int ret;
+
+	/*
+	 * Success should be guaranteed by the mode_valid callback,
+	 * so failure shouldn't happen here.
+	 */
+	ret = clk_set_rate(ctx->ade_pix_clk, clk_Hz);
+	if (ret)
+		DRM_ERROR("failed to set pixel clk %dHz (%d)\n", clk_Hz, ret);
+	adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
+}
+
+static void ade_ldi_set_mode(struct ade_crtc *acrtc,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adj_mode)
+{
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	void __iomem *base = ctx->base;
+	u32 width = mode->hdisplay;
+	u32 height = mode->vdisplay;
+	u32 hfp, hbp, hsw, vfp, vbp, vsw;
+	u32 plr_flags;
+
+	plr_flags = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? FLAG_NVSYNC : 0;
+	plr_flags |= (mode->flags & DRM_MODE_FLAG_NHSYNC) ? FLAG_NHSYNC : 0;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hbp = mode->htotal - mode->hsync_end;
+	hsw = mode->hsync_end - mode->hsync_start;
+	vfp = mode->vsync_start - mode->vdisplay;
+	vbp = mode->vtotal - mode->vsync_end;
+	vsw = mode->vsync_end - mode->vsync_start;
+	if (vsw > 15) {
+		DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+		vsw = 15;
+	}
+
+	writel((hbp << HBP_OFST) | hfp, base + LDI_HRZ_CTRL0);
+	 /* the configured value is actual value - 1 */
+	writel(hsw - 1, base + LDI_HRZ_CTRL1);
+	writel((vbp << VBP_OFST) | vfp, base + LDI_VRT_CTRL0);
+	 /* the configured value is actual value - 1 */
+	writel(vsw - 1, base + LDI_VRT_CTRL1);
+	 /* the configured value is actual value - 1 */
+	writel(((height - 1) << VSIZE_OFST) | (width - 1),
+	       base + LDI_DSP_SIZE);
+	writel(plr_flags, base + LDI_PLR_CTRL);
+
+	/* set overlay compositor output size */
+	writel(((width - 1) << OUTPUT_XSIZE_OFST) | (height - 1),
+	       base + ADE_OVLY_OUTPUT_SIZE(OUT_OVLY));
+
+	/* ctran6 setting */
+	writel(CTRAN_BYPASS_ON, base + ADE_CTRAN_DIS(ADE_CTRAN6));
+	 /* the configured value is actual value - 1 */
+	writel(width * height - 1, base + ADE_CTRAN_IMAGE_SIZE(ADE_CTRAN6));
+	ade_update_reload_bit(base, CTRAN_OFST + ADE_CTRAN6, 0);
+
+	ade_set_pix_clk(ctx, mode, adj_mode);
+
+	DRM_DEBUG_DRIVER("set mode: %dx%d\n", width, height);
+}
+
+static int ade_power_up(struct ade_hw_ctx *ctx)
+{
+	int ret;
+
+	ret = clk_prepare_enable(ctx->media_noc_clk);
+	if (ret) {
+		DRM_ERROR("failed to enable media_noc_clk (%d)\n", ret);
+		return ret;
+	}
+
+	ret = reset_control_deassert(ctx->reset);
+	if (ret) {
+		DRM_ERROR("failed to deassert reset\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(ctx->ade_core_clk);
+	if (ret) {
+		DRM_ERROR("failed to enable ade_core_clk (%d)\n", ret);
+		return ret;
+	}
+
+	ade_init(ctx);
+	ctx->power_on = true;
+	return 0;
+}
+
+static void ade_power_down(struct ade_hw_ctx *ctx)
+{
+	void __iomem *base = ctx->base;
+
+	writel(ADE_DISABLE, base + LDI_CTRL);
+	/* dsi pixel off */
+	writel(DSI_PCLK_OFF, base + LDI_HDMI_DSI_GT);
+
+	clk_disable_unprepare(ctx->ade_core_clk);
+	reset_control_assert(ctx->reset);
+	clk_disable_unprepare(ctx->media_noc_clk);
+	ctx->power_on = false;
+}
+
+static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
+{
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	struct regmap *map = ctx->noc_regmap;
+
+	regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
+			   QOSGENERATOR_MODE_MASK, BYPASS_MODE);
+	regmap_update_bits(map, ADE0_QOSGENERATOR_EXTCONTROL,
+			   SOCKET_QOS_EN, SOCKET_QOS_EN);
+
+	regmap_update_bits(map, ADE1_QOSGENERATOR_MODE,
+			   QOSGENERATOR_MODE_MASK, BYPASS_MODE);
+	regmap_update_bits(map, ADE1_QOSGENERATOR_EXTCONTROL,
+			   SOCKET_QOS_EN, SOCKET_QOS_EN);
+}
+
+static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct kirin_drm_private *priv = dev->dev_private;
+	struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	void __iomem *base = ctx->base;
+
+	if (!ctx->power_on)
+		(void)ade_power_up(ctx);
+
+	ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
+			MASK(1), 1);
+
+	return 0;
+}
+
+static void ade_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+	struct kirin_drm_private *priv = dev->dev_private;
+	struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	void __iomem *base = ctx->base;
+
+	if (!ctx->power_on) {
+		DRM_ERROR("power is down! vblank disable fail\n");
+		return;
+	}
+
+	ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
+			MASK(1), 0);
+}
+
+static irqreturn_t ade_irq_handler(int irq, void *data)
+{
+	struct ade_crtc *acrtc = data;
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	struct drm_crtc *crtc = &acrtc->base;
+	void __iomem *base = ctx->base;
+	u32 status;
+
+	status = readl(base + LDI_MSK_INT);
+	DRM_DEBUG_VBL("LDI IRQ: status=0x%X\n", status);
+
+	/* vblank irq */
+	if (status & BIT(FRAME_END_INT_EN_OFST)) {
+		ade_update_bits(base + LDI_INT_CLR, FRAME_END_INT_EN_OFST,
+				MASK(1), 1);
+		drm_crtc_handle_vblank(crtc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void ade_display_enable(struct ade_crtc *acrtc)
+{
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	void __iomem *base = ctx->base;
+	u32 out_fmt = acrtc->out_format;
+
+	/* enable output overlay compositor */
+	writel(ADE_ENABLE, base + ADE_OVLYX_CTL(OUT_OVLY));
+	ade_update_reload_bit(base, OVLY_OFST + OUT_OVLY, 0);
+
+	/* display source setting */
+	writel(DISP_SRC_OVLY2, base + ADE_DISP_SRC_CFG);
+
+	/* enable ade */
+	writel(ADE_ENABLE, base + ADE_EN);
+	/* enable ldi */
+	writel(NORMAL_MODE, base + LDI_WORK_MODE);
+	writel((out_fmt << BPP_OFST) | DATA_GATE_EN | LDI_EN,
+	       base + LDI_CTRL);
+	/* dsi pixel on */
+	writel(DSI_PCLK_ON, base + LDI_HDMI_DSI_GT);
+}
+
+#if ADE_DEBUG
+static void ade_rdma_dump_regs(void __iomem *base, u32 ch)
+{
+	u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+	u32 val;
+
+	reg_ctrl = RD_CH_CTRL(ch);
+	reg_addr = RD_CH_ADDR(ch);
+	reg_size = RD_CH_SIZE(ch);
+	reg_stride = RD_CH_STRIDE(ch);
+	reg_space = RD_CH_SPACE(ch);
+	reg_en = RD_CH_EN(ch);
+
+	val = ade_read_reload_bit(base, RDMA_OFST + ch);
+	DRM_DEBUG_DRIVER("[rdma%d]: reload(%d)\n", ch + 1, val);
+	val = readl(base + reg_ctrl);
+	DRM_DEBUG_DRIVER("[rdma%d]: reg_ctrl(0x%08x)\n", ch + 1, val);
+	val = readl(base + reg_addr);
+	DRM_DEBUG_DRIVER("[rdma%d]: reg_addr(0x%08x)\n", ch + 1, val);
+	val = readl(base + reg_size);
+	DRM_DEBUG_DRIVER("[rdma%d]: reg_size(0x%08x)\n", ch + 1, val);
+	val = readl(base + reg_stride);
+	DRM_DEBUG_DRIVER("[rdma%d]: reg_stride(0x%08x)\n", ch + 1, val);
+	val = readl(base + reg_space);
+	DRM_DEBUG_DRIVER("[rdma%d]: reg_space(0x%08x)\n", ch + 1, val);
+	val = readl(base + reg_en);
+	DRM_DEBUG_DRIVER("[rdma%d]: reg_en(0x%08x)\n", ch + 1, val);
+}
+
+static void ade_clip_dump_regs(void __iomem *base, u32 ch)
+{
+	u32 val;
+
+	val = ade_read_reload_bit(base, CLIP_OFST + ch);
+	DRM_DEBUG_DRIVER("[clip%d]: reload(%d)\n", ch + 1, val);
+	val = readl(base + ADE_CLIP_DISABLE(ch));
+	DRM_DEBUG_DRIVER("[clip%d]: reg_clip_disable(0x%08x)\n", ch + 1, val);
+	val = readl(base + ADE_CLIP_SIZE0(ch));
+	DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size0(0x%08x)\n", ch + 1, val);
+	val = readl(base + ADE_CLIP_SIZE1(ch));
+	DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size1(0x%08x)\n", ch + 1, val);
+}
+
+static void ade_compositor_routing_dump_regs(void __iomem *base, u32 ch)
+{
+	u8 ovly_ch = 0; /* TODO: Only primary plane now */
+	u32 val;
+
+	val = readl(base + ADE_OVLY_CH_XY0(ovly_ch));
+	DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy0(0x%08x)\n", ovly_ch, val);
+	val = readl(base + ADE_OVLY_CH_XY1(ovly_ch));
+	DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy1(0x%08x)\n", ovly_ch, val);
+	val = readl(base + ADE_OVLY_CH_CTL(ovly_ch));
+	DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_ctl(0x%08x)\n", ovly_ch, val);
+}
+
+static void ade_dump_overlay_compositor_regs(void __iomem *base, u32 comp)
+{
+	u32 val;
+
+	val = ade_read_reload_bit(base, OVLY_OFST + comp);
+	DRM_DEBUG_DRIVER("[overlay%d]: reload(%d)\n", comp + 1, val);
+	val = readl(base + ADE_OVLYX_CTL(comp));
+	DRM_DEBUG_DRIVER("[overlay%d]: reg_ctl(0x%08x)\n", comp + 1, val);
+	val = readl(base + ADE_OVLY_CTL);
+	DRM_DEBUG_DRIVER("ovly_ctl(0x%08x)\n", val);
+}
+
+static void ade_dump_regs(void __iomem *base)
+{
+	u32 i;
+
+	/* dump channel regs */
+	for (i = 0; i < ADE_CH_NUM; i++) {
+		/* dump rdma regs */
+		ade_rdma_dump_regs(base, i);
+
+		/* dump clip regs */
+		ade_clip_dump_regs(base, i);
+
+		/* dump compositor routing regs */
+		ade_compositor_routing_dump_regs(base, i);
+	}
+
+	/* dump overlay compositor regs */
+	ade_dump_overlay_compositor_regs(base, OUT_OVLY);
+}
+#else
+static void ade_dump_regs(void __iomem *base) { }
+#endif
+
+static void ade_crtc_enable(struct drm_crtc *crtc)
+{
+	struct ade_crtc *acrtc = to_ade_crtc(crtc);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	int ret;
+
+	if (acrtc->enable)
+		return;
+
+	if (!ctx->power_on) {
+		ret = ade_power_up(ctx);
+		if (ret)
+			return;
+	}
+
+	ade_set_medianoc_qos(acrtc);
+	ade_display_enable(acrtc);
+	ade_dump_regs(ctx->base);
+	acrtc->enable = true;
+}
+
+static void ade_crtc_disable(struct drm_crtc *crtc)
+{
+	struct ade_crtc *acrtc = to_ade_crtc(crtc);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+
+	if (!acrtc->enable)
+		return;
+
+	ade_power_down(ctx);
+	acrtc->enable = false;
+}
+
+static int ade_crtc_atomic_check(struct drm_crtc *crtc,
+				 struct drm_crtc_state *state)
+{
+	/* do nothing */
+	return 0;
+}
+
+static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct ade_crtc *acrtc = to_ade_crtc(crtc);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
+
+	if (!ctx->power_on)
+		(void)ade_power_up(ctx);
+	ade_ldi_set_mode(acrtc, mode, adj_mode);
+}
+
+static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_state)
+{
+	struct ade_crtc *acrtc = to_ade_crtc(crtc);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+
+	if (!ctx->power_on)
+		(void)ade_power_up(ctx);
+}
+
+static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_state)
+
+{
+	struct ade_crtc *acrtc = to_ade_crtc(crtc);
+	struct ade_hw_ctx *ctx = acrtc->ctx;
+	void __iomem *base = ctx->base;
+
+	/* registers only take effect while the crtc is enabled */
+	if (acrtc->enable) {
+		ade_dump_regs(base);
+		/* flush ade registers */
+		writel(ADE_ENABLE, base + ADE_EN);
+	}
+}
+
+static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
+	.enable		= ade_crtc_enable,
+	.disable	= ade_crtc_disable,
+	.atomic_check	= ade_crtc_atomic_check,
+	.mode_set_nofb	= ade_crtc_mode_set_nofb,
+	.atomic_begin	= ade_crtc_atomic_begin,
+	.atomic_flush	= ade_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs ade_crtc_funcs = {
+	.destroy	= drm_crtc_cleanup,
+	.set_config	= drm_atomic_helper_set_config,
+	.page_flip	= drm_atomic_helper_page_flip,
+	.reset		= drm_atomic_helper_crtc_reset,
+	.set_property = drm_atomic_helper_crtc_set_property,
+	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
+};
+
+static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+			 struct drm_plane *plane)
+{
+	struct kirin_drm_private *priv = dev->dev_private;
+	struct device_node *port;
+	int ret;
+
+	/*
+	 * set crtc port so that the drm_of_find_possible_crtcs()
+	 * call works
+	 */
+	port = of_get_child_by_name(dev->dev->of_node, "port");
+	if (!port) {
+		DRM_ERROR("no port node found in %s\n",
+			  dev->dev->of_node->full_name);
+		return -EINVAL;
+	}
+	of_node_put(port);
+	crtc->port = port;
+
+	ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+					&ade_crtc_funcs, NULL);
+	if (ret) {
+		DRM_ERROR("failed to init crtc.\n");
+		return ret;
+	}
+
+	drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
+	priv->crtc[drm_crtc_index(crtc)] = crtc;
+
+	return 0;
+}
+
+static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
+			 u32 ch, u32 y, u32 in_h, u32 fmt)
+{
+	struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+	u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+	u32 stride = fb->pitches[0];
+	u32 addr = (u32)obj->paddr + y * stride;
+
+	DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
+			 ch + 1, y, in_h, stride, (u32)obj->paddr);
+	DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
+			 addr, fb->width, fb->height, fmt,
+			 drm_get_format_name(fb->pixel_format));
+
+	/* get reg offset */
+	reg_ctrl = RD_CH_CTRL(ch);
+	reg_addr = RD_CH_ADDR(ch);
+	reg_size = RD_CH_SIZE(ch);
+	reg_stride = RD_CH_STRIDE(ch);
+	reg_space = RD_CH_SPACE(ch);
+	reg_en = RD_CH_EN(ch);
+
+	/*
+	 * TODO: set rotation
+	 */
+	writel((fmt << 16) & 0x1f0000, base + reg_ctrl);
+	writel(addr, base + reg_addr);
+	writel((in_h << 16) | stride, base + reg_size);
+	writel(stride, base + reg_stride);
+	writel(in_h * stride, base + reg_space);
+	writel(ADE_ENABLE, base + reg_en);
+	ade_update_reload_bit(base, RDMA_OFST + ch, 0);
+}
+
+static void ade_rdma_disable(void __iomem *base, u32 ch)
+{
+	u32 reg_en;
+
+	/* get reg offset */
+	reg_en = RD_CH_EN(ch);
+	writel(0, base + reg_en);
+	ade_update_reload_bit(base, RDMA_OFST + ch, 1);
+}
+
+static void ade_clip_set(void __iomem *base, u32 ch, u32 fb_w, u32 x,
+			 u32 in_w, u32 in_h)
+{
+	u32 disable_val;
+	u32 clip_left;
+	u32 clip_right;
+
+	/*
+	 * clip width, no need to clip height
+	 */
+	if (fb_w == in_w) { /* bypass */
+		disable_val = 1;
+		clip_left = 0;
+		clip_right = 0;
+	} else {
+		disable_val = 0;
+		clip_left = x;
+		clip_right = fb_w - (x + in_w) - 1;
+	}
+
+	DRM_DEBUG_DRIVER("clip%d: clip_left=%d, clip_right=%d\n",
+			 ch + 1, clip_left, clip_right);
+
+	writel(disable_val, base + ADE_CLIP_DISABLE(ch));
+	writel((fb_w - 1) << 16 | (in_h - 1), base + ADE_CLIP_SIZE0(ch));
+	writel(clip_left << 16 | clip_right, base + ADE_CLIP_SIZE1(ch));
+	ade_update_reload_bit(base, CLIP_OFST + ch, 0);
+}
+
+static void ade_clip_disable(void __iomem *base, u32 ch)
+{
+	writel(1, base + ADE_CLIP_DISABLE(ch));
+	ade_update_reload_bit(base, CLIP_OFST + ch, 1);
+}
+
+static bool has_Alpha_channel(int format)
+{
+	switch (format) {
+	case ADE_ARGB_8888:
+	case ADE_ABGR_8888:
+	case ADE_RGBA_8888:
+	case ADE_BGRA_8888:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void ade_get_blending_params(u32 fmt, u8 glb_alpha, u8 *alp_mode,
+				    u8 *alp_sel, u8 *under_alp_sel)
+{
+	bool has_alpha = has_Alpha_channel(fmt);
+
+	/*
+	 * get alp_mode
+	 */
+	if (has_alpha && glb_alpha < 255)
+		*alp_mode = ADE_ALP_PIXEL_AND_GLB;
+	else if (has_alpha)
+		*alp_mode = ADE_ALP_PIXEL;
+	else
+		*alp_mode = ADE_ALP_GLOBAL;
+
+	/*
+	 * get alp sel
+	 */
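+	/*
+	 * Multiply this plane by coefficient 1 and the layer below by 0,
+	 * i.e. the plane output replaces whatever is underneath (no real
+	 * blending yet).
+	 */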
+	*alp_sel = ADE_ALP_MUL_COEFF_3; /* 1 */
+	*under_alp_sel = ADE_ALP_MUL_COEFF_2; /* 0 */
+}
+
+static void ade_compositor_routing_set(void __iomem *base, u8 ch,
+				       u32 x0, u32 y0,
+				       u32 in_w, u32 in_h, u32 fmt)
+{
+	u8 ovly_ch = 0; /* TODO: This is the zpos, only one plane now */
+	u8 glb_alpha = 255;
+	u32 x1 = x0 + in_w - 1;
+	u32 y1 = y0 + in_h - 1;
+	u32 val;
+	u8 alp_sel;
+	u8 under_alp_sel;
+	u8 alp_mode;
+
+	ade_get_blending_params(fmt, glb_alpha, &alp_mode, &alp_sel,
+				&under_alp_sel);
+
+	/* overlay routing setting */
+	writel(x0 << 16 | y0, base + ADE_OVLY_CH_XY0(ovly_ch));
+	writel(x1 << 16 | y1, base + ADE_OVLY_CH_XY1(ovly_ch));
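+	/* channel control: source channel select, enable bit and alpha settings */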
+	val = (ch + 1) << CH_SEL_OFST | BIT(CH_EN_OFST) |
+		alp_sel << CH_ALP_SEL_OFST |
+		under_alp_sel << CH_UNDER_ALP_SEL_OFST |
+		glb_alpha << CH_ALP_GBL_OFST |
+		alp_mode << CH_ALP_MODE_OFST;
+	writel(val, base + ADE_OVLY_CH_CTL(ovly_ch));
+	/* connect this plane/channel to overlay2 compositor */
+	ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
+			CH_OVLY_SEL_MASK, CH_OVLY_SEL_VAL(OUT_OVLY));
+}
+
+static void ade_compositor_routing_disable(void __iomem *base, u32 ch)
+{
+	u8 ovly_ch = 0; /* TODO: Only primary plane now */
+
+	/* disable this plane/channel */
+	ade_update_bits(base + ADE_OVLY_CH_CTL(ovly_ch), CH_EN_OFST,
+			MASK(1), 0);
+	/* disconnect this plane/channel from the overlay2 compositor */
+	ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
+			CH_OVLY_SEL_MASK, 0);
+}
+
+/*
+ * Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
+ */
+static void ade_update_channel(struct ade_plane *aplane,
+			       struct drm_framebuffer *fb, int crtc_x,
+			       int crtc_y, unsigned int crtc_w,
+			       unsigned int crtc_h, u32 src_x,
+			       u32 src_y, u32 src_w, u32 src_h)
+{
+	struct ade_hw_ctx *ctx = aplane->ctx;
+	void __iomem *base = ctx->base;
+	u32 fmt = ade_get_format(fb->pixel_format);
+	u32 ch = aplane->ch;
+	u32 in_w;
+	u32 in_h;
+
+	DRM_DEBUG_DRIVER("channel%d: src:(%d, %d)-%dx%d, crtc:(%d, %d)-%dx%d",
+			 ch + 1, src_x, src_y, src_w, src_h,
+			 crtc_x, crtc_y, crtc_w, crtc_h);
+
+	/* 1) DMA setting */
+	in_w = src_w;
+	in_h = src_h;
+	ade_rdma_set(base, fb, ch, src_y, in_h, fmt);
+
+	/* 2) clip setting */
+	ade_clip_set(base, ch, fb->width, src_x, in_w, in_h);
+
+	/* 3) TODO: scale setting for overlay planes */
+
+	/* 4) TODO: ctran/csc setting for overlay planes */
+
+	/* 5) compositor routing setting */
+	ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
+}
+
+static void ade_disable_channel(struct ade_plane *aplane)
+{
+	struct ade_hw_ctx *ctx = aplane->ctx;
+	void __iomem *base = ctx->base;
+	u32 ch = aplane->ch;
+
+	DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
+
+	/* disable read DMA */
+	ade_rdma_disable(base, ch);
+
+	/* disable clip */
+	ade_clip_disable(base, ch);
+
+	/* disable compositor routing */
+	ade_compositor_routing_disable(base, ch);
+}
+
+static int ade_plane_prepare_fb(struct drm_plane *plane,
+				const struct drm_plane_state *new_state)
+{
+	/* do nothing */
+	return 0;
+}
+
+static void ade_plane_cleanup_fb(struct drm_plane *plane,
+				 const struct drm_plane_state *old_state)
+{
+	/* do nothing */
+}
+
+static int ade_plane_atomic_check(struct drm_plane *plane,
+				  struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state;
+	u32 src_x = state->src_x >> 16;
+	u32 src_y = state->src_y >> 16;
+	u32 src_w = state->src_w >> 16;
+	u32 src_h = state->src_h >> 16;
+	int crtc_x = state->crtc_x;
+	int crtc_y = state->crtc_y;
+	u32 crtc_w = state->crtc_w;
+	u32 crtc_h = state->crtc_h;
+	u32 fmt;
+
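+	/* nothing to check when the plane is being disabled (no crtc or fb) */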
+	if (!crtc || !fb)
+		return 0;
+
+	fmt = ade_get_format(fb->pixel_format);
+	if (fmt == ADE_FORMAT_UNSUPPORT)
+		return -EINVAL;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	if (src_w != crtc_w || src_h != crtc_h) {
+		DRM_ERROR("Scale not support!!!\n");
+		return -EINVAL;
+	}
+
+	if (src_x + src_w > fb->width ||
+	    src_y + src_h > fb->height)
+		return -EINVAL;
+
+	if (crtc_x < 0 || crtc_y < 0)
+		return -EINVAL;
+
+	if (crtc_x + crtc_w > crtc_state->adjusted_mode.hdisplay ||
+	    crtc_y + crtc_h > crtc_state->adjusted_mode.vdisplay)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void ade_plane_atomic_update(struct drm_plane *plane,
+				    struct drm_plane_state *old_state)
+{
+	struct drm_plane_state	*state	= plane->state;
+	struct ade_plane *aplane = to_ade_plane(plane);
+
+	ade_update_channel(aplane, state->fb, state->crtc_x, state->crtc_y,
+			   state->crtc_w, state->crtc_h,
+			   state->src_x >> 16, state->src_y >> 16,
+			   state->src_w >> 16, state->src_h >> 16);
+}
+
+static void ade_plane_atomic_disable(struct drm_plane *plane,
+				     struct drm_plane_state *old_state)
+{
+	struct ade_plane *aplane = to_ade_plane(plane);
+
+	ade_disable_channel(aplane);
+}
+
+static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
+	.prepare_fb = ade_plane_prepare_fb,
+	.cleanup_fb = ade_plane_cleanup_fb,
+	.atomic_check = ade_plane_atomic_check,
+	.atomic_update = ade_plane_atomic_update,
+	.atomic_disable = ade_plane_atomic_disable,
+};
+
+static struct drm_plane_funcs ade_plane_funcs = {
+	.update_plane	= drm_atomic_helper_update_plane,
+	.disable_plane	= drm_atomic_helper_disable_plane,
+	.set_property = drm_atomic_helper_plane_set_property,
+	.destroy = drm_plane_cleanup,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
+			  enum drm_plane_type type)
+{
+	const u32 *fmts;
+	u32 fmts_cnt;
+	int ret = 0;
+
+	/* get the formats supported by this channel */
+	fmts_cnt = ade_get_channel_formats(aplane->ch, &fmts);
+	if (!fmts_cnt)
+		return -EINVAL;
+
+	ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
+				       fmts, fmts_cnt, type, NULL);
+	if (ret) {
+		DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
+		return ret;
+	}
+
+	drm_plane_helper_add(&aplane->base, &ade_plane_helper_funcs);
+
+	return 0;
+}
+
+static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
+{
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctx->base)) {
+		DRM_ERROR("failed to remap ade io base\n");
+		return PTR_ERR(ctx->base);
+	}
+
+	ctx->reset = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(ctx->reset))
+		return PTR_ERR(ctx->reset);
+
+	ctx->noc_regmap =
+		syscon_regmap_lookup_by_phandle(np, "hisilicon,noc-syscon");
+	if (IS_ERR(ctx->noc_regmap)) {
+		DRM_ERROR("failed to get noc regmap\n");
+		return PTR_ERR(ctx->noc_regmap);
+	}
+
+	ctx->irq = platform_get_irq(pdev, 0);
+	if (ctx->irq < 0) {
+		DRM_ERROR("failed to get irq\n");
+		return -ENODEV;
+	}
+
+	ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
+	if (IS_ERR(ctx->ade_core_clk)) {
+		DRM_ERROR("failed to get clk ADE_CORE\n");
+		return PTR_ERR(ctx->ade_core_clk);
+	}
+
+	ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
+	if (IS_ERR(ctx->media_noc_clk)) {
+		DRM_ERROR("failed to get clk CODEC_JPEG\n");
+		return PTR_ERR(ctx->media_noc_clk);
+	}
+
+	ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
+	if (IS_ERR(ctx->ade_pix_clk)) {
+		DRM_ERROR("failed to get clk ADE_PIX\n");
+		return PTR_ERR(ctx->ade_pix_clk);
+	}
+
+	return 0;
+}
+
+static int ade_drm_init(struct drm_device *dev)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct ade_data *ade;
+	struct ade_hw_ctx *ctx;
+	struct ade_crtc *acrtc;
+	struct ade_plane *aplane;
+	enum drm_plane_type type;
+	int ret;
+	int i;
+
+	ade = devm_kzalloc(dev->dev, sizeof(*ade), GFP_KERNEL);
+	if (!ade) {
+		DRM_ERROR("failed to alloc ade_data\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, ade);
+
+	ctx = &ade->ctx;
+	acrtc = &ade->acrtc;
+	acrtc->ctx = ctx;
+	acrtc->out_format = LDI_OUT_RGB_888;
+
+	ret = ade_dts_parse(pdev, ctx);
+	if (ret)
+		return ret;
+
+	/*
+	 * plane init
+	 * TODO: Only the primary plane is supported for now; overlay
+	 * planes still need to be added.
+	 */
+	for (i = 0; i < ADE_CH_NUM; i++) {
+		aplane = &ade->aplane[i];
+		aplane->ch = i;
+		aplane->ctx = ctx;
+		type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
+			DRM_PLANE_TYPE_OVERLAY;
+
+		ret = ade_plane_init(dev, aplane, type);
+		if (ret)
+			return ret;
+	}
+
+	/* crtc init */
+	ret = ade_crtc_init(dev, &acrtc->base, &ade->aplane[PRIMARY_CH].base);
+	if (ret)
+		return ret;
+
+	/* vblank irq init */
+	ret = devm_request_irq(dev->dev, ctx->irq, ade_irq_handler,
+			       IRQF_SHARED, dev->driver->name, acrtc);
+	if (ret)
+		return ret;
+	dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
+	dev->driver->enable_vblank = ade_enable_vblank;
+	dev->driver->disable_vblank = ade_disable_vblank;
+
+	return 0;
+}
+
+static void ade_drm_cleanup(struct drm_device *dev)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct ade_data *ade = platform_get_drvdata(pdev);
+	struct drm_crtc *crtc = &ade->acrtc.base;
+
+	drm_crtc_cleanup(crtc);
+}
+
+const struct kirin_dc_ops ade_dc_ops = {
+	.init = ade_drm_init,
+	.cleanup = ade_drm_cleanup
+};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
new file mode 100644
index 0000000..3f94785
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -0,0 +1,343 @@
+/*
+ * Hisilicon Kirin SoCs drm master driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ *	Xinliang Liu <z.liuxinliang@hisilicon.com>
+ *	Xinliang Liu <xinliang.liu@linaro.org>
+ *	Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/of_platform.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "kirin_drm_drv.h"
+
+static struct kirin_dc_ops *dc_ops;
+
+static int kirin_drm_kms_cleanup(struct drm_device *dev)
+{
+	struct kirin_drm_private *priv = dev->dev_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	if (priv->fbdev) {
+		drm_fbdev_cma_fini(priv->fbdev);
+		priv->fbdev = NULL;
+	}
+#endif
+	drm_kms_helper_poll_fini(dev);
+	drm_vblank_cleanup(dev);
+	dc_ops->cleanup(dev);
+	drm_mode_config_cleanup(dev);
+	devm_kfree(dev->dev, priv);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
+{
+	struct kirin_drm_private *priv = dev->dev_private;
+
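+	/* forward hotplug events to the CMA fbdev, creating it on first use */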
+	if (priv->fbdev) {
+		drm_fbdev_cma_hotplug_event(priv->fbdev);
+	} else {
+		priv->fbdev = drm_fbdev_cma_init(dev, 32,
+				dev->mode_config.num_crtc,
+				dev->mode_config.num_connector);
+		if (IS_ERR(priv->fbdev))
+			priv->fbdev = NULL;
+	}
+}
+#endif
+
+static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
+	.fb_create = drm_fb_cma_create,
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	.output_poll_changed = kirin_fbdev_output_poll_changed,
+#endif
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static void kirin_drm_mode_config_init(struct drm_device *dev)
+{
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
+	dev->mode_config.funcs = &kirin_drm_mode_config_funcs;
+}
+
+static int kirin_drm_kms_init(struct drm_device *dev)
+{
+	struct kirin_drm_private *priv;
+	int ret;
+
+	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev->dev_private = priv;
+	dev_set_drvdata(dev->dev, dev);
+
+	/* dev->mode_config initialization */
+	drm_mode_config_init(dev);
+	kirin_drm_mode_config_init(dev);
+
+	/* display controller init */
+	ret = dc_ops->init(dev);
+	if (ret)
+		goto err_mode_config_cleanup;
+
+	/* bind and init sub drivers */
+	ret = component_bind_all(dev->dev, dev);
+	if (ret) {
+		DRM_ERROR("failed to bind all component.\n");
+		goto err_dc_cleanup;
+	}
+
+	/* vblank init */
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+	if (ret) {
+		DRM_ERROR("failed to initialize vblank.\n");
+		goto err_unbind_all;
+	}
+	/* with irq_enabled = true, we can use the vblank feature. */
+	dev->irq_enabled = true;
+
+	/* reset all the states of crtc/plane/encoder/connector */
+	drm_mode_config_reset(dev);
+
+	/* init kms poll for handling hpd */
+	drm_kms_helper_poll_init(dev);
+
+	/* force detection after connectors init */
+	(void)drm_helper_hpd_irq_event(dev);
+
+	return 0;
+
+err_unbind_all:
+	component_unbind_all(dev->dev, dev);
+err_dc_cleanup:
+	dc_ops->cleanup(dev);
+err_mode_config_cleanup:
+	drm_mode_config_cleanup(dev);
+	devm_kfree(dev->dev, priv);
+	dev->dev_private = NULL;
+
+	return ret;
+}
+
+static const struct file_operations kirin_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static int kirin_gem_cma_dumb_create(struct drm_file *file,
+				     struct drm_device *dev,
+				     struct drm_mode_create_dumb *args)
+{
+	return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
+static struct drm_driver kirin_drm_driver = {
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+				  DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+	.fops			= &kirin_drm_fops,
+	.set_busid		= drm_platform_set_busid,
+
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.dumb_create		= kirin_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_dumb_destroy,
+
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
+
+	.name			= "kirin",
+	.desc			= "Hisilicon Kirin SoCs' DRM Driver",
+	.date			= "20150718",
+	.major			= 1,
+	.minor			= 0,
+};
+
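+/* component framework match callback: match a sub-device by its OF node */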
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int kirin_drm_bind(struct device *dev)
+{
+	struct drm_driver *driver = &kirin_drm_driver;
+	struct drm_device *drm_dev;
+	int ret;
+
+	drm_dev = drm_dev_alloc(driver, dev);
+	if (!drm_dev)
+		return -ENOMEM;
+
+	drm_dev->platformdev = to_platform_device(dev);
+
+	ret = kirin_drm_kms_init(drm_dev);
+	if (ret)
+		goto err_drm_dev_unref;
+
+	ret = drm_dev_register(drm_dev, 0);
+	if (ret)
+		goto err_kms_cleanup;
+
+	/* connectors should be registered after drm device register */
+	ret = drm_connector_register_all(drm_dev);
+	if (ret)
+		goto err_drm_dev_unregister;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm_dev->primary->index);
+
+	return 0;
+
+err_drm_dev_unregister:
+	drm_dev_unregister(drm_dev);
+err_kms_cleanup:
+	kirin_drm_kms_cleanup(drm_dev);
+err_drm_dev_unref:
+	drm_dev_unref(drm_dev);
+
+	return ret;
+}
+
+static void kirin_drm_unbind(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+	drm_connector_unregister_all(drm_dev);
+	drm_dev_unregister(drm_dev);
+	kirin_drm_kms_cleanup(drm_dev);
+	drm_dev_unref(drm_dev);
+}
+
+static const struct component_master_ops kirin_drm_ops = {
+	.bind = kirin_drm_bind,
+	.unbind = kirin_drm_unbind,
+};
+
+static struct device_node *kirin_get_remote_node(struct device_node *np)
+{
+	struct device_node *endpoint, *remote;
+
+	/*
+	 * Get the first endpoint; in our case only one remote node
+	 * is connected to the display controller.
+	 */
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		DRM_ERROR("no valid endpoint node\n");
+		return ERR_PTR(-ENODEV);
+	}
+	of_node_put(endpoint);
+
+	remote = of_graph_get_remote_port_parent(endpoint);
+	if (!remote) {
+		DRM_ERROR("no valid remote node\n");
+		return ERR_PTR(-ENODEV);
+	}
+	of_node_put(remote);
+
+	if (!of_device_is_available(remote)) {
+		DRM_ERROR("not available for remote node\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	return remote;
+}
+
+static int kirin_drm_platform_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct component_match *match = NULL;
+	struct device_node *remote;
+
+	dc_ops = (struct kirin_dc_ops *)of_device_get_match_data(dev);
+	if (!dc_ops) {
+		DRM_ERROR("failed to get dt id data\n");
+		return -EINVAL;
+	}
+
+	remote = kirin_get_remote_node(np);
+	if (IS_ERR(remote))
+		return PTR_ERR(remote);
+
+	component_match_add(dev, &match, compare_of, remote);
+
+	return component_master_add_with_match(dev, &kirin_drm_ops, match);
+}
+
+static int kirin_drm_platform_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &kirin_drm_ops);
+	dc_ops = NULL;
+	return 0;
+}
+
+static const struct of_device_id kirin_drm_dt_ids[] = {
+	{ .compatible = "hisilicon,hi6220-ade",
+	  .data = &ade_dc_ops,
+	},
+	{ /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
+
+static struct platform_driver kirin_drm_platform_driver = {
+	.probe = kirin_drm_platform_probe,
+	.remove = kirin_drm_platform_remove,
+	.driver = {
+		.name = "kirin-drm",
+		.of_match_table = kirin_drm_dt_ids,
+	},
+};
+
+module_platform_driver(kirin_drm_platform_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("hisilicon Kirin SoCs' DRM master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
new file mode 100644
index 0000000..1a07caf
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_DRM_DRV_H__
+#define __KIRIN_DRM_DRV_H__
+
+#define MAX_CRTC	2
+
+/* display controller init/cleanup ops */
+struct kirin_dc_ops {
+	int (*init)(struct drm_device *dev);
+	void (*cleanup)(struct drm_device *dev);
+};
+
+struct kirin_drm_private {
+	struct drm_crtc *crtc[MAX_CRTC];
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	struct drm_fbdev_cma *fbdev;
+#endif
+};
+
+extern const struct kirin_dc_ops ade_dc_ops;
+
+#endif /* __KIRIN_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 20a5d04..29a32b1 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -56,3 +56,9 @@
 	  selected to enabled full userptr support.
 
 	  If in doubt, say "Y".
+
+menu "drm/i915 Debugging"
+depends on DRM_I915
+depends on EXPERT
+source drivers/gpu/drm/i915/Kconfig.debug
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
new file mode 100644
index 0000000..8f40410
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -0,0 +1,41 @@
+config DRM_I915_WERROR
+        bool "Force GCC to throw an error instead of a warning when compiling"
+        # As this may inadvertently break the build, only allow the user
+        # to shoot oneself in the foot iff they aim really hard
+        depends on EXPERT
+        # We use the dependency on !COMPILE_TEST to not be enabled in
+        # allmodconfig or allyesconfig configurations
+        depends on !COMPILE_TEST
+        default n
+        help
+          Add -Werror to the build flags for (and only for) i915.ko.
+          Do not enable this unless you are writing code for the i915.ko module.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
+config DRM_I915_DEBUG
+        bool "Enable additional driver debugging"
+        depends on DRM_I915
+        default n
+        help
+          Choose this option to turn on extra driver debugging that may affect
+          performance but will catch some internal issues.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
+config DRM_I915_DEBUG_GEM
+        bool "Insert extra checks into the GEM internals"
+        default n
+        depends on DRM_I915_WERROR
+        help
+          Enable extra sanity checks (including BUGs) along the GEM driver
+          paths that may slow the system down and if hit hang the machine.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0851de07..0b88ba0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+
 # Please keep these build lists sorted!
 
 # core driver code
@@ -55,7 +57,9 @@
 	  intel_atomic.o \
 	  intel_atomic_plane.o \
 	  intel_bios.o \
+	  intel_color.o \
 	  intel_display.o \
+	  intel_dpll_mgr.o \
 	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894..a337f33 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -444,6 +444,7 @@
 	REG64(CL_PRIMITIVES_COUNT),
 	REG64(PS_INVOCATION_COUNT),
 	REG64(PS_DEPTH_COUNT),
+	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
 	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
 	REG64(MI_PREDICATE_SRC0),
 	REG64(MI_PREDICATE_SRC1),
@@ -471,6 +472,25 @@
 	REG32(GEN7_L3SQCREG1),
 	REG32(GEN7_L3CNTLREG2),
 	REG32(GEN7_L3CNTLREG3),
+};
+
+static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
+	REG64_IDX(HSW_CS_GPR, 0),
+	REG64_IDX(HSW_CS_GPR, 1),
+	REG64_IDX(HSW_CS_GPR, 2),
+	REG64_IDX(HSW_CS_GPR, 3),
+	REG64_IDX(HSW_CS_GPR, 4),
+	REG64_IDX(HSW_CS_GPR, 5),
+	REG64_IDX(HSW_CS_GPR, 6),
+	REG64_IDX(HSW_CS_GPR, 7),
+	REG64_IDX(HSW_CS_GPR, 8),
+	REG64_IDX(HSW_CS_GPR, 9),
+	REG64_IDX(HSW_CS_GPR, 10),
+	REG64_IDX(HSW_CS_GPR, 11),
+	REG64_IDX(HSW_CS_GPR, 12),
+	REG64_IDX(HSW_CS_GPR, 13),
+	REG64_IDX(HSW_CS_GPR, 14),
+	REG64_IDX(HSW_CS_GPR, 15),
 	REG32(HSW_SCRATCH1,
 	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
 	      .value = 0),
@@ -500,6 +520,33 @@
 #undef REG64
 #undef REG32
 
+struct drm_i915_reg_table {
+	const struct drm_i915_reg_descriptor *regs;
+	int num_regs;
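+	/* if set, the table is only consulted for the DRM master client */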
+	bool master;
+};
+
+static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
+	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
 	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -555,7 +602,7 @@
 	return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
 {
@@ -577,7 +624,7 @@
 
 			if (curr < previous) {
 				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-					  ring->id, i, j, curr, previous);
+					  engine->id, i, j, curr, previous);
 				ret = false;
 			}
 
@@ -611,11 +658,18 @@
 	return ret;
 }
 
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
 {
-	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
-		check_sorted(ring->id, ring->master_reg_table,
-			     ring->master_reg_count);
+	int i;
+	const struct drm_i915_reg_table *table;
+
+	for (i = 0; i < engine->reg_table_count; i++) {
+		table = &engine->reg_tables[i];
+		if (!check_sorted(engine->id, table->regs, table->num_regs))
+			return false;
+	}
+
+	return true;
 }
 
 struct cmd_node {
@@ -639,13 +693,13 @@
  */
 #define CMD_HASH_MASK STD_MI_OPCODE_MASK
 
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
 			   const struct drm_i915_cmd_table *cmd_tables,
 			   int cmd_table_count)
 {
 	int i, j;
 
-	hash_init(ring->cmd_hash);
+	hash_init(engine->cmd_hash);
 
 	for (i = 0; i < cmd_table_count; i++) {
 		const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +714,7 @@
 				return -ENOMEM;
 
 			desc_node->desc = desc;
-			hash_add(ring->cmd_hash, &desc_node->node,
+			hash_add(engine->cmd_hash, &desc_node->node,
 				 desc->cmd.value & CMD_HASH_MASK);
 		}
 	}
@@ -668,13 +722,13 @@
 	return 0;
 }
 
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
 {
 	struct hlist_node *tmp;
 	struct cmd_node *desc_node;
 	int i;
 
-	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
 		hash_del(&desc_node->node);
 		kfree(desc_node);
 	}
@@ -690,18 +744,18 @@
  *
  * Return: non-zero if initialization fails
  */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 {
 	const struct drm_i915_cmd_table *cmd_tables;
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(ring->dev))
+	if (!IS_GEN7(engine->dev))
 		return 0;
 
-	switch (ring->id) {
+	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +764,23 @@
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		ring->reg_table = gen7_render_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
-
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->reg_tables = hsw_render_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->reg_tables = ivb_render_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
 		}
 
-		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VCS:
 		cmd_tables = gen7_video_cmds;
 		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -737,44 +788,41 @@
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		ring->reg_table = gen7_blt_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
-
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->reg_tables = hsw_blt_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->reg_tables = ivb_blt_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
 		}
 
-		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VECS:
 		cmd_tables = hsw_vebox_cmds;
 		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	default:
 		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-			  ring->id);
+			  engine->id);
 		BUG();
 	}
 
-	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
-	BUG_ON(!validate_regs_sorted(ring));
+	BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+	BUG_ON(!validate_regs_sorted(engine));
 
-	WARN_ON(!hash_empty(ring->cmd_hash));
+	WARN_ON(!hash_empty(engine->cmd_hash));
 
-	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
 	if (ret) {
 		DRM_ERROR("CMD: cmd_parser_init failed!\n");
-		fini_hash_table(ring);
+		fini_hash_table(engine);
 		return ret;
 	}
 
-	ring->needs_cmd_parser = true;
+	engine->needs_cmd_parser = true;
 
 	return 0;
 }
@@ -786,21 +834,21 @@
  * Releases any resources related to command parsing that may have been
  * initialized for the specified ring.
  */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return;
 
-	fini_hash_table(ring);
+	fini_hash_table(engine);
 }
 
 static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
 		  u32 cmd_header)
 {
 	struct cmd_node *desc_node;
 
-	hash_for_each_possible(ring->cmd_hash, desc_node, node,
+	hash_for_each_possible(engine->cmd_hash, desc_node, node,
 			       cmd_header & CMD_HASH_MASK) {
 		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
 		u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +870,18 @@
  * ring's default length encoding and returns default_desc.
  */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
 	 u32 cmd_header,
 	 struct drm_i915_cmd_descriptor *default_desc)
 {
 	const struct drm_i915_cmd_descriptor *desc;
 	u32 mask;
 
-	desc = find_cmd_in_table(ring, cmd_header);
+	desc = find_cmd_in_table(engine, cmd_header);
 	if (desc)
 		return desc;
 
-	mask = ring->get_cmd_length_mask(cmd_header);
+	mask = engine->get_cmd_length_mask(cmd_header);
 	if (!mask)
 		return NULL;
 
@@ -848,12 +896,31 @@
 find_reg(const struct drm_i915_reg_descriptor *table,
 	 int count, u32 addr)
 {
-	if (table) {
-		int i;
+	int i;
 
-		for (i = 0; i < count; i++) {
-			if (i915_mmio_reg_offset(table[i].addr) == addr)
-				return &table[i];
+	for (i = 0; i < count; i++) {
+		if (i915_mmio_reg_offset(table[i].addr) == addr)
+			return &table[i];
+	}
+
+	return NULL;
+}
+
+static const struct drm_i915_reg_descriptor *
+find_reg_in_tables(const struct drm_i915_reg_table *tables,
+		   int count, bool is_master, u32 addr)
+{
+	int i;
+	const struct drm_i915_reg_table *table;
+	const struct drm_i915_reg_descriptor *reg;
+
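+	/* skip tables restricted to the DRM master unless the client is master */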
+	for (i = 0; i < count; i++) {
+		table = &tables[i];
+		if (!table->master || is_master) {
+			reg = find_reg(table->regs, table->num_regs,
+				       addr);
+			if (reg != NULL)
+				return reg;
 		}
 	}
 
@@ -963,18 +1030,18 @@
  *
  * Return: true if the ring requires software command parsing
  */
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(ring->dev))
+	if (!USES_PPGTT(engine->dev))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
 }
 
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
 		      const u32 *cmd, u32 length,
 		      const bool is_master,
@@ -1004,17 +1071,14 @@
 		     offset += step) {
 			const u32 reg_addr = cmd[offset] & desc->reg.mask;
 			const struct drm_i915_reg_descriptor *reg =
-				find_reg(ring->reg_table, ring->reg_count,
-					 reg_addr);
-
-			if (!reg && is_master)
-				reg = find_reg(ring->master_reg_table,
-					       ring->master_reg_count,
-					       reg_addr);
+				find_reg_in_tables(engine->reg_tables,
+						   engine->reg_table_count,
+						   is_master,
+						   reg_addr);
 
 			if (!reg) {
 				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-						 reg_addr, *cmd, ring->id);
+						 reg_addr, *cmd, engine->id);
 				return false;
 			}
 
@@ -1087,7 +1151,7 @@
 						 *cmd,
 						 desc->bits[i].mask,
 						 desc->bits[i].expected,
-						 dword, ring->id);
+						 dword, engine->id);
 				return false;
 			}
 		}
@@ -1113,7 +1177,7 @@
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
@@ -1147,7 +1211,7 @@
 		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
 
-		desc = find_cmd(ring, *cmd, &default_desc);
+		desc = find_cmd(engine, *cmd, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
@@ -1179,7 +1243,7 @@
 			break;
 		}
 
-		if (!check_cmd(ring, desc, cmd, length, is_master,
+		if (!check_cmd(engine, desc, cmd, length, is_master,
 			       &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
@@ -1223,6 +1287,7 @@
 	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
 	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
 	 * 5. GPGPU dispatch compute indirect registers.
+	 * 6. TIMESTAMP register and Haswell CS GPR registers.
 	 */
-	return 5;
+	return 6;
 }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3f4c72..3269033 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,27 +89,34 @@
 	return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+static const char get_active_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj->pin_display)
-		return "p";
-	else
-		return " ";
+	return obj->active ? '*' : ' ';
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+static const char get_pin_flag(struct drm_i915_gem_object *obj)
+{
+	return obj->pin_display ? 'p' : ' ';
+}
+
+static const char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
 	switch (obj->tiling_mode) {
 	default:
-	case I915_TILING_NONE: return " ";
-	case I915_TILING_X: return "X";
-	case I915_TILING_Y: return "Y";
+	case I915_TILING_NONE: return ' ';
+	case I915_TILING_X: return 'X';
+	case I915_TILING_Y: return 'Y';
 	}
 }
 
-static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
+static inline const char get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+}
+
+static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+{
+	return obj->mapping ? 'M' : ' ';
 }
 
 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -129,23 +136,26 @@
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
-	int i;
+	enum intel_engine_id id;
 
-	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
 		   &obj->base,
-		   obj->active ? "*" : " ",
+		   get_active_flag(obj),
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   get_global_flag(obj),
+		   get_pin_mapped_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-				i915_gem_request_get_seqno(obj->last_read_req[i]));
+				i915_gem_request_get_seqno(obj->last_read_req[id]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -184,7 +194,7 @@
 	}
 	if (obj->last_write_req != NULL)
 		seq_printf(m, " (%s)",
-			   i915_gem_request_get_ring(obj->last_write_req)->name);
+			   i915_gem_request_get_engine(obj->last_write_req)->name);
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -202,8 +212,8 @@
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &dev_priv->gtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 	u64 total_obj_size, total_gtt_size;
 	int count, ret;
@@ -216,11 +226,11 @@
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_puts(m, "Active:\n");
-		head = &vm->active_list;
+		head = &ggtt->base.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_puts(m, "Inactive:\n");
-		head = &vm->inactive_list;
+		head = &ggtt->base.inactive_list;
 		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
@@ -397,15 +407,15 @@
 {
 	struct drm_i915_gem_object *obj;
 	struct file_stats stats;
-	struct intel_engine_cs *ring;
-	int i, j;
+	struct intel_engine_cs *engine;
+	int j;
 
 	memset(&stats, 0, sizeof(stats));
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				per_file_stats(0, obj, &stats);
 		}
@@ -429,11 +439,13 @@
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	u32 count, mappable_count, purgeable_count;
 	u64 size, mappable_size, purgeable_size;
+	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
+	u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_file *file;
 	struct i915_vma *vma;
 	int ret;
@@ -452,12 +464,12 @@
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, vm_link);
+	count_vmas(&ggtt->base.active_list, vm_link);
 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, vm_link);
+	count_vmas(&ggtt->base.inactive_list, vm_link);
 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -466,6 +478,14 @@
 		size += obj->base.size, ++count;
 		if (obj->madv == I915_MADV_DONTNEED)
 			purgeable_size += obj->base.size, ++purgeable_count;
+		if (obj->mapping) {
+			pin_mapped_count++;
+			pin_mapped_size += obj->base.size;
+			if (obj->pages_pin_count == 0) {
+				pin_mapped_purgeable_count++;
+				pin_mapped_purgeable_size += obj->base.size;
+			}
+		}
 	}
 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
@@ -483,6 +503,14 @@
 			purgeable_size += obj->base.size;
 			++purgeable_count;
 		}
+		if (obj->mapping) {
+			pin_mapped_count++;
+			pin_mapped_size += obj->base.size;
+			if (obj->pages_pin_count == 0) {
+				pin_mapped_purgeable_count++;
+				pin_mapped_purgeable_size += obj->base.size;
+			}
+		}
 	}
 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 		   purgeable_count, purgeable_size);
@@ -490,13 +518,20 @@
 		   mappable_count, mappable_size);
 	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
 		   count, size);
+	seq_printf(m,
+		   "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
+		   pin_mapped_count, pin_mapped_purgeable_count,
+		   pin_mapped_size, pin_mapped_purgeable_size);
 
 	seq_printf(m, "%llu [%llu] gtt total\n",
-		   dev_priv->gtt.base.total,
-		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
 		struct task_struct *task;
@@ -517,8 +552,7 @@
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
 	}
-
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->filelist_mutex);
 
 	return 0;
 }
@@ -591,14 +625,13 @@
 					   pipe, plane);
 			}
 			if (work->flip_queued_req) {
-				struct intel_engine_cs *ring =
-					i915_gem_request_get_ring(work->flip_queued_req);
+				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
 
 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-					   ring->name,
+					   engine->name,
 					   i915_gem_request_get_seqno(work->flip_queued_req),
 					   dev_priv->next_seqno,
-					   ring->get_seqno(ring, true),
+					   engine->get_seqno(engine),
 					   i915_gem_request_completed(work->flip_queued_req, true));
 			} else
 				seq_printf(m, "Flip not associated with any ring\n");
@@ -637,28 +670,28 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int total = 0;
-	int ret, i, j;
+	int ret, j;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			int count;
 
 			count = 0;
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				count++;
 			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   ring->name, j, count);
+				   engine->name, j, count);
 
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link) {
 				seq_puts(m, "   ");
 				describe_obj(m, obj);
@@ -681,26 +714,26 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
-	int ret, any, i;
+	int ret, any;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
 	any = 0;
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &ring->request_list, list)
+		list_for_each_entry(req, &engine->request_list, list)
 			count++;
 		if (count == 0)
 			continue;
 
-		seq_printf(m, "%s requests: %d\n", ring->name, count);
-		list_for_each_entry(req, &ring->request_list, list) {
+		seq_printf(m, "%s requests: %d\n", engine->name, count);
+		list_for_each_entry(req, &engine->request_list, list) {
 			struct task_struct *task;
 
 			rcu_read_lock();
@@ -726,12 +759,12 @@
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_engine_cs *ring)
+				 struct intel_engine_cs *engine)
 {
-	if (ring->get_seqno) {
-		seq_printf(m, "Current sequence (%s): %x\n",
-			   ring->name, ring->get_seqno(ring, false));
-	}
+	seq_printf(m, "Current sequence (%s): %x\n",
+		   engine->name, engine->get_seqno(engine));
+	seq_printf(m, "Current user interrupts (%s): %x\n",
+		   engine->name, READ_ONCE(engine->user_interrupts));
 }
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -739,16 +772,16 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_seqno_info(m, ring);
+	for_each_engine(engine, dev_priv)
+		i915_ring_seqno_info(m, engine);
 
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -762,7 +795,7 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -934,13 +967,13 @@
 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
-				   ring->name, I915_READ_IMR(ring));
+				   engine->name, I915_READ_IMR(engine));
 		}
-		i915_ring_seqno_info(m, ring);
+		i915_ring_seqno_info(m, engine);
 	}
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -981,12 +1014,12 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
 
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = ring->status_page.page_addr;
+	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -1216,12 +1249,12 @@
 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
 
 		rpstat = I915_READ(GEN6_RPSTAT1);
-		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
-		rpcurup = I915_READ(GEN6_RP_CUR_UP);
-		rpprevup = I915_READ(GEN6_RP_PREV_UP);
-		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
-		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
-		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
 		if (IS_GEN9(dev))
 			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
 		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -1261,21 +1294,21 @@
 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
-		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
-			   GEN6_CURICONT_MASK);
-		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
-			   GEN6_CURBSYTAVG_MASK);
-		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
-			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
+		seq_printf(m, "RP CUR UP: %d (%dus)\n",
+			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
+		seq_printf(m, "RP PREV UP: %d (%dus)\n",
+			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
 		seq_printf(m, "Up threshold: %d%%\n",
 			   dev_priv->rps.up_threshold);
 
-		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
-			   GEN6_CURIAVG_MASK);
-		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
-			   GEN6_CURBSYTAVG_MASK);
-		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
-			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
+		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
+		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
@@ -1331,11 +1364,12 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	u64 acthd[I915_NUM_RINGS];
-	u32 seqno[I915_NUM_RINGS];
+	struct intel_engine_cs *engine;
+	u64 acthd[I915_NUM_ENGINES];
+	u32 seqno[I915_NUM_ENGINES];
 	u32 instdone[I915_NUM_INSTDONE_REG];
-	int i, j;
+	enum intel_engine_id id;
+	int j;
 
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
@@ -1344,9 +1378,9 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i) {
-		seqno[i] = ring->get_seqno(ring, false);
-		acthd[i] = intel_ring_get_active_head(ring);
+	for_each_engine_id(engine, dev_priv, id) {
+		acthd[id] = intel_ring_get_active_head(engine);
+		seqno[id] = engine->get_seqno(engine);
 	}
 
 	i915_get_extra_instdone(dev, instdone);
@@ -1360,19 +1394,22 @@
 	} else
 		seq_printf(m, "Hangcheck inactive\n");
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s:\n", ring->name);
-		seq_printf(m, "\tseqno = %x [current %x]\n",
-			   ring->hangcheck.seqno, seqno[i]);
+	for_each_engine_id(engine, dev_priv, id) {
+		seq_printf(m, "%s:\n", engine->name);
+		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+			   engine->hangcheck.seqno,
+			   seqno[id],
+			   engine->last_submitted_seqno);
+		seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+			   engine->hangcheck.user_interrupts,
+			   READ_ONCE(engine->user_interrupts));
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
-			   (long long)ring->hangcheck.acthd,
-			   (long long)acthd[i]);
-		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
-			   (long long)ring->hangcheck.max_acthd);
-		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
-		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+			   (long long)engine->hangcheck.acthd,
+			   (long long)acthd[id]);
+		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
 
-		if (ring->id == RCS) {
+		if (engine->id == RCS) {
 			seq_puts(m, "\tinstdone read =");
 
 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
@@ -1382,7 +1419,7 @@
 
 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
 				seq_printf(m, " 0x%08x",
-					   ring->hangcheck.instdone[j]);
+					   engine->hangcheck.instdone[j]);
 
 			seq_puts(m, "\n");
 		}
@@ -1465,12 +1502,11 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_uncore_forcewake_domain *fw_domain;
-	int i;
 
 	spin_lock_irq(&dev_priv->uncore.lock);
-	for_each_fw_domain(fw_domain, dev_priv, i) {
+	for_each_fw_domain(fw_domain, dev_priv) {
 		seq_printf(m, "%s.wake_count = %u\n",
-			   intel_uncore_forcewake_domain_to_str(i),
+			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
 			   fw_domain->wake_count);
 	}
 	spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1897,6 +1933,11 @@
 	struct drm_device *dev = node->minor->dev;
 	struct intel_framebuffer *fbdev_fb = NULL;
 	struct drm_framebuffer *drm_fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
        if (to_i915(dev)->fbdev) {
@@ -1908,7 +1949,7 @@
                          fbdev_fb->base.depth,
                          fbdev_fb->base.bits_per_pixel,
                          fbdev_fb->base.modifier[0],
-                         atomic_read(&fbdev_fb->base.refcount.refcount));
+                         drm_framebuffer_read_refcount(&fbdev_fb->base));
                describe_obj(m, fbdev_fb->obj);
                seq_putc(m, '\n');
        }
@@ -1926,11 +1967,12 @@
 			   fb->base.depth,
 			   fb->base.bits_per_pixel,
 			   fb->base.modifier[0],
-			   atomic_read(&fb->base.refcount.refcount));
+			   drm_framebuffer_read_refcount(&fb->base));
 		describe_obj(m, fb->obj);
 		seq_putc(m, '\n');
 	}
 	mutex_unlock(&dev->mode_config.fb_lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1948,9 +1990,10 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	enum intel_engine_id id;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -1968,13 +2011,13 @@
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
-			for_each_ring(ring, dev_priv, i) {
+			for_each_engine_id(engine, dev_priv, id) {
 				struct drm_i915_gem_object *ctx_obj =
-					ctx->engine[i].state;
+					ctx->engine[id].state;
 				struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
+					ctx->engine[id].ringbuf;
 
-				seq_printf(m, "%s: ", ring->name);
+				seq_printf(m, "%s: ", engine->name);
 				if (ctx_obj)
 					describe_obj(m, ctx_obj);
 				if (ringbuf)
@@ -1995,22 +2038,22 @@
 
 static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct intel_context *ctx,
-			      struct intel_engine_cs *ring)
+			      struct intel_engine_cs *engine)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
 		seq_printf(m, "Context on %s with no gem object\n",
-			   ring->name);
+			   engine->name);
 		return;
 	}
 
-	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx, ring));
+	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+		   intel_execlists_ctx_id(ctx, engine));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
@@ -2043,9 +2086,9 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	int ret;
 
 	if (!i915.enable_execlists) {
 		seq_printf(m, "Logical Ring Contexts are disabled\n");
@@ -2058,8 +2101,8 @@
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
 		if (ctx != dev_priv->kernel_context)
-			for_each_ring(ring, dev_priv, i)
-				i915_dump_lrc_obj(m, ctx, ring);
+			for_each_engine(engine, dev_priv)
+				i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2071,15 +2114,14 @@
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
 	u32 status;
 	u32 ctx_id;
 	struct list_head *cursor;
-	int ring_id, i;
-	int ret;
+	int i, ret;
 
 	if (!i915.enable_execlists) {
 		seq_puts(m, "Logical Ring Contexts are disabled\n");
@@ -2092,22 +2134,21 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, ring_id) {
+	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_request *head_req = NULL;
 		int count = 0;
-		unsigned long flags;
 
-		seq_printf(m, "%s\n", ring->name);
+		seq_printf(m, "%s\n", engine->name);
 
-		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
-		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
 			   status, ctx_id);
 
-		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = ring->next_context_status_buffer;
+		read_pointer = engine->next_context_status_buffer;
 		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
 			write_pointer += GEN8_CSB_ENTRIES;
@@ -2115,24 +2156,25 @@
 			   read_pointer, write_pointer);
 
 		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
-			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
-			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
 
 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
 				   i, status, ctx_id);
 		}
 
-		spin_lock_irqsave(&ring->execlist_lock, flags);
-		list_for_each(cursor, &ring->execlist_queue)
+		spin_lock_bh(&engine->execlist_lock);
+		list_for_each(cursor, &engine->execlist_queue)
 			count++;
-		head_req = list_first_entry_or_null(&ring->execlist_queue,
-				struct drm_i915_gem_request, execlist_link);
-		spin_unlock_irqrestore(&ring->execlist_lock, flags);
+		head_req = list_first_entry_or_null(&engine->execlist_queue,
+						    struct drm_i915_gem_request,
+						    execlist_link);
+		spin_unlock_bh(&engine->execlist_lock);
 
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
 			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(head_req->ctx, ring));
+				   intel_execlists_ctx_id(head_req->ctx, engine));
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}
@@ -2248,19 +2290,19 @@
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int unused, i;
+	int i;
 
 	if (!ppgtt)
 		return;
 
-	for_each_ring(ring, dev_priv, unused) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "%s\n", engine->name);
 		for (i = 0; i < 4; i++) {
-			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
 			pdp <<= 32;
-			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
@@ -2269,19 +2311,22 @@
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "%s\n", engine->name);
 		if (INTEL_INFO(dev)->gen == 7)
-			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+			seq_printf(m, "GFX_MODE: 0x%08x\n",
+				   I915_READ(RING_MODE_GEN7(engine)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE(engine)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2312,6 +2357,7 @@
 	else if (INTEL_INFO(dev)->gen >= 6)
 		gen6_ppgtt_info(m, dev);
 
+	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
@@ -2326,6 +2372,7 @@
 		idr_for_each(&file_priv->context_idr, per_file_ctx,
 			     (void *)(unsigned long)m);
 	}
+	mutex_unlock(&dev->filelist_mutex);
 
 out_put:
 	intel_runtime_pm_put(dev_priv);
@@ -2336,12 +2383,11 @@
 
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int count = 0;
-	int i;
 
-	for_each_ring(ring, i915, i)
-		count += ring->irq_refcount;
+	for_each_engine(engine, i915)
+		count += engine->irq_refcount;
 
 	return count;
 }
@@ -2362,6 +2408,8 @@
 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+	mutex_lock(&dev->filelist_mutex);
 	spin_lock(&dev_priv->rps.client_lock);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2384,6 +2432,7 @@
 		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
 	spin_unlock(&dev_priv->rps.client_lock);
+	mutex_unlock(&dev->filelist_mutex);
 
 	return 0;
 }
@@ -2393,10 +2442,11 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const bool edram = INTEL_GEN(dev_priv) > 8;
 
-	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
-	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
+		   intel_uncore_edram_size(dev_priv)/1024/1024);
 
 	return 0;
 }
@@ -2408,7 +2458,7 @@
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	u32 tmp, i;
 
-	if (!HAS_GUC_UCODE(dev_priv->dev))
+	if (!HAS_GUC_UCODE(dev_priv))
 		return 0;
 
 	seq_printf(m, "GuC firmware status:\n");
@@ -2449,9 +2499,8 @@
 				 struct drm_i915_private *dev_priv,
 				 struct i915_guc_client *client)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint64_t tot = 0;
-	uint32_t i;
 
 	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
 		client->priority, client->ctx_index, client->proc_desc_offset);
@@ -2464,11 +2513,11 @@
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[ring->guc_id],
-				ring->name);
-		tot += client->submissions[ring->guc_id];
+				client->submissions[engine->guc_id],
+				engine->name);
+		tot += client->submissions[engine->guc_id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2480,11 +2529,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
-	struct intel_engine_cs *ring;
-	enum intel_ring_id i;
+	struct intel_engine_cs *engine;
 	u64 total = 0;
 
-	if (!HAS_GUC_SCHED(dev_priv->dev))
+	if (!HAS_GUC_SCHED(dev_priv))
 		return 0;
 
 	if (mutex_lock_interruptible(&dev->struct_mutex))
@@ -2504,11 +2552,11 @@
 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
 	seq_printf(m, "\nGuC submissions:\n");
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-			ring->name, guc.submissions[ring->guc_id],
-			guc.last_seqno[ring->guc_id]);
-		total += guc.submissions[ring->guc_id];
+			engine->name, guc.submissions[engine->guc_id],
+			guc.last_seqno[engine->guc_id]);
+		total += guc.submissions[engine->guc_id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2688,10 +2736,8 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!HAS_RUNTIME_PM(dev)) {
-		seq_puts(m, "not supported\n");
-		return 0;
-	}
+	if (!HAS_RUNTIME_PM(dev_priv))
+		seq_puts(m, "Runtime power management not supported\n");
 
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
@@ -2702,6 +2748,9 @@
 #else
 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
+	seq_printf(m, "PCI device power state: %s [%d]\n",
+		   pci_power_name(dev_priv->dev->pdev->current_state),
+		   dev_priv->dev->pdev->current_state);
 
 	return 0;
 }
@@ -3114,9 +3163,10 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-	int i, j, ret;
+	enum intel_engine_id id;
+	int j, ret;
 
 	if (!i915_semaphore_is_enabled(dev)) {
 		seq_puts(m, "Semaphores are disabled\n");
@@ -3135,14 +3185,14 @@
 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_ring(ring, dev_priv, i) {
+		for_each_engine_id(engine, dev_priv, id) {
 			uint64_t offset;
 
-			seq_printf(m, "%s\n", ring->name);
+			seq_printf(m, "%s\n", engine->name);
 
 			seq_puts(m, "  Last signal:");
 			for (j = 0; j < num_rings; j++) {
-				offset = i * I915_NUM_RINGS + j;
+				offset = id * I915_NUM_ENGINES + j;
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3150,7 +3200,7 @@
 
 			seq_puts(m, "  Last wait:  ");
 			for (j = 0; j < num_rings; j++) {
-				offset = i + (j * I915_NUM_RINGS);
+				offset = id + (j * I915_NUM_ENGINES);
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3160,18 +3210,18 @@
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, "  Last signal:");
-		for_each_ring(ring, dev_priv, i)
+		for_each_engine(engine, dev_priv)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
-					   I915_READ(ring->semaphore.mbox.signal[j]));
+					   I915_READ(engine->semaphore.mbox.signal[j]));
 		seq_putc(m, '\n');
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < num_rings; j++) {
-			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
-		}
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < num_rings; j++)
+			seq_printf(m, "  0x%08x ",
+				   engine->semaphore.sync_seqno[j]);
 		seq_putc(m, '\n');
 	}
 	seq_putc(m, '\n');
@@ -3193,8 +3243,8 @@
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
-		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
-			   pll->config.crtc_mask, pll->active, yesno(pll->on));
+		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
 		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
 		seq_printf(m, " dpll_md: 0x%08x\n",
@@ -3212,11 +3262,12 @@
 {
 	int i;
 	int ret;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
+	enum intel_engine_id id;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -3225,9 +3276,9 @@
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "HW whitelist count for %s: %d\n",
-			   ring->name, workarounds->hw_whitelist_count[i]);
+			   engine->name, workarounds->hw_whitelist_count[id]);
 	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
@@ -3417,7 +3468,8 @@
 		intel_dig_port = enc_to_dig_port(encoder);
 		if (!intel_dig_port->dp.can_mst)
 			continue;
-
+		seq_printf(m, "MST Source Port %c\n",
+			   port_name(intel_dig_port->port));
 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
 	}
 	drm_modeset_unlock_all(dev);
@@ -4693,7 +4745,7 @@
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
+	*val = i915_terminally_wedged(&dev_priv->gpu_error);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1c6d227..b3198fc 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,66 @@
 #include <linux/pm_runtime.h>
 #include <linux/oom.h>
 
+static unsigned int i915_load_fail_count;
+
+bool __i915_inject_load_failure(const char *func, int line)
+{
+	if (i915_load_fail_count >= i915.inject_load_failure)
+		return false;
+
+	if (++i915_load_fail_count == i915.inject_load_failure) {
+		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+			 i915.inject_load_failure, func, line);
+		return true;
+	}
+
+	return false;
+}
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+		    "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+	      const char *fmt, ...)
+{
+	static bool shown_bug_once;
+	struct device *dev = dev_priv->dev->dev;
+	bool is_error = level[1] <= KERN_ERR[1];
+	bool is_debug = level[1] == KERN_DEBUG[1];
+	struct va_format vaf;
+	va_list args;
+
+	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+		return;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+		   __builtin_return_address(0), &vaf);
+
+	if (is_error && !shown_bug_once) {
+		dev_notice(dev, "%s", FDO_BUG_MSG);
+		shown_bug_once = true;
+	}
+
+	va_end(args);
+}
+
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+	return i915.inject_load_failure &&
+	       i915_load_fail_count == i915.inject_load_failure;
+}
+
+#define i915_load_error(dev_priv, fmt, ...)				     \
+	__i915_printk(dev_priv,						     \
+		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+		      fmt, ##__VA_ARGS__)
 
 static int i915_getparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -87,16 +147,16 @@
 		value = 1;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = intel_ring_initialized(&dev_priv->ring[VCS]);
+		value = intel_engine_initialized(&dev_priv->engine[VCS]);
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = intel_ring_initialized(&dev_priv->ring[BCS]);
+		value = intel_engine_initialized(&dev_priv->engine[BCS]);
 		break;
 	case I915_PARAM_HAS_VEBOX:
-		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		value = intel_engine_initialized(&dev_priv->engine[VECS]);
 		break;
 	case I915_PARAM_HAS_BSD2:
-		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
 		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
@@ -197,13 +257,6 @@
 	return 0;
 }
 
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define   DEVEN_MCHBAR_EN (1 << 28)
-
 /* Allocate space for the MCH regs if needed, return nonzero on error */
 static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
@@ -265,7 +318,7 @@
 	dev_priv->mchbar_need_disable = false;
 
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
 		enabled = !!(temp & DEVEN_MCHBAR_EN);
 	} else {
 		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -283,7 +336,7 @@
 
 	/* Space is allocated or reserved, so enable it. */
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
 				       temp | DEVEN_MCHBAR_EN);
 	} else {
 		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -296,17 +349,24 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-	u32 temp;
 
 	if (dev_priv->mchbar_need_disable) {
 		if (IS_I915G(dev) || IS_I915GM(dev)) {
-			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
-			temp &= ~DEVEN_MCHBAR_EN;
-			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+			u32 deven_val;
+
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+					      &deven_val);
+			deven_val &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+					       deven_val);
 		} else {
-			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-			temp &= ~1;
-			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+			u32 mchbar_val;
+
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+					      &mchbar_val);
+			mchbar_val &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+					       mchbar_val);
 		}
 	}
 
@@ -370,6 +430,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (i915_inject_load_failure())
+		return -ENODEV;
+
 	ret = intel_bios_init(dev_priv);
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
@@ -413,9 +476,6 @@
 
 	intel_modeset_gem_init(dev);
 
-	/* Always safe in the mode setting case. */
-	/* FIXME: do pre/post-mode set stuff in core KMS code */
-	dev->vblank_disable_allowed = true;
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return 0;
 
@@ -444,7 +504,7 @@
 
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_cleanup_engines(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
@@ -453,6 +513,7 @@
 	intel_teardown_gmbus(dev);
 cleanup_csr:
 	intel_csr_ucode_fini(dev_priv);
+	intel_power_domains_fini(dev_priv);
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -465,6 +526,7 @@
 {
 	struct apertures_struct *ap;
 	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool primary;
 	int ret;
 
@@ -472,8 +534,8 @@
 	if (!ap)
 		return -ENOMEM;
 
-	ap->ranges[0].base = dev_priv->gtt.mappable_base;
-	ap->ranges[0].size = dev_priv->gtt.mappable_end;
+	ap->ranges[0].base = ggtt->mappable_base;
+	ap->ranges[0].size = ggtt->mappable_end;
 
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -853,6 +915,10 @@
 	else if (INTEL_INFO(dev)->gen >= 9)
 		gen9_sseu_info_init(dev);
 
+	/* Snooping is broken on BXT A stepping. */
+	info->has_snoop = !info->has_llc;
+	info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+
 	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
 	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
 	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
@@ -929,6 +995,84 @@
 	destroy_workqueue(dev_priv->wq);
 }
 
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+				  struct drm_device *dev,
+				  struct intel_device_info *info)
+{
+	struct intel_device_info *device_info;
+	int ret = 0;
+
+	if (i915_inject_load_failure())
+		return -ENODEV;
+
+	/* Setup the write-once "constant" device info */
+	device_info = (struct intel_device_info *)&dev_priv->info;
+	memcpy(device_info, info, sizeof(dev_priv->info));
+	device_info->device_id = dev->pdev->device;
+
+	spin_lock_init(&dev_priv->irq_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
+	mutex_init(&dev_priv->backlight_lock);
+	spin_lock_init(&dev_priv->uncore.lock);
+	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	spin_lock_init(&dev_priv->mmio_flip_lock);
+	mutex_init(&dev_priv->sb_lock);
+	mutex_init(&dev_priv->modeset_restore_lock);
+	mutex_init(&dev_priv->av_mutex);
+	mutex_init(&dev_priv->wm.wm_mutex);
+	mutex_init(&dev_priv->pps_mutex);
+
+	ret = i915_workqueues_init(dev_priv);
+	if (ret < 0)
+		return ret;
+
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_pm_setup(dev);
+	intel_init_dpio(dev_priv);
+	intel_power_domains_init(dev_priv);
+	intel_irq_init(dev_priv);
+	intel_init_display_hooks(dev_priv);
+	intel_init_clock_gating_hooks(dev_priv);
+	intel_init_audio_hooks(dev_priv);
+	i915_gem_load_init(dev);
+
+	intel_display_crc_init(dev);
+
+	i915_dump_device_info(dev_priv);
+
+	/* Not all pre-production machines fall into this category, only the
+	 * very first ones. Almost everything should work, except for maybe
+	 * suspend/resume. And we don't implement workarounds that affect only
+	 * pre-production machines. */
+	if (IS_HSW_EARLY_SDV(dev))
+		DRM_INFO("This is an early pre-production Haswell machine. "
+			 "It may not be fully functional.\n");
+
+	return 0;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+	i915_gem_load_cleanup(dev_priv->dev);
+	i915_workqueues_cleanup(dev_priv);
+}
+
 static int i915_mmio_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -970,97 +1114,93 @@
 }
 
 /**
- * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
  *
- * The driver load routine has to do several things:
- *   - drive output discovery via intel_modeset_init()
- *   - initialize the memory manager
- *   - allocate initial config memory
- *   - setup the DRM framebuffer with the allocated memory
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
  */
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv;
-	struct intel_device_info *info, *device_info;
-	int ret = 0;
-	uint32_t aperture_size;
+	struct drm_device *dev = dev_priv->dev;
+	int ret;
 
-	info = (struct intel_device_info *) flags;
+	if (i915_inject_load_failure())
+		return -ENODEV;
 
-	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-	if (dev_priv == NULL)
-		return -ENOMEM;
-
-	dev->dev_private = dev_priv;
-	dev_priv->dev = dev;
-
-	/* Setup the write-once "constant" device info */
-	device_info = (struct intel_device_info *)&dev_priv->info;
-	memcpy(device_info, info, sizeof(dev_priv->info));
-	device_info->device_id = dev->pdev->device;
-
-	spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->gpu_error.lock);
-	mutex_init(&dev_priv->backlight_lock);
-	spin_lock_init(&dev_priv->uncore.lock);
-	spin_lock_init(&dev_priv->mm.object_stat_lock);
-	spin_lock_init(&dev_priv->mmio_flip_lock);
-	mutex_init(&dev_priv->sb_lock);
-	mutex_init(&dev_priv->modeset_restore_lock);
-	mutex_init(&dev_priv->av_mutex);
-
-	ret = i915_workqueues_init(dev_priv);
-	if (ret < 0)
-		goto out_free_priv;
-
-	intel_pm_setup(dev);
-
-	intel_runtime_pm_get(dev_priv);
-
-	intel_display_crc_init(dev);
-
-	i915_dump_device_info(dev_priv);
-
-	/* Not all pre-production machines fall into this category, only the
-	 * very first ones. Almost everything should work, except for maybe
-	 * suspend/resume. And we don't implement workarounds that affect only
-	 * pre-production machines. */
-	if (IS_HSW_EARLY_SDV(dev))
-		DRM_INFO("This is an early pre-production Haswell machine. "
-			 "It may not be fully functional.\n");
-
-	if (i915_get_bridge_dev(dev)) {
-		ret = -EIO;
-		goto out_runtime_pm_put;
-	}
+	if (i915_get_bridge_dev(dev))
+		return -EIO;
 
 	ret = i915_mmio_setup(dev);
 	if (ret < 0)
 		goto put_bridge;
 
-	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(dev);
-
 	intel_uncore_init(dev);
 
-	ret = i915_gem_gtt_init(dev);
+	return 0;
+
+put_bridge:
+	pci_dev_put(dev_priv->bridge_dev);
+
+	return ret;
+}
+
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_uncore_fini(dev);
+	i915_mmio_cleanup(dev);
+	pci_dev_put(dev_priv->bridge_dev);
+}
+
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	uint32_t aperture_size;
+	int ret;
+
+	if (i915_inject_load_failure())
+		return -ENODEV;
+
+	intel_device_info_runtime_init(dev);
+
+	ret = i915_ggtt_init_hw(dev);
 	if (ret)
-		goto out_uncore_fini;
+		return ret;
+
+	ret = i915_ggtt_enable_hw(dev);
+	if (ret) {
+		DRM_ERROR("failed to enable GGTT\n");
+		goto out_ggtt;
+	}
 
 	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
 	 * otherwise the vga fbdev driver falls over. */
 	ret = i915_kick_out_firmware_fb(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-		goto out_gtt;
+		goto out_ggtt;
 	}
 
 	ret = i915_kick_out_vgacon(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting VGA console\n");
-		goto out_gtt;
+		goto out_ggtt;
 	}
 
 	pci_set_master(dev->pdev);
@@ -1080,26 +1220,27 @@
 	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
-	aperture_size = dev_priv->gtt.mappable_end;
+	aperture_size = ggtt->mappable_end;
 
-	dev_priv->gtt.mappable =
-		io_mapping_create_wc(dev_priv->gtt.mappable_base,
+	ggtt->mappable =
+		io_mapping_create_wc(ggtt->mappable_base,
 				     aperture_size);
-	if (dev_priv->gtt.mappable == NULL) {
+	if (!ggtt->mappable) {
 		ret = -EIO;
-		goto out_gtt;
+		goto out_ggtt;
 	}
 
-	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
 					      aperture_size);
 
-	intel_irq_init(dev_priv);
+	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
 	intel_uncore_sanitize(dev);
 
 	intel_opregion_setup(dev);
 
-	i915_gem_load_init(dev);
-	i915_gem_shrinker_init(dev_priv);
+	i915_gem_load_init_fences(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1117,24 +1258,44 @@
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
 
-	intel_device_info_runtime_init(dev);
+	return 0;
 
-	intel_init_dpio(dev_priv);
+out_ggtt:
+	i915_ggtt_cleanup_hw(dev);
 
-	if (INTEL_INFO(dev)->num_pipes) {
-		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
-		if (ret)
-			goto out_gem_unload;
-	}
+	return ret;
+}
 
-	intel_power_domains_init(dev_priv);
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-	ret = i915_load_modeset_init(dev);
-	if (ret < 0) {
-		DRM_ERROR("failed to init modeset\n");
-		goto out_power_well;
-	}
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
 
+	pm_qos_remove_request(&dev_priv->pm_qos);
+	arch_phys_wc_del(ggtt->mtrr);
+	io_mapping_free(ggtt->mappable);
+	i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	i915_gem_shrinker_init(dev_priv);
 	/*
 	 * Notify a valid surface after modesetting,
 	 * when running inside a VM.
@@ -1144,48 +1305,107 @@
 
 	i915_setup_sysfs(dev);
 
-	if (INTEL_INFO(dev)->num_pipes) {
+	if (INTEL_INFO(dev_priv)->num_pipes) {
 		/* Must be done after probing outputs */
 		intel_opregion_init(dev);
 		acpi_video_register();
 	}
 
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		intel_gpu_ips_init(dev_priv);
 
-	intel_runtime_pm_enable(dev_priv);
-
 	i915_audio_component_init(dev_priv);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+	i915_audio_component_cleanup(dev_priv);
+	intel_gpu_ips_teardown();
+	acpi_video_unregister();
+	intel_opregion_fini(dev_priv->dev);
+	i915_teardown_sysfs(dev_priv->dev);
+	i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct drm_i915_private *dev_priv;
+	int ret = 0;
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = dev_priv;
+	/* Must be set before calling __i915_printk */
+	dev_priv->dev = dev;
+
+	ret = i915_driver_init_early(dev_priv, dev,
+				     (struct intel_device_info *)flags);
+
+	if (ret < 0)
+		goto out_free_priv;
+
+	intel_runtime_pm_get(dev_priv);
+
+	ret = i915_driver_init_mmio(dev_priv);
+	if (ret < 0)
+		goto out_runtime_pm_put;
+
+	ret = i915_driver_init_hw(dev_priv);
+	if (ret < 0)
+		goto out_cleanup_mmio;
+
+	/*
+	 * TODO: move the vblank init and parts of modeset init steps into one
+	 * of the i915_driver_init_/i915_driver_register functions according
+	 * to the role/effect of the given init step.
+	 */
+	if (INTEL_INFO(dev)->num_pipes) {
+		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+		if (ret)
+			goto out_cleanup_hw;
+	}
+
+	ret = i915_load_modeset_init(dev);
+	if (ret < 0)
+		goto out_cleanup_vblank;
+
+	i915_driver_register(dev_priv);
+
+	intel_runtime_pm_enable(dev_priv);
 
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
 
-out_power_well:
-	intel_power_domains_fini(dev_priv);
+out_cleanup_vblank:
 	drm_vblank_cleanup(dev);
-out_gem_unload:
-	i915_gem_shrinker_cleanup(dev_priv);
-
-	if (dev->pdev->msi_enabled)
-		pci_disable_msi(dev->pdev);
-
-	intel_teardown_mchbar(dev);
-	pm_qos_remove_request(&dev_priv->pm_qos);
-	arch_phys_wc_del(dev_priv->gtt.mtrr);
-	io_mapping_free(dev_priv->gtt.mappable);
-out_gtt:
-	i915_global_gtt_cleanup(dev);
-out_uncore_fini:
-	intel_uncore_fini(dev);
-	i915_mmio_cleanup(dev);
-put_bridge:
-	pci_dev_put(dev_priv->bridge_dev);
-	i915_gem_load_cleanup(dev);
+out_cleanup_hw:
+	i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+	i915_driver_cleanup_mmio(dev_priv);
 out_runtime_pm_put:
 	intel_runtime_pm_put(dev_priv);
-	i915_workqueues_cleanup(dev_priv);
+	i915_driver_cleanup_early(dev_priv);
 out_free_priv:
+	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+
 	kfree(dev_priv);
 
 	return ret;
@@ -1198,26 +1418,15 @@
 
 	intel_fbdev_fini(dev);
 
-	i915_audio_component_cleanup(dev_priv);
-
 	ret = i915_gem_suspend(dev);
 	if (ret) {
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 		return ret;
 	}
 
-	intel_power_domains_fini(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
-	intel_gpu_ips_teardown();
-
-	i915_teardown_sysfs(dev);
-
-	i915_gem_shrinker_cleanup(dev_priv);
-
-	io_mapping_free(dev_priv->gtt.mappable);
-	arch_phys_wc_del(dev_priv->gtt.mtrr);
-
-	acpi_video_unregister();
+	i915_driver_unregister(dev_priv);
 
 	drm_vblank_cleanup(dev);
 
@@ -1246,31 +1455,24 @@
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	i915_destroy_error_state(dev);
 
-	if (dev->pdev->msi_enabled)
-		pci_disable_msi(dev->pdev);
-
-	intel_opregion_fini(dev);
-
 	/* Flush any outstanding unpin_work. */
 	flush_workqueue(dev_priv->wq);
 
 	intel_guc_ucode_fini(dev);
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_cleanup_engines(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	intel_fbc_cleanup_cfb(dev_priv);
 
-	pm_qos_remove_request(&dev_priv->pm_qos);
+	intel_power_domains_fini(dev_priv);
 
-	i915_global_gtt_cleanup(dev);
+	i915_driver_cleanup_hw(dev_priv);
+	i915_driver_cleanup_mmio(dev_priv);
 
-	intel_uncore_fini(dev);
-	i915_mmio_cleanup(dev);
+	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
-	i915_gem_load_cleanup(dev);
-	pci_dev_put(dev_priv->bridge_dev);
-	i915_workqueues_cleanup(dev_priv);
+	i915_driver_cleanup_early(dev_priv);
 	kfree(dev_priv);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6d2fb3f..f313b4d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,6 +66,11 @@
 #define IVB_CURSOR_OFFSETS \
 	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
 
+#define BDW_COLORS \
+	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -288,24 +293,28 @@
 	.is_mobile = 1,
 };
 
+#define BDW_FEATURES \
+	HSW_FEATURES, \
+	BDW_COLORS
+
 static const struct intel_device_info intel_broadwell_d_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8, .is_mobile = 1,
 };
 
 static const struct intel_device_info intel_broadwell_gt3d_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broadwell_gt3m_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8, .is_mobile = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
@@ -318,16 +327,17 @@
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	GEN_CHV_PIPEOFFSETS,
 	CURSOR_OFFSETS,
+	CHV_COLORS,
 };
 
 static const struct intel_device_info intel_skylake_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -345,18 +355,17 @@
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
+	BDW_COLORS,
 };
 
 static const struct intel_device_info intel_kabylake_info = {
-	HSW_FEATURES,
-	.is_preliminary = 1,
+	BDW_FEATURES,
 	.is_kabylake = 1,
 	.gen = 9,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
-	HSW_FEATURES,
-	.is_preliminary = 1,
+	BDW_FEATURES,
 	.is_kabylake = 1,
 	.gen = 9,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -504,6 +513,7 @@
 				WARN_ON(!IS_SKYLAKE(dev) &&
 					!IS_KABYLAKE(dev));
 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
 				    pch->subsystem_vendor == 0x1af4 &&
 				    pch->subsystem_device == 0x1100)) {
@@ -557,10 +567,9 @@
 	drm_modeset_unlock_all(dev);
 }
 
-static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 			      bool rpm_resume);
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
 
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 {
@@ -630,8 +639,7 @@
 
 	intel_display_set_init_power(dev_priv, false);
 
-	if (HAS_CSR(dev_priv))
-		flush_work(&dev_priv->csr.work);
+	intel_csr_ucode_suspend(dev_priv);
 
 out:
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -647,7 +655,8 @@
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+	fw_csr = !IS_BROXTON(dev_priv) &&
+		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
 	 * In case of firmware assisted context save/restore don't manually
 	 * deinit the power domains. This also means the CSR/DMC firmware will
@@ -658,7 +667,13 @@
 	if (!fw_csr)
 		intel_power_domains_suspend(dev_priv);
 
-	ret = intel_suspend_complete(dev_priv);
+	ret = 0;
+	if (IS_BROXTON(dev_priv))
+		bxt_enable_dc9(dev_priv);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_enable_pc8(dev_priv);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		ret = vlv_suspend_complete(dev_priv);
 
 	if (ret) {
 		DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -719,9 +734,16 @@
 static int i915_drm_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
+	ret = i915_ggtt_enable_hw(dev);
+	if (ret)
+		DRM_ERROR("failed to re-enable GGTT\n");
+
+	intel_csr_ucode_resume(dev_priv);
+
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_restore_gtt_mappings(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -850,21 +872,25 @@
 
 	intel_uncore_early_sanitize(dev, true);
 
-	if (IS_BROXTON(dev))
-		ret = bxt_resume_prepare(dev_priv);
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_BROXTON(dev)) {
+		if (!dev_priv->suspended_to_idle)
+			gen9_sanitize_dc_state(dev_priv);
+		bxt_disable_dc9(dev_priv);
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		hsw_disable_pc8(dev_priv);
+	}
 
 	intel_uncore_sanitize(dev);
 
-	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+	if (IS_BROXTON(dev_priv) ||
+	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
 		intel_power_domains_init_hw(dev_priv, true);
 
+	enable_rpm_wakeref_asserts(dev_priv);
+
 out:
 	dev_priv->suspended_to_idle = false;
 
-	enable_rpm_wakeref_asserts(dev_priv);
-
 	return ret;
 }
 
@@ -900,23 +926,32 @@
 int i915_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool simulated;
+	struct i915_gpu_error *error = &dev_priv->gpu_error;
+	unsigned reset_counter;
 	int ret;
 
 	intel_reset_gt_powersave(dev);
 
 	mutex_lock(&dev->struct_mutex);
 
+	/* Clear any previous failed attempts at recovery. Time to try again. */
+	atomic_andnot(I915_WEDGED, &error->reset_counter);
+
+	/* Clear the reset-in-progress flag and increment the reset epoch. */
+	reset_counter = atomic_inc_return(&error->reset_counter);
+	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
+		ret = -EIO;
+		goto error;
+	}
+
 	i915_gem_reset(dev);
 
-	simulated = dev_priv->gpu_error.stop_rings != 0;
-
-	ret = intel_gpu_reset(dev);
+	ret = intel_gpu_reset(dev, ALL_ENGINES);
 
 	/* Also reset the gpu hangman. */
-	if (simulated) {
+	if (error->stop_rings != 0) {
 		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-		dev_priv->gpu_error.stop_rings = 0;
+		error->stop_rings = 0;
 		if (ret == -ENODEV) {
 			DRM_INFO("Reset not implemented, but ignoring "
 				 "error for simulated gpu hangs\n");
@@ -928,9 +963,11 @@
 		pr_notice("drm/i915: Resetting chip after gpu hang\n");
 
 	if (ret) {
-		DRM_ERROR("Failed to reset chip: %i\n", ret);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
+		if (ret != -ENODEV)
+			DRM_ERROR("Failed to reset chip: %i\n", ret);
+		else
+			DRM_DEBUG_DRIVER("GPU reset disabled\n");
+		goto error;
 	}
 
 	intel_overlay_reset(dev_priv);
@@ -949,20 +986,14 @@
 	 * was running at the time of the reset (i.e. we weren't VT
 	 * switched away).
 	 */
-
-	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-	dev_priv->gpu_error.reload_in_reset = true;
-
 	ret = i915_gem_init_hw(dev);
-
-	dev_priv->gpu_error.reload_in_reset = false;
-
-	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("Failed hw init on reset %d\n", ret);
-		return ret;
+		goto error;
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	/*
 	 * rps/rc6 re-init is necessary to restore state lost after the
 	 * reset and the re-install of gt irqs. Skip for ironlake per
@@ -973,6 +1004,11 @@
 		intel_enable_gt_powersave(dev);
 
 	return 0;
+
+error:
+	atomic_or(I915_WEDGED, &error->reset_counter);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1079,44 +1115,6 @@
 	return i915_drm_resume(drm_dev);
 }
 
-static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
-{
-	hsw_enable_pc8(dev_priv);
-
-	return 0;
-}
-
-static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-
-	/* TODO: when DC5 support is added disable DC5 here. */
-
-	broxton_ddi_phy_uninit(dev);
-	broxton_uninit_cdclk(dev);
-	bxt_enable_dc9(dev_priv);
-
-	return 0;
-}
-
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-
-	/* TODO: when CSR FW support is added make sure the FW is loaded */
-
-	bxt_disable_dc9(dev_priv);
-
-	/*
-	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
-	 * is available.
-	 */
-	broxton_init_cdclk(dev);
-	broxton_ddi_phy_init(dev);
-
-	return 0;
-}
-
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1420,7 +1418,7 @@
 	if (err)
 		goto err2;
 
-	if (!IS_CHERRYVIEW(dev_priv->dev))
+	if (!IS_CHERRYVIEW(dev_priv))
 		vlv_save_gunit_s0ix_state(dev_priv);
 
 	err = vlv_force_gfx_clock(dev_priv, false);
@@ -1452,7 +1450,7 @@
 	 */
 	ret = vlv_force_gfx_clock(dev_priv, true);
 
-	if (!IS_CHERRYVIEW(dev_priv->dev))
+	if (!IS_CHERRYVIEW(dev_priv))
 		vlv_restore_gunit_s0ix_state(dev_priv);
 
 	err = vlv_allow_gt_wake(dev_priv, true);
@@ -1522,7 +1520,16 @@
 	intel_suspend_gt_powersave(dev);
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
-	ret = intel_suspend_complete(dev_priv);
+	ret = 0;
+	if (IS_BROXTON(dev_priv)) {
+		bxt_display_core_uninit(dev_priv);
+		bxt_enable_dc9(dev_priv);
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		hsw_enable_pc8(dev_priv);
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		ret = vlv_suspend_complete(dev_priv);
+	}
+
 	if (ret) {
 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
 		intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1596,12 +1603,17 @@
 	if (IS_GEN6(dev_priv))
 		intel_init_pch_refclk(dev);
 
-	if (IS_BROXTON(dev))
-		ret = bxt_resume_prepare(dev_priv);
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (IS_BROXTON(dev)) {
+		bxt_disable_dc9(dev_priv);
+		bxt_display_core_init(dev_priv, true);
+		if (dev_priv->csr.dmc_payload &&
+		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+			gen9_enable_dc5(dev_priv);
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		hsw_disable_pc8(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		ret = vlv_resume_prepare(dev_priv, true);
+	}
 
 	/*
 	 * No point of rolling back things in case of an error, as the best
@@ -1632,26 +1644,6 @@
 	return ret;
 }
 
-/*
- * This function implements common functionality of runtime and system
- * suspend sequence.
- */
-static int intel_suspend_complete(struct drm_i915_private *dev_priv)
-{
-	int ret;
-
-	if (IS_BROXTON(dev_priv))
-		ret = bxt_suspend_complete(dev_priv);
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		ret = hsw_suspend_complete(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_suspend_complete(dev_priv);
-	else
-		ret = 0;
-
-	return ret;
-}
-
 static const struct dev_pm_ops i915_pm_ops = {
 	/*
 	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
@@ -1772,10 +1764,8 @@
 	if (i915.modeset == 0)
 		driver.driver_features &= ~DRIVER_MODESET;
 
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && i915.modeset == -1)
 		driver.driver_features &= ~DRIVER_MODESET;
-#endif
 
 	if (!(driver.driver_features & DRIVER_MODESET)) {
 		/* Silently fail loading to not upset userspace. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5d7a7c4..5faacc6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,33 +33,40 @@
 #include <uapi/drm/i915_drm.h>
 #include <uapi/drm/drm_fourcc.h>
 
-#include <drm/drmP.h>
-#include "i915_params.h"
-#include "i915_reg.h"
-#include "intel_bios.h"
-#include "intel_ringbuffer.h"
-#include "intel_lrc.h"
-#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
-#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
-#include <drm/drm_gem.h>
 #include <linux/backlight.h>
 #include <linux/hashtable.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+
+#include "i915_params.h"
+#include "i915_reg.h"
+
+#include "intel_bios.h"
+#include "intel_dpll_mgr.h"
 #include "intel_guc.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
 
 /* General customization:
  */
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160229"
+#define DRIVER_DATE		"20160425"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -97,6 +104,10 @@
 #define I915_STATE_WARN_ON(x)						\
 	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
+bool __i915_inject_load_failure(const char *func, int line);
+#define i915_inject_load_failure() \
+	__i915_inject_load_failure(__func__, __LINE__)
+
 static inline const char *yesno(bool v)
 {
 	return v ? "yes" : "no";
@@ -122,9 +133,35 @@
 	TRANSCODER_B,
 	TRANSCODER_C,
 	TRANSCODER_EDP,
+	TRANSCODER_DSI_A,
+	TRANSCODER_DSI_C,
 	I915_MAX_TRANSCODERS
 };
-#define transcoder_name(t) ((t) + 'A')
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+	switch (transcoder) {
+	case TRANSCODER_A:
+		return "A";
+	case TRANSCODER_B:
+		return "B";
+	case TRANSCODER_C:
+		return "C";
+	case TRANSCODER_EDP:
+		return "EDP";
+	case TRANSCODER_DSI_A:
+		return "DSI A";
+	case TRANSCODER_DSI_C:
+		return "DSI C";
+	default:
+		return "<invalid>";
+	}
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
 
 /*
  * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
@@ -176,6 +213,8 @@
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_TRANSCODER_DSI_A,
+	POWER_DOMAIN_TRANSCODER_DSI_C,
 	POWER_DOMAIN_PORT_DDI_A_LANES,
 	POWER_DOMAIN_PORT_DDI_B_LANES,
 	POWER_DOMAIN_PORT_DDI_C_LANES,
@@ -273,6 +312,10 @@
 	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
 	     (__s)++)
 
+#define for_each_port_masked(__port, __ports_mask) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
+		for_each_if ((__ports_mask) & (1 << (__port)))
+
 #define for_each_crtc(dev, crtc) \
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 
@@ -340,81 +383,6 @@
 	unsigned int bsd_ring;
 };
 
-enum intel_dpll_id {
-	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
-	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A = 0,
-	DPLL_ID_PCH_PLL_B = 1,
-	/* hsw/bdw */
-	DPLL_ID_WRPLL1 = 0,
-	DPLL_ID_WRPLL2 = 1,
-	DPLL_ID_SPLL = 2,
-
-	/* skl */
-	DPLL_ID_SKL_DPLL1 = 0,
-	DPLL_ID_SKL_DPLL2 = 1,
-	DPLL_ID_SKL_DPLL3 = 2,
-};
-#define I915_NUM_PLLS 3
-
-struct intel_dpll_hw_state {
-	/* i9xx, pch plls */
-	uint32_t dpll;
-	uint32_t dpll_md;
-	uint32_t fp0;
-	uint32_t fp1;
-
-	/* hsw, bdw */
-	uint32_t wrpll;
-	uint32_t spll;
-
-	/* skl */
-	/*
-	 * DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in
-	 * lower part of ctrl1 and they get shifted into position when writing
-	 * the register.  This allows us to easily compare the state to share
-	 * the DPLL.
-	 */
-	uint32_t ctrl1;
-	/* HDMI only, 0 when used for DP */
-	uint32_t cfgcr1, cfgcr2;
-
-	/* bxt */
-	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
-		 pcsdw12;
-};
-
-struct intel_shared_dpll_config {
-	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
-	struct intel_dpll_hw_state hw_state;
-};
-
-struct intel_shared_dpll {
-	struct intel_shared_dpll_config config;
-
-	int active; /* count of number of active CRTCs (i.e. DPMS on) */
-	bool on; /* is the PLL actually active? Disabled during modeset */
-	const char *name;
-	/* should match the index in the dev_priv->shared_dplls array */
-	enum intel_dpll_id id;
-	/* The mode_set hook is optional and should be used together with the
-	 * intel_prepare_shared_dpll function. */
-	void (*mode_set)(struct drm_i915_private *dev_priv,
-			 struct intel_shared_dpll *pll);
-	void (*enable)(struct drm_i915_private *dev_priv,
-		       struct intel_shared_dpll *pll);
-	void (*disable)(struct drm_i915_private *dev_priv,
-			struct intel_shared_dpll *pll);
-	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
-			     struct intel_shared_dpll *pll,
-			     struct intel_dpll_hw_state *hw_state);
-};
-
-#define SKL_DPLL0 0
-#define SKL_DPLL1 1
-#define SKL_DPLL2 2
-#define SKL_DPLL3 3
-
 /* Used by dp and fdi links */
 struct intel_link_m_n {
 	uint32_t	tu;
@@ -533,7 +501,8 @@
 		u32 cpu_ring_head;
 		u32 cpu_ring_tail;
 
-		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+		u32 last_seqno;
+		u32 semaphore_seqno[I915_NUM_ENGINES - 1];
 
 		/* Register state */
 		u32 start;
@@ -553,7 +522,7 @@
 		u32 fault_reg;
 		u64 faddr;
 		u32 rc_psmi; /* sleep state */
-		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
 
 		struct drm_i915_error_object {
 			int page_count;
@@ -561,6 +530,8 @@
 			u32 *pages[0];
 		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
+		struct drm_i915_error_object *wa_ctx;
+
 		struct drm_i915_error_request {
 			long jiffies;
 			u32 seqno;
@@ -577,12 +548,12 @@
 
 		pid_t pid;
 		char comm[TASK_COMM_LEN];
-	} ring[I915_NUM_RINGS];
+	} ring[I915_NUM_ENGINES];
 
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 rseqno[I915_NUM_RINGS], wseqno;
+		u32 rseqno[I915_NUM_ENGINES], wseqno;
 		u64 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -611,27 +582,12 @@
 struct drm_i915_display_funcs {
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
-	/**
-	 * find_dpll() - Find the best values for the PLL
-	 * @limit: limits for the PLL
-	 * @crtc: current CRTC
-	 * @target: target frequency in kHz
-	 * @refclk: reference clock frequency in kHz
-	 * @match_clock: if provided, @best_clock P divider must
-	 *               match the P divider from @match_clock
-	 *               used for LVDS downclocking
-	 * @best_clock: best PLL values found
-	 *
-	 * Returns true on success, false on failure.
-	 */
-	bool (*find_dpll)(const struct intel_limit *limit,
-			  struct intel_crtc_state *crtc_state,
-			  int target, int refclk,
-			  struct dpll *match_clock,
-			  struct dpll *best_clock);
-	int (*compute_pipe_wm)(struct intel_crtc *crtc,
-			       struct drm_atomic_state *state);
-	void (*program_watermarks)(struct intel_crtc_state *cstate);
+	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
+	int (*compute_intermediate_wm)(struct drm_device *dev,
+				       struct intel_crtc *intel_crtc,
+				       struct intel_crtc_state *newstate);
+	void (*initial_watermarks)(struct intel_crtc_state *cstate);
+	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
 	void (*update_wm)(struct drm_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -662,6 +618,9 @@
 	/* render clock increase/decrease */
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
+
+	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
+	void (*load_luts)(struct drm_crtc_state *crtc_state);
 };
 
 enum forcewake_domain_id {
@@ -681,6 +640,13 @@
 			 FORCEWAKE_MEDIA)
 };
 
+#define FW_REG_READ  (1)
+#define FW_REG_WRITE (2)
+
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+			       i915_reg_t reg, unsigned int op);
+
 struct intel_uncore_funcs {
 	void (*force_wake_get)(struct drm_i915_private *dev_priv,
 							enum forcewake_domains domains);
@@ -713,8 +679,9 @@
 	struct intel_uncore_forcewake_domain {
 		struct drm_i915_private *i915;
 		enum forcewake_domain_id id;
+		enum forcewake_domains mask;
 		unsigned wake_count;
-		struct timer_list timer;
+		struct hrtimer timer;
 		i915_reg_t reg_set;
 		u32 val_set;
 		u32 val_clear;
@@ -727,14 +694,14 @@
 };
 
 /* Iterate over initialised fw domains */
-#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
-	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
-	     (i__) < FW_DOMAIN_ID_COUNT; \
-	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
-		for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
+	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
+	     (domain__)++) \
+		for_each_if ((mask__) & (domain__)->mask)
 
-#define for_each_fw_domain(domain__, dev_priv__, i__) \
-	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+#define for_each_fw_domain(domain__, dev_priv__) \
+	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
 
 #define CSR_VERSION(major, minor)	((major) << 16 | (minor))
 #define CSR_VERSION_MAJOR(version)	((version) >> 16)
@@ -750,6 +717,7 @@
 	i915_reg_t mmioaddr[8];
 	uint32_t mmiodata[8];
 	uint32_t dc_state;
+	uint32_t allowed_dc_mask;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -779,6 +747,7 @@
 	func(overlay_needs_physical) sep \
 	func(supports_tv) sep \
 	func(has_llc) sep \
+	func(has_snoop) sep \
 	func(has_ddi) sep \
 	func(has_fpga_dbg)
 
@@ -810,6 +779,11 @@
 	u8 has_slice_pg:1;
 	u8 has_subslice_pg:1;
 	u8 has_eu_pg:1;
+
+	struct color_luts {
+		u16 degamma_lut_size;
+		u16 gamma_lut_size;
+	} color;
 };
 
 #undef DEFINE_FLAG
@@ -891,7 +865,7 @@
 		struct i915_vma *lrc_vma;
 		u64 lrc_desc;
 		uint32_t *lrc_reg_state;
-	} engine[I915_NUM_RINGS];
+	} engine[I915_NUM_ENGINES];
 
 	struct list_head link;
 };
@@ -1036,6 +1010,7 @@
 
 struct intel_gmbus {
 	struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
 	u32 force_bit;
 	u32 reg0;
 	i915_reg_t gpio_reg;
@@ -1159,6 +1134,7 @@
 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 	u8 rp1_freq;		/* "less than" RP0 power/frequency */
 	u8 rp0_freq;		/* Non-overclocked max frequency. */
+	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
 
 	u8 up_threshold; /* Current %busy required to upclock */
 	u8 down_threshold; /* Current %busy required to downclock */
@@ -1298,6 +1274,7 @@
 	struct i915_hw_ppgtt *aliasing_ppgtt;
 
 	struct notifier_block oom_notifier;
+	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 	bool shrinker_no_lock_stealing;
 
@@ -1423,9 +1400,6 @@
 
 	/* For missed irq/seqno simulation. */
 	unsigned int test_irq_rings;
-
-	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset   */
-	bool reload_in_reset;
 };
 
 enum modeset_restore {
@@ -1482,21 +1456,23 @@
 	unsigned int lvds_use_ssc:1;
 	unsigned int display_clock_mode:1;
 	unsigned int fdi_rx_polarity_inverted:1;
-	unsigned int has_mipi:1;
+	unsigned int panel_type:4;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
 	enum drrs_support_type drrs_type;
 
-	/* eDP */
-	int edp_rate;
-	int edp_lanes;
-	int edp_preemphasis;
-	int edp_vswing;
-	bool edp_initialized;
-	bool edp_support;
-	int edp_bpp;
-	struct edp_power_seq edp_pps;
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+		bool low_vswing;
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
 
 	struct {
 		bool full_link;
@@ -1516,7 +1492,6 @@
 
 	/* MIPI DSI */
 	struct {
-		u16 port;
 		u16 panel_id;
 		struct mipi_config *config;
 		struct mipi_pps_data *pps;
@@ -1532,6 +1507,7 @@
 	union child_device_config *child_dev;
 
 	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+	struct sdvo_device_mapping sdvo_mappings[2];
 };
 
 enum intel_ddb_partitioning {
@@ -1706,7 +1682,7 @@
 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
 	u32 count;
-	u32 hw_whitelist_count[I915_NUM_RINGS];
+	u32 hw_whitelist_count[I915_NUM_ENGINES];
 };
 
 struct i915_virtual_gpu {
@@ -1719,7 +1695,7 @@
 	uint32_t                        dispatch_flags;
 	uint32_t                        args_batch_start_offset;
 	uint64_t                        batch_obj_vm_offset;
-	struct intel_engine_cs          *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_object      *batch_obj;
 	struct intel_context            *ctx;
 	struct drm_i915_gem_request     *request;
@@ -1771,7 +1747,7 @@
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
-	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
@@ -1829,6 +1805,7 @@
 	unsigned int skl_boot_cdclk;
 	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
 	unsigned int max_dotclk_freq;
+	unsigned int rawclk_freq;
 	unsigned int hpll_freq;
 	unsigned int czclk_freq;
 
@@ -1855,7 +1832,7 @@
 	struct drm_atomic_state *modeset_restore_state;
 
 	struct list_head vm_list; /* Global list of all address spaces */
-	struct i915_gtt gtt; /* VM representing the global address space */
+	struct i915_ggtt ggtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
 	DECLARE_HASHTABLE(mm_structs, 7);
@@ -1863,8 +1840,6 @@
 
 	/* Kernel Modesetting */
 
-	struct sdvo_device_mapping sdvo_mappings[2];
-
 	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
 	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 	wait_queue_head_t pending_flip_queue;
@@ -1876,6 +1851,14 @@
 	/* dpll and cdclk state is protected by connection_mutex */
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+	const struct intel_dpll_mgr *dpll_mgr;
+
+	/*
+	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
+	 * Must be global rather than per dpll, because on some platforms
+	 * plls share registers.
+	 */
+	struct mutex dpll_lock;
 
 	unsigned int active_crtcs;
 	unsigned int min_pixclk[I915_MAX_PIPES];
@@ -1884,9 +1867,6 @@
 
 	struct i915_workarounds workarounds;
 
-	/* Reclocking support */
-	bool render_reclock_avail;
-
 	struct i915_frontbuffer_tracking fb_tracking;
 
 	u16 orig_clock;
@@ -1896,7 +1876,7 @@
 	struct intel_l3_parity l3_parity;
 
 	/* Cannot be determined by PCIID. You must always read a register. */
-	size_t ellc_size;
+	u32 edram_cap;
 
 	/* gen6+ rps state */
 	struct intel_gen6_power_mgmt rps;
@@ -1936,7 +1916,15 @@
 
 	u32 fdi_rx_config;
 
+	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
 	u32 chv_phy_control;
+	/*
+	 * Shadows for CHV DPLL_MD regs to keep the state
+	 * checker somewhat working in the presence hardware
+	 * checker somewhat working in the presence of hardware
+	 */
+	u32 chv_dpll_md[I915_MAX_PIPES];
+	u32 bxt_phy_grc;
 
 	u32 suspend_count;
 	bool suspended_to_idle;
@@ -1980,6 +1968,13 @@
 		};
 
 		uint8_t max_level;
+
+		/*
+		 * Should be held around atomic WM register writing; also
+	 * protects intel_crtc->wm.active and
+		 * cstate->wm.need_postvbl_update.
+		 */
+		struct mutex wm_mutex;
 	} wm;
 
 	struct i915_runtime_pm pm;
@@ -1989,15 +1984,13 @@
 		int (*execbuf_submit)(struct i915_execbuffer_params *params,
 				      struct drm_i915_gem_execbuffer2 *args,
 				      struct list_head *vmas);
-		int (*init_rings)(struct drm_device *dev);
-		void (*cleanup_ring)(struct intel_engine_cs *ring);
-		void (*stop_ring)(struct intel_engine_cs *ring);
+		int (*init_engines)(struct drm_device *dev);
+		void (*cleanup_engine)(struct intel_engine_cs *engine);
+		void (*stop_engine)(struct intel_engine_cs *engine);
 	} gt;
 
 	struct intel_context *kernel_context;
 
-	bool edp_low_vswing;
-
 	/* perform PHY state sanity checks? */
 	bool chv_phy_assert[2];
 
@@ -2024,10 +2017,28 @@
 	return container_of(guc, struct drm_i915_private, guc);
 }
 
-/* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
-	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
-		for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, dev_priv__) \
+	for ((engine__) = &(dev_priv__)->engine[0]; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (intel_engine_initialized(engine__))
+
+/* Iterator with engine_id */
+#define for_each_engine_id(engine__, dev_priv__, id__) \
+	for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (((id__) = (engine__)->id, \
+			      intel_engine_initialized(engine__)))
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, dev_priv__, mask__) \
+	for ((engine__) = &(dev_priv__)->engine[0]; \
+	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+	     (engine__)++) \
+		for_each_if (((mask__) & intel_engine_flag(engine__)) && \
+			     intel_engine_initialized(engine__))
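
Illustrative aside (not part of the patch): the new iterators drop the explicit index variable that for_each_ring() required, so callers only declare the engine pointer. count_active_engines() below is a made-up helper using only the macro defined just above.

/* Sketch only -- hypothetical caller, not introduced by this series. */
static unsigned int count_active_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	unsigned int count = 0;

	/* Visits every initialised engine; uninitialised slots are skipped. */
	for_each_engine(engine, dev_priv)
		count++;

	return count;
}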
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2097,7 +2108,7 @@
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
 
-	struct list_head ring_list[I915_NUM_RINGS];
+	struct list_head engine_list[I915_NUM_ENGINES];
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
 
@@ -2108,7 +2119,7 @@
 	 * rendering and so a non-zero seqno), and is not set if it is on
 	 * inactive (ready to be unbound) list.
 	 */
-	unsigned int active:I915_NUM_RINGS;
+	unsigned int active:I915_NUM_ENGINES;
 
 	/**
 	 * This is set if the object has been written to since last bound
@@ -2172,10 +2183,7 @@
 		struct scatterlist *sg;
 		int last;
 	} get_page;
-
-	/* prime dma-buf support */
-	void *dma_buf_vmapping;
-	int vmapping_count;
+	void *mapping;
 
 	/** Breadcrumb of last rendering to the buffer.
 	 * There can only be one writer, but we allow for multiple readers.
@@ -2187,7 +2195,7 @@
 	 * read request. This allows for the CPU to read from an active
 	 * buffer by only waiting for the write to complete.
 	 * */
-	struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
+	struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
 	struct drm_i915_gem_request *last_write_req;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	struct drm_i915_gem_request *last_fenced_req;
@@ -2242,7 +2250,8 @@
 
 	/** On Which ring this request was generated */
 	struct drm_i915_private *i915;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
+	unsigned reset_counter;
 
 	 /** GEM sequence number associated with the previous request,
 	  * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2323,7 +2332,6 @@
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file);
@@ -2335,9 +2343,9 @@
 }
 
 static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 {
-	return req ? req->ring : NULL;
+	return req ? req->engine : NULL;
 }
 
 static inline struct drm_i915_gem_request *
@@ -2351,7 +2359,7 @@
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
 	kref_put(&req->ref, i915_gem_request_free);
 }
 
@@ -2363,7 +2371,7 @@
 	if (!req)
 		return;
 
-	dev = req->ring->dev;
+	dev = req->engine->dev;
 	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
 		mutex_unlock(&dev->struct_mutex);
 }
@@ -2493,6 +2501,7 @@
 	__p; \
 })
 #define INTEL_INFO(p) 	(&__I915__(p)->info)
+#define INTEL_GEN(p)	(INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
 #define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
@@ -2611,13 +2620,17 @@
 #define BLT_RING		(1<<BCS)
 #define VEBOX_RING		(1<<VECS)
 #define BSD2_RING		(1<<VCS2)
+#define ALL_ENGINES		(~0)
+
 #define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
 #define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
 #define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
 #define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
+#define HAS_SNOOP(dev)		(INTEL_INFO(dev)->has_snoop)
+#define HAS_EDRAM(dev)		(__I915__(dev)->edram_cap & EDRAM_ENABLED)
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-				 __I915__(dev)->ellc_size)
+				 HAS_EDRAM(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
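
Illustrative aside (not part of the patch): the eLLC size is no longer cached directly; HAS_EDRAM() tests the stored capability word, and intel_uncore_edram_size(), declared further down in this header, decodes it. The debug print below is a hypothetical call site.

/* Sketch only -- hypothetical caller, not introduced by this series. */
static void debug_print_edram(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv->dev))
		return;

	DRM_DEBUG_DRIVER("eDRAM present: %llu bytes\n",
			 (unsigned long long)intel_uncore_edram_size(dev_priv));
}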
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
@@ -2671,7 +2684,7 @@
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
 				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
 				 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-				 IS_KABYLAKE(dev))
+				 IS_KABYLAKE(dev) || IS_BROXTON(dev))
 #define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
 #define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
@@ -2696,6 +2709,7 @@
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
@@ -2727,6 +2741,13 @@
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
 /* i915_dma.c */
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+	      const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...)				   \
+	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
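
Hypothetical call site, shown only to illustrate the intended use of the new i915_report_error() wrapper (it expands to __i915_printk() at KERN_ERR level); do_hw_init() is invented for the example.

/* Sketch only -- hypothetical caller, not introduced by this series. */
static int init_and_report(struct drm_i915_private *dev_priv)
{
	int ret = do_hw_init(dev_priv);	/* do_hw_init() is hypothetical */

	if (ret)
		i915_report_error(dev_priv,
				  "hardware initialisation failed (%d)\n", ret);
	return ret;
}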
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2739,9 +2760,11 @@
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 #endif
-extern int intel_gpu_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
+extern int intel_guc_reset(struct drm_i915_private *dev_priv);
+extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2758,7 +2781,7 @@
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
@@ -2785,6 +2808,8 @@
 					enum forcewake_domains domains);
 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 					enum forcewake_domains domains);
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
 static inline bool intel_vgpu_active(struct drm_device *dev)
 {
@@ -2863,7 +2888,6 @@
 			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 					struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 				   struct drm_i915_gem_execbuffer2 *args,
 				   struct list_head *vmas);
@@ -2894,6 +2918,7 @@
 			struct drm_file *file_priv);
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
+void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2978,12 +3003,46 @@
 	BUG_ON(obj->pages == NULL);
 	obj->pages_pin_count++;
 }
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages_pin_count == 0);
 	obj->pages_pin_count--;
 }
 
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj - the object to map into kernel address space
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space.
+ *
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj - the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * on the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ *
+ * The caller must hold the struct_mutex.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	i915_gem_object_unpin_pages(obj);
+}
+
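
The kerneldoc above spells out the contract; the sketch below is illustrative only (the helper name and the zero-fill are invented) and simply follows it: hold struct_mutex, check the returned pointer with IS_ERR(), and balance the pin with i915_gem_object_unpin_map().

/* Sketch only -- hypothetical caller, not introduced by this series. */
static int zero_object_contents(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vaddr = i915_gem_object_pin_map(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);

	i915_gem_object_unpin_map(obj);
	return 0;
}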
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_engine_cs *to,
@@ -3007,42 +3066,68 @@
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
 					   bool lazy_coherency)
 {
-	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-	return i915_seqno_passed(seqno, req->previous_seqno);
+	if (!lazy_coherency && req->engine->irq_seqno_barrier)
+		req->engine->irq_seqno_barrier(req->engine);
+	return i915_seqno_passed(req->engine->get_seqno(req->engine),
+				 req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 					      bool lazy_coherency)
 {
-	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-	return i915_seqno_passed(seqno, req->seqno);
+	if (!lazy_coherency && req->engine->irq_seqno_barrier)
+		req->engine->irq_seqno_barrier(req->engine);
+	return i915_seqno_passed(req->engine->get_seqno(req->engine),
+				 req->seqno);
 }
 
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
-				      bool interruptible);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
+
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+	return unlikely(reset & I915_WEDGED);
+}
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-	return unlikely(atomic_read(&error->reset_counter)
-			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+	return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return atomic_read(&error->reset_counter) & I915_WEDGED;
+	return __i915_terminally_wedged(i915_reset_counter(error));
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
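
Illustrative aside (not part of the patch): the reset counter is now sampled once and the helpers above interpret that snapshot, which is how the reworked i915_gem_check_wedge() in i915_gem.c uses them. check_reset_snapshot() below is a made-up name.

/* Sketch only -- hypothetical caller, not introduced by this series. */
static int check_reset_snapshot(struct i915_gpu_error *error)
{
	u32 reset = i915_reset_counter(error);

	if (__i915_terminally_wedged(reset))
		return -EIO;	/* the GPU is wedged for good */
	if (__i915_reset_in_progress(reset))
		return -EAGAIN;	/* drop locks, let the reset finish, retry */
	return 0;
}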
 
 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3060,11 +3145,11 @@
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
+int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3075,7 +3160,6 @@
 #define i915_add_request_no_flush(req) \
 	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct intel_rps_client *rps);
@@ -3155,13 +3239,9 @@
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-	WARN_ON(i915_is_ggtt(vm));
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
@@ -3174,7 +3254,10 @@
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+	return i915_gem_obj_size(obj, &ggtt->base);
 }
 
 static inline int __must_check
@@ -3182,7 +3265,10 @@
 		      uint32_t alignment,
 		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+	return i915_gem_object_pin(obj, &ggtt->base,
 				   alignment, flags | PIN_GLOBAL);
 }
 
@@ -3297,6 +3383,7 @@
 #define I915_SHRINK_UNBOUND 0x2
 #define I915_SHRINK_BOUND 0x4
 #define I915_SHRINK_ACTIVE 0x8
+#define I915_SHRINK_VMAPS 0x10
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
@@ -3343,7 +3430,7 @@
 {
 	kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, bool wedge,
+void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 			      const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
 			  struct i915_error_state_file_priv *error_priv);
@@ -3355,10 +3442,10 @@
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
@@ -3392,6 +3479,13 @@
 /* intel_bios.c */
 int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+				     enum port port);
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
@@ -3403,6 +3497,7 @@
 					 bool enable);
 extern int intel_opregion_notify_adapter(struct drm_device *dev,
 					 pci_power_t state);
+extern int intel_opregion_get_panel_type(struct drm_device *dev);
 #else
 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
@@ -3418,6 +3513,10 @@
 {
 	return 0;
 }
+static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+{
+	return -ENODEV;
+}
 #endif
 
 /* intel_acpi.c */
@@ -3624,11 +3723,11 @@
 	}
 }
 
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
 				      struct drm_i915_gem_request *req)
 {
-	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
-		i915_gem_request_assign(&ring->trace_irq_req, req);
+	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+		i915_gem_request_assign(&engine->trace_irq_req, req);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 233adc3..aad2685 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -32,14 +32,13 @@
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_mocs.h"
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-#define RQ_BUG_ON(expr)
-
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static void
@@ -85,9 +84,7 @@
 {
 	int ret;
 
-#define EXIT_COND (!i915_reset_in_progress(error) || \
-		   i915_terminally_wedged(error))
-	if (EXIT_COND)
+	if (!i915_reset_in_progress(error))
 		return 0;
 
 	/*
@@ -96,17 +93,16 @@
 	 * we should simply try to bail out and fail as gracefully as possible.
 	 */
 	ret = wait_event_interruptible_timeout(error->reset_queue,
-					       EXIT_COND,
+					       !i915_reset_in_progress(error),
 					       10*HZ);
 	if (ret == 0) {
 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
 		return -EIO;
 	} else if (ret < 0) {
 		return ret;
+	} else {
+		return 0;
 	}
-#undef EXIT_COND
-
-	return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -130,9 +126,9 @@
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_get_aperture *args = data;
-	struct i915_gtt *ggtt = &dev_priv->gtt;
 	struct i915_vma *vma;
 	size_t pinned;
 
@@ -146,7 +142,7 @@
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
-	args->aper_size = dev_priv->gtt.base.total;
+	args->aper_size = ggtt->base.total;
 	args->aper_available_size = args->aper_size - pinned;
 
 	return 0;
@@ -211,11 +207,10 @@
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (ret) {
+	if (WARN_ON(ret)) {
 		/* In the event of a disaster, abandon all caches and
 		 * hope for the best.
 		 */
-		WARN_ON(ret != -EIO);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
@@ -700,7 +695,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -765,7 +760,8 @@
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
@@ -807,7 +803,7 @@
 		 * source page isn't available.  Return the error and we'll
 		 * retry in the slow path.
 		 */
-		if (fast_user_write(dev_priv->gtt.mappable, page_base,
+		if (fast_user_write(ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			ret = -EFAULT;
 			goto out_flush;
@@ -1053,7 +1049,7 @@
 	if (ret)
 		goto put_rpm;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -1109,27 +1105,19 @@
 	return ret;
 }
 
-int
-i915_gem_check_wedge(struct i915_gpu_error *error,
-		     bool interruptible)
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
 {
-	if (i915_reset_in_progress(error)) {
+	if (__i915_terminally_wedged(reset_counter))
+		return -EIO;
+
+	if (__i915_reset_in_progress(reset_counter)) {
 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 		 * -EIO unconditionally for these. */
 		if (!interruptible)
 			return -EIO;
 
-		/* Recovery complete, but the reset failed ... */
-		if (i915_terminally_wedged(error))
-			return -EIO;
-
-		/*
-		 * Check if GPU Reset is in progress - we need intel_ring_begin
-		 * to work properly to reinit the hw state while the gpu is
-		 * still marked as reset-in-progress. Handle this with a flag.
-		 */
-		if (!error->reload_in_reset)
-			return -EAGAIN;
+		return -EAGAIN;
 	}
 
 	return 0;
@@ -1141,9 +1129,9 @@
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-		       struct intel_engine_cs *ring)
+		       struct intel_engine_cs *engine)
 {
-	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
 static unsigned long local_clock_us(unsigned *cpu)
@@ -1193,7 +1181,7 @@
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	if (req->ring->irq_refcount)
+	if (req->engine->irq_refcount)
 		return -EBUSY;
 
 	/* Only spin if we know the GPU is processing this request */
@@ -1223,7 +1211,6 @@
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1238,16 +1225,15 @@
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const bool irq_test_in_progress =
-		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
@@ -1288,7 +1274,7 @@
 	if (ret == 0)
 		goto out;
 
-	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -1296,16 +1282,17 @@
 	for (;;) {
 		struct timer_list timer;
 
-		prepare_to_wait(&ring->irq_queue, &wait, state);
+		prepare_to_wait(&engine->irq_queue, &wait, state);
 
 		/* We need to check whether any gpu reset happened in between
-		 * the caller grabbing the seqno and now ... */
-		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
-			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
-			 * is truely gone. */
-			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-			if (ret == 0)
-				ret = -EAGAIN;
+		 * the request being submitted and now. If a reset has occurred,
+		 * the request is effectively complete (we either are in the
+		 * process of or have discarded the rendering and completely
+		 * reset the GPU). The results of the request are lost and we
+		 * are free to continue on with the original operation.
+		 */
+		if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+			ret = 0;
 			break;
 		}
 
@@ -1325,11 +1312,11 @@
 		}
 
 		timer.function = NULL;
-		if (timeout || missed_irq(dev_priv, ring)) {
+		if (timeout || missed_irq(dev_priv, engine)) {
 			unsigned long expire;
 
 			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
 			mod_timer(&timer, expire);
 		}
 
@@ -1341,9 +1328,9 @@
 		}
 	}
 	if (!irq_test_in_progress)
-		ring->irq_put(ring);
+		engine->irq_put(engine);
 
-	finish_wait(&ring->irq_queue, &wait);
+	finish_wait(&engine->irq_queue, &wait);
 
 out:
 	trace_i915_gem_request_wait_end(req);
@@ -1370,7 +1357,6 @@
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file)
 {
-	struct drm_i915_private *dev_private;
 	struct drm_i915_file_private *file_priv;
 
 	WARN_ON(!req || !file || req->file_priv);
@@ -1381,7 +1367,6 @@
 	if (req->file_priv)
 		return -EINVAL;
 
-	dev_private = req->ring->dev->dev_private;
 	file_priv = file->driver_priv;
 
 	spin_lock(&file_priv->mm.lock);
@@ -1434,7 +1419,7 @@
 static void
 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
 	lockdep_assert_held(&engine->dev->struct_mutex);
@@ -1459,30 +1444,22 @@
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv = req->i915;
 	bool interruptible;
 	int ret;
 
-	BUG_ON(req == NULL);
-
-	dev = req->ring->dev;
-	dev_priv = dev->dev_private;
 	interruptible = dev_priv->mm.interruptible;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+	ret = __i915_wait_request(req, interruptible, NULL, NULL);
 	if (ret)
 		return ret;
 
-	ret = __i915_wait_request(req,
-				  atomic_read(&dev_priv->gpu_error.reset_counter),
-				  interruptible, NULL, NULL);
-	if (ret)
-		return ret;
+	/* If the GPU hung, we want to keep the requests to find the guilty. */
+	if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+		__i915_gem_request_retire__upto(req);
 
-	__i915_gem_request_retire__upto(req);
 	return 0;
 }
 
@@ -1505,14 +1482,14 @@
 			if (ret)
 				return ret;
 
-			i = obj->last_write_req->ring->id;
+			i = obj->last_write_req->engine->id;
 			if (obj->last_read_req[i] == obj->last_write_req)
 				i915_gem_object_retire__read(obj, i);
 			else
 				i915_gem_object_retire__write(obj);
 		}
 	} else {
-		for (i = 0; i < I915_NUM_RINGS; i++) {
+		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			if (obj->last_read_req[i] == NULL)
 				continue;
 
@@ -1522,7 +1499,7 @@
 
 			i915_gem_object_retire__read(obj, i);
 		}
-		RQ_BUG_ON(obj->active);
+		GEM_BUG_ON(obj->active);
 	}
 
 	return 0;
@@ -1532,14 +1509,15 @@
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 			       struct drm_i915_gem_request *req)
 {
-	int ring = req->ring->id;
+	int ring = req->engine->id;
 
 	if (obj->last_read_req[ring] == req)
 		i915_gem_object_retire__read(obj, ring);
 	else if (obj->last_write_req == req)
 		i915_gem_object_retire__write(obj);
 
-	__i915_gem_request_retire__upto(req);
+	if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+		__i915_gem_request_retire__upto(req);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1552,8 +1530,7 @@
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_request *requests[I915_NUM_RINGS];
-	unsigned reset_counter;
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 	int ret, i, n = 0;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1562,12 +1539,6 @@
 	if (!obj->active)
 		return 0;
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
-	if (ret)
-		return ret;
-
-	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-
 	if (readonly) {
 		struct drm_i915_gem_request *req;
 
@@ -1577,7 +1548,7 @@
 
 		requests[n++] = i915_gem_request_reference(req);
 	} else {
-		for (i = 0; i < I915_NUM_RINGS; i++) {
+		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
 			req = obj->last_read_req[i];
@@ -1589,9 +1560,9 @@
 	}
 
 	mutex_unlock(&dev->struct_mutex);
+	ret = 0;
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], reset_counter, true,
-					  NULL, rps);
+		ret = __i915_wait_request(requests[i], true, NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
@@ -1640,7 +1611,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -1688,7 +1659,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -1735,7 +1706,7 @@
 	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
 		return -ENODEV;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 
@@ -1754,7 +1725,10 @@
 		struct mm_struct *mm = current->mm;
 		struct vm_area_struct *vma;
 
-		down_write(&mm->mmap_sem);
+		if (down_write_killable(&mm->mmap_sem)) {
+			drm_gem_object_unreference_unlocked(obj);
+			return -EINTR;
+		}
 		vma = find_vma(mm, addr);
 		if (vma)
 			vma->vm_page_prot =
@@ -1792,7 +1766,8 @@
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_ggtt_view view = i915_ggtt_view_normal;
 	pgoff_t page_offset;
 	unsigned long pfn;
@@ -1827,7 +1802,7 @@
 	}
 
 	/* Use a partial view if the object is bigger than the aperture. */
-	if (obj->base.size >= dev_priv->gtt.mappable_end &&
+	if (obj->base.size >= ggtt->mappable_end &&
 	    obj->tiling_mode == I915_TILING_NONE) {
 		static const unsigned int chunk_size = 256; // 1 MiB
 
@@ -1855,7 +1830,7 @@
 		goto unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = dev_priv->gtt.mappable_base +
+	pfn = ggtt->mappable_base +
 		i915_gem_obj_ggtt_offset_view(obj, &view);
 	pfn >>= PAGE_SHIFT;
 
@@ -1964,11 +1939,27 @@
 void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
+	/* Serialisation between user GTT access and our code depends upon
+	 * revoking the CPU's PTE whilst the mutex is held. The next user
+	 * pagefault then has to wait until we release the mutex.
+	 */
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	if (!obj->fault_mappable)
 		return;
 
 	drm_vma_node_unmap(&obj->base.vma_node,
 			   obj->base.dev->anon_inode->i_mapping);
+
+	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
+	 * memory transactions from userspace before we return. The TLB
+	 * flushing implied above by changing the PTE above *should* be
+	 * sufficient, an extra barrier here just provides us with a bit
+	 * of paranoid documentation about our requirement to serialise
+	 * memory writes before touching registers / GSM.
+	 */
+	wmb();
+
 	obj->fault_mappable = false;
 }
 
@@ -2033,9 +2024,6 @@
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (drm_vma_node_has_offset(&obj->base.vma_node))
-		return 0;
-
 	dev_priv->mm.shrinker_no_lock_stealing = true;
 
 	ret = drm_gem_create_mmap_offset(&obj->base);
@@ -2084,7 +2072,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -2180,11 +2168,10 @@
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (ret) {
+	if (WARN_ON(ret)) {
 		/* In the event of a disaster, abandon all caches and
 		 * hope for the best.
 		 */
-		WARN_ON(ret != -EIO);
 		i915_gem_clflush_object(obj, true);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
@@ -2232,6 +2219,14 @@
 	 * lists early. */
 	list_del(&obj->global_list);
 
+	if (obj->mapping) {
+		if (is_vmalloc_addr(obj->mapping))
+			vunmap(obj->mapping);
+		else
+			kunmap(kmap_to_page(obj->mapping));
+		obj->mapping = NULL;
+	}
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
@@ -2400,21 +2395,64 @@
 	return 0;
 }
 
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ERR_PTR(ret);
+
+	i915_gem_object_pin_pages(obj);
+
+	if (obj->mapping == NULL) {
+		struct page **pages;
+
+		pages = NULL;
+		if (obj->base.size == PAGE_SIZE)
+			obj->mapping = kmap(sg_page(obj->pages->sgl));
+		else
+			pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
+					       sizeof(*pages),
+					       GFP_TEMPORARY);
+		if (pages != NULL) {
+			struct sg_page_iter sg_iter;
+			int n;
+
+			n = 0;
+			for_each_sg_page(obj->pages->sgl, &sg_iter,
+					 obj->pages->nents, 0)
+				pages[n++] = sg_page_iter_page(&sg_iter);
+
+			obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
+			drm_free_large(pages);
+		}
+		if (obj->mapping == NULL) {
+			i915_gem_object_unpin_pages(obj);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	return obj->mapping;
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 
-	ring = i915_gem_request_get_ring(req);
+	engine = i915_gem_request_get_engine(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
 		drm_gem_object_reference(&obj->base);
-	obj->active |= intel_ring_flag(ring);
+	obj->active |= intel_engine_flag(engine);
 
-	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
+	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
@@ -2422,8 +2460,8 @@
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
-	RQ_BUG_ON(obj->last_write_req == NULL);
-	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+	GEM_BUG_ON(obj->last_write_req == NULL);
+	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
 
 	i915_gem_request_assign(&obj->last_write_req, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2434,13 +2472,13 @@
 {
 	struct i915_vma *vma;
 
-	RQ_BUG_ON(obj->last_read_req[ring] == NULL);
-	RQ_BUG_ON(!(obj->active & (1 << ring)));
+	GEM_BUG_ON(obj->last_read_req[ring] == NULL);
+	GEM_BUG_ON(!(obj->active & (1 << ring)));
 
-	list_del_init(&obj->ring_list[ring]);
+	list_del_init(&obj->engine_list[ring]);
 	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
-	if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
 		i915_gem_object_retire__write(obj);
 
 	obj->active &= ~(1 << ring);
@@ -2467,24 +2505,20 @@
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i, j;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_idle(ring);
+	for_each_engine(engine, dev_priv) {
+		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
 	}
 	i915_gem_retire_requests(dev);
 
 	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
-		intel_ring_init_seqno(ring, seqno);
-
-		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-			ring->semaphore.sync_seqno[j] = 0;
-	}
+	for_each_engine(engine, dev_priv)
+		intel_ring_init_seqno(engine, seqno);
 
 	return 0;
 }
@@ -2542,7 +2576,7 @@
 			struct drm_i915_gem_object *obj,
 			bool flush_caches)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
@@ -2551,8 +2585,8 @@
 	if (WARN_ON(request == NULL))
 		return;
 
-	ring = request->ring;
-	dev_priv = ring->dev->dev_private;
+	engine = request->engine;
+	dev_priv = request->i915;
 	ringbuf = request->ringbuf;
 
 	/*
@@ -2579,22 +2613,7 @@
 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
 
-	/* Record the position of the start of the request so that
-	 * should we detect the updated seqno part-way through the
-	 * GPU processing the request, we never over-estimate the
-	 * position of the head.
-	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
-
-	if (i915.enable_execlists)
-		ret = ring->emit_request(request);
-	else {
-		ret = ring->add_request(request);
-
-		request->tail = intel_ring_get_tail(ringbuf);
-	}
-	/* Not allowed to fail! */
-	WARN(ret, "emit|add_request failed: %d!\n", ret);
+	trace_i915_gem_request_add(request);
 
 	request->head = request_start;
 
@@ -2606,14 +2625,34 @@
 	 */
 	request->batch_obj = obj;
 
+	/* Seal the request and mark it as pending execution. Note that
+	 * we may inspect this state, without holding any locks, during
+	 * hangcheck. Hence we apply the barrier to ensure that we do not
+	 * see a more recent value in the hws than we are tracking.
+	 */
 	request->emitted_jiffies = jiffies;
-	request->previous_seqno = ring->last_submitted_seqno;
-	ring->last_submitted_seqno = request->seqno;
-	list_add_tail(&request->list, &ring->request_list);
+	request->previous_seqno = engine->last_submitted_seqno;
+	smp_store_mb(engine->last_submitted_seqno, request->seqno);
+	list_add_tail(&request->list, &engine->request_list);
 
-	trace_i915_gem_request_add(request);
+	/* Record the position of the start of the request so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the head.
+	 */
+	request->postfix = intel_ring_get_tail(ringbuf);
 
-	i915_queue_hangcheck(ring->dev);
+	if (i915.enable_execlists)
+		ret = engine->emit_request(request);
+	else {
+		ret = engine->add_request(request);
+
+		request->tail = intel_ring_get_tail(ringbuf);
+	}
+	/* Not allowed to fail! */
+	WARN(ret, "emit|add_request failed: %d!\n", ret);
+
+	i915_queue_hangcheck(engine->dev);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
@@ -2680,7 +2719,7 @@
 
 	if (ctx) {
 		if (i915.enable_execlists && ctx != req->i915->kernel_context)
-			intel_lr_context_unpin(ctx, req->ring);
+			intel_lr_context_unpin(ctx, req->engine);
 
 		i915_gem_context_unreference(ctx);
 	}
@@ -2689,11 +2728,12 @@
 }
 
 static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -2702,17 +2742,26 @@
 
 	*req_out = NULL;
 
+	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+	 * and restart.
+	 */
+	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
 	if (ret)
 		goto err;
 
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
-	req->ring = ring;
+	req->engine = engine;
+	req->reset_counter = reset_counter;
 	req->ctx  = ctx;
 	i915_gem_context_reference(req->ctx);
 
@@ -2742,7 +2791,8 @@
 		 * fully prepared. Thus it can be cleaned up using the proper
 		 * free code.
 		 */
-		i915_gem_request_cancel(req);
+		intel_ring_reserved_space_cancel(req->ringbuf);
+		i915_gem_request_unreference(req);
 		return ret;
 	}
 
@@ -2779,19 +2829,12 @@
 	return err ? ERR_PTR(err) : req;
 }
 
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-	intel_ring_reserved_space_cancel(req->ringbuf);
-
-	i915_gem_request_unreference(req);
-}
-
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, list) {
 		if (i915_gem_request_completed(request, false))
 			continue;
 
@@ -2801,38 +2844,38 @@
 	return NULL;
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-				       struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
+				       struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	bool ring_hung;
 
-	request = i915_gem_find_active_request(ring);
+	request = i915_gem_find_active_request(engine);
 
 	if (request == NULL)
 		return;
 
-	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-	list_for_each_entry_continue(request, &ring->request_list, list)
+	list_for_each_entry_continue(request, &engine->request_list, list)
 		i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *buffer;
 
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+				       engine_list[engine->id]);
 
-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}
 
 	/*
@@ -2842,14 +2885,16 @@
 	 */
 
 	if (i915.enable_execlists) {
-		spin_lock_irq(&ring->execlist_lock);
+		/* Ensure irq handler finishes or is cancelled. */
+		tasklet_kill(&engine->irq_tasklet);
 
+		spin_lock_bh(&engine->execlist_lock);
 		/* list_splice_tail_init checks for empty lists */
-		list_splice_tail_init(&ring->execlist_queue,
-				      &ring->execlist_retired_req_list);
+		list_splice_tail_init(&engine->execlist_queue,
+				      &engine->execlist_retired_req_list);
+		spin_unlock_bh(&engine->execlist_lock);
 
-		spin_unlock_irq(&ring->execlist_lock);
-		intel_execlists_retire_requests(ring);
+		intel_execlists_retire_requests(engine);
 	}
 
 	/*
@@ -2859,10 +2904,10 @@
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
@@ -2876,28 +2921,29 @@
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &ring->buffers, link) {
+	list_for_each_entry(buffer, &engine->buffers, link) {
 		buffer->last_retired_head = buffer->tail;
 		intel_ring_update_space(buffer);
 	}
+
+	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
 	/*
 	 * Before we free the objects from the requests, we need to inspect
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_status(dev_priv, ring);
+	for_each_engine(engine, dev_priv)
+		i915_gem_reset_engine_status(dev_priv, engine);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_cleanup(dev_priv, ring);
+	for_each_engine(engine, dev_priv)
+		i915_gem_reset_engine_cleanup(dev_priv, engine);
 
 	i915_gem_context_reset(dev);
 
@@ -2910,19 +2956,19 @@
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 
 	/* Retire requests first as we use it above for the early return.
 	 * If we retire requests last, we may use a later seqno and so clear
 	 * the requests lists without clearing the active list, leading to
 	 * confusion.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
@@ -2936,45 +2982,44 @@
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
 	 */
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&ring->active_list,
-				      struct drm_i915_gem_object,
-				      ring_list[ring->id]);
+		obj = list_first_entry(&engine->active_list,
+				       struct drm_i915_gem_object,
+				       engine_list[engine->id]);
 
-		if (!list_empty(&obj->last_read_req[ring->id]->list))
+		if (!list_empty(&obj->last_read_req[engine->id]->list))
 			break;
 
-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}
 
-	if (unlikely(ring->trace_irq_req &&
-		     i915_gem_request_completed(ring->trace_irq_req, true))) {
-		ring->irq_put(ring);
-		i915_gem_request_assign(&ring->trace_irq_req, NULL);
+	if (unlikely(engine->trace_irq_req &&
+		     i915_gem_request_completed(engine->trace_irq_req, true))) {
+		engine->irq_put(engine);
+		i915_gem_request_assign(&engine->trace_irq_req, NULL);
 	}
 
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 }
 
 bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool idle = true;
-	int i;
 
-	for_each_ring(ring, dev_priv, i) {
-		i915_gem_retire_requests_ring(ring);
-		idle &= list_empty(&ring->request_list);
+	for_each_engine(engine, dev_priv) {
+		i915_gem_retire_requests_ring(engine);
+		idle &= list_empty(&engine->request_list);
 		if (i915.enable_execlists) {
-			spin_lock_irq(&ring->execlist_lock);
-			idle &= list_empty(&ring->execlist_queue);
-			spin_unlock_irq(&ring->execlist_lock);
+			spin_lock_bh(&engine->execlist_lock);
+			idle &= list_empty(&engine->execlist_queue);
+			spin_unlock_bh(&engine->execlist_lock);
 
-			intel_execlists_retire_requests(ring);
+			intel_execlists_retire_requests(engine);
 		}
 	}
 
@@ -3011,25 +3056,21 @@
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), mm.idle_work.work);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, i)
-		if (!list_empty(&ring->request_list))
+	for_each_engine(engine, dev_priv)
+		if (!list_empty(&engine->request_list))
 			return;
 
 	/* we probably should sync with hangcheck here, using cancel_work_sync.
-	 * Also locking seems to be fubar here, ring->request_list is protected
+	 * Also locking seems to be fubar here, engine->request_list is protected
 	 * by dev->struct_mutex. */
 
 	intel_mark_idle(dev);
 
 	if (mutex_trylock(&dev->struct_mutex)) {
-		struct intel_engine_cs *ring;
-		int i;
-
-		for_each_ring(ring, dev_priv, i)
-			i915_gem_batch_pool_fini(&ring->batch_pool);
+		for_each_engine(engine, dev_priv)
+			i915_gem_batch_pool_fini(&engine->batch_pool);
 
 		mutex_unlock(&dev->struct_mutex);
 	}
@@ -3048,7 +3089,7 @@
 	if (!obj->active)
 		return 0;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
 		req = obj->last_read_req[i];
@@ -3093,11 +3134,9 @@
 int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_request *req[I915_NUM_RINGS];
-	unsigned reset_counter;
+	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
 	int i, n = 0;
 	int ret;
 
@@ -3108,7 +3147,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
 	if (&obj->base == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		return -ENOENT;
@@ -3131,9 +3170,8 @@
 	}
 
 	drm_gem_object_unreference(&obj->base);
-	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		if (obj->last_read_req[i] == NULL)
 			continue;
 
@@ -3144,7 +3182,7 @@
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], reset_counter, true,
+			ret = __i915_wait_request(req[i], true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
@@ -3166,7 +3204,7 @@
 	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_ring(from_req);
+	from = i915_gem_request_get_engine(from_req);
 	if (to == from)
 		return 0;
 
@@ -3176,7 +3214,6 @@
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
-					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
 					  NULL,
 					  &i915->rps.semaphores);
@@ -3260,7 +3297,7 @@
 		     struct drm_i915_gem_request **to_req)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *req[I915_NUM_RINGS];
+	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
 	int ret, i, n;
 
 	if (!obj->active)
@@ -3274,7 +3311,7 @@
 		if (obj->last_write_req)
 			req[n++] = obj->last_write_req;
 	} else {
-		for (i = 0; i < I915_NUM_RINGS; i++)
+		for (i = 0; i < I915_NUM_ENGINES; i++)
 			if (obj->last_read_req[i])
 				req[n++] = obj->last_read_req[i];
 	}
@@ -3297,9 +3334,6 @@
 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
 		return;
 
-	/* Wait for any direct GTT access to complete */
-	mb();
-
 	old_read_domains = obj->base.read_domains;
 	old_write_domain = obj->base.write_domain;
 
@@ -3391,28 +3425,25 @@
 int i915_gpu_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	/* Flush everything onto the inactive list. */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
-			req = i915_gem_request_alloc(ring, NULL);
+			req = i915_gem_request_alloc(engine, NULL);
 			if (IS_ERR(req))
 				return PTR_ERR(req);
 
 			ret = i915_switch_context(req);
-			if (ret) {
-				i915_gem_request_cancel(req);
-				return ret;
-			}
-
 			i915_add_request_no_flush(req);
+			if (ret)
+				return ret;
 		}
 
-		ret = intel_ring_idle(ring);
+		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
 	}
@@ -3466,7 +3497,8 @@
 			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	u32 fence_alignment, unfenced_alignment;
 	u32 search_flag, alloc_flag;
 	u64 start, end;
@@ -3513,7 +3545,7 @@
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	end = vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, dev_priv->gtt.mappable_end);
+		end = min_t(u64, end, ggtt->mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
@@ -3720,6 +3752,9 @@
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t old_write_domain, old_read_domains;
 	struct i915_vma *vma;
 	int ret;
@@ -3774,7 +3809,7 @@
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->vm_link,
-			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
+			       &ggtt->base.inactive_list);
 
 	return 0;
 }
@@ -3906,7 +3941,7 @@
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL)
 		return -ENOENT;
 
@@ -3949,7 +3984,7 @@
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
 			return -ENODEV;
 
 		level = I915_CACHE_LLC;
@@ -3967,7 +4002,7 @@
 	if (ret)
 		goto rpm_put;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -4128,16 +4163,15 @@
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 	struct drm_i915_gem_request *request, *target = NULL;
-	unsigned reset_counter;
 	int ret;
 
 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
-	if (ret)
-		return ret;
+	/* ABI: return -EIO if already wedged */
+	if (i915_terminally_wedged(&dev_priv->gpu_error))
+		return -EIO;
 
 	spin_lock(&file_priv->mm.lock);
 	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4153,7 +4187,6 @@
 
 		target = request;
 	}
-	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	if (target)
 		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
@@ -4161,7 +4194,7 @@
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_request(target, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4211,7 +4244,7 @@
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->gtt.mappable_end);
+		    to_i915(obj->base.dev)->ggtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
 }
@@ -4243,9 +4276,6 @@
 	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
 			  i915_gem_obj_to_vma(obj, vm);
 
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
@@ -4308,10 +4338,13 @@
 			 uint32_t alignment,
 			 uint64_t flags)
 {
-	if (WARN_ONCE(!view, "no view specified"))
-		return -EINVAL;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+	BUG_ON(!view);
+
+	return i915_gem_object_do_pin(obj, &ggtt->base, view,
 				      alignment, flags | PIN_GLOBAL);
 }
 
@@ -4321,7 +4354,6 @@
 {
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
-	BUG_ON(!vma);
 	WARN_ON(vma->pin_count == 0);
 	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
@@ -4340,7 +4372,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -4359,15 +4391,15 @@
 	if (obj->active) {
 		int i;
 
-		for (i = 0; i < I915_NUM_RINGS; i++) {
+		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
 			req = obj->last_read_req[i];
 			if (req)
-				args->busy |= 1 << (16 + req->ring->exec_id);
+				args->busy |= 1 << (16 + req->engine->exec_id);
 		}
 		if (obj->last_write_req)
-			args->busy |= obj->last_write_req->ring->exec_id;
+			args->busy |= obj->last_write_req->engine->exec_id;
 	}
 
 unref:
@@ -4405,7 +4437,7 @@
 	if (ret)
 		return ret;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
 	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
@@ -4447,8 +4479,8 @@
 	int i;
 
 	INIT_LIST_HEAD(&obj->global_list);
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		INIT_LIST_HEAD(&obj->ring_list[i]);
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		INIT_LIST_HEAD(&obj->engine_list[i]);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4623,14 +4655,15 @@
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 					   const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 
-	if (WARN_ONCE(!view, "no view specified"))
-		return ERR_PTR(-EINVAL);
+	BUG_ON(!view);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->vm == ggtt &&
+		if (vma->vm == &ggtt->base &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
@@ -4653,14 +4686,13 @@
 }
 
 static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.stop_ring(ring);
+	for_each_engine(engine, dev_priv)
+		dev_priv->gt.stop_engine(engine);
 }
 
 int
@@ -4676,7 +4708,7 @@
 
 	i915_gem_retire_requests(dev);
 
-	i915_gem_stop_ringbuffers(dev);
+	i915_gem_stop_engines(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4697,8 +4729,8 @@
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
@@ -4716,12 +4748,12 @@
 	 * at initialization time.
 	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+		intel_ring_emit(engine, remap_info[i]);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return ret;
 }
@@ -4778,7 +4810,7 @@
 	}
 }
 
-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -4814,13 +4846,13 @@
 	return 0;
 
 cleanup_vebox_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+	intel_cleanup_engine(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+	intel_cleanup_engine(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+	intel_cleanup_engine(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+	intel_cleanup_engine(&dev_priv->engine[RCS]);
 
 	return ret;
 }
@@ -4829,16 +4861,13 @@
 i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i, j;
-
-	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
-		return -EIO;
+	struct intel_engine_cs *engine;
+	int ret, j;
 
 	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	if (dev_priv->ellc_size)
+	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
 	if (IS_HASWELL(dev))
@@ -4876,12 +4905,14 @@
 	}
 
 	/* Need to do basic initialisation of all rings first: */
-	for_each_ring(ring, dev_priv, i) {
-		ret = ring->init_hw(ring);
+	for_each_engine(engine, dev_priv) {
+		ret = engine->init_hw(engine);
 		if (ret)
 			goto out;
 	}
 
+	intel_mocs_init_l3cc_table(dev);
+
 	/* We can't enable contexts until all firmware is loaded */
 	if (HAS_GUC_UCODE(dev)) {
 		ret = intel_guc_ucode_load(dev);
@@ -4901,38 +4932,39 @@
 		goto out;
 
 	/* Now it is safe to go back round and do everything else: */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_request_alloc(ring, NULL);
+		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			i915_gem_cleanup_ringbuffer(dev);
-			goto out;
+			break;
 		}
 
-		if (ring->id == RCS) {
-			for (j = 0; j < NUM_L3_SLICES(dev); j++)
-				i915_gem_l3_remap(req, j);
+		if (engine->id == RCS) {
+			for (j = 0; j < NUM_L3_SLICES(dev); j++) {
+				ret = i915_gem_l3_remap(req, j);
+				if (ret)
+					goto err_request;
+			}
 		}
 
 		ret = i915_ppgtt_init_ring(req);
-		if (ret && ret != -EIO) {
-			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
-			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
-			goto out;
-		}
+		if (ret)
+			goto err_request;
 
 		ret = i915_gem_context_enable(req);
-		if (ret && ret != -EIO) {
-			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
-			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
-			goto out;
-		}
+		if (ret)
+			goto err_request;
 
+err_request:
 		i915_add_request_no_flush(req);
+		if (ret) {
+			DRM_ERROR("Failed to enable %s, error=%d\n",
+				  engine->name, ret);
+			i915_gem_cleanup_engines(dev);
+			break;
+		}
 	}
 
 out:
@@ -4952,14 +4984,14 @@
 
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-		dev_priv->gt.init_rings = i915_gem_init_rings;
-		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+		dev_priv->gt.init_engines = i915_gem_init_engines;
+		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+		dev_priv->gt.stop_engine = intel_stop_engine;
 	} else {
 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
-		dev_priv->gt.init_rings = intel_logical_rings_init;
-		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
-		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+		dev_priv->gt.init_engines = intel_logical_rings_init;
+		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_engine = intel_logical_ring_stop;
 	}
 
 	/* This is just a security blanket to placate dragons.
@@ -4974,13 +5006,13 @@
 	if (ret)
 		goto out_unlock;
 
-	i915_gem_init_global_gtt(dev);
+	i915_gem_init_ggtt(dev);
 
 	ret = i915_gem_context_init(dev);
 	if (ret)
 		goto out_unlock;
 
-	ret = dev_priv->gt.init_rings(dev);
+	ret = dev_priv->gt.init_engines(dev);
 	if (ret)
 		goto out_unlock;
 
@@ -5003,29 +5035,52 @@
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.cleanup_ring(ring);
+	for_each_engine(engine, dev_priv)
+		dev_priv->gt.cleanup_engine(engine);
 
-    if (i915.enable_execlists)
-            /*
-             * Neither the BIOS, ourselves or any other kernel
-             * expects the system to be in execlists mode on startup,
-             * so we need to reset the GPU back to legacy mode.
-             */
-            intel_gpu_reset(dev);
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves or any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev, ALL_ENGINES);
 }
 
 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_engine_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
+}
+
+void
+i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+	    !IS_CHERRYVIEW(dev_priv))
+		dev_priv->num_fence_regs = 32;
+	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
+		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
+	if (intel_vgpu_active(dev))
+		dev_priv->num_fence_regs =
+				I915_READ(vgtif_reg(avail_rs.fence_num));
+
+	/* Initialize fence registers to zero */
+	i915_gem_restore_fences(dev);
+
+	i915_gem_detect_bit_6_swizzle(dev);
 }
 
 void
@@ -5055,8 +5110,8 @@
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		init_ring_lists(&dev_priv->ring[i]);
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		init_engine_lists(&dev_priv->engine[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5067,17 +5122,6 @@
 
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
-		dev_priv->num_fence_regs = 32;
-	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-		dev_priv->num_fence_regs = 16;
-	else
-		dev_priv->num_fence_regs = 8;
-
-	if (intel_vgpu_active(dev))
-		dev_priv->num_fence_regs =
-				I915_READ(vgtif_reg(avail_rs.fence_num));
-
 	/*
 	 * Set initial sequence number for requests.
 	 * Using this number allows the wraparound to happen early,
@@ -5086,11 +5130,8 @@
 	dev_priv->next_seqno = ((u32)~0 - 0x1100);
 	dev_priv->last_seqno = ((u32)~0 - 0x1101);
 
-	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	i915_gem_restore_fences(dev);
 
-	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
 
 	dev_priv->mm.interruptible = true;
@@ -5213,11 +5254,12 @@
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == ggtt &&
+		if (vma->vm == &ggtt->base &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
 
@@ -5244,11 +5286,12 @@
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == ggtt &&
+		if (vma->vm == &ggtt->base &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
new file mode 100644
index 0000000..8292e79
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_H__
+#define __I915_GEM_H__
+
+#ifdef CONFIG_DRM_I915_DEBUG_GEM
+#define GEM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define GEM_BUG_ON(expr)
+#endif
+
+#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5dd84e1..e5acc39 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -342,15 +342,15 @@
 		struct intel_context *ctx;
 
 		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev, ctx);
+			intel_lr_context_reset(dev_priv, ctx);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
-		if (ring->last_context) {
-			i915_gem_context_unpin(ring->last_context, ring);
-			ring->last_context = NULL;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
 		}
 	}
 
@@ -413,7 +413,7 @@
 		/* The only known way to stop the gpu from accessing the hw context is
 		 * to reset it. Do this as the very last operation to avoid confusing
 		 * other code, leading to spurious errors. */
-		intel_gpu_reset(dev);
+		intel_gpu_reset(dev, ALL_ENGINES);
 
 		/* When default context is created and switched to, base object refcount
 		 * will be 2 (+1 from object creation and +1 from do_switch()).
@@ -421,17 +421,17 @@
 		 * to default context. So we need to unreference the base object once
 		 * to offset the do_switch part, so that i915_gem_context_unreference()
 		 * can then free the base object correctly. */
-		WARN_ON(!dev_priv->ring[RCS].last_context);
+		WARN_ON(!dev_priv->engine[RCS].last_context);
 
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
-	for (i = I915_NUM_RINGS; --i >= 0;) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = I915_NUM_ENGINES; --i >= 0;) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 
-		if (ring->last_context) {
-			i915_gem_context_unpin(ring->last_context, ring);
-			ring->last_context = NULL;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
 		}
 	}
 
@@ -441,14 +441,14 @@
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	if (i915.enable_execlists) {
-		if (ring->init_context == NULL)
+		if (engine->init_context == NULL)
 			return 0;
 
-		ret = ring->init_context(req);
+		ret = engine->init_context(req);
 	} else
 		ret = i915_switch_context(req);
 
@@ -510,133 +510,147 @@
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(ring->dev) ?
-		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(engine->dev) ?
+		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
 		0;
-	int len, i, ret;
+	int len, ret;
 
 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+	if (IS_GEN6(engine->dev)) {
+		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(ring->dev)->gen < 8)
+	else if (INTEL_INFO(engine->dev)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(ring->dev)->gen >= 7)
-		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+	if (INTEL_INFO(engine->dev)->gen >= 7)
+		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
 	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+	if (INTEL_INFO(engine->dev)->gen >= 7) {
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
-				if (signaller == ring)
+			intel_ring_emit(engine,
+					MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_engine(signaller, to_i915(engine->dev)) {
+				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				intel_ring_emit_reg(engine,
+						    RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(engine,
+						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_SET_CONTEXT);
+	intel_ring_emit(engine,
+			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(engine->dev)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
+			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
-				if (signaller == ring)
+			intel_ring_emit(engine,
+					MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_engine(signaller, to_i915(engine->dev)) {
+				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				last_reg = RING_PSMI_CTL(signaller->mmio_base);
+				intel_ring_emit_reg(engine, last_reg);
+				intel_ring_emit(engine,
+						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
+
+			/* Insert a delay before the next switch! */
+			intel_ring_emit(engine,
+					MI_STORE_REGISTER_MEM |
+					MI_SRM_LRM_GLOBAL_GTT);
+			intel_ring_emit_reg(engine, last_reg);
+			intel_ring_emit(engine, engine->scratch.gtt_offset);
+			intel_ring_emit(engine, MI_NOOP);
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
-				      struct intel_context *from,
-				      struct intel_context *to)
+static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+				   struct intel_context *to)
 {
 	if (to->remap_slice)
 		return false;
 
-	if (to->ppgtt && from == to &&
-	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+	if (!to->legacy_hw_ctx.initialized)
+		return false;
+
+	if (to->ppgtt &&
+	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+		return false;
+
+	return to == engine->last_context;
+}
+
+static bool
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+{
+	if (!to->ppgtt)
+		return false;
+
+	if (engine->last_context == to &&
+	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+		return false;
+
+	if (engine->id != RCS)
+		return true;
+
+	if (INTEL_INFO(engine->dev)->gen < 8)
 		return true;
 
 	return false;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_post(struct intel_context *to, u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 	if (!to->ppgtt)
 		return false;
 
-	if (INTEL_INFO(ring->dev)->gen < 8)
-		return true;
-
-	if (ring != &dev_priv->ring[RCS])
-		return true;
-
-	return false;
-}
-
-static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
-		u32 hw_flags)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-	if (!to->ppgtt)
-		return false;
-
-	if (!IS_GEN8(ring->dev))
-		return false;
-
-	if (ring != &dev_priv->ring[RCS])
+	if (!IS_GEN8(to->i915))
 		return false;
 
 	if (hw_flags & MI_RESTORE_INHIBIT)
@@ -645,58 +659,32 @@
 	return false;
 }
 
-static int do_switch(struct drm_i915_gem_request *req)
+static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct intel_context *from = ring->last_context;
-	u32 hw_flags = 0;
-	bool uninitialized = false;
+	struct intel_engine_cs *engine = req->engine;
+	struct intel_context *from;
+	u32 hw_flags;
 	int ret, i;
 
-	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
-	}
-
-	if (should_skip_switch(ring, from, to))
+	if (skip_rcs_switch(engine, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(ring->dev), 0);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+				    get_context_alignment(engine->dev),
+				    0);
+	if (ret)
+		return ret;
 
 	/*
 	 * Pin can switch back to the default context if we end up calling into
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
+	 *
+	 * XXX: Doing so is painfully broken!
 	 */
-	from = ring->last_context;
-
-	if (needs_pd_load_pre(ring, to)) {
-		/* Older GENs and non render rings still want the load first,
-		 * "PP_DCLV followed by PP_DIR_BASE register through Load
-		 * Register Immediate commands in Ring Buffer before submitting
-		 * a context."*/
-		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
-		if (ret)
-			goto unpin_out;
-
-		/* Doing a PD load always reloads the page dirs */
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
-	}
-
-	if (ring != &dev_priv->ring[RCS]) {
-		if (from)
-			i915_gem_context_unreference(from);
-		goto done;
-	}
+	from = engine->last_context;
 
 	/*
 	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
@@ -710,53 +698,37 @@
 	if (ret)
 		goto unpin_out;
 
-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
-		hw_flags |= MI_RESTORE_INHIBIT;
+	if (needs_pd_load_pre(engine, to)) {
+		/* Older GENs and non render rings still want the load first,
+		 * "PP_DCLV followed by PP_DIR_BASE register through Load
+		 * Register Immediate commands in Ring Buffer before submitting
+		 * a context."*/
+		trace_switch_mm(engine, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		if (ret)
+			goto unpin_out;
+	}
+
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
-	} else if (to->ppgtt &&
-		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
-		hw_flags |= MI_FORCE_RESTORE;
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
-	}
+		hw_flags = MI_RESTORE_INHIBIT;
+	else if (to->ppgtt &&
+		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+		hw_flags = MI_FORCE_RESTORE;
+	else
+		hw_flags = 0;
 
 	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(ring, to) &&
-		needs_pd_load_post(ring, to, hw_flags));
+	WARN_ON(needs_pd_load_pre(engine, to) &&
+		needs_pd_load_post(to, hw_flags));
 
-	ret = mi_set_context(req, hw_flags);
-	if (ret)
-		goto unpin_out;
-
-	/* GEN8 does *not* require an explicit reload if the PDPs have been
-	 * setup, and we do not wish to move them.
-	 */
-	if (needs_pd_load_post(ring, to, hw_flags)) {
-		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
-		/* The hardware context switch is emitted, but we haven't
-		 * actually changed the state - so it's probably safe to bail
-		 * here. Still, let the user know something dangerous has
-		 * happened.
-		 */
-		if (ret) {
-			DRM_ERROR("Failed to change address space on context switch\n");
-			goto unpin_out;
-		}
-	}
-
-	for (i = 0; i < MAX_L3_SLICES; i++) {
-		if (!(to->remap_slice & (1<<i)))
-			continue;
-
-		ret = i915_gem_l3_remap(req, i);
-		/* If it failed, try again next round */
+	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+		ret = mi_set_context(req, hw_flags);
 		if (ret)
-			DRM_DEBUG_DRIVER("L3 remapping failed\n");
-		else
-			to->remap_slice &= ~(1<<i);
+			goto unpin_out;
 	}
 
 	/* The backing object for the context is done after switching to the
@@ -781,27 +753,51 @@
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
-
-	uninitialized = !to->legacy_hw_ctx.initialized;
-	to->legacy_hw_ctx.initialized = true;
-
-done:
 	i915_gem_context_reference(to);
-	ring->last_context = to;
+	engine->last_context = to;
 
-	if (uninitialized) {
-		if (ring->init_context) {
-			ret = ring->init_context(req);
+	/* GEN8 does *not* require an explicit reload if the PDPs have been
+	 * setup, and we do not wish to move them.
+	 */
+	if (needs_pd_load_post(to, hw_flags)) {
+		trace_switch_mm(engine, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		/* The hardware context switch is emitted, but we haven't
+		 * actually changed the state - so it's probably safe to bail
+		 * here. Still, let the user know something dangerous has
+		 * happened.
+		 */
+		if (ret)
+			return ret;
+	}
+
+	if (to->ppgtt)
+		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(req, i);
+		if (ret)
+			return ret;
+
+		to->remap_slice &= ~(1<<i);
+	}
+
+	if (!to->legacy_hw_ctx.initialized) {
+		if (engine->init_context) {
+			ret = engine->init_context(req);
 			if (ret)
-				DRM_ERROR("ring init context: %d\n", ret);
+				return ret;
 		}
+		to->legacy_hw_ctx.initialized = true;
 	}
 
 	return 0;
 
 unpin_out:
-	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
 
@@ -820,23 +816,39 @@
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_i915_private *dev_priv = req->i915;
 
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (req->ctx != ring->last_context) {
-			i915_gem_context_reference(req->ctx);
-			if (ring->last_context)
-				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = req->ctx;
+	if (engine->id != RCS ||
+	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+		struct intel_context *to = req->ctx;
+
+		if (needs_pd_load_pre(engine, to)) {
+			int ret;
+
+			trace_switch_mm(engine, to);
+			ret = to->ppgtt->switch_mm(to->ppgtt, req);
+			if (ret)
+				return ret;
+
+			/* Doing a PD load always reloads the page dirs */
+			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 		}
+
+		if (to != engine->last_context) {
+			i915_gem_context_reference(to);
+			if (engine->last_context)
+				i915_gem_context_unreference(engine->last_context);
+			engine->last_context = to;
+		}
+
 		return 0;
 	}
 
-	return do_switch(req);
+	return do_rcs_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -937,7 +949,7 @@
 		else if (to_i915(dev)->mm.aliasing_ppgtt)
 			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
 		else
-			args->value = to_i915(dev)->gtt.base.total;
+			args->value = to_i915(dev)->ggtt.base.total;
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 17299d0..a565164 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -36,29 +36,29 @@
 	static int warned;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int err = 0;
-	int i;
 
 	if (warned)
 		return 0;
 
-	for_each_ring(ring, dev_priv, i) {
-		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+	for_each_engine(engine, dev_priv) {
+		list_for_each_entry(obj, &engine->active_list,
+				    engine_list[engine->id]) {
 			if (obj->base.dev != dev ||
 			    !atomic_read(&obj->base.refcount.refcount)) {
 				DRM_ERROR("%s: freed active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 				break;
 			} else if (!obj->active ||
-				   obj->last_read_req[ring->id] == NULL) {
+				   obj->last_read_req[engine->id] == NULL) {
 				DRM_ERROR("%s: invalid active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 			} else if (obj->base.write_domain) {
 				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-					  ring->name,
+					  engine->name,
 					  obj, obj->base.write_domain);
 				err++;
 			}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 0506016..80bbe43 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -95,14 +95,12 @@
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-	mutex_lock(&obj->base.dev->struct_mutex);
-
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	sg_free_table(sg);
 	kfree(sg);
 
+	mutex_lock(&obj->base.dev->struct_mutex);
 	i915_gem_object_unpin_pages(obj);
-
 	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
@@ -110,51 +108,17 @@
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
-	struct sg_page_iter sg_iter;
-	struct page **pages;
-	int ret, i;
+	void *addr;
+	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (obj->dma_buf_vmapping) {
-		obj->vmapping_count++;
-		goto out_unlock;
-	}
-
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		goto err;
-
-	i915_gem_object_pin_pages(obj);
-
-	ret = -ENOMEM;
-
-	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-	if (pages == NULL)
-		goto err_unpin;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-		pages[i++] = sg_page_iter_page(&sg_iter);
-
-	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
-	drm_free_large(pages);
-
-	if (!obj->dma_buf_vmapping)
-		goto err_unpin;
-
-	obj->vmapping_count = 1;
-out_unlock:
+	addr = i915_gem_object_pin_map(obj);
 	mutex_unlock(&dev->struct_mutex);
-	return obj->dma_buf_vmapping;
 
-err_unpin:
-	i915_gem_object_unpin_pages(obj);
-err:
-	mutex_unlock(&dev->struct_mutex);
-	return ERR_PTR(ret);
+	return addr;
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -163,12 +127,7 @@
 	struct drm_device *dev = obj->base.dev;
 
 	mutex_lock(&dev->struct_mutex);
-	if (--obj->vmapping_count == 0) {
-		vunmap(obj->dma_buf_vmapping);
-		obj->dma_buf_vmapping = NULL;
-
-		i915_gem_object_unpin_pages(obj);
-	}
+	i915_gem_object_unpin_map(obj);
 	mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4d7c88..33df74d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -313,7 +313,8 @@
 		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
@@ -330,7 +331,7 @@
 	/* Map the page containing the relocation we're going to perform.  */
 	offset = i915_gem_obj_ggtt_offset(obj);
 	offset += reloc->offset;
-	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
@@ -340,7 +341,7 @@
 		if (offset_in_page(offset) == 0) {
 			io_mapping_unmap_atomic(reloc_page);
 			reloc_page =
-				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+				io_mapping_map_atomic_wc(ggtt->mappable,
 							 offset);
 		}
 
@@ -597,7 +598,7 @@
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *ring,
+				struct intel_engine_cs *engine,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -711,7 +712,7 @@
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
 			    struct intel_context *ctx,
 			    bool *need_relocs)
@@ -721,10 +722,10 @@
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests_ring(engine);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
@@ -786,7 +787,9 @@
 			if (eb_vma_misplaced(vma))
 				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma,
+								      engine,
+								      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -796,7 +799,8 @@
 			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+							      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -819,7 +823,7 @@
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
 				  struct intel_context *ctx)
@@ -908,7 +912,8 @@
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
 
@@ -936,7 +941,7 @@
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -946,7 +951,7 @@
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -958,7 +963,7 @@
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->ring->dev);
+		i915_gem_chipset_flush(req->engine->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1060,12 +1065,12 @@
 
 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *ring, const u32 ctx_id)
+			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1078,8 +1083,8 @@
 		return ERR_PTR(-EIO);
 	}
 
-	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, ring);
+	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+		int ret = intel_lr_context_deferred_alloc(ctx, engine);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
@@ -1093,7 +1098,7 @@
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1120,7 +1125,7 @@
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = to_i915(engine->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
@@ -1130,11 +1135,11 @@
 	}
 }
 
-void
+static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	params->ring->gpu_caches_dirty = true;
+	params->engine->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
@@ -1144,11 +1149,11 @@
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
 			    struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1158,18 +1163,18 @@
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(engine, 0);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
 
 static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
@@ -1181,12 +1186,12 @@
 	struct i915_vma *vma;
 	int ret;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;
 
-	ret = i915_parse_cmds(ring,
+	ret = i915_parse_cmds(engine,
 			      batch_obj,
 			      shadow_batch_obj,
 			      batch_start_offset,
@@ -1227,7 +1232,7 @@
 			       struct list_head *vmas)
 {
 	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *ring = params->ring;
+	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 exec_start, exec_len;
 	int instp_mode;
@@ -1242,8 +1247,8 @@
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-	     "%s didn't clear reload\n", ring->name);
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+	     "%s didn't clear reload\n", engine->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1251,7 +1256,7 @@
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -1278,17 +1283,17 @@
 		return -EINVAL;
 	}
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, INSTPM);
+		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+		intel_ring_advance(engine);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1306,7 +1311,7 @@
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = ring->dispatch_execbuffer(params->request,
+	ret = engine->dispatch_execbuffer(params->request,
 					exec_start, exec_len,
 					params->dispatch_flags);
 	if (ret)
@@ -1315,7 +1320,6 @@
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
-	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -1363,7 +1367,7 @@
 
 #define I915_USER_RINGS (4)
 
-static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
 	[I915_EXEC_DEFAULT]	= RCS,
 	[I915_EXEC_RENDER]	= RCS,
 	[I915_EXEC_BLT]		= BCS,
@@ -1406,12 +1410,12 @@
 			return -EINVAL;
 		}
 
-		*ring = &dev_priv->ring[_VCS(bsd_idx)];
+		*ring = &dev_priv->engine[_VCS(bsd_idx)];
 	} else {
-		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
 	}
 
-	if (!intel_ring_initialized(*ring)) {
+	if (!intel_engine_initialized(*ring)) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
 		return -EINVAL;
 	}
@@ -1425,12 +1429,13 @@
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
@@ -1457,7 +1462,7 @@
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	ret = eb_select_ring(dev_priv, file, args, &ring);
+	ret = eb_select_ring(dev_priv, file, args, &engine);
 	if (ret)
 		return ret;
 
@@ -1471,9 +1476,9 @@
 			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
 			return -EINVAL;
 		}
-		if (ring->id != RCS) {
+		if (engine->id != RCS) {
 			DRM_DEBUG("RS is not available on %s\n",
-				 ring->name);
+				 engine->name);
 			return -EINVAL;
 		}
 
@@ -1486,7 +1491,7 @@
 	if (ret)
 		goto pre_mutex_err;
 
-	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = PTR_ERR(ctx);
@@ -1498,7 +1503,7 @@
 	if (ctx->ppgtt)
 		vm = &ctx->ppgtt->base;
 	else
-		vm = &dev_priv->gtt.base;
+		vm = &ggtt->base;
 
 	memset(&params_master, 0x00, sizeof(params_master));
 
@@ -1520,7 +1525,8 @@
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1529,7 +1535,8 @@
 		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
+								engine,
 								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
@@ -1545,16 +1552,16 @@
 	}
 
 	params->args_batch_start_offset = args->batch_start_offset;
-	if (i915_needs_cmd_parser(ring) && args->batch_len) {
+	if (i915_needs_cmd_parser(engine) && args->batch_len) {
 		struct drm_i915_gem_object *parsed_batch_obj;
 
-		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
-						      &shadow_exec_entry,
-						      eb,
-						      batch_obj,
-						      args->batch_start_offset,
-						      args->batch_len,
-						      file->is_master);
+		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
+							     &shadow_exec_entry,
+							     eb,
+							     batch_obj,
+							     args->batch_start_offset,
+							     args->batch_len,
+							     file->is_master);
 		if (IS_ERR(parsed_batch_obj)) {
 			ret = PTR_ERR(parsed_batch_obj);
 			goto err;
@@ -1606,7 +1613,7 @@
 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	req = i915_gem_request_alloc(ring, ctx);
+	req = i915_gem_request_alloc(engine, ctx);
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto err_batch_unpin;
@@ -1614,7 +1621,7 @@
 
 	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
-		goto err_batch_unpin;
+		goto err_request;
 
 	/*
 	 * Save assorted stuff away to pass through to *_submission().
@@ -1624,13 +1631,15 @@
 	 */
 	params->dev                     = dev;
 	params->file                    = file;
-	params->ring                    = ring;
+	params->engine                  = engine;
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
 	params->request                 = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+	i915_gem_execbuffer_retire_commands(params);
 
 err_batch_unpin:
 	/*
@@ -1647,14 +1656,6 @@
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
-	/*
-	 * If the request was created but not successfully submitted then it
-	 * must be freed again. If it was submitted then it is being tracked
-	 * on the active request list and no clean up is required here.
-	 */
-	if (ret && !IS_ERR_OR_NULL(req))
-		i915_gem_request_cancel(req);
-
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
@@ -1773,11 +1774,9 @@
 		return -EINVAL;
 	}
 
-	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
-			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	if (exec2_list == NULL)
-		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
-					   args->buffer_count);
+	exec2_list = drm_malloc_gfp(args->buffer_count,
+				    sizeof(*exec2_list),
+				    GFP_TEMPORARY);
 	if (exec2_list == NULL) {
 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
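
Note on the last hunk above: the open-coded kmalloc with a drm_malloc_ab fallback is replaced by a single drm_malloc_gfp() call. For readers unfamiliar with that helper, the sketch below shows the general kmalloc-then-vmalloc array-allocation pattern it stands in for. This is a simplified illustration under assumed gfp and overflow-check details, not the verbatim drm_mem_util.h implementation.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Simplified sketch of a kmalloc-then-vmalloc array allocator in the
 * spirit of drm_malloc_gfp(); illustrative only, not the kernel's code. */
static void *example_malloc_array(size_t nmemb, size_t size, gfp_t gfp)
{
	void *ptr;

	/* Reject multiplication overflow up front. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	/* Try a cheap contiguous allocation first, but give up quickly
	 * rather than triggering heavy reclaim or the OOM killer. */
	ptr = kmalloc(nmemb * size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (ptr)
		return ptr;

	/* Fall back to vmalloc for large or fragmented requests. */
	return __vmalloc(nmemb * size, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
}
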
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 49e4f26..92acdff 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -658,7 +658,7 @@
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -667,13 +667,13 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
+	intel_ring_emit(engine, upper_32_bits(addr));
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
+	intel_ring_emit(engine, lower_32_bits(addr));
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -706,8 +706,7 @@
 				       uint64_t length,
 				       gen8_pte_t scratch_pte)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	gen8_pte_t *pt_vaddr;
 	unsigned pdpe = gen8_pdpe_index(start);
 	unsigned pde = gen8_pde_index(start);
@@ -746,7 +745,7 @@
 			num_entries--;
 		}
 
-		kunmap_px(ppgtt, pt);
+		kunmap_px(ppgtt, pt_vaddr);
 
 		pte = 0;
 		if (++pde == I915_PDES) {
@@ -762,8 +761,7 @@
 				   uint64_t length,
 				   bool use_scratch)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 						 I915_CACHE_LLC, use_scratch);
 
@@ -788,8 +786,7 @@
 			      uint64_t start,
 			      enum i915_cache_level cache_level)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	gen8_pte_t *pt_vaddr;
 	unsigned pdpe = gen8_pdpe_index(start);
 	unsigned pde = gen8_pde_index(start);
@@ -829,8 +826,7 @@
 				      enum i915_cache_level cache_level,
 				      u32 unused)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct sg_page_iter sg_iter;
 
 	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
@@ -909,11 +905,10 @@
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
 	enum vgt_g2v_type msg;
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 	int i;
 
-	if (USES_FULL_48BIT_PPGTT(dev)) {
+	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
 		u64 daddr = px_dma(&ppgtt->pml4);
 
 		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -981,8 +976,7 @@
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
 	if (intel_vgpu_active(vm->dev))
 		gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -1216,8 +1210,7 @@
 				    uint64_t start,
 				    uint64_t length)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	unsigned long *new_page_dirs, *new_page_tables;
 	struct drm_device *dev = vm->dev;
 	struct i915_page_directory *pd;
@@ -1329,8 +1322,7 @@
 				    uint64_t length)
 {
 	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
-	struct i915_hw_ppgtt *ppgtt =
-			container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct i915_page_directory_pointer *pdp;
 	uint64_t pml4e;
 	int ret = 0;
@@ -1376,8 +1368,7 @@
 static int gen8_alloc_va_range(struct i915_address_space *vm,
 			       uint64_t start, uint64_t length)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
 	if (USES_FULL_48BIT_PPGTT(vm->dev))
 		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
@@ -1629,6 +1620,7 @@
 				  struct i915_page_directory *pd,
 				  uint32_t start, uint32_t length)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_page_table *pt;
 	uint32_t pde, temp;
 
@@ -1637,7 +1629,7 @@
 
 	/* Make sure write is complete before other code can use this page
 	 * table. Also required for WC mapped PTEs */
-	readl(dev_priv->gtt.gsm);
+	readl(ggtt->gsm);
 }
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1650,11 +1642,11 @@
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1662,13 +1654,13 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(engine, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(engine, get_pd_offset(ppgtt));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1676,22 +1668,22 @@
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
 	return 0;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1699,17 +1691,17 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(engine, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(engine, get_pd_offset(ppgtt));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
-	if (ring->id != RCS) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (engine->id != RCS) {
+		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -1720,15 +1712,15 @@
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
-	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
 
-	POSTING_READ(RING_PP_DIR_DCLV(ring));
+	POSTING_READ(RING_PP_DIR_DCLV(engine));
 
 	return 0;
 }
@@ -1736,12 +1728,11 @@
 static void gen8_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int j;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, j) {
+	for_each_engine(engine, dev_priv) {
 		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
-		I915_WRITE(RING_MODE_GEN7(ring),
+		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
 	}
 }
@@ -1749,9 +1740,8 @@
 static void gen7_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t ecochk, ecobits;
-	int i;
 
 	ecobits = I915_READ(GAC_ECO_BITS);
 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
@@ -1765,9 +1755,9 @@
 	}
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		/* GFX_MODE is per-ring on gen7+ */
-		I915_WRITE(RING_MODE_GEN7(ring),
+		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	}
 }
@@ -1796,8 +1786,7 @@
 				   uint64_t length,
 				   bool use_scratch)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	gen6_pte_t *pt_vaddr, scratch_pte;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
@@ -1831,8 +1820,7 @@
 				      uint64_t start,
 				      enum i915_cache_level cache_level, u32 flags)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	gen6_pte_t *pt_vaddr;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned act_pt = first_entry / GEN6_PTES;
@@ -1864,9 +1852,9 @@
 {
 	DECLARE_BITMAP(new_page_tables, I915_PDES);
 	struct drm_device *dev = vm->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_hw_ppgtt *ppgtt =
-				container_of(vm, struct i915_hw_ppgtt, base);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct i915_page_table *pt;
 	uint32_t start, length, start_save, length_save;
 	uint32_t pde, temp;
@@ -1932,7 +1920,7 @@
 
 	/* Make sure write is complete before other code can use this page
 	 * table. Also required for WC mapped PTEs */
-	readl(dev_priv->gtt.gsm);
+	readl(ggtt->gsm);
 
 	mark_tlbs_dirty(ppgtt);
 	return 0;
@@ -1978,8 +1966,7 @@
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct i915_page_table *pt;
 	uint32_t pde;
 
@@ -1997,7 +1984,8 @@
 {
 	struct i915_address_space *vm = &ppgtt->base;
 	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool retried = false;
 	int ret;
 
@@ -2005,23 +1993,23 @@
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
 
 	ret = gen6_init_scratch(vm);
 	if (ret)
 		return ret;
 
 alloc:
-	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
 						  &ppgtt->node, GEN6_PD_SIZE,
 						  GEN6_PD_ALIGN, 0,
-						  0, dev_priv->gtt.base.total,
+						  0, ggtt->base.total,
 						  DRM_MM_TOPDOWN);
 	if (ret == -ENOSPC && !retried) {
-		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+		ret = i915_gem_evict_something(dev, &ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
 					       I915_CACHE_NONE,
-					       0, dev_priv->gtt.base.total,
+					       0, ggtt->base.total,
 					       0);
 		if (ret)
 			goto err_out;
@@ -2034,7 +2022,7 @@
 		goto err_out;
 
 
-	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+	if (ppgtt->node.start < ggtt->mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");
 
 	return 0;
@@ -2062,10 +2050,11 @@
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
-	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+	ppgtt->base.pte_encode = ggtt->base.pte_encode;
 	if (IS_GEN6(dev)) {
 		ppgtt->switch_mm = gen6_mm_switch;
 	} else if (IS_HASWELL(dev)) {
@@ -2095,7 +2084,7 @@
 	ppgtt->pd.base.ggtt_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
-	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
 		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2192,7 +2181,7 @@
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 	if (i915.enable_execlists)
@@ -2263,9 +2252,10 @@
 
 static bool do_idling(struct drm_i915_private *dev_priv)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool ret = dev_priv->mm.interruptible;
 
-	if (unlikely(dev_priv->gtt.do_idle_maps)) {
+	if (unlikely(ggtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
 		if (i915_gpu_idle(dev_priv->dev)) {
 			DRM_ERROR("Couldn't idle GPU\n");
@@ -2279,22 +2269,23 @@
 
 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 {
-	if (unlikely(dev_priv->gtt.do_idle_maps))
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+	if (unlikely(ggtt->do_idle_maps))
 		dev_priv->mm.interruptible = interruptible;
 }
 
 void i915_check_and_clear_faults(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		u32 fault_reg;
-		fault_reg = I915_READ(RING_FAULT_REG(ring));
+		fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (fault_reg & RING_FAULT_VALID) {
 			DRM_DEBUG_DRIVER("Unexpected fault\n"
 					 "\tAddr: 0x%08lx\n"
@@ -2305,16 +2296,16 @@
 					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
 					 RING_FAULT_SRCID(fault_reg),
 					 RING_FAULT_FAULT_TYPE(fault_reg));
-			I915_WRITE(RING_FAULT_REG(ring),
+			I915_WRITE(RING_FAULT_REG(engine),
 				   fault_reg & ~RING_FAULT_VALID);
 		}
 	}
-	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+	POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
 }
 
 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+	if (INTEL_INFO(dev_priv)->gen < 6) {
 		intel_gtt_chipset_flush();
 	} else {
 		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -2324,7 +2315,8 @@
 
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	/* Don't bother messing with faults pre GEN6 as we have little
 	 * documentation supporting that it's a good idea.
@@ -2334,10 +2326,8 @@
 
 	i915_check_and_clear_faults(dev);
 
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       dev_priv->gtt.base.start,
-				       dev_priv->gtt.base.total,
-				       true);
+	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+			     true);
 
 	i915_ggtt_flush(dev_priv);
 }
@@ -2367,10 +2357,11 @@
 				     uint64_t start,
 				     enum i915_cache_level level, u32 unused)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	gen8_pte_t __iomem *gtt_entries =
-		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0; /* shut up gcc */
@@ -2444,10 +2435,11 @@
 				     uint64_t start,
 				     enum i915_cache_level level, u32 flags)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	gen6_pte_t __iomem *gtt_entries =
-		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0;
@@ -2487,12 +2479,13 @@
 				  uint64_t length,
 				  bool use_scratch)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen8_pte_t scratch_pte, __iomem *gtt_base =
-		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
 	int rpm_atomic_seq;
 
@@ -2518,12 +2511,13 @@
 				  uint64_t length,
 				  bool use_scratch)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen6_pte_t scratch_pte, __iomem *gtt_base =
-		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
 	int rpm_atomic_seq;
 
@@ -2613,32 +2607,31 @@
 				 enum i915_cache_level cache_level,
 				 u32 flags)
 {
-	struct drm_device *dev = vma->vm->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = vma->obj;
-	struct sg_table *pages = obj->pages;
-	u32 pte_flags = 0;
+	u32 pte_flags;
 	int ret;
 
 	ret = i915_get_ggtt_vma_pages(vma);
 	if (ret)
 		return ret;
-	pages = vma->ggtt_view.pages;
 
 	/* Currently applicable only to VLV */
-	if (obj->gt_ro)
+	pte_flags = 0;
+	if (vma->obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;
 
 
 	if (flags & GLOBAL_BIND) {
-		vma->vm->insert_entries(vma->vm, pages,
+		vma->vm->insert_entries(vma->vm,
+					vma->ggtt_view.pages,
 					vma->node.start,
 					cache_level, pte_flags);
 	}
 
 	if (flags & LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
-		appgtt->base.insert_entries(&appgtt->base, pages,
+		struct i915_hw_ppgtt *appgtt =
+			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+		appgtt->base.insert_entries(&appgtt->base,
+					    vma->ggtt_view.pages,
 					    vma->node.start,
 					    cache_level, pte_flags);
 	}
@@ -2717,8 +2710,8 @@
 	 * aperture.  One page should be enough to keep any prefetching inside
 	 * of the aperture.
 	 */
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_mm_node *entry;
 	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
@@ -2726,13 +2719,13 @@
 
 	BUG_ON(mappable_end > end);
 
-	ggtt_vm->start = start;
+	ggtt->base.start = start;
 
 	/* Subtract the guard page before address space initialization to
 	 * shrink the range used by drm_mm */
-	ggtt_vm->total = end - start - PAGE_SIZE;
-	i915_address_space_init(ggtt_vm, dev_priv);
-	ggtt_vm->total += PAGE_SIZE;
+	ggtt->base.total = end - start - PAGE_SIZE;
+	i915_address_space_init(&ggtt->base, dev_priv);
+	ggtt->base.total += PAGE_SIZE;
 
 	if (intel_vgpu_active(dev)) {
 		ret = intel_vgt_balloon(dev);
@@ -2741,36 +2734,36 @@
 	}
 
 	if (!HAS_LLC(dev))
-		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
 
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
 
 		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
 			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
 		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
 		if (ret) {
 			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
 			return ret;
 		}
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	}
 
 	/* Clear any non-preallocated blocks */
-	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt_vm->clear_range(ggtt_vm, hole_start,
+		ggtt->base.clear_range(&ggtt->base, hole_start,
 				     hole_end - hole_start, true);
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+	ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
 
 	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
 		struct i915_hw_ppgtt *ppgtt;
@@ -2801,28 +2794,33 @@
 					true);
 
 		dev_priv->mm.aliasing_ppgtt = ppgtt;
-		WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
-		dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
 	}
 
 	return 0;
 }
 
-void i915_gem_init_global_gtt(struct drm_device *dev)
+/**
+ * i915_gem_init_ggtt - Initialize GEM for Global GTT
+ * @dev: DRM device
+ */
+void i915_gem_init_ggtt(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 gtt_size, mappable_size;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-	gtt_size = dev_priv->gtt.base.total;
-	mappable_size = dev_priv->gtt.mappable_end;
-
-	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+	i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
 }
 
-void i915_global_gtt_cleanup(struct drm_device *dev)
+/**
+ * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
+ * @dev: DRM device
+ */
+void i915_ggtt_cleanup_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &dev_priv->gtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2832,15 +2830,15 @@
 
 	i915_gem_cleanup_stolen(dev);
 
-	if (drm_mm_initialized(&vm->mm)) {
+	if (drm_mm_initialized(&ggtt->base.mm)) {
 		if (intel_vgpu_active(dev))
 			intel_vgt_deballoon();
 
-		drm_mm_takedown(&vm->mm);
-		list_del(&vm->global_link);
+		drm_mm_takedown(&ggtt->base.mm);
+		list_del(&ggtt->base.global_link);
 	}
 
-	vm->cleanup(vm);
+	ggtt->base.cleanup(&ggtt->base);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2924,13 +2922,14 @@
 static int ggtt_probe_common(struct drm_device *dev,
 			     size_t gtt_size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_page_scratch *scratch_page;
-	phys_addr_t gtt_phys_addr;
+	phys_addr_t ggtt_phys_addr;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
-	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
-		(pci_resource_len(dev->pdev, 0) / 2);
+	ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+			 (pci_resource_len(dev->pdev, 0) / 2);
 
 	/*
 	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
@@ -2940,10 +2939,10 @@
 	 * readback check when writing GTT PTE entries.
 	 */
 	if (IS_BROXTON(dev))
-		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+		ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
 	else
-		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
-	if (!dev_priv->gtt.gsm) {
+		ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+	if (!ggtt->gsm) {
 		DRM_ERROR("Failed to map the gtt page table\n");
 		return -ENOMEM;
 	}
@@ -2952,11 +2951,11 @@
 	if (IS_ERR(scratch_page)) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
-		iounmap(dev_priv->gtt.gsm);
+		iounmap(ggtt->gsm);
 		return PTR_ERR(scratch_page);
 	}
 
-	dev_priv->gtt.base.scratch_page = scratch_page;
+	ggtt->base.scratch_page = scratch_page;
 
 	return 0;
 }
@@ -2977,7 +2976,7 @@
 	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
 	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
-	if (!USES_PPGTT(dev_priv->dev))
+	if (!USES_PPGTT(dev_priv))
 		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
 		 * so RTL will always use the value corresponding to
 		 * pat_sel = 000".
@@ -3034,20 +3033,16 @@
 	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
 }
 
-static int gen8_gmch_probe(struct drm_device *dev,
-			   u64 *gtt_total,
-			   size_t *stolen,
-			   phys_addr_t *mappable_base,
-			   u64 *mappable_end)
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 gtt_size;
+	struct drm_device *dev = ggtt->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 snb_gmch_ctl;
 	int ret;
 
 	/* TODO: We're not aware of mappable constraints on gen8 yet */
-	*mappable_base = pci_resource_start(dev->pdev, 2);
-	*mappable_end = pci_resource_len(dev->pdev, 2);
+	ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+	ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
 
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
@@ -3055,56 +3050,50 @@
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
 	if (INTEL_INFO(dev)->gen >= 9) {
-		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
-		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
+		ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
 	} else if (IS_CHERRYVIEW(dev)) {
-		*stolen = chv_get_stolen_size(snb_gmch_ctl);
-		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
+		ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
 	} else {
-		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
-		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
+		ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
 	}
 
-	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+	ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 
 	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 		chv_setup_private_ppat(dev_priv);
 	else
 		bdw_setup_private_ppat(dev_priv);
 
-	ret = ggtt_probe_common(dev, gtt_size);
+	ret = ggtt_probe_common(dev, ggtt->size);
 
-	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
-	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
-	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
-	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
-
+	ggtt->base.clear_range = gen8_ggtt_clear_range;
 	if (IS_CHERRYVIEW(dev_priv))
-		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
+		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+	else
+		ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+	ggtt->base.bind_vma = ggtt_bind_vma;
+	ggtt->base.unbind_vma = ggtt_unbind_vma;
 
 	return ret;
 }
 
-static int gen6_gmch_probe(struct drm_device *dev,
-			   u64 *gtt_total,
-			   size_t *stolen,
-			   phys_addr_t *mappable_base,
-			   u64 *mappable_end)
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int gtt_size;
+	struct drm_device *dev = ggtt->base.dev;
 	u16 snb_gmch_ctl;
 	int ret;
 
-	*mappable_base = pci_resource_start(dev->pdev, 2);
-	*mappable_end = pci_resource_len(dev->pdev, 2);
+	ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+	ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
 
 	/* 64/512MB is the current min/max we actually know of, but this is just
 	 * a coarse sanity check.
 	 */
-	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
-		DRM_ERROR("Unknown GMADR size (%llx)\n",
-			  dev_priv->gtt.mappable_end);
+	if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
 		return -ENXIO;
 	}
 
@@ -3112,37 +3101,32 @@
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
+	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+	ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
+	ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
-	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
-	*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+	ret = ggtt_probe_common(dev, ggtt->size);
 
-	ret = ggtt_probe_common(dev, gtt_size);
-
-	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
-	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
-	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
-	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.clear_range = gen6_ggtt_clear_range;
+	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
+	ggtt->base.bind_vma = ggtt_bind_vma;
+	ggtt->base.unbind_vma = ggtt_unbind_vma;
 
 	return ret;
 }
 
 static void gen6_gmch_remove(struct i915_address_space *vm)
 {
+	struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
 
-	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
-
-	iounmap(gtt->gsm);
+	iounmap(ggtt->gsm);
 	free_scratch_page(vm->dev, vm->scratch_page);
 }
 
-static int i915_gmch_probe(struct drm_device *dev,
-			   u64 *gtt_total,
-			   size_t *stolen,
-			   phys_addr_t *mappable_base,
-			   u64 *mappable_end)
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = ggtt->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -3151,15 +3135,16 @@
 		return -EIO;
 	}
 
-	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+	intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
+		      &ggtt->mappable_base, &ggtt->mappable_end);
 
-	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
-	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
-	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
-	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
-	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+	ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+	ggtt->base.insert_entries = i915_ggtt_insert_entries;
+	ggtt->base.clear_range = i915_ggtt_clear_range;
+	ggtt->base.bind_vma = ggtt_bind_vma;
+	ggtt->base.unbind_vma = ggtt_unbind_vma;
 
-	if (unlikely(dev_priv->gtt.do_idle_maps))
+	if (unlikely(ggtt->do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
 	return 0;
@@ -3170,41 +3155,53 @@
 	intel_gmch_remove();
 }
 
-int i915_gem_gtt_init(struct drm_device *dev)
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev: DRM device
+ */
+int i915_ggtt_init_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_gtt *gtt = &dev_priv->gtt;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
 	if (INTEL_INFO(dev)->gen <= 5) {
-		gtt->gtt_probe = i915_gmch_probe;
-		gtt->base.cleanup = i915_gmch_remove;
+		ggtt->probe = i915_gmch_probe;
+		ggtt->base.cleanup = i915_gmch_remove;
 	} else if (INTEL_INFO(dev)->gen < 8) {
-		gtt->gtt_probe = gen6_gmch_probe;
-		gtt->base.cleanup = gen6_gmch_remove;
-		if (IS_HASWELL(dev) && dev_priv->ellc_size)
-			gtt->base.pte_encode = iris_pte_encode;
+		ggtt->probe = gen6_gmch_probe;
+		ggtt->base.cleanup = gen6_gmch_remove;
+
+		if (HAS_EDRAM(dev))
+			ggtt->base.pte_encode = iris_pte_encode;
 		else if (IS_HASWELL(dev))
-			gtt->base.pte_encode = hsw_pte_encode;
+			ggtt->base.pte_encode = hsw_pte_encode;
 		else if (IS_VALLEYVIEW(dev))
-			gtt->base.pte_encode = byt_pte_encode;
+			ggtt->base.pte_encode = byt_pte_encode;
 		else if (INTEL_INFO(dev)->gen >= 7)
-			gtt->base.pte_encode = ivb_pte_encode;
+			ggtt->base.pte_encode = ivb_pte_encode;
 		else
-			gtt->base.pte_encode = snb_pte_encode;
+			ggtt->base.pte_encode = snb_pte_encode;
 	} else {
-		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
-		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+		ggtt->probe = gen8_gmch_probe;
+		ggtt->base.cleanup = gen6_gmch_remove;
 	}
 
-	gtt->base.dev = dev;
-	gtt->base.is_ggtt = true;
+	ggtt->base.dev = dev;
+	ggtt->base.is_ggtt = true;
 
-	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
-			     &gtt->mappable_base, &gtt->mappable_end);
+	ret = ggtt->probe(ggtt);
 	if (ret)
 		return ret;
 
+	if ((ggtt->base.total - 1) >> 32) {
+		DRM_ERROR("We never expected a Global GTT with more than 32bits"
+			  " of address space! Found %lldM!\n",
+			  ggtt->base.total >> 20);
+		ggtt->base.total = 1ULL << 32;
+		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+	}
+
 	/*
 	 * Initialise stolen early so that we may reserve preallocated
 	 * objects for the BIOS to KMS transition.
@@ -3215,9 +3212,9 @@
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
 	DRM_INFO("Memory usable by graphics device = %lluM\n",
-		 gtt->base.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
-	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+		 ggtt->base.total >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped)
 		DRM_INFO("VT-d active for gfx access\n");
@@ -3234,33 +3231,38 @@
 	return 0;
 
 out_gtt_cleanup:
-	gtt->base.cleanup(&dev_priv->gtt.base);
+	ggtt->base.cleanup(&ggtt->base);
 
 	return ret;
 }
 
+int i915_ggtt_enable_hw(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+		return -EIO;
+
+	return 0;
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	bool flush;
 
 	i915_check_and_clear_faults(dev);
 
 	/* First fill our portion of the GTT with scratch pages */
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       dev_priv->gtt.base.start,
-				       dev_priv->gtt.base.total,
-				       true);
+	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+			       true);
 
 	/* Cache flush objects bound into GGTT and rebind them. */
-	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
-			if (vma->vm != vm)
+			if (vma->vm != &ggtt->base)
 				continue;
 
 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
@@ -3283,15 +3285,17 @@
 	}
 
 	if (USES_PPGTT(dev)) {
+		struct i915_address_space *vm;
+
 		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 			/* TODO: Perhaps it shouldn't be gen6 specific */
 
-			struct i915_hw_ppgtt *ppgtt =
-					container_of(vm, struct i915_hw_ppgtt,
-						     base);
+			struct i915_hw_ppgtt *ppgtt;
 
-			if (i915_is_ggtt(vm))
+			if (vm->is_ggtt)
 				ppgtt = dev_priv->mm.aliasing_ppgtt;
+			else
+				ppgtt = i915_vm_to_ppgtt(vm);
 
 			gen6_write_page_range(dev_priv, &ppgtt->pd,
 					      0, ppgtt->base.total);
@@ -3350,19 +3354,13 @@
 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 				       const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
-	struct i915_vma *vma;
-
-	if (WARN_ON(!view))
-		return ERR_PTR(-EINVAL);
-
-	vma = i915_gem_obj_to_ggtt_view(obj, view);
-
-	if (IS_ERR(vma))
-		return vma;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
 	if (!vma)
-		vma = __i915_gem_vma_create(obj, ggtt, view);
+		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
 	return vma;
 
@@ -3377,11 +3375,6 @@
 	unsigned int column, row;
 	unsigned int src_idx;
 
-	if (!sg) {
-		st->nents = 0;
-		sg = st->sgl;
-	}
-
 	for (column = 0; column < width; column++) {
 		src_idx = stride * (height - 1) + column;
 		for (row = 0; row < height; row++) {
@@ -3405,7 +3398,7 @@
 intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
-	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
+	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
 	unsigned int size_pages_uv;
 	struct sg_page_iter sg_iter;
 	unsigned long i;
@@ -3416,14 +3409,15 @@
 	int ret = -ENOMEM;
 
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
-				       sizeof(dma_addr_t));
+	page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+					sizeof(dma_addr_t),
+					GFP_TEMPORARY);
 	if (!page_addr_list)
 		return ERR_PTR(ret);
 
 	/* Account for UV plane with NV12. */
 	if (rot_info->pixel_format == DRM_FORMAT_NV12)
-		size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
+		size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
 	else
 		size_pages_uv = 0;
 
@@ -3443,11 +3437,14 @@
 		i++;
 	}
 
+	st->nents = 0;
+	sg = st->sgl;
+
 	/* Rotate the pages. */
 	sg = rotate_pages(page_addr_list, 0,
-		     rot_info->width_pages, rot_info->height_pages,
-		     rot_info->width_pages,
-		     st, NULL);
+			  rot_info->plane[0].width, rot_info->plane[0].height,
+			  rot_info->plane[0].width,
+			  st, sg);
 
 	/* Append the UV plane if NV12. */
 	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
@@ -3459,18 +3456,15 @@
 
 		rot_info->uv_start_page = uv_start_page;
 
-		rotate_pages(page_addr_list, uv_start_page,
-			     rot_info->width_pages_uv,
-			     rot_info->height_pages_uv,
-			     rot_info->width_pages_uv,
-			     st, sg);
+		sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
+				  rot_info->plane[1].width, rot_info->plane[1].height,
+				  rot_info->plane[1].width,
+				  st, sg);
 	}
 
-	DRM_DEBUG_KMS(
-		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
-		      obj->base.size, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, rot_info->width_pages,
-		      rot_info->height_pages, size_pages + size_pages_uv,
+	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
+		      obj->base.size, rot_info->plane[0].width,
+		      rot_info->plane[0].height, size_pages + size_pages_uv,
 		      size_pages);
 
 	drm_free_large(page_addr_list);
@@ -3482,11 +3476,9 @@
 err_st_alloc:
 	drm_free_large(page_addr_list);
 
-	DRM_DEBUG_KMS(
-		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
-		      obj->base.size, ret, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, rot_info->width_pages,
-		      rot_info->height_pages, size_pages + size_pages_uv,
+	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
+		      obj->base.size, ret, rot_info->plane[0].width,
+		      rot_info->plane[0].height, size_pages + size_pages_uv,
 		      size_pages);
 	return ERR_PTR(ret);
 }
@@ -3634,7 +3626,7 @@
 	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->params.rotated.size;
+		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
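
Several hunks in i915_gem_gtt.c above replace the repeated container_of(vm, struct i915_hw_ppgtt, base) open-coding with i915_vm_to_ppgtt(). A plausible shape of that helper is sketched below; the real definition lives in i915_gem_gtt.h, and the WARN_ON sanity check is an assumption based on how the GGTT/PPGTT split is treated elsewhere in this series, so treat it as illustrative rather than authoritative.

/* Illustrative upcast helper: recover the full-PPGTT container from its
 * embedded address-space member.  Only valid for non-GGTT address spaces. */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	WARN_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_hw_ppgtt, base);
}
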
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8774f1b..0008543 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,7 @@
 typedef uint64_t gen8_ppgtt_pdpe_t;
 typedef uint64_t gen8_ppgtt_pml4e_t;
 
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
@@ -135,16 +135,13 @@
 };
 
 struct intel_rotation_info {
-	unsigned int height;
-	unsigned int pitch;
 	unsigned int uv_offset;
 	uint32_t pixel_format;
-	uint64_t fb_modifier;
-	unsigned int width_pages, height_pages;
-	uint64_t size;
-	unsigned int width_pages_uv, height_pages_uv;
-	uint64_t size_uv;
 	unsigned int uv_start_page;
+	struct {
+		/* tiles */
+		unsigned int width, height;
+	} plane[2];
 };
 
 struct i915_ggtt_view {
@@ -342,13 +339,14 @@
  * and correct (in cases like swizzling). That region is referred to as GMADR in
  * the spec.
  */
-struct i915_gtt {
+struct i915_ggtt {
 	struct i915_address_space base;
 
 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
+	size_t size;			/* Total size of Global GTT */
 	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
@@ -360,10 +358,7 @@
 
 	int mtrr;
 
-	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
-			  size_t *stolen, phys_addr_t *mappable_base,
-			  u64 *mappable_end);
+	int (*probe)(struct i915_ggtt *ggtt);
 };
 
 struct i915_hw_ppgtt {
@@ -518,10 +513,10 @@
 		px_dma(ppgtt->base.scratch_pd);
 }
 
-int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_global_gtt_cleanup(struct drm_device *dev);
-
+int i915_ggtt_init_hw(struct drm_device *dev);
+int i915_ggtt_enable_hw(struct drm_device *dev);
+void i915_gem_init_ggtt(struct drm_device *dev);
+void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index fc7e6d5..71611bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -169,15 +169,15 @@
 	drm_gem_object_unreference(&so->obj->base);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so)
 {
 	int ret;
 
-	if (WARN_ON(ring->id != RCS))
+	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, ring->dev);
+	ret = render_state_init(so, engine->dev);
 	if (ret)
 		return ret;
 
@@ -198,21 +198,21 @@
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->ring, &so);
+	ret = i915_gem_render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
 					     so.rodata->batch_items * 4,
 					     I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->ring->dispatch_execbuffer(req,
+		ret = req->engine->dispatch_execbuffer(req,
 						     (so.ggtt_offset +
 						      so.aux_batch_offset),
 						     so.aux_batch_size,
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index e641bb0..6aaa3a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -43,7 +43,7 @@
 
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index d3c473f..425e721 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -28,6 +28,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
@@ -69,6 +70,10 @@
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
+	/* Only shmemfs objects are backed by swap */
+	if (!obj->base.filp)
+		return false;
+
 	/* Only report true if by unbinding the object and putting its pages
 	 * we can actually make forward progress towards freeing physical
 	 * pages.
@@ -166,6 +171,10 @@
 			    obj->madv != I915_MADV_DONTNEED)
 				continue;
 
+			if (flags & I915_SHRINK_VMAPS &&
+			    !is_vmalloc_addr(obj->mapping))
+				continue;
+
 			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
 				continue;
 
@@ -246,7 +255,7 @@
 
 	count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
-		if (obj->pages_pin_count == 0)
+		if (can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -288,67 +297,82 @@
 	return freed;
 }
 
+struct shrinker_lock_uninterruptible {
+	bool was_interruptible;
+	bool unlock;
+};
+
+static bool
+i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
+				       struct shrinker_lock_uninterruptible *slu,
+				       int timeout_ms)
+{
+	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+
+	while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+		schedule_timeout_killable(1);
+		if (fatal_signal_pending(current))
+			return false;
+		if (--timeout == 0) {
+			pr_err("Unable to lock GPU to purge memory.\n");
+			return false;
+		}
+	}
+
+	slu->was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+	return true;
+}
+
+static void
+i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
+					 struct shrinker_lock_uninterruptible *slu)
+{
+	dev_priv->mm.interruptible = slu->was_interruptible;
+	if (slu->unlock)
+		mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
-	struct drm_device *dev = dev_priv->dev;
+	struct shrinker_lock_uninterruptible slu;
 	struct drm_i915_gem_object *obj;
-	unsigned long timeout = msecs_to_jiffies(5000) + 1;
-	unsigned long pinned, bound, unbound, freed_pages;
-	bool was_interruptible;
-	bool unlock;
+	unsigned long unevictable, bound, unbound, freed_pages;
 
-	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
-		schedule_timeout_killable(1);
-		if (fatal_signal_pending(current))
-			return NOTIFY_DONE;
-	}
-	if (timeout == 0) {
-		pr_err("Unable to purge GPU memory due lock contention.\n");
+	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 		return NOTIFY_DONE;
-	}
-
-	was_interruptible = dev_priv->mm.interruptible;
-	dev_priv->mm.interruptible = false;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
 
-	dev_priv->mm.interruptible = was_interruptible;
-
 	/* Because we may be allocating inside our own driver, we cannot
 	 * assert that there are no objects with pinned pages that are not
 	 * being pointed to by hardware.
 	 */
-	unbound = bound = pinned = 0;
+	unbound = bound = unevictable = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		if (!obj->base.filp) /* not backed by a freeable object */
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
+		if (!can_release_pages(obj))
+			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
-			unbound += obj->base.size;
+			unbound += obj->base.size >> PAGE_SHIFT;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->base.filp)
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
+		if (!can_release_pages(obj))
+			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
-			bound += obj->base.size;
+			bound += obj->base.size >> PAGE_SHIFT;
 	}
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
 	if (freed_pages || unbound || bound)
-		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-			freed_pages << PAGE_SHIFT, pinned);
+		pr_info("Purging GPU memory, %lu pages freed, "
+			"%lu pages still pinned.\n",
+			freed_pages, unevictable);
 	if (unbound || bound)
-		pr_err("%lu and %lu bytes still available in the "
+		pr_err("%lu and %lu pages still available in the "
 		       "bound and unbound GPU page lists.\n",
 		       bound, unbound);
 
@@ -356,6 +380,29 @@
 	return NOTIFY_DONE;
 }
 
+static int
+i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
+	struct shrinker_lock_uninterruptible slu;
+	unsigned long freed_pages;
+
+	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+		return NOTIFY_DONE;
+
+	freed_pages = i915_gem_shrink(dev_priv, -1UL,
+				      I915_SHRINK_BOUND |
+				      I915_SHRINK_UNBOUND |
+				      I915_SHRINK_ACTIVE |
+				      I915_SHRINK_VMAPS);
+
+	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+
+	*(unsigned long *)ptr += freed_pages;
+	return NOTIFY_DONE;
+}
+
 /**
  * i915_gem_shrinker_init - Initialize i915 shrinker
  * @dev_priv: i915 device
@@ -371,6 +418,9 @@
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+
+	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
 }
 
 /**
@@ -381,6 +431,7 @@
  */
 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
 {
+	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
 	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
 	unregister_shrinker(&dev_priv->mm.shrinker);
 }
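
The shrinker changes above hook i915 into the vmap purge notifier chain so that vmalloc address space can be reclaimed by dropping object mappings (I915_SHRINK_VMAPS). The fragment below sketches the minimal registration pattern a driver follows, mirroring the i915_gem_shrinker_vmap() callback added above; the callback body here is a placeholder rather than the i915 logic, and the helper names are hypothetical.

#include <linux/notifier.h>
#include <linux/vmalloc.h>

/* Minimal vmap purge notifier: report how many pages were released so the
 * vmalloc core knows whether retrying its allocation is worthwhile. */
static int example_vmap_purge(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	unsigned long freed = 0;

	/* ... release driver-private vmaps here and count freed pages ... */

	*(unsigned long *)ptr += freed;
	return NOTIFY_DONE;
}

static struct notifier_block example_vmap_nb = {
	.notifier_call = example_vmap_purge,
};

/* Call register_vmap_purge_notifier(&example_vmap_nb) at init and
 * unregister_vmap_purge_notifier(&example_vmap_nb) at teardown. */
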
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 2e6e9fb..b7ce963 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -72,9 +72,11 @@
 				struct drm_mm_node *node, u64 size,
 				unsigned alignment)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
 	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
-					alignment, 0,
-					dev_priv->gtt.stolen_usable_size);
+						    alignment, 0,
+						    ggtt->stolen_usable_size);
 }
 
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -87,14 +89,15 @@
 
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct resource *r;
 	u32 base;
 
 	/* Almost universally we can find the Graphics Base of Stolen Memory
-	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
-	 * machines this is also mirrored in the bridge device at different
-	 * locations, or in the MCHBAR.
+	 * at register BSM (0x5c) in the igfx configuration space. On a few
+	 * (desktop) machines this is also mirrored in the bridge device at
+	 * different locations, or in the MCHBAR.
 	 *
 	 * On 865 we just check the TOUD register.
 	 *
@@ -104,9 +107,11 @@
 	 */
 	base = 0;
 	if (INTEL_INFO(dev)->gen >= 3) {
-		/* Read Graphics Base of Stolen Memory directly */
-		pci_read_config_dword(dev->pdev, 0x5c, &base);
-		base &= ~((1<<20) - 1);
+		u32 bsm;
+
+		pci_read_config_dword(dev->pdev, BSM, &bsm);
+
+		base = bsm & BSM_MASK;
 	} else if (IS_I865G(dev)) {
 		u16 toud = 0;
 
@@ -134,7 +139,7 @@
 					 I85X_DRB3, &tmp);
 		tom = tmp * MB(32);
 
-		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+		base = tom - tseg_size - ggtt->stolen_size;
 	} else if (IS_845G(dev)) {
 		u32 tseg_size = 0;
 		u32 tom;
@@ -158,7 +163,7 @@
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
-		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+		base = tom - tseg_size - ggtt->stolen_size;
 	} else if (IS_I830(dev)) {
 		u32 tseg_size = 0;
 		u32 tom;
@@ -178,7 +183,7 @@
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
-		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+		base = tom - tseg_size - ggtt->stolen_size;
 	}
 
 	if (base == 0)
@@ -189,41 +194,41 @@
 		struct {
 			u32 start, end;
 		} stolen[2] = {
-			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
-			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
+			{ .start = base, .end = base + ggtt->stolen_size, },
+			{ .start = base, .end = base + ggtt->stolen_size, },
 		};
-		u64 gtt_start, gtt_end;
+		u64 ggtt_start, ggtt_end;
 
-		gtt_start = I915_READ(PGTBL_CTL);
+		ggtt_start = I915_READ(PGTBL_CTL);
 		if (IS_GEN4(dev))
-			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
-				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
+				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
 		else
-			gtt_start &= PGTBL_ADDRESS_LO_MASK;
-		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
+		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
 
-		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
-			stolen[0].end = gtt_start;
-		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
-			stolen[1].start = gtt_end;
+		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
+			stolen[0].end = ggtt_start;
+		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
+			stolen[1].start = ggtt_end;
 
 		/* pick the larger of the two chunks */
 		if (stolen[0].end - stolen[0].start >
 		    stolen[1].end - stolen[1].start) {
 			base = stolen[0].start;
-			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+			ggtt->stolen_size = stolen[0].end - stolen[0].start;
 		} else {
 			base = stolen[1].start;
-			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+			ggtt->stolen_size = stolen[1].end - stolen[1].start;
 		}
 
 		if (stolen[0].start != stolen[1].start ||
 		    stolen[0].end != stolen[1].end) {
 			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
-				      (unsigned long long) gtt_start,
-				      (unsigned long long) gtt_end - 1);
+				      (unsigned long long)ggtt_start,
+				      (unsigned long long)ggtt_end - 1);
 			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
-				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
+				      base, base + (u32)ggtt->stolen_size - 1);
 		}
 	}
 
@@ -233,7 +238,7 @@
 	 * kernel. So if the region is already marked as busy, something
 	 * is seriously wrong.
 	 */
-	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
 				    "Graphics Stolen Memory");
 	if (r == NULL) {
 		/*
@@ -245,7 +250,7 @@
 		 * reservation starting from 1 instead of 0.
 		 */
 		r = devm_request_mem_region(dev->dev, base + 1,
-					    dev_priv->gtt.stolen_size - 1,
+					    ggtt->stolen_size - 1,
 					    "Graphics Stolen Memory");
 		/*
 		 * GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +258,7 @@
 		 */
 		if (r == NULL && !IS_GEN3(dev)) {
 			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+				  base, base + (uint32_t)ggtt->stolen_size);
 			base = 0;
 		}
 	}
@@ -274,11 +279,12 @@
 static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 				    unsigned long *base, unsigned long *size)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
 				     CTG_STOLEN_RESERVED :
 				     ELK_STOLEN_RESERVED);
 	unsigned long stolen_top = dev_priv->mm.stolen_base +
-		dev_priv->gtt.stolen_size;
+				   ggtt->stolen_size;
 
 	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
 
@@ -369,10 +375,11 @@
 static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 				    unsigned long *base, unsigned long *size)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
 	unsigned long stolen_top;
 
-	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
 
 	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
@@ -388,7 +395,8 @@
 
 int i915_gem_init_stolen(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned long reserved_total, reserved_base = 0, reserved_size;
 	unsigned long stolen_top;
 
@@ -401,14 +409,14 @@
 	}
 #endif
 
-	if (dev_priv->gtt.stolen_size == 0)
+	if (ggtt->stolen_size == 0)
 		return 0;
 
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
 
-	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
 
 	switch (INTEL_INFO(dev_priv)->gen) {
 	case 2:
@@ -458,19 +466,18 @@
 		return 0;
 	}
 
-	dev_priv->gtt.stolen_reserved_base = reserved_base;
-	dev_priv->gtt.stolen_reserved_size = reserved_size;
+	ggtt->stolen_reserved_base = reserved_base;
+	ggtt->stolen_reserved_size = reserved_size;
 
 	/* It is possible for the reserved area to end before the end of stolen
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
 
 	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
-		      dev_priv->gtt.stolen_size >> 10,
-		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+		      ggtt->stolen_size >> 10,
+		      (ggtt->stolen_size - reserved_total) >> 10);
 
-	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
-					   reserved_total;
+	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
 
 	/*
 	 * Basic memrange allocator for stolen space.
@@ -483,7 +490,7 @@
 	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
 	 * problem later.
 	 */
-	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
 
 	return 0;
 }
@@ -492,12 +499,13 @@
 i915_pages_create_for_stolen(struct drm_device *dev,
 			     u32 offset, u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct sg_table *st;
 	struct scatterlist *sg;
 
 	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
-	BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+	BUG_ON(offset > ggtt->stolen_size - size);
 
 	/* We hide that we have no struct page backing our stolen object
 	 * by wrapping the contiguous physical allocation with a fake
@@ -628,8 +636,8 @@
 					       u32 gtt_offset,
 					       u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt = &dev_priv->gtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	struct i915_vma *vma;
@@ -675,7 +683,7 @@
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err;
@@ -688,8 +696,8 @@
 	 */
 	vma->node.start = gtt_offset;
 	vma->node.size = size;
-	if (drm_mm_initialized(&ggtt->mm)) {
-		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+	if (drm_mm_initialized(&ggtt->base.mm)) {
+		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
 			goto err;
@@ -697,7 +705,7 @@
 
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	}
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7410f6c..b9bdb340 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -166,7 +166,7 @@
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL)
 		return -ENOENT;
 
@@ -297,7 +297,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
 	if (&obj->base == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 4d30b60..32d9726 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -34,7 +34,7 @@
 
 struct i915_mm_struct {
 	struct mm_struct *mm;
-	struct drm_device *dev;
+	struct drm_i915_private *i915;
 	struct i915_mmu_notifier *mn;
 	struct hlist_node node;
 	struct kref kref;
@@ -49,6 +49,7 @@
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
+	struct workqueue_struct *wq;
 };
 
 struct i915_mmu_object {
@@ -60,6 +61,37 @@
 	bool attached;
 };
 
+static void wait_rendering(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	int i, n;
+
+	if (!obj->active)
+		return;
+
+	n = 0;
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct drm_i915_gem_request *req;
+
+		req = obj->last_read_req[i];
+		if (req == NULL)
+			continue;
+
+		requests[n++] = i915_gem_request_reference(req);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		__i915_wait_request(requests[i], false, NULL, NULL);
+
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		i915_gem_request_unreference(requests[i]);
+}
+
 static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@@ -75,13 +107,13 @@
 		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
+		wait_rendering(obj);
+
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
-			int ret = i915_vma_unbind(vma);
-			WARN_ON(ret && ret != -EIO);
-		}
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
+			WARN_ON(i915_vma_unbind(vma));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;
@@ -140,7 +172,7 @@
 		 */
 		mo = container_of(it, struct i915_mmu_object, it);
 		if (kref_get_unless_zero(&mo->obj->base.refcount))
-			schedule_work(&mo->work);
+			queue_work(mn->wq, &mo->work);
 
 		list_add(&mo->link, &cancelled);
 		it = interval_tree_iter_next(it, start, end);
@@ -148,6 +180,8 @@
 	list_for_each_entry(mo, &cancelled, link)
 		del_object(mo);
 	spin_unlock(&mn->lock);
+
+	flush_workqueue(mn->wq);
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -167,10 +201,16 @@
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT;
+	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+	if (mn->wq == NULL) {
+		kfree(mn);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	 /* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
+		destroy_workqueue(mn->wq);
 		kfree(mn);
 		return ERR_PTR(ret);
 	}
@@ -205,13 +245,13 @@
 		return mn;
 
 	down_write(&mm->mm->mmap_sem);
-	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	mutex_lock(&mm->i915->mm_lock);
 	if ((mn = mm->mn) == NULL) {
 		mn = i915_mmu_notifier_create(mm->mm);
 		if (!IS_ERR(mn))
 			mm->mn = mn;
 	}
-	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);
 
 	return mn;
@@ -256,6 +296,7 @@
 		return;
 
 	mmu_notifier_unregister(&mn->mn, mm);
+	destroy_workqueue(mn->wq);
 	kfree(mn);
 }
 
@@ -327,7 +368,7 @@
 		}
 
 		kref_init(&mm->kref);
-		mm->dev = obj->base.dev;
+		mm->i915 = to_i915(obj->base.dev);
 
 		mm->mm = current->mm;
 		atomic_inc(&current->mm->mm_count);
@@ -362,7 +403,7 @@
 
 	/* Protected by dev_priv->mm_lock */
 	hash_del(&mm->node);
-	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	mutex_unlock(&mm->i915->mm_lock);
 
 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
 	schedule_work(&mm->work);
@@ -494,10 +535,7 @@
 	ret = -ENOMEM;
 	pinned = 0;
 
-	pvec = kmalloc(npages*sizeof(struct page *),
-		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	if (pvec == NULL)
-		pvec = drm_malloc_ab(npages, sizeof(struct page *));
+	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
@@ -639,14 +677,11 @@
 	pvec = NULL;
 	pinned = 0;
 	if (obj->userptr.mm->mm == current->mm) {
-		pvec = kmalloc(num_pages*sizeof(struct page *),
-			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+				      GFP_TEMPORARY);
 		if (pvec == NULL) {
-			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
-			if (pvec == NULL) {
-				__i915_gem_userptr_set_active(obj, false);
-				return -ENOMEM;
-			}
+			__i915_gem_userptr_set_active(obj, false);
+			return -ENOMEM;
 		}
 
 		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -763,6 +798,13 @@
 	int ret;
 	u32 handle;
 
+	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+		/* We cannot support coherent userptr objects on hw
+		 * without LLC and with broken snooping.
+		 */
+		return -ENODEV;
+	}
+
 	if (args->flags & ~(I915_USERPTR_READ_ONLY |
 			    I915_USERPTR_UNSYNCHRONIZED))
 		return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 831895b..89725c9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -198,7 +198,7 @@
 			   err->size,
 			   err->read_domains,
 			   err->write_domain);
-		for (i = 0; i < I915_NUM_RINGS; i++)
+		for (i = 0; i < I915_NUM_ENGINES; i++)
 			err_printf(m, "%02x ", err->rseqno[i]);
 
 		err_printf(m, "] %02x", err->wseqno);
@@ -230,8 +230,6 @@
 		return "wait";
 	case HANGCHECK_ACTIVE:
 		return "active";
-	case HANGCHECK_ACTIVE_LOOP:
-		return "active (loop)";
 	case HANGCHECK_KICK:
 		return "kick";
 	case HANGCHECK_HUNG:
@@ -298,6 +296,7 @@
 		}
 	}
 	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
+	err_printf(m, "  last_seqno: 0x%08x\n", ring->last_seqno);
 	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
 	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
 	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
@@ -433,7 +432,7 @@
 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		obj = error->ring[i].batchbuffer;
 		if (obj) {
-			err_puts(m, dev_priv->ring[i].name);
+			err_puts(m, dev_priv->engine[i].name);
 			if (error->ring[i].pid != -1)
 				err_printf(m, " (submitted by %s [%d])",
 					   error->ring[i].comm,
@@ -447,14 +446,14 @@
 		obj = error->ring[i].wa_batchbuffer;
 		if (obj) {
 			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
 
 		if (error->ring[i].num_requests) {
 			err_printf(m, "%s --- %d requests\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   error->ring[i].num_requests);
 			for (j = 0; j < error->ring[i].num_requests; j++) {
 				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
@@ -466,7 +465,7 @@
 
 		if ((obj = error->ring[i].ringbuffer)) {
 			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
@@ -480,7 +479,7 @@
 				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
 			}
 			err_printf(m, "%s --- HW Status = 0x%08llx\n",
-				   dev_priv->ring[i].name, hws_offset);
+				   dev_priv->engine[i].name, hws_offset);
 			offset = 0;
 			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
 				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
@@ -493,9 +492,31 @@
 			}
 		}
 
+		obj = error->ring[i].wa_ctx;
+		if (obj) {
+			u64 wa_ctx_offset = obj->gtt_offset;
+			u32 *wa_ctx_page = &obj->pages[0][0];
+			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
+					   engine->wa_ctx.per_ctx.size);
+
+			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
+				   dev_priv->engine[i].name, wa_ctx_offset);
+			offset = 0;
+			for (elt = 0; elt < wa_ctx_size; elt += 4) {
+				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+					   offset,
+					   wa_ctx_page[elt + 0],
+					   wa_ctx_page[elt + 1],
+					   wa_ctx_page[elt + 2],
+					   wa_ctx_page[elt + 3]);
+				offset += 16;
+			}
+		}
+
 		if ((obj = error->ring[i].ctx)) {
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
-				   dev_priv->ring[i].name,
+				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
@@ -585,6 +606,7 @@
 		i915_error_object_free(error->ring[i].hws_page);
 		i915_error_object_free(error->ring[i].ctx);
 		kfree(error->ring[i].requests);
+		i915_error_object_free(error->ring[i].wa_ctx);
 	}
 
 	i915_error_object_free(error->semaphore_obj);
@@ -606,6 +628,7 @@
 			 struct drm_i915_gem_object *src,
 			 struct i915_address_space *vm)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_error_object *dst;
 	struct i915_vma *vma = NULL;
 	int num_pages;
@@ -632,7 +655,7 @@
 		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
 		   vma && (vma->bound & GLOBAL_BIND) &&
-		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
@@ -642,12 +665,13 @@
 			goto unwind;
 
 		reloc_offset = i915_gem_obj_ggtt_offset(src);
-		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
 			goto unwind;
 	}
 
 	/* Cannot access snooped pages through the aperture */
-	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
+	    !HAS_LLC(dev_priv))
 		goto unwind;
 
 	dst->page_count = num_pages;
@@ -668,7 +692,7 @@
 			 * captures what the GPU read.
 			 */
 
-			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+			s = io_mapping_map_atomic_wc(ggtt->mappable,
 						     reloc_offset);
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
@@ -701,7 +725,7 @@
 	return NULL;
 }
 #define i915_error_ggtt_object_create(dev_priv, src) \
-	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
 
 static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
@@ -711,7 +735,7 @@
 
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < I915_NUM_ENGINES; i++)
 		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
 	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
 	err->gtt_offset = vma->node.start;
@@ -726,7 +750,7 @@
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->ring = obj->last_write_req ?
-			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
 	err->cache_level = obj->cache_level;
 }
 
@@ -788,7 +812,7 @@
 	 * synchronization commands which almost always appear in the case
 	 * strictly a client bug. Use instdone to differentiate those some.
 	 */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
 			if (ring_id)
 				*ring_id = i;
@@ -821,11 +845,11 @@
 
 static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct drm_i915_error_state *error,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
 	struct intel_engine_cs *to;
-	int i;
+	enum intel_engine_id id;
 
 	if (!i915_semaphore_is_enabled(dev_priv->dev))
 		return;
@@ -835,68 +859,69 @@
 			i915_error_ggtt_object_create(dev_priv,
 						      dev_priv->semaphore_obj);
 
-	for_each_ring(to, dev_priv, i) {
+	for_each_engine_id(to, dev_priv, id) {
 		int idx;
 		u16 signal_offset;
 		u32 *tmp;
 
-		if (ring == to)
+		if (engine == to)
 			continue;
 
-		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
 				/ 4;
 		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(ring, to);
+		idx = intel_ring_sync_index(engine, to);
 
 		ering->semaphore_mboxes[idx] = tmp[signal_offset];
-		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
 	}
 }
 
 static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring,
+					struct intel_engine_cs *engine,
 					struct drm_i915_error_ring *ering)
 {
-	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
-	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
-	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
-	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
-	if (HAS_VEBOX(dev_priv->dev)) {
+	if (HAS_VEBOX(dev_priv)) {
 		ering->semaphore_mboxes[2] =
-			I915_READ(RING_SYNC_2(ring->mmio_base));
-		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+			I915_READ(RING_SYNC_2(engine->mmio_base));
+		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
 	}
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
 				   struct drm_i915_error_state *error,
-				   struct intel_engine_cs *ring,
+				   struct intel_engine_cs *engine,
 				   struct drm_i915_error_ring *ering)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
-		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
 		if (INTEL_INFO(dev)->gen >= 8)
-			gen8_record_semaphore_state(dev_priv, error, ring, ering);
+			gen8_record_semaphore_state(dev_priv, error, engine,
+						    ering);
 		else
-			gen6_record_semaphore_state(dev_priv, ring, ering);
+			gen6_record_semaphore_state(dev_priv, engine, ering);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
-		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
-		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
-		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
-		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
 		if (INTEL_INFO(dev)->gen >= 8) {
-			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
-			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
-		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
 	} else {
 		ering->faddr = I915_READ(DMA_FADD_I8XX);
 		ering->ipeir = I915_READ(IPEIR);
@@ -904,20 +929,21 @@
 		ering->instdone = I915_READ(GEN2_INSTDONE);
 	}
 
-	ering->waiting = waitqueue_active(&ring->irq_queue);
-	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
-	ering->seqno = ring->get_seqno(ring, false);
-	ering->acthd = intel_ring_get_active_head(ring);
-	ering->start = I915_READ_START(ring);
-	ering->head = I915_READ_HEAD(ring);
-	ering->tail = I915_READ_TAIL(ring);
-	ering->ctl = I915_READ_CTL(ring);
+	ering->waiting = waitqueue_active(&engine->irq_queue);
+	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+	ering->acthd = intel_ring_get_active_head(engine);
+	ering->seqno = engine->get_seqno(engine);
+	ering->last_seqno = engine->last_submitted_seqno;
+	ering->start = I915_READ_START(engine);
+	ering->head = I915_READ_HEAD(engine);
+	ering->tail = I915_READ_TAIL(engine);
+	ering->ctl = I915_READ_CTL(engine);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		i915_reg_t mmio;
 
 		if (IS_GEN7(dev)) {
-			switch (ring->id) {
+			switch (engine->id) {
 			default:
 			case RCS:
 				mmio = RENDER_HWS_PGA_GEN7;
@@ -932,51 +958,51 @@
 				mmio = VEBOX_HWS_PGA_GEN7;
 				break;
 			}
-		} else if (IS_GEN6(ring->dev)) {
-			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+		} else if (IS_GEN6(engine->dev)) {
+			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 		} else {
 			/* XXX: gen8 returns to sanity */
-			mmio = RING_HWS_PGA(ring->mmio_base);
+			mmio = RING_HWS_PGA(engine->mmio_base);
 		}
 
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->hangcheck_score = ring->hangcheck.score;
-	ering->hangcheck_action = ring->hangcheck.action;
+	ering->hangcheck_score = engine->hangcheck.score;
+	ering->hangcheck_action = engine->hangcheck.action;
 
 	if (USES_PPGTT(dev)) {
 		int i;
 
-		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
 		if (IS_GEN6(dev))
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE_READ(ring));
+				I915_READ(RING_PP_DIR_BASE_READ(engine));
 		else if (IS_GEN7(dev))
 			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE(ring));
+				I915_READ(RING_PP_DIR_BASE(engine));
 		else if (INTEL_INFO(dev)->gen >= 8)
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
-					I915_READ(GEN8_RING_PDP_UDW(ring, i));
+					I915_READ(GEN8_RING_PDP_UDW(engine, i));
 				ering->vm_info.pdp[i] <<= 32;
 				ering->vm_info.pdp[i] |=
-					I915_READ(GEN8_RING_PDP_LDW(ring, i));
+					I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			}
 	}
 }
 
 
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
 					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
-	if (ring->id != RCS || !error->ccid)
+	if (engine->id != RCS || !error->ccid)
 		return;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -993,30 +1019,31 @@
 static void i915_gem_record_rings(struct drm_device *dev,
 				  struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *request;
 	int i, count;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];
 		struct intel_ringbuffer *rbuf;
 
 		error->ring[i].pid = -1;
 
-		if (ring->dev == NULL)
+		if (engine->dev == NULL)
 			continue;
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, error, ring, &error->ring[i]);
+		i915_record_ring_state(dev, error, engine, &error->ring[i]);
 
-		request = i915_gem_find_active_request(ring);
+		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
 
 			vm = request->ctx && request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base :
-				&dev_priv->gtt.base;
+				&ggtt->base;
 
 			/* We need to copy these to an anonymous buffer
 			 * as the simplest method to avoid being overwritten
@@ -1027,10 +1054,10 @@
 							 request->batch_obj,
 							 vm);
 
-			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
+			if (HAS_BROKEN_CS_TLB(dev_priv))
 				error->ring[i].wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
-							     ring->scratch.obj);
+							     engine->scratch.obj);
 
 			if (request->pid) {
 				struct task_struct *task;
@@ -1052,11 +1079,11 @@
 			 * executed).
 			 */
 			if (request)
-				rbuf = request->ctx->engine[ring->id].ringbuf;
+				rbuf = request->ctx->engine[engine->id].ringbuf;
 			else
-				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
 		} else
-			rbuf = ring->buffer;
+			rbuf = engine->buffer;
 
 		error->ring[i].cpu_ring_head = rbuf->head;
 		error->ring[i].cpu_ring_tail = rbuf->tail;
@@ -1065,12 +1092,19 @@
 			i915_error_ggtt_object_create(dev_priv, rbuf->obj);
 
 		error->ring[i].hws_page =
-			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+			i915_error_ggtt_object_create(dev_priv,
+						      engine->status_page.obj);
 
-		i915_gem_record_active_context(ring, error, &error->ring[i]);
+		if (engine->wa_ctx.obj) {
+			error->ring[i].wa_ctx =
+				i915_error_ggtt_object_create(dev_priv,
+							      engine->wa_ctx.obj);
+		}
+
+		i915_gem_record_active_context(engine, error, &error->ring[i]);
 
 		count = 0;
-		list_for_each_entry(request, &ring->request_list, list)
+		list_for_each_entry(request, &engine->request_list, list)
 			count++;
 
 		error->ring[i].num_requests = count;
@@ -1083,7 +1117,7 @@
 		}
 
 		count = 0;
-		list_for_each_entry(request, &ring->request_list, list) {
+		list_for_each_entry(request, &engine->request_list, list) {
 			struct drm_i915_error_request *erq;
 
 			if (count >= error->ring[i].num_requests) {
@@ -1272,7 +1306,7 @@
 
 static void i915_error_capture_msg(struct drm_device *dev,
 				   struct drm_i915_error_state *error,
-				   bool wedged,
+				   u32 engine_mask,
 				   const char *error_msg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1295,7 +1329,7 @@
 	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
 		  ", reason: %s, action: %s",
 		  error_msg,
-		  wedged ? "reset" : "continue");
+		  engine_mask ? "reset" : "continue");
 }
 
 static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
@@ -1318,7 +1352,7 @@
  * out a structure which becomes available in debugfs for user level tools
  * to pick up.
  */
-void i915_capture_error_state(struct drm_device *dev, bool wedged,
+void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 			      const char *error_msg)
 {
 	static bool warned;
@@ -1346,7 +1380,7 @@
 	error->overlay = intel_overlay_capture_error_state(dev);
 	error->display = intel_display_capture_error_state(dev);
 
-	i915_error_capture_msg(dev, error, wedged, error_msg);
+	i915_error_capture_msg(dev, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
 
 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index e4ba582..80786d9 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -27,9 +27,12 @@
 /* Definitions of GuC H/W registers, bits, etc */
 
 #define GUC_STATUS			_MMIO(0xc000)
+#define   GS_RESET_SHIFT		0
+#define   GS_MIA_IN_RESET		  (0x01 << GS_RESET_SHIFT)
 #define   GS_BOOTROM_SHIFT		1
 #define   GS_BOOTROM_MASK		  (0x7F << GS_BOOTROM_SHIFT)
 #define   GS_BOOTROM_RSA_FAILED		  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_JUMP_PASSED	  (0x76 << GS_BOOTROM_SHIFT)
 #define   GS_UKERNEL_SHIFT		8
 #define   GS_UKERNEL_MASK		  (0xFF << GS_UKERNEL_SHIFT)
 #define   GS_UKERNEL_LAPIC_DONE		  (0x30 << GS_UKERNEL_SHIFT)
@@ -37,7 +40,13 @@
 #define   GS_UKERNEL_READY		  (0xF0 << GS_UKERNEL_SHIFT)
 #define   GS_MIA_SHIFT			16
 #define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
-#define   GS_MIA_CORE_STATE		  (1 << GS_MIA_SHIFT)
+#define   GS_MIA_CORE_STATE		  (0x01 << GS_MIA_SHIFT)
+#define   GS_MIA_HALT_REQUESTED		  (0x02 << GS_MIA_SHIFT)
+#define   GS_MIA_ISR_ENTRY		  (0x04 << GS_MIA_SHIFT)
+#define   GS_AUTH_STATUS_SHIFT		30
+#define   GS_AUTH_STATUS_MASK		  (0x03 << GS_AUTH_STATUS_SHIFT)
+#define   GS_AUTH_STATUS_BAD		  (0x01 << GS_AUTH_STATUS_SHIFT)
+#define   GS_AUTH_STATUS_GOOD		  (0x02 << GS_AUTH_STATUS_SHIFT)
 
 #define SOFT_SCRATCH(n)			_MMIO(0xc180 + (n) * 4)
 #define SOFT_SCRATCH_COUNT		16
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d7543ef..d40c13f 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -179,15 +179,11 @@
 			      struct i915_guc_client *client)
 {
 	struct guc_doorbell_info *doorbell;
-	void *base;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	doorbell = base + client->doorbell_offset;
+	doorbell = client->client_base + client->doorbell_offset;
 
-	doorbell->db_status = 1;
+	doorbell->db_status = GUC_DOORBELL_ENABLED;
 	doorbell->cookie = 0;
-
-	kunmap_atomic(base);
 }
 
 static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -195,11 +191,9 @@
 	struct guc_process_desc *desc;
 	union guc_doorbell_qw db_cmp, db_exc, db_ret;
 	union guc_doorbell_qw *db;
-	void *base;
 	int attempt = 2, ret = -EAGAIN;
 
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-	desc = base + gc->proc_desc_offset;
+	desc = gc->client_base + gc->proc_desc_offset;
 
 	/* Update the tail so it is visible to GuC */
 	desc->tail = gc->wq_tail;
@@ -215,7 +209,7 @@
 		db_exc.cookie = 1;
 
 	/* pointer of current doorbell cacheline */
-	db = base + gc->doorbell_offset;
+	db = gc->client_base + gc->doorbell_offset;
 
 	while (attempt--) {
 		/* lets ring the doorbell */
@@ -244,10 +238,6 @@
 			db_exc.cookie = 1;
 	}
 
-	/* Finally, update the cached copy of the GuC's WQ head */
-	gc->wq_head = desc->head;
-
-	kunmap_atomic(base);
 	return ret;
 }
 
@@ -256,16 +246,12 @@
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct guc_doorbell_info *doorbell;
-	void *base;
 	i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
 	int value;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	doorbell = base + client->doorbell_offset;
+	doorbell = client->client_base + client->doorbell_offset;
 
-	doorbell->db_status = 0;
-
-	kunmap_atomic(base);
+	doorbell->db_status = GUC_DOORBELL_DISABLED;
 
 	I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
 
@@ -341,10 +327,8 @@
 			       struct i915_guc_client *client)
 {
 	struct guc_process_desc *desc;
-	void *base;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	desc = base + client->proc_desc_offset;
+	desc = client->client_base + client->proc_desc_offset;
 
 	memset(desc, 0, sizeof(*desc));
 
@@ -361,8 +345,6 @@
 	desc->wq_size_bytes = client->wq_size;
 	desc->wq_status = WQ_STATUS_ACTIVE;
 	desc->priority = client->priority;
-
-	kunmap_atomic(base);
 }
 
 /*
@@ -376,12 +358,14 @@
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
+	struct drm_i915_gem_object *client_obj = client->client_obj;
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
-	int i;
+	enum intel_engine_id id;
+	u32 gfx_addr;
 
 	memset(&desc, 0, sizeof(desc));
 
@@ -390,8 +374,8 @@
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_ring(ring, dev_priv, i) {
-		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
+	for_each_engine_id(engine, dev_priv, id) {
+		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
 
@@ -402,48 +386,44 @@
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		obj = ctx->engine[i].state;
+		obj = ctx->engine[id].state;
 		if (!obj)
 			break;	/* XXX: continue? */
 
-		ctx_desc = intel_lr_context_descriptor(ctx, ring);
+		ctx_desc = intel_lr_context_descriptor(ctx, engine);
 		lrc->context_desc = (u32)ctx_desc;
 
 		/* The state page is after PPHWSP */
-		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
-				LRC_STATE_PN * PAGE_SIZE;
+		gfx_addr = i915_gem_obj_ggtt_offset(obj);
+		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
+				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ctx->engine[i].ringbuf->obj;
+		obj = ctx->engine[id].ringbuf->obj;
+		gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
-		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
-		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
-		lrc->ring_next_free_location = lrc->ring_begin;
+		lrc->ring_begin = gfx_addr;
+		lrc->ring_end = gfx_addr + obj->base.size - 1;
+		lrc->ring_next_free_location = gfx_addr;
 		lrc->ring_current_tail_pointer_value = 0;
 
-		desc.engines_used |= (1 << ring->guc_id);
+		desc.engines_used |= (1 << engine->guc_id);
 	}
 
 	WARN_ON(desc.engines_used == 0);
 
 	/*
-	 * The CPU address is only needed at certain points, so kmap_atomic on
-	 * demand instead of storing it in the ctx descriptor.
-	 * XXX: May make debug easier to have it mapped
+	 * The doorbell, process descriptor, and workqueue are all part
+	 * of the client object, which the GuC will reference via the GGTT.
 	 */
-	desc.db_trigger_cpu = 0;
-	desc.db_trigger_uk = client->doorbell_offset +
-		i915_gem_obj_ggtt_offset(client->client_obj);
-	desc.db_trigger_phy = client->doorbell_offset +
-		sg_dma_address(client->client_obj->pages->sgl);
-
-	desc.process_desc = client->proc_desc_offset +
-		i915_gem_obj_ggtt_offset(client->client_obj);
-
-	desc.wq_addr = client->wq_offset +
-		i915_gem_obj_ggtt_offset(client->client_obj);
-
+	gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
+	desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+				client->doorbell_offset;
+	desc.db_trigger_cpu = (uintptr_t)client->client_base +
+				client->doorbell_offset;
+	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
+	desc.process_desc = gfx_addr + client->proc_desc_offset;
+	desc.wq_addr = gfx_addr + client->wq_offset;
 	desc.wq_size = client->wq_size;
 
 	/*
@@ -474,25 +454,16 @@
 int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
-	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
 	if (!gc)
 		return 0;
 
-	/* Quickly return if wq space is available since last time we cache the
-	 * head position. */
-	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
-		return 0;
-
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-	desc = base + gc->proc_desc_offset;
+	desc = gc->client_base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		gc->wq_head = desc->head;
-
-		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
 			ret = 0;
 			break;
 		}
@@ -501,19 +472,19 @@
 			usleep_range(1000, 2000);
 	};
 
-	kunmap_atomic(base);
-
 	return ret;
 }
 
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
 				  struct drm_i915_gem_request *rq)
 {
+	struct guc_process_desc *desc;
 	struct guc_wq_item *wqi;
 	void *base;
 	u32 tail, wq_len, wq_off, space;
 
-	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	desc = gc->client_base + gc->proc_desc_offset;
+	space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
 	if (WARN_ON(space < sizeof(struct guc_wq_item)))
 		return -ENOSPC; /* shouldn't happen */
 
@@ -542,11 +513,12 @@
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
 	wqi->header = WQ_TYPE_INORDER |
 			(wq_len << WQ_LEN_SHIFT) |
-			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
+			(rq->engine->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;
 
 	/* The GuC wants only the low-order word of the context descriptor */
-	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
+							     rq->engine);
 
 	/* The GuC firmware wants the tail index in QWords, not bytes */
 	tail = rq->ringbuf->tail >> 3;
@@ -569,7 +541,7 @@
 		    struct drm_i915_gem_request *rq)
 {
 	struct intel_guc *guc = client->guc;
-	unsigned int engine_id = rq->ring->guc_id;
+	unsigned int engine_id = rq->engine->guc_id;
 	int q_ret, b_ret;
 
 	q_ret = guc_add_workqueue_item(client, rq);
@@ -660,21 +632,28 @@
 	if (!client)
 		return;
 
-	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
-		/*
-		 * First disable the doorbell, then tell the GuC we've
-		 * finished with it, finally deallocate it in our bitmap
-		 */
-		guc_disable_doorbell(guc, client);
-		host2guc_release_doorbell(guc, client);
-		release_doorbell(guc, client->doorbell_id);
-	}
-
 	/*
 	 * XXX: wait for any outstanding submissions before freeing memory.
 	 * Be sure to drop any locks
 	 */
 
+	if (client->client_base) {
+		/*
+		 * If we got as far as setting up a doorbell, make sure
+		 * we shut it down before unmapping & deallocating the
+		 * memory. So first disable the doorbell, then tell the
+		 * GuC that we've finished with it, and finally deallocate
+		 * it in our bitmap.
+		 */
+		if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+			guc_disable_doorbell(guc, client);
+			host2guc_release_doorbell(guc, client);
+			release_doorbell(guc, client->doorbell_id);
+		}
+
+		kunmap(kmap_to_page(client->client_base));
+	}
+
 	gem_release_guc_obj(client->client_obj);
 
 	if (client->ctx_index != GUC_INVALID_CTX_ID) {
@@ -695,7 +674,7 @@
  * @ctx:	the context that owns the client (we use the default render
  * 		context)
  *
- * Return:	An i915_guc_client object if success.
+ * Return:	An i915_guc_client object on success, otherwise NULL.
  */
 static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 						uint32_t priority,
@@ -727,7 +706,9 @@
 	if (!obj)
 		goto err;
 
+	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
 	client->client_obj = obj;
+	client->client_base = kmap(i915_gem_object_get_page(obj, 0));
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
 
@@ -839,9 +820,9 @@
 	struct guc_ads *ads;
 	struct guc_policies *policies;
 	struct guc_mmio_reg_state *reg_state;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct page *page;
-	u32 size, i;
+	u32 size;
 
 	/* The ads obj includes the struct itself and buffers passed to GuC */
 	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
@@ -867,11 +848,11 @@
 	 * so its address won't change after we've told the GuC where
 	 * to find it.
 	 */
-	ring = &dev_priv->ring[RCS];
-	ads->golden_context_lrca = ring->status_page.gfx_addr;
+	engine = &dev_priv->engine[RCS];
+	ads->golden_context_lrca = engine->status_page.gfx_addr;
 
-	for_each_ring(ring, dev_priv, i)
-		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+	for_each_engine(engine, dev_priv)
+		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
 	/* GuC scheduling policies */
 	policies = (void *)ads + sizeof(struct guc_ads);
@@ -883,12 +864,12 @@
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
 
-	for_each_ring(ring, dev_priv, i) {
-		reg_state->mmio_white_list[ring->guc_id].mmio_start =
-			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+	for_each_engine(engine, dev_priv) {
+		reg_state->mmio_white_list[engine->guc_id].mmio_start =
+			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
 		/* Nothing to be saved or restored for now. */
-		reg_state->mmio_white_list[ring->guc_id].count = 0;
+		reg_state->mmio_white_list[engine->guc_id].count = 0;
 	}
 
 	ads->reg_state_addr = ads->scheduler_policies +
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1c21220..2f6fd33 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,14 +994,15 @@
 	return;
 }
 
-static void notify_ring(struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *engine)
 {
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	trace_i915_gem_request_notify(ring);
+	trace_i915_gem_request_notify(engine);
+	engine->user_interrupts++;
 
-	wake_up_all(&ring->irq_queue);
+	wake_up_all(&engine->irq_queue);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1079,11 +1080,10 @@
 
 static bool any_waiters(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, i)
-		if (ring->irq_refcount)
+	for_each_engine(engine, dev_priv)
+		if (engine->irq_refcount)
 			return true;
 
 	return false;
@@ -1219,7 +1219,7 @@
 		i915_reg_t reg;
 
 		slice--;
-		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
 			break;
 
 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -1258,24 +1258,23 @@
 out:
 	WARN_ON(dev_priv->l3_parity.which_slice);
 	spin_lock_irq(&dev_priv->irq_lock);
-	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
+static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
+					       u32 iir)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_L3_DPF(dev))
+	if (!HAS_L3_DPF(dev_priv))
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
 	spin_unlock(&dev_priv->irq_lock);
 
-	iir &= GT_PARITY_ERROR(dev);
+	iir &= GT_PARITY_ERROR(dev_priv);
 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
 		dev_priv->l3_parity.which_slice |= 1 << 1;
 
@@ -1285,102 +1284,85 @@
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
-static void ilk_gt_irq_handler(struct drm_device *dev,
-			       struct drm_i915_private *dev_priv,
+static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
 	if (gt_iir &
 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(&dev_priv->ring[RCS]);
+		notify_ring(&dev_priv->engine[RCS]);
 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		notify_ring(&dev_priv->ring[VCS]);
+		notify_ring(&dev_priv->engine[VCS]);
 }
 
-static void snb_gt_irq_handler(struct drm_device *dev,
-			       struct drm_i915_private *dev_priv,
+static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
 
 	if (gt_iir &
 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-		notify_ring(&dev_priv->ring[RCS]);
+		notify_ring(&dev_priv->engine[RCS]);
 	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		notify_ring(&dev_priv->ring[VCS]);
+		notify_ring(&dev_priv->engine[VCS]);
 	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(&dev_priv->ring[BCS]);
+		notify_ring(&dev_priv->engine[BCS]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
-	if (gt_iir & GT_PARITY_ERROR(dev))
-		ivybridge_parity_error_irq_handler(dev, gt_iir);
+	if (gt_iir & GT_PARITY_ERROR(dev_priv))
+		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
 }
 
 static __always_inline void
-gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
-		notify_ring(ring);
+		notify_ring(engine);
 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-		intel_lrc_irq_handler(ring);
+		tasklet_schedule(&engine->irq_tasklet);
 }
 
-static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
-				       u32 master_ctl)
+static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
+				   u32 master_ctl,
+				   u32 gt_iir[4])
 {
 	irqreturn_t ret = IRQ_NONE;
 
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
-		if (iir) {
-			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
+		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
+		if (gt_iir[0]) {
+			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
 			ret = IRQ_HANDLED;
-
-			gen8_cs_irq_handler(&dev_priv->ring[RCS],
-					iir, GEN8_RCS_IRQ_SHIFT);
-
-			gen8_cs_irq_handler(&dev_priv->ring[BCS],
-					iir, GEN8_BCS_IRQ_SHIFT);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
 
 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
-		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
-		if (iir) {
-			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
+		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
+		if (gt_iir[1]) {
+			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
 			ret = IRQ_HANDLED;
-
-			gen8_cs_irq_handler(&dev_priv->ring[VCS],
-					iir, GEN8_VCS1_IRQ_SHIFT);
-
-			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
-					iir, GEN8_VCS2_IRQ_SHIFT);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
-		if (iir) {
-			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
+		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
+		if (gt_iir[3]) {
+			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
 			ret = IRQ_HANDLED;
-
-			gen8_cs_irq_handler(&dev_priv->ring[VECS],
-					iir, GEN8_VECS_IRQ_SHIFT);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
 
 	if (master_ctl & GEN8_GT_PM_IRQ) {
-		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
-		if (iir & dev_priv->pm_rps_events) {
+		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
+		if (gt_iir[2] & dev_priv->pm_rps_events) {
 			I915_WRITE_FW(GEN8_GT_IIR(2),
-				      iir & dev_priv->pm_rps_events);
+				      gt_iir[2] & dev_priv->pm_rps_events);
 			ret = IRQ_HANDLED;
-			gen6_rps_irq_handler(dev_priv, iir);
 		} else
 			DRM_ERROR("The master control interrupt lied (PM)!\n");
 	}
@@ -1388,6 +1370,31 @@
 	return ret;
 }
 
+static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
+				u32 gt_iir[4])
+{
+	if (gt_iir[0]) {
+		gen8_cs_irq_handler(&dev_priv->engine[RCS],
+				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+		gen8_cs_irq_handler(&dev_priv->engine[BCS],
+				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+	}
+
+	if (gt_iir[1]) {
+		gen8_cs_irq_handler(&dev_priv->engine[VCS],
+				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+	}
+
+	if (gt_iir[3])
+		gen8_cs_irq_handler(&dev_priv->engine[VECS],
+				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+
+	if (gt_iir[2] & dev_priv->pm_rps_events)
+		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+}
+
 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
@@ -1627,9 +1634,9 @@
 	if (INTEL_INFO(dev_priv)->gen >= 8)
 		return;
 
-	if (HAS_VEBOX(dev_priv->dev)) {
+	if (HAS_VEBOX(dev_priv)) {
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[VECS]);
+			notify_ring(&dev_priv->engine[VECS]);
 
 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1644,10 +1651,10 @@
 	return true;
 }
 
-static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
+					u32 pipe_stats[I915_MAX_PIPES])
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pipe_stats[I915_MAX_PIPES] = { };
 	int pipe;
 
 	spin_lock(&dev_priv->irq_lock);
@@ -1701,6 +1708,13 @@
 			I915_WRITE(reg, pipe_stats[pipe]);
 	}
 	spin_unlock(&dev_priv->irq_lock);
+}
+
+static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+					    u32 pipe_stats[I915_MAX_PIPES])
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -1723,22 +1737,21 @@
 		gmbus_irq_handler(dev);
 }
 
-static void i9xx_hpd_irq_handler(struct drm_device *dev)
+static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+	if (hotplug_status)
+		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+
+	return hotplug_status;
+}
+
+static void i9xx_hpd_irq_handler(struct drm_device *dev,
+				 u32 hotplug_status)
+{
 	u32 pin_mask = 0, long_mask = 0;
 
-	if (!hotplug_status)
-		return;
-
-	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-	/*
-	 * Make sure hotplug status is cleared before we clear IIR, or else we
-	 * may miss hotplug events.
-	 */
-	POSTING_READ(PORT_HOTPLUG_STAT);
-
 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
@@ -1768,59 +1781,6 @@
 {
 	struct drm_device *dev = arg;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 iir, gt_iir, pm_iir;
-	irqreturn_t ret = IRQ_NONE;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return IRQ_NONE;
-
-	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
-
-	while (true) {
-		/* Find, clear, then process each source of interrupt */
-
-		gt_iir = I915_READ(GTIIR);
-		if (gt_iir)
-			I915_WRITE(GTIIR, gt_iir);
-
-		pm_iir = I915_READ(GEN6_PMIIR);
-		if (pm_iir)
-			I915_WRITE(GEN6_PMIIR, pm_iir);
-
-		iir = I915_READ(VLV_IIR);
-		if (iir) {
-			/* Consume port before clearing IIR or we'll miss events */
-			if (iir & I915_DISPLAY_PORT_INTERRUPT)
-				i9xx_hpd_irq_handler(dev);
-			I915_WRITE(VLV_IIR, iir);
-		}
-
-		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
-			goto out;
-
-		ret = IRQ_HANDLED;
-
-		if (gt_iir)
-			snb_gt_irq_handler(dev, dev_priv, gt_iir);
-		if (pm_iir)
-			gen6_rps_irq_handler(dev_priv, pm_iir);
-		/* Call regardless, as some status bits might not be
-		 * signalled in iir */
-		valleyview_pipestat_irq_handler(dev, iir);
-	}
-
-out:
-	enable_rpm_wakeref_asserts(dev_priv);
-
-	return ret;
-}
-
-static irqreturn_t cherryview_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 master_ctl, iir;
 	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1830,6 +1790,95 @@
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	do {
+		u32 iir, gt_iir, pm_iir;
+		u32 pipe_stats[I915_MAX_PIPES] = {};
+		u32 hotplug_status = 0;
+		u32 ier = 0;
+
+		gt_iir = I915_READ(GTIIR);
+		pm_iir = I915_READ(GEN6_PMIIR);
+		iir = I915_READ(VLV_IIR);
+
+		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+			break;
+
+		ret = IRQ_HANDLED;
+
+		/*
+		 * Theory on interrupt generation, based on empirical evidence:
+		 *
+		 * x = ((VLV_IIR & VLV_IER) ||
+		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
+		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
+		 *
+		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
+		 * guarantee the CPU interrupt will be raised again even if we
+		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
+		 * bits this time around.
+		 */
+		I915_WRITE(VLV_MASTER_IER, 0);
+		ier = I915_READ(VLV_IER);
+		I915_WRITE(VLV_IER, 0);
+
+		if (gt_iir)
+			I915_WRITE(GTIIR, gt_iir);
+		if (pm_iir)
+			I915_WRITE(GEN6_PMIIR, pm_iir);
+
+		if (iir & I915_DISPLAY_PORT_INTERRUPT)
+			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
+		/* Call regardless, as some status bits might not be
+		 * signalled in iir */
+		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+
+		/*
+		 * VLV_IIR is single buffered, and reflects the level
+		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+		 */
+		if (iir)
+			I915_WRITE(VLV_IIR, iir);
+
+		I915_WRITE(VLV_IER, ier);
+		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+		POSTING_READ(VLV_MASTER_IER);
+
+		if (gt_iir)
+			snb_gt_irq_handler(dev_priv, gt_iir);
+		if (pm_iir)
+			gen6_rps_irq_handler(dev_priv, pm_iir);
+
+		if (hotplug_status)
+			i9xx_hpd_irq_handler(dev, hotplug_status);
+
+		valleyview_pipestat_irq_handler(dev, pipe_stats);
+	} while (0);
+
+	enable_rpm_wakeref_asserts(dev_priv);
+
+	return ret;
+}
+
+static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
+	disable_rpm_wakeref_asserts(dev_priv);
+
+	do {
+		u32 master_ctl, iir;
+		u32 gt_iir[4] = {};
+		u32 pipe_stats[I915_MAX_PIPES] = {};
+		u32 hotplug_status = 0;
+		u32 ier = 0;
+
 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
 		iir = I915_READ(VLV_IIR);
 
@@ -1838,25 +1887,49 @@
 
 		ret = IRQ_HANDLED;
 
+		/*
+		 * Theory on interrupt generation, based on empirical evidence:
+		 *
+		 * x = ((VLV_IIR & VLV_IER) ||
+		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
+		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
+		 *
+		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
+		 * guarantee the CPU interrupt will be raised again even if we
+		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
+		 * bits this time around.
+		 */
 		I915_WRITE(GEN8_MASTER_IRQ, 0);
+		ier = I915_READ(VLV_IER);
+		I915_WRITE(VLV_IER, 0);
 
-		/* Find, clear, then process each source of interrupt */
+		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
 
-		if (iir) {
-			/* Consume port before clearing IIR or we'll miss events */
-			if (iir & I915_DISPLAY_PORT_INTERRUPT)
-				i9xx_hpd_irq_handler(dev);
-			I915_WRITE(VLV_IIR, iir);
-		}
-
-		gen8_gt_irq_handler(dev_priv, master_ctl);
+		if (iir & I915_DISPLAY_PORT_INTERRUPT)
+			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
-		valleyview_pipestat_irq_handler(dev, iir);
+		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
 
-		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+		/*
+		 * VLV_IIR is single buffered, and reflects the level
+		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+		 */
+		if (iir)
+			I915_WRITE(VLV_IIR, iir);
+
+		I915_WRITE(VLV_IER, ier);
+		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 		POSTING_READ(GEN8_MASTER_IRQ);
+
+		gen8_gt_irq_handler(dev_priv, gt_iir);
+
+		if (hotplug_status)
+			i9xx_hpd_irq_handler(dev, hotplug_status);
+
+		valleyview_pipestat_irq_handler(dev, pipe_stats);
 	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -2217,9 +2290,9 @@
 		I915_WRITE(GTIIR, gt_iir);
 		ret = IRQ_HANDLED;
 		if (INTEL_INFO(dev)->gen >= 6)
-			snb_gt_irq_handler(dev, dev_priv, gt_iir);
+			snb_gt_irq_handler(dev_priv, gt_iir);
 		else
-			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+			ilk_gt_irq_handler(dev_priv, gt_iir);
 	}
 
 	de_iir = I915_READ(DEIIR);
@@ -2419,6 +2492,7 @@
 	struct drm_device *dev = arg;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 master_ctl;
+	u32 gt_iir[4] = {};
 	irqreturn_t ret;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -2435,7 +2509,8 @@
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	/* Find, clear, then process each source of interrupt */
-	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+	gen8_gt_irq_handler(dev_priv, gt_iir);
 	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
 
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2449,8 +2524,7 @@
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
 	/*
 	 * Notify all waiters for GPU completion events that reset state has
@@ -2460,8 +2534,8 @@
 	 */
 
 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	for_each_ring(ring, dev_priv, i)
-		wake_up_all(&ring->irq_queue);
+	for_each_engine(engine, dev_priv)
+		wake_up_all(&engine->irq_queue);
 
 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
 	wake_up_all(&dev_priv->pending_flip_queue);
@@ -2484,7 +2558,6 @@
 static void i915_reset_and_wakeup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2502,7 +2575,7 @@
 	 * the reset in-progress bit is only ever set by code outside of this
 	 * work we don't need to worry about any other races.
 	 */
-	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 				   reset_event);
@@ -2530,25 +2603,9 @@
 
 		intel_runtime_pm_put(dev_priv);
 
-		if (ret == 0) {
-			/*
-			 * After all the gem state is reset, increment the reset
-			 * counter and wake up everyone waiting for the reset to
-			 * complete.
-			 *
-			 * Since unlock operations are a one-sided barrier only,
-			 * we need to insert a barrier here to order any seqno
-			 * updates before
-			 * the counter increment.
-			 */
-			smp_mb__before_atomic();
-			atomic_inc(&dev_priv->gpu_error.reset_counter);
-
+		if (ret == 0)
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
-		} else {
-			atomic_or(I915_WEDGED, &error->reset_counter);
-		}
 
 		/*
 		 * Note: The wake_up also serves as a memory barrier so that
@@ -2653,14 +2710,14 @@
 /**
  * i915_handle_error - handle a gpu error
  * @dev: drm device
- *
+ * @engine_mask: mask representing engines that are hung
  * Do some basic checking of register state at error time and
  * dump it to the syslog.  Also call i915_capture_error_state() to make
  * sure we get a record and make it available in debugfs.  Fire a uevent
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 		       const char *fmt, ...)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2671,10 +2728,10 @@
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);
 
-	i915_capture_error_state(dev, wedged, error_msg);
+	i915_capture_error_state(dev, engine_mask, error_msg);
 	i915_report_and_clear_eir(dev);
 
-	if (wedged) {
+	if (engine_mask) {
 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
 				&dev_priv->gpu_error.reset_counter);
 
@@ -2805,10 +2862,10 @@
 }
 
 static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(seqno, ring->last_submitted_seqno));
+	return i915_seqno_passed(seqno,
+				 READ_ONCE(engine->last_submitted_seqno));
 }
 
 static bool
@@ -2824,42 +2881,42 @@
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
-	int i;
 
-	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-		for_each_ring(signaller, dev_priv, i) {
-			if (ring == signaller)
+	if (INTEL_INFO(dev_priv)->gen >= 8) {
+		for_each_engine(signaller, dev_priv) {
+			if (engine == signaller)
 				continue;
 
-			if (offset == signaller->semaphore.signal_ggtt[ring->id])
+			if (offset == signaller->semaphore.signal_ggtt[engine->id])
 				return signaller;
 		}
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-		for_each_ring(signaller, dev_priv, i) {
-			if(ring == signaller)
+		for_each_engine(signaller, dev_priv) {
+			if (engine == signaller)
 				continue;
 
-			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
 				return signaller;
 		}
 	}
 
 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-		  ring->id, ipehr, offset);
+		  engine->id, ipehr, offset);
 
 	return NULL;
 }
 
 static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2881,11 +2938,11 @@
 	 * Therefore, this function does not support execlist mode in its
 	 * current form. Just return NULL and move on.
 	 */
-	if (ring->buffer == NULL)
+	if (engine->buffer == NULL)
 		return NULL;
 
-	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
 		return NULL;
 
 	/*
@@ -2896,8 +2953,8 @@
 	 * point at a batch, and semaphores are always emitted into the
 	 * ringbuffer itself.
 	 */
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+	head = I915_READ_HEAD(engine) & HEAD_ADDR;
+	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2905,10 +2962,10 @@
 		 * our ring is smaller than what the hardware (and hence
 		 * HEAD_ADDR) allows. Also handles wrap-around.
 		 */
-		head &= ring->buffer->size - 1;
+		head &= engine->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->buffer->virtual_start + head);
+		cmd = ioread32(engine->buffer->virtual_start + head);
 		if (cmd == ipehr)
 			break;
 
@@ -2918,32 +2975,32 @@
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		offset = ioread32(ring->buffer->virtual_start + head + 12);
+	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+	if (INTEL_INFO(engine->dev)->gen >= 8) {
+		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
-		offset = ioread32(ring->buffer->virtual_start + head + 8);
+		offset = ioread32(engine->buffer->virtual_start + head + 8);
 	}
-	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
 }
 
-static int semaphore_passed(struct intel_engine_cs *ring)
+static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
-	ring->hangcheck.deadlock++;
+	engine->hangcheck.deadlock++;
 
-	signaller = semaphore_waits_for(ring, &seqno);
+	signaller = semaphore_waits_for(engine, &seqno);
 	if (signaller == NULL)
 		return -1;
 
 	/* Prevent pathological recursion due to driver bugs */
-	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
 		return -1;
 
-	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+	if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
 		return 1;
 
 	/* cursory check for an unkickable deadlock */
@@ -2956,23 +3013,22 @@
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, i)
-		ring->hangcheck.deadlock = 0;
+	for_each_engine(engine, dev_priv)
+		engine->hangcheck.deadlock = 0;
 }
 
-static bool subunits_stuck(struct intel_engine_cs *ring)
+static bool subunits_stuck(struct intel_engine_cs *engine)
 {
 	u32 instdone[I915_NUM_INSTDONE_REG];
 	bool stuck;
 	int i;
 
-	if (ring->id != RCS)
+	if (engine->id != RCS)
 		return true;
 
-	i915_get_extra_instdone(ring->dev, instdone);
+	i915_get_extra_instdone(engine->dev, instdone);
 
 	/* There might be unstable subunit states even when
 	 * actual head is not moving. Filter out the unstable ones by
@@ -2981,49 +3037,44 @@
 	 */
 	stuck = true;
 	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
-		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
 
-		if (tmp != ring->hangcheck.instdone[i])
+		if (tmp != engine->hangcheck.instdone[i])
 			stuck = false;
 
-		ring->hangcheck.instdone[i] |= tmp;
+		engine->hangcheck.instdone[i] |= tmp;
 	}
 
 	return stuck;
 }
 
 static enum intel_ring_hangcheck_action
-head_stuck(struct intel_engine_cs *ring, u64 acthd)
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	if (acthd != ring->hangcheck.acthd) {
+	if (acthd != engine->hangcheck.acthd) {
 
 		/* Clear subunit states on head movement */
-		memset(ring->hangcheck.instdone, 0,
-		       sizeof(ring->hangcheck.instdone));
+		memset(engine->hangcheck.instdone, 0,
+		       sizeof(engine->hangcheck.instdone));
 
-		if (acthd > ring->hangcheck.max_acthd) {
-			ring->hangcheck.max_acthd = acthd;
-			return HANGCHECK_ACTIVE;
-		}
-
-		return HANGCHECK_ACTIVE_LOOP;
+		return HANGCHECK_ACTIVE;
 	}
 
-	if (!subunits_stuck(ring))
+	if (!subunits_stuck(engine))
 		return HANGCHECK_ACTIVE;
 
 	return HANGCHECK_HUNG;
 }
 
 static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;
 
-	ha = head_stuck(ring, acthd);
+	ha = head_stuck(engine, acthd);
 	if (ha != HANGCHECK_HUNG)
 		return ha;
 
@@ -3035,24 +3086,24 @@
 	 * and break the hang. This should work on
 	 * all but the second generation chipsets.
 	 */
-	tmp = I915_READ_CTL(ring);
+	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev, false,
+		i915_handle_error(dev, 0,
 				  "Kicking stuck wait on %s",
-				  ring->name);
-		I915_WRITE_CTL(ring, tmp);
+				  engine->name);
+		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-		switch (semaphore_passed(ring)) {
+		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
-			i915_handle_error(dev, false,
+			i915_handle_error(dev, 0,
 					  "Kicking stuck semaphore on %s",
-					  ring->name);
-			I915_WRITE_CTL(ring, tmp);
+					  engine->name);
+			I915_WRITE_CTL(engine, tmp);
 			return HANGCHECK_KICK;
 		case 0:
 			return HANGCHECK_WAIT;
@@ -3062,6 +3113,24 @@
 	return HANGCHECK_HUNG;
 }
 
+static unsigned kick_waiters(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = to_i915(engine->dev);
+	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+
+	if (engine->hangcheck.user_interrupts == user_interrupts &&
+	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
+		if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+				  engine->name);
+		else
+			DRM_INFO("Fake missed irq on %s\n",
+				 engine->name);
+		wake_up_all(&engine->irq_queue);
+	}
+
+	return user_interrupts;
+}
 /*
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. We keep track per ring seqno progress and
@@ -3076,13 +3145,14 @@
 		container_of(work, typeof(*dev_priv),
 			     gpu_error.hangcheck_work.work);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int busy_count = 0, rings_hung = 0;
-	bool stuck[I915_NUM_RINGS] = { 0 };
+	bool stuck[I915_NUM_ENGINES] = { 0 };
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
+#define ACTIVE_DECAY 15
 
 	if (!i915.enable_hangcheck)
 		return;
@@ -3100,33 +3170,37 @@
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine_id(engine, dev_priv, id) {
 		u64 acthd;
 		u32 seqno;
+		unsigned user_interrupts;
 		bool busy = true;
 
 		semaphore_clear_deadlocks(dev_priv);
 
-		seqno = ring->get_seqno(ring, false);
-		acthd = intel_ring_get_active_head(ring);
+		/* We don't strictly need an irq-barrier here, as we are not
+		 * serving an interrupt request, but be paranoid in case the
+		 * barrier has side-effects (such as preventing a broken
+		 * cacheline snoop) and so be sure that we can see the seqno
+		 * advance. If the seqno should stick, due to a stale
+		 * cacheline, we would erroneously declare the GPU hung.
+		 */
+		if (engine->irq_seqno_barrier)
+			engine->irq_seqno_barrier(engine);
 
-		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring, seqno)) {
-				ring->hangcheck.action = HANGCHECK_IDLE;
+		acthd = intel_ring_get_active_head(engine);
+		seqno = engine->get_seqno(engine);
 
-				if (waitqueue_active(&ring->irq_queue)) {
-					/* Issue a wake-up to catch stuck h/w. */
-					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
-						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
-							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-								  ring->name);
-						else
-							DRM_INFO("Fake missed irq on %s\n",
-								 ring->name);
-						wake_up_all(&ring->irq_queue);
-					}
+		/* Reset stuck interrupts between batch advances */
+		user_interrupts = 0;
+
+		if (engine->hangcheck.seqno == seqno) {
+			if (ring_idle(engine, seqno)) {
+				engine->hangcheck.action = HANGCHECK_IDLE;
+				if (waitqueue_active(&engine->irq_queue)) {
 					/* Safeguard against driver failure */
-					ring->hangcheck.score += BUSY;
+					user_interrupts = kick_waiters(engine);
+					engine->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -3145,58 +3219,60 @@
 				 * being repeatedly kicked and so responsible
 				 * for stalling the machine.
 				 */
-				ring->hangcheck.action = ring_stuck(ring,
-								    acthd);
+				engine->hangcheck.action = ring_stuck(engine,
+								      acthd);
 
-				switch (ring->hangcheck.action) {
+				switch (engine->hangcheck.action) {
 				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
-				case HANGCHECK_ACTIVE:
 					break;
-				case HANGCHECK_ACTIVE_LOOP:
-					ring->hangcheck.score += BUSY;
+				case HANGCHECK_ACTIVE:
+					engine->hangcheck.score += BUSY;
 					break;
 				case HANGCHECK_KICK:
-					ring->hangcheck.score += KICK;
+					engine->hangcheck.score += KICK;
 					break;
 				case HANGCHECK_HUNG:
-					ring->hangcheck.score += HUNG;
-					stuck[i] = true;
+					engine->hangcheck.score += HUNG;
+					stuck[id] = true;
 					break;
 				}
 			}
 		} else {
-			ring->hangcheck.action = HANGCHECK_ACTIVE;
+			engine->hangcheck.action = HANGCHECK_ACTIVE;
 
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
-			if (ring->hangcheck.score > 0)
-				ring->hangcheck.score--;
+			if (engine->hangcheck.score > 0)
+				engine->hangcheck.score -= ACTIVE_DECAY;
+			if (engine->hangcheck.score < 0)
+				engine->hangcheck.score = 0;
 
 			/* Clear head and subunit states on seqno movement */
-			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+			acthd = 0;
 
-			memset(ring->hangcheck.instdone, 0,
-			       sizeof(ring->hangcheck.instdone));
+			memset(engine->hangcheck.instdone, 0,
+			       sizeof(engine->hangcheck.instdone));
 		}
 
-		ring->hangcheck.seqno = seqno;
-		ring->hangcheck.acthd = acthd;
+		engine->hangcheck.seqno = seqno;
+		engine->hangcheck.acthd = acthd;
+		engine->hangcheck.user_interrupts = user_interrupts;
 		busy_count += busy;
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+	for_each_engine_id(engine, dev_priv, id) {
+		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
 			DRM_INFO("%s on %s\n",
-				 stuck[i] ? "stuck" : "no progress",
-				 ring->name);
-			rings_hung++;
+				 stuck[id] ? "stuck" : "no progress",
+				 engine->name);
+			rings_hung |= intel_engine_flag(engine);
 		}
 	}
 
 	if (rings_hung) {
-		i915_handle_error(dev, true, "Ring hung");
+		i915_handle_error(dev, rings_hung, "Engine(s) hung");
 		goto out;
 	}
 
@@ -3267,6 +3343,55 @@
 		GEN5_IRQ_RESET(GEN6_PM);
 }
 
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+	else
+		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+
+	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	for_each_pipe(dev_priv, pipe) {
+		I915_WRITE(PIPESTAT(pipe),
+			   PIPE_FIFO_UNDERRUN_STATUS |
+			   PIPESTAT_INT_STATUS_MASK);
+		dev_priv->pipestat_irq_mask[pipe] = 0;
+	}
+
+	GEN5_IRQ_RESET(VLV_);
+	dev_priv->irq_mask = ~0;
+}
+
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	u32 pipestat_mask;
+	u32 enable_mask;
+	enum pipe pipe;
+
+	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+			PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+	for_each_pipe(dev_priv, pipe)
+		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+
+	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+	if (IS_CHERRYVIEW(dev_priv))
+		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+
+	WARN_ON(dev_priv->irq_mask != ~0);
+
+	dev_priv->irq_mask = ~enable_mask;
+
+	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+}
+
 /* drm_dma.h hooks
 */
 static void ironlake_irq_reset(struct drm_device *dev)
@@ -3284,34 +3409,19 @@
 	ibx_irq_reset(dev);
 }
 
-static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
-{
-	enum pipe pipe;
-
-	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
-	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
-	for_each_pipe(dev_priv, pipe)
-		I915_WRITE(PIPESTAT(pipe), 0xffff);
-
-	GEN5_IRQ_RESET(VLV_);
-}
-
 static void valleyview_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* VLV magic */
-	I915_WRITE(VLV_IMR, 0);
-	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
-	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
-	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+	I915_WRITE(VLV_MASTER_IER, 0);
+	POSTING_READ(VLV_MASTER_IER);
 
 	gen5_gt_irq_reset(dev);
 
-	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
-
-	vlv_display_irq_reset(dev_priv);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		vlv_display_irq_reset(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3384,9 +3494,10 @@
 
 	GEN5_IRQ_RESET(GEN8_PCU_);
 
-	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
-
-	vlv_display_irq_reset(dev_priv);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		vlv_display_irq_reset(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
@@ -3506,6 +3617,26 @@
 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
 		PORTA_HOTPLUG_ENABLE;
+
+	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
+		      hotplug, enabled_irqs);
+	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
+
+	/*
+	 * For BXT, the invert bit has to be set based on the AOB design
+	 * for the HPD detection logic; update it based on the VBT fields.
+	 */
+
+	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
+	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
+		hotplug |= BXT_DDIA_HPD_INVERT;
+	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
+	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
+		hotplug |= BXT_DDIB_HPD_INVERT;
+	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
+	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
+		hotplug |= BXT_DDIC_HPD_INVERT;
+
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
@@ -3613,74 +3744,6 @@
 	return 0;
 }
 
-static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
-{
-	u32 pipestat_mask;
-	u32 iir_mask;
-	enum pipe pipe;
-
-	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
-			PIPE_FIFO_UNDERRUN_STATUS;
-
-	for_each_pipe(dev_priv, pipe)
-		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
-	POSTING_READ(PIPESTAT(PIPE_A));
-
-	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
-			PIPE_CRC_DONE_INTERRUPT_STATUS;
-
-	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-	for_each_pipe(dev_priv, pipe)
-		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
-
-	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
-		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
-	if (IS_CHERRYVIEW(dev_priv))
-		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-	dev_priv->irq_mask &= ~iir_mask;
-
-	I915_WRITE(VLV_IIR, iir_mask);
-	I915_WRITE(VLV_IIR, iir_mask);
-	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	POSTING_READ(VLV_IMR);
-}
-
-static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
-{
-	u32 pipestat_mask;
-	u32 iir_mask;
-	enum pipe pipe;
-
-	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
-		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
-	if (IS_CHERRYVIEW(dev_priv))
-		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-
-	dev_priv->irq_mask |= iir_mask;
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-	I915_WRITE(VLV_IIR, iir_mask);
-	I915_WRITE(VLV_IIR, iir_mask);
-	POSTING_READ(VLV_IIR);
-
-	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
-			PIPE_CRC_DONE_INTERRUPT_STATUS;
-
-	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
-	for_each_pipe(dev_priv, pipe)
-		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
-
-	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
-			PIPE_FIFO_UNDERRUN_STATUS;
-
-	for_each_pipe(dev_priv, pipe)
-		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
-	POSTING_READ(PIPESTAT(PIPE_A));
-}
-
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 {
 	assert_spin_locked(&dev_priv->irq_lock);
@@ -3690,8 +3753,10 @@
 
 	dev_priv->display_irqs_enabled = true;
 
-	if (intel_irqs_enabled(dev_priv))
-		valleyview_display_irqs_install(dev_priv);
+	if (intel_irqs_enabled(dev_priv)) {
+		vlv_display_irq_reset(dev_priv);
+		vlv_display_irq_postinstall(dev_priv);
+	}
 }
 
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
@@ -3704,45 +3769,23 @@
 	dev_priv->display_irqs_enabled = false;
 
 	if (intel_irqs_enabled(dev_priv))
-		valleyview_display_irqs_uninstall(dev_priv);
+		vlv_display_irq_reset(dev_priv);
 }
 
-static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
-{
-	dev_priv->irq_mask = ~0;
-
-	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
-	POSTING_READ(PORT_HOTPLUG_EN);
-
-	I915_WRITE(VLV_IIR, 0xffffffff);
-	I915_WRITE(VLV_IIR, 0xffffffff);
-	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	POSTING_READ(VLV_IMR);
-
-	/* Interrupt setup is already guaranteed to be single-threaded, this is
-	 * just to make the assert_spin_locked check happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display_irqs_enabled)
-		valleyview_display_irqs_install(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	vlv_display_irq_postinstall(dev_priv);
-
 	gen5_gt_irq_postinstall(dev);
 
-	/* ack & enable invalid PTE error interrupts */
-#if 0 /* FIXME: add support to irq handler for checking these bits */
-	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
-	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
-#endif
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		vlv_display_irq_postinstall(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
 
 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+	POSTING_READ(VLV_MASTER_IER);
 
 	return 0;
 }
@@ -3753,7 +3796,6 @@
 	uint32_t gt_interrupts[] = {
 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
@@ -3765,6 +3807,9 @@
 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
 		};
 
+	if (HAS_L3_DPF(dev_priv))
+		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
 	dev_priv->pm_irq_mask = 0xffffffff;
 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
@@ -3832,7 +3877,7 @@
 	if (HAS_PCH_SPLIT(dev))
 		ibx_irq_postinstall(dev);
 
-	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 	POSTING_READ(GEN8_MASTER_IRQ);
 
 	return 0;
@@ -3842,11 +3887,14 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	vlv_display_irq_postinstall(dev_priv);
-
 	gen8_gt_irq_postinstall(dev_priv);
 
-	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		vlv_display_irq_postinstall(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 	POSTING_READ(GEN8_MASTER_IRQ);
 
 	return 0;
@@ -3862,20 +3910,6 @@
 	gen8_irq_reset(dev);
 }
 
-static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-	/* Interrupt setup is already guaranteed to be single-threaded, this is
-	 * just to make the assert_spin_locked check happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display_irqs_enabled)
-		valleyview_display_irqs_uninstall(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	vlv_display_irq_reset(dev_priv);
-
-	dev_priv->irq_mask = ~0;
-}
-
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3884,12 +3918,16 @@
 		return;
 
 	I915_WRITE(VLV_MASTER_IER, 0);
+	POSTING_READ(VLV_MASTER_IER);
 
 	gen5_gt_irq_reset(dev);
 
 	I915_WRITE(HWSTAM, 0xffffffff);
 
-	vlv_display_irq_uninstall(dev_priv);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		vlv_display_irq_reset(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void cherryview_irq_uninstall(struct drm_device *dev)
@@ -3906,7 +3944,10 @@
 
 	GEN5_IRQ_RESET(GEN8_PCU_);
 
-	vlv_display_irq_uninstall(dev_priv);
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display_irqs_enabled)
+		vlv_display_irq_reset(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -4044,7 +4085,7 @@
 		new_iir = I915_READ16(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[RCS]);
+			notify_ring(&dev_priv->engine[RCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
@@ -4233,14 +4274,17 @@
 
 		/* Consume port.  Then clear IIR or we'll miss events */
 		if (I915_HAS_HOTPLUG(dev) &&
-		    iir & I915_DISPLAY_PORT_INTERRUPT)
-			i9xx_hpd_irq_handler(dev);
+		    iir & I915_DISPLAY_PORT_INTERRUPT) {
+			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+			if (hotplug_status)
+				i9xx_hpd_irq_handler(dev, hotplug_status);
+		}
 
 		I915_WRITE(IIR, iir & ~flip_mask);
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[RCS]);
+			notify_ring(&dev_priv->engine[RCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			int plane = pipe;
@@ -4463,16 +4507,19 @@
 		ret = IRQ_HANDLED;
 
 		/* Consume port.  Then clear IIR or we'll miss events */
-		if (iir & I915_DISPLAY_PORT_INTERRUPT)
-			i9xx_hpd_irq_handler(dev);
+		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+			if (hotplug_status)
+				i9xx_hpd_irq_handler(dev, hotplug_status);
+		}
 
 		I915_WRITE(IIR, iir & ~flip_mask);
 		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[RCS]);
+			notify_ring(&dev_priv->engine[RCS]);
 		if (iir & I915_BSD_USER_INTERRUPT)
-			notify_ring(&dev_priv->ring[VCS]);
+			notify_ring(&dev_priv->engine[VCS]);
 
 		for_each_pipe(dev_priv, pipe) {
 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4567,8 +4614,6 @@
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);
 
-	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
-
 	if (IS_GEN2(dev_priv)) {
 		dev->max_vblank_count = 0;
 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
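
The i915_irq.c rework above repeatedly applies the same edge-triggered pattern: mask the master enable and VLV_IER, read and acknowledge every pending IIR source while masked, restore the enables, and only then run the heavier per-source handlers (the ack/handle split behind i9xx_hpd_irq_ack()/i9xx_hpd_irq_handler() and gen8_gt_irq_ack()/gen8_gt_irq_handler()). A minimal standalone sketch of that ordering follows; the registers and helpers are invented for illustration and are not the driver's real I915_READ/I915_WRITE accessors.

/*
 * Standalone sketch (not driver code): masked ack-then-handle ordering.
 * All register names and helpers below are made up.
 */
#include <stdint.h>

enum { REG_MASTER_IER, REG_IIR, REG_HOTPLUG_STAT, NUM_REGS };
#define PORT_HOTPLUG_BIT	(1u << 17)
#define MASTER_ENABLE		(1u << 31)

static uint32_t regs[NUM_REGS];

static uint32_t read_reg(int r)             { return regs[r]; }
static void write_reg(int r, uint32_t v)    { regs[r] = v; }
static void handle_hotplug(uint32_t status) { (void)status; }

static void irq_ack_then_handle(void)
{
	uint32_t iir, hotplug = 0;

	write_reg(REG_MASTER_IER, 0);        /* mask, so a fresh 0->1 edge is guaranteed */
	iir = read_reg(REG_IIR);             /* latch pending sources while masked */
	if (iir & PORT_HOTPLUG_BIT)
		hotplug = read_reg(REG_HOTPLUG_STAT); /* ack level-derived status first */
	write_reg(REG_IIR, iir);             /* clear IIR last; it mirrors the level */
	write_reg(REG_MASTER_IER, MASTER_ENABLE); /* unmask before the slow work */

	if (hotplug)
		handle_hotplug(hotplug);     /* heavier processing after re-enabling */
}
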
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 278c9c4..1779f02 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -56,6 +56,8 @@
 	.edp_vswing = 0,
 	.enable_guc_submission = false,
 	.guc_log_level = -1,
+	.enable_dp_mst = true,
+	.inject_load_failure = 0,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -201,3 +203,10 @@
 module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
 MODULE_PARM_DESC(guc_log_level,
 	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+
+module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
+MODULE_PARM_DESC(enable_dp_mst,
+	"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
+MODULE_PARM_DESC(inject_load_failure,
+	"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
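
The inject_load_failure parameter added above counts driver-load check points and forces an error at the Nth one so the error-unwind paths can be exercised. A rough standalone sketch of how such a counter can be consulted is below; the function names and error code are invented for illustration and not taken from the driver.

/*
 * Standalone sketch (not driver code): a load-failure injection counter.
 * All names below are made up.
 */
#include <stdbool.h>
#include <errno.h>

static unsigned int inject_load_failure;	/* 0 = disabled, N = fail at the Nth check point */
static unsigned int check_points_seen;

static bool should_inject_failure(void)
{
	if (!inject_load_failure)
		return false;
	return ++check_points_seen == inject_load_failure;	/* fail exactly once */
}

static int init_one_subsystem(void)
{
	if (should_inject_failure())
		return -ENODEV;		/* take this subsystem's unwind path */
	/* ... real initialisation would go here ... */
	return 0;
}
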
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index bd5026b..02bc278 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -49,6 +49,7 @@
 	int use_mmio_flip;
 	int mmio_debug;
 	int edp_vswing;
+	unsigned int inject_load_failure;
 	/* leave bools at the end to not create holes */
 	bool enable_hangcheck;
 	bool fastboot;
@@ -59,6 +60,7 @@
 	bool enable_guc_submission;
 	bool verbose_state_checks;
 	bool nuclear_pageflip;
+	bool enable_dp_mst;
 };
 
 extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 363bd79..b407411 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -79,6 +79,16 @@
 
 /* PCI config space */
 
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4 * 4096)
+
+#define DEVEN 0x54
+#define   DEVEN_MCHBAR_EN (1 << 28)
+
+#define BSM 0x5c
+#define   BSM_MASK (0xFFFF << 20)
+
 #define HPLLCC	0xc0 /* 85x only */
 #define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
 #define   GC_CLOCK_133_200		(0 << 0)
@@ -90,6 +100,16 @@
 #define   GC_CLOCK_166_266		(6 << 0)
 #define   GC_CLOCK_166_250		(7 << 0)
 
+#define I915_GDRST 0xc0 /* PCI config register */
+#define   GRDOM_FULL		(0 << 2)
+#define   GRDOM_RENDER		(1 << 2)
+#define   GRDOM_MEDIA		(3 << 2)
+#define   GRDOM_MASK		(3 << 2)
+#define   GRDOM_RESET_STATUS	(1 << 1)
+#define   GRDOM_RESET_ENABLE	(1 << 0)
+
+#define GCDGMBUS 0xcc
+
 #define GCFGC2	0xda
 #define GCFGC	0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
@@ -121,18 +141,16 @@
 #define   I915_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
 #define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
 #define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
-#define GCDGMBUS 0xcc
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
 
+#define ASLE	0xe4
+#define ASLS	0xfc
 
-/* Graphics reset regs */
-#define I915_GDRST 0xc0 /* PCI config register */
-#define  GRDOM_FULL	(0<<2)
-#define  GRDOM_RENDER	(1<<2)
-#define  GRDOM_MEDIA	(3<<2)
-#define  GRDOM_MASK	(3<<2)
-#define  GRDOM_RESET_STATUS (1<<1)
-#define  GRDOM_RESET_ENABLE (1<<0)
+#define SWSCI	0xe8
+#define   SWSCI_SCISEL	(1 << 15)
+#define   SWSCI_GSSCIE	(1 << 0)
+
+#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
 
 #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
 #define  ILK_GRDOM_FULL		(0<<1)
@@ -164,6 +182,9 @@
 #define  GEN6_GRDOM_RENDER		(1 << 1)
 #define  GEN6_GRDOM_MEDIA		(1 << 2)
 #define  GEN6_GRDOM_BLT			(1 << 3)
+#define  GEN6_GRDOM_VECS		(1 << 4)
+#define  GEN9_GRDOM_GUC			(1 << 5)
+#define  GEN8_GRDOM_MEDIA2		(1 << 7)
 
 #define RING_PP_DIR_BASE(ring)		_MMIO((ring)->mmio_base+0x228)
 #define RING_PP_DIR_BASE_READ(ring)	_MMIO((ring)->mmio_base+0x518)
@@ -586,6 +607,10 @@
 #define GEN7_GPGPU_DISPATCHDIMY         _MMIO(0x2504)
 #define GEN7_GPGPU_DISPATCHDIMZ         _MMIO(0x2508)
 
+/* There are 16 64-bit CS General Purpose Registers */
+#define HSW_CS_GPR(n)                   _MMIO(0x2600 + (n) * 8)
+#define HSW_CS_GPR_UDW(n)               _MMIO(0x2600 + (n) * 8 + 4)
+
 #define OACONTROL _MMIO(0x2360)
 
 #define _GEN7_PIPEA_DE_LOAD_SL	0x70068
@@ -621,6 +646,10 @@
 #define   IOSF_PORT_GPIO_SC			0x48
 #define   IOSF_PORT_GPIO_SUS			0xa8
 #define   IOSF_PORT_CCU				0xa9
+#define   CHV_IOSF_PORT_GPIO_N			0x13
+#define   CHV_IOSF_PORT_GPIO_SE			0x48
+#define   CHV_IOSF_PORT_GPIO_E			0xa8
+#define   CHV_IOSF_PORT_GPIO_SW			0xb2
 #define VLV_IOSF_DATA				_MMIO(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				_MMIO(VLV_DISPLAY_BASE + 0x2108)
 
@@ -785,7 +814,9 @@
 #define  DSI_PLL_M1_DIV_SHIFT			0
 #define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
 #define CCK_CZ_CLOCK_CONTROL			0x62
+#define CCK_GPLL_CLOCK_CONTROL			0x67
 #define CCK_DISPLAY_CLOCK_CONTROL		0x6b
+#define CCK_DISPLAY_REF_CLOCK_CONTROL		0x6c
 #define  CCK_TRUNK_FORCE_ON			(1 << 17)
 #define  CCK_TRUNK_FORCE_OFF			(1 << 16)
 #define  CCK_FREQUENCY_STATUS			(0x1f << 8)
@@ -1317,6 +1348,7 @@
 #define _PORT_CL1CM_DW0_A		0x162000
 #define _PORT_CL1CM_DW0_BC		0x6C000
 #define   PHY_POWER_GOOD		(1 << 16)
+#define   PHY_RESERVED			(1 << 7)
 #define BXT_PORT_CL1CM_DW0(phy)		_BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
 							_PORT_CL1CM_DW0_A)
 
@@ -1361,14 +1393,10 @@
 
 #define _PORT_REF_DW6_A			0x162198
 #define _PORT_REF_DW6_BC		0x6C198
-/*
- * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
- * after testing.
- */
-#define   GRC_CODE_SHIFT		23
-#define   GRC_CODE_MASK			(0x1FF << GRC_CODE_SHIFT)
+#define   GRC_CODE_SHIFT		24
+#define   GRC_CODE_MASK			(0xFF << GRC_CODE_SHIFT)
 #define   GRC_CODE_FAST_SHIFT		16
-#define   GRC_CODE_FAST_MASK		(0x7F << GRC_CODE_FAST_SHIFT)
+#define   GRC_CODE_FAST_MASK		(0xFF << GRC_CODE_FAST_SHIFT)
 #define   GRC_CODE_SLOW_SHIFT		8
 #define   GRC_CODE_SLOW_MASK		(0xFF << GRC_CODE_SLOW_SHIFT)
 #define   GRC_CODE_NOM_MASK		0xFF
@@ -1776,6 +1804,18 @@
 #define   GEN9_IZ_HASHING_MASK(slice)			(0x3 << ((slice) * 2))
 #define   GEN9_IZ_HASHING(slice, val)			((val) << ((slice) * 2))
 
+/* WaClearTdlStateAckDirtyBits */
+#define GEN8_STATE_ACK		_MMIO(0x20F0)
+#define GEN9_STATE_ACK_SLICE1	_MMIO(0x20F8)
+#define GEN9_STATE_ACK_SLICE2	_MMIO(0x2100)
+#define   GEN9_STATE_ACK_TDL0 (1 << 12)
+#define   GEN9_STATE_ACK_TDL1 (1 << 13)
+#define   GEN9_STATE_ACK_TDL2 (1 << 14)
+#define   GEN9_STATE_ACK_TDL3 (1 << 15)
+#define   GEN9_SUBSLICE_TDL_ACK_BITS \
+	(GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
+	 GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
+
 #define GFX_MODE	_MMIO(0x2520)
 #define GFX_MODE_GEN7	_MMIO(0x229c)
 #define RING_MODE_GEN7(ring)	_MMIO((ring)->mmio_base+0x29c)
@@ -1795,6 +1835,7 @@
 
 #define VLV_DISPLAY_BASE 0x180000
 #define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
 
 #define VLV_GU_CTL0	_MMIO(VLV_DISPLAY_BASE + 0x2030)
 #define VLV_GU_CTL1	_MMIO(VLV_DISPLAY_BASE + 0x2034)
@@ -2923,6 +2964,15 @@
 				INTERVAL_1_33_US(us)) : \
 				INTERVAL_1_28_US(us))
 
+#define INTERVAL_1_28_TO_US(interval)  (((interval) << 7) / 100)
+#define INTERVAL_1_33_TO_US(interval)  (((interval) << 2) / 3)
+#define INTERVAL_0_833_TO_US(interval) (((interval) * 5)  / 6)
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+                           (IS_BROXTON(dev_priv) ? \
+                           INTERVAL_0_833_TO_US(interval) : \
+                           INTERVAL_1_33_TO_US(interval)) : \
+                           INTERVAL_1_28_TO_US(interval))
+
 /*
  * Logical Context regs
  */
@@ -4784,6 +4834,10 @@
 #define  CBR_PND_DEADLINE_DISABLE	(1<<31)
 #define  CBR_PWM_CLOCK_MUX_SELECT	(1<<30)
 
+#define CBR4_VLV			_MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define  CBR_DPLLBMD_PIPE_C		(1<<29)
+#define  CBR_DPLLBMD_PIPE_B		(1<<18)
+
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE	64
 #define I915_FIFO_LINE_SIZE	64
@@ -6184,6 +6238,7 @@
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG		_MMIO(0xc4030)	/* SHOTPLUG_CTL */
 #define  PORTA_HOTPLUG_ENABLE		(1 << 28) /* LPT:LP+ & BXT */
+#define  BXT_DDIA_HPD_INVERT            (1 << 27)
 #define  PORTA_HOTPLUG_STATUS_MASK	(3 << 24) /* SPT+ & BXT */
 #define  PORTA_HOTPLUG_NO_DETECT	(0 << 24) /* SPT+ & BXT */
 #define  PORTA_HOTPLUG_SHORT_DETECT	(1 << 24) /* SPT+ & BXT */
@@ -6199,6 +6254,7 @@
 #define  PORTD_HOTPLUG_SHORT_DETECT	(1 << 16)
 #define  PORTD_HOTPLUG_LONG_DETECT	(2 << 16)
 #define  PORTC_HOTPLUG_ENABLE		(1 << 12)
+#define  BXT_DDIC_HPD_INVERT            (1 << 11)
 #define  PORTC_PULSE_DURATION_2ms	(0 << 10) /* pre-LPT */
 #define  PORTC_PULSE_DURATION_4_5ms	(1 << 10) /* pre-LPT */
 #define  PORTC_PULSE_DURATION_6ms	(2 << 10) /* pre-LPT */
@@ -6209,6 +6265,7 @@
 #define  PORTC_HOTPLUG_SHORT_DETECT	(1 << 8)
 #define  PORTC_HOTPLUG_LONG_DETECT	(2 << 8)
 #define  PORTB_HOTPLUG_ENABLE		(1 << 4)
+#define  BXT_DDIB_HPD_INVERT            (1 << 3)
 #define  PORTB_PULSE_DURATION_2ms	(0 << 2) /* pre-LPT */
 #define  PORTB_PULSE_DURATION_4_5ms	(1 << 2) /* pre-LPT */
 #define  PORTB_PULSE_DURATION_6ms	(2 << 2) /* pre-LPT */
@@ -6218,6 +6275,9 @@
 #define  PORTB_HOTPLUG_NO_DETECT	(0 << 0)
 #define  PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
+#define  BXT_DDI_HPD_INVERT_MASK	(BXT_DDIA_HPD_INVERT | \
+					BXT_DDIB_HPD_INVERT | \
+					BXT_DDIC_HPD_INVERT)
 
 #define PCH_PORT_HOTPLUG2		_MMIO(0xc403C)	/* SHOTPLUG_CTL2 SPT+ */
 #define  PORTE_HOTPLUG_ENABLE		(1 << 4)
@@ -6836,6 +6896,8 @@
 #define  VLV_SPAREG2H				_MMIO(0xA194)
 
 #define  GTFIFODBG				_MMIO(0x120000)
+#define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV	(0x1f << 20)
+#define    GT_FIFO_FREE_ENTRIES_CHV		(0x7f << 13)
 #define    GT_FIFO_SBDROPERR			(1<<6)
 #define    GT_FIFO_BLOBDROPERR			(1<<5)
 #define    GT_FIFO_SB_READ_ABORTERR		(1<<4)
@@ -6852,8 +6914,11 @@
 
 #define  HSW_IDICR				_MMIO(0x9008)
 #define    IDIHASHMSK(x)			(((x) & 0x3f) << 16)
-#define  HSW_EDRAM_PRESENT			_MMIO(0x120010)
+#define  HSW_EDRAM_CAP				_MMIO(0x120010)
 #define    EDRAM_ENABLED			0x1
+#define    EDRAM_NUM_BANKS(cap)			(((cap) >> 1) & 0xf)
+#define    EDRAM_WAYS_IDX(cap)			(((cap) >> 5) & 0x7)
+#define    EDRAM_SETS_IDX(cap)			(((cap) >> 8) & 0x3)
 
 #define GEN6_UCGCTL1				_MMIO(0x9400)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE		(1 << 16)
@@ -7109,6 +7174,7 @@
 #define   GEN9_CCS_TLB_PREFETCH_ENABLE	(1<<3)
 
 #define GEN8_ROW_CHICKEN		_MMIO(0xe4f0)
+#define   FLOW_CONTROL_ENABLE		(1<<15)
 #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE	(1<<8)
 #define   STALL_DOP_GATING_DISABLE		(1<<5)
 
@@ -7130,6 +7196,7 @@
 
 #define GEN9_HALF_SLICE_CHICKEN7	_MMIO(0xe194)
 #define   GEN9_ENABLE_YV12_BUGFIX	(1<<4)
+#define   GEN9_ENABLE_GPGPU_PREEMPTION	(1<<2)
 
 /* Audio */
 #define G4X_AUD_VID_DID			_MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -7369,9 +7436,11 @@
 /* SBI offsets */
 #define  SBI_SSCDIVINTPHASE			0x0200
 #define  SBI_SSCDIVINTPHASE6			0x0600
-#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL_SHIFT	1
+#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	(0x7f<<1)
 #define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
-#define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
+#define   SBI_SSCDIVINTPHASE_INCVAL_SHIFT	8
+#define   SBI_SSCDIVINTPHASE_INCVAL_MASK	(0x7f<<8)
 #define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
 #define   SBI_SSCDIVINTPHASE_DIR(x)		((x)<<15)
 #define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
@@ -7381,6 +7450,8 @@
 #define   SBI_SSCCTL_PATHALT			(1<<3)
 #define   SBI_SSCCTL_DISABLE			(1<<0)
 #define  SBI_SSCAUXDIV6				0x0610
+#define   SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT	4
+#define   SBI_SSCAUXDIV_FINALDIV2SEL_MASK	(1<<4)
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
 #define  SBI_DBUFF0				0x2a00
 #define  SBI_GEN0				0x1f00
@@ -7660,6 +7731,59 @@
 #define PIPE_CSC_POSTOFF_ME(pipe)	_MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
 #define PIPE_CSC_POSTOFF_LO(pipe)	_MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
+/* pipe degamma/gamma LUTs on IVB+ */
+#define _PAL_PREC_INDEX_A	0x4A400
+#define _PAL_PREC_INDEX_B	0x4AC00
+#define _PAL_PREC_INDEX_C	0x4B400
+#define   PAL_PREC_10_12_BIT		(0 << 31)
+#define   PAL_PREC_SPLIT_MODE		(1 << 31)
+#define   PAL_PREC_AUTO_INCREMENT	(1 << 15)
+#define _PAL_PREC_DATA_A	0x4A404
+#define _PAL_PREC_DATA_B	0x4AC04
+#define _PAL_PREC_DATA_C	0x4B404
+#define _PAL_PREC_GC_MAX_A	0x4A410
+#define _PAL_PREC_GC_MAX_B	0x4AC10
+#define _PAL_PREC_GC_MAX_C	0x4B410
+#define _PAL_PREC_EXT_GC_MAX_A	0x4A420
+#define _PAL_PREC_EXT_GC_MAX_B	0x4AC20
+#define _PAL_PREC_EXT_GC_MAX_C	0x4B420
+
+#define PREC_PAL_INDEX(pipe)		_MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
+#define PREC_PAL_DATA(pipe)		_MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
+#define PREC_PAL_GC_MAX(pipe, i)	_MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_EXT_GC_MAX(pipe, i)	_MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+
+/* pipe CSC & degamma/gamma LUTs on CHV */
+#define _CGM_PIPE_A_CSC_COEFF01	(VLV_DISPLAY_BASE + 0x67900)
+#define _CGM_PIPE_A_CSC_COEFF23	(VLV_DISPLAY_BASE + 0x67904)
+#define _CGM_PIPE_A_CSC_COEFF45	(VLV_DISPLAY_BASE + 0x67908)
+#define _CGM_PIPE_A_CSC_COEFF67	(VLV_DISPLAY_BASE + 0x6790C)
+#define _CGM_PIPE_A_CSC_COEFF8	(VLV_DISPLAY_BASE + 0x67910)
+#define _CGM_PIPE_A_DEGAMMA	(VLV_DISPLAY_BASE + 0x66000)
+#define _CGM_PIPE_A_GAMMA	(VLV_DISPLAY_BASE + 0x67000)
+#define _CGM_PIPE_A_MODE	(VLV_DISPLAY_BASE + 0x67A00)
+#define   CGM_PIPE_MODE_GAMMA	(1 << 2)
+#define   CGM_PIPE_MODE_CSC	(1 << 1)
+#define   CGM_PIPE_MODE_DEGAMMA	(1 << 0)
+
+#define _CGM_PIPE_B_CSC_COEFF01	(VLV_DISPLAY_BASE + 0x69900)
+#define _CGM_PIPE_B_CSC_COEFF23	(VLV_DISPLAY_BASE + 0x69904)
+#define _CGM_PIPE_B_CSC_COEFF45	(VLV_DISPLAY_BASE + 0x69908)
+#define _CGM_PIPE_B_CSC_COEFF67	(VLV_DISPLAY_BASE + 0x6990C)
+#define _CGM_PIPE_B_CSC_COEFF8	(VLV_DISPLAY_BASE + 0x69910)
+#define _CGM_PIPE_B_DEGAMMA	(VLV_DISPLAY_BASE + 0x68000)
+#define _CGM_PIPE_B_GAMMA	(VLV_DISPLAY_BASE + 0x69000)
+#define _CGM_PIPE_B_MODE	(VLV_DISPLAY_BASE + 0x69A00)
+
+#define CGM_PIPE_CSC_COEFF01(pipe)	_MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF01, _CGM_PIPE_B_CSC_COEFF01)
+#define CGM_PIPE_CSC_COEFF23(pipe)	_MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF23, _CGM_PIPE_B_CSC_COEFF23)
+#define CGM_PIPE_CSC_COEFF45(pipe)	_MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF45, _CGM_PIPE_B_CSC_COEFF45)
+#define CGM_PIPE_CSC_COEFF67(pipe)	_MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF67, _CGM_PIPE_B_CSC_COEFF67)
+#define CGM_PIPE_CSC_COEFF8(pipe)	_MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF8, _CGM_PIPE_B_CSC_COEFF8)
+#define CGM_PIPE_DEGAMMA(pipe, i, w)	_MMIO(_PIPE(pipe, _CGM_PIPE_A_DEGAMMA, _CGM_PIPE_B_DEGAMMA) + (i) * 8 + (w) * 4)
+#define CGM_PIPE_GAMMA(pipe, i, w)	_MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4)
+#define CGM_PIPE_MODE(pipe)		_MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE)
+
 /* MIPI DSI registers */
 
 #define _MIPI_PORT(port, a, c)	_PORT3(port, a, 0, c)	/* ports A and C only */
@@ -7674,58 +7798,62 @@
 #define  BXT_MIPI_DIV_SHIFT(port)		\
 			_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
 					BXT_MIPI2_DIV_SHIFT)
-/* Var clock divider to generate TX source. Result must be < 39.5 M */
-#define  BXT_MIPI1_ESCLK_VAR_DIV_MASK		(0x3F << 26)
-#define  BXT_MIPI2_ESCLK_VAR_DIV_MASK		(0x3F << 10)
-#define  BXT_MIPI_ESCLK_VAR_DIV_MASK(port)	\
-			_MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
-						BXT_MIPI2_ESCLK_VAR_DIV_MASK)
 
-#define  BXT_MIPI_ESCLK_VAR_DIV(port, val)	\
-			(val << BXT_MIPI_DIV_SHIFT(port))
 /* TX control divider to select actual TX clock output from (8x/var) */
-#define  BXT_MIPI1_TX_ESCLK_SHIFT		21
-#define  BXT_MIPI2_TX_ESCLK_SHIFT		5
+#define  BXT_MIPI1_TX_ESCLK_SHIFT		26
+#define  BXT_MIPI2_TX_ESCLK_SHIFT		10
 #define  BXT_MIPI_TX_ESCLK_SHIFT(port)		\
 			_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
 					BXT_MIPI2_TX_ESCLK_SHIFT)
-#define  BXT_MIPI1_TX_ESCLK_FIXDIV_MASK		(3 << 21)
-#define  BXT_MIPI2_TX_ESCLK_FIXDIV_MASK		(3 << 5)
+#define  BXT_MIPI1_TX_ESCLK_FIXDIV_MASK		(0x3F << 26)
+#define  BXT_MIPI2_TX_ESCLK_FIXDIV_MASK		(0x3F << 10)
 #define  BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)	\
 			_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
-						BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
-#define  BXT_MIPI_TX_ESCLK_8XDIV_BY2(port)	\
-		(0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-#define  BXT_MIPI_TX_ESCLK_8XDIV_BY4(port)	\
-		(0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-#define  BXT_MIPI_TX_ESCLK_8XDIV_BY8(port)	\
-		(0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-/* RX control divider to select actual RX clock output from 8x*/
-#define  BXT_MIPI1_RX_ESCLK_SHIFT		19
-#define  BXT_MIPI2_RX_ESCLK_SHIFT		3
-#define  BXT_MIPI_RX_ESCLK_SHIFT(port)		\
-			_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
-					BXT_MIPI2_RX_ESCLK_SHIFT)
-#define  BXT_MIPI1_RX_ESCLK_FIXDIV_MASK		(3 << 19)
-#define  BXT_MIPI2_RX_ESCLK_FIXDIV_MASK		(3 << 3)
-#define  BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port)	\
-		(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define  BXT_MIPI_RX_ESCLK_8X_BY2(port)	\
-		(1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define  BXT_MIPI_RX_ESCLK_8X_BY3(port)	\
-		(2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define  BXT_MIPI_RX_ESCLK_8X_BY4(port)	\
-		(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-/* BXT-A WA: Always prog DPHY dividers to 00 */
-#define  BXT_MIPI1_DPHY_DIV_SHIFT		16
-#define  BXT_MIPI2_DPHY_DIV_SHIFT		0
-#define  BXT_MIPI_DPHY_DIV_SHIFT(port)		\
-			_MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
-					BXT_MIPI2_DPHY_DIV_SHIFT)
-#define  BXT_MIPI_1_DPHY_DIVIDER_MASK		(3 << 16)
-#define  BXT_MIPI_2_DPHY_DIVIDER_MASK		(3 << 0)
-#define  BXT_MIPI_DPHY_DIVIDER_MASK(port)	\
-		(3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
+					BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
+#define  BXT_MIPI_TX_ESCLK_DIVIDER(port, val)	\
+		((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+/* RX upper control divider to select actual RX clock output from 8x */
+#define  BXT_MIPI1_RX_ESCLK_UPPER_SHIFT		21
+#define  BXT_MIPI2_RX_ESCLK_UPPER_SHIFT		5
+#define  BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)		\
+			_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
+					BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
+#define  BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK		(3 << 21)
+#define  BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK		(3 << 5)
+#define  BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)	\
+			_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
+					BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
+#define  BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val)	\
+		((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+/* 8/3X divider to select the actual 8/3X clock output from 8x */
+#define  BXT_MIPI1_8X_BY3_SHIFT                19
+#define  BXT_MIPI2_8X_BY3_SHIFT                3
+#define  BXT_MIPI_8X_BY3_SHIFT(port)          \
+			_MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
+					BXT_MIPI2_8X_BY3_SHIFT)
+#define  BXT_MIPI1_8X_BY3_DIVIDER_MASK         (3 << 19)
+#define  BXT_MIPI2_8X_BY3_DIVIDER_MASK         (3 << 3)
+#define  BXT_MIPI_8X_BY3_DIVIDER_MASK(port)    \
+			_MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
+						BXT_MIPI2_8X_BY3_DIVIDER_MASK)
+#define  BXT_MIPI_8X_BY3_DIVIDER(port, val)    \
+			((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+/* RX lower control divider to select actual RX clock output from 8x */
+#define  BXT_MIPI1_RX_ESCLK_LOWER_SHIFT		16
+#define  BXT_MIPI2_RX_ESCLK_LOWER_SHIFT		0
+#define  BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)		\
+			_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
+					BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
+#define  BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK		(3 << 16)
+#define  BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK		(3 << 0)
+#define  BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)	\
+			_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
+					BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
+#define  BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val)	\
+		((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+
+#define RX_DIVIDER_BIT_1_2                     0x3
+#define RX_DIVIDER_BIT_3_4                     0xC
 
 /* BXT MIPI mode configure */
 #define  _BXT_MIPIA_TRANS_HACTIVE			0x6B0F8
@@ -7750,9 +7878,11 @@
 #define  BXT_DSIC_16X_BY2		(1 << 10)
 #define  BXT_DSIC_16X_BY3		(2 << 10)
 #define  BXT_DSIC_16X_BY4		(3 << 10)
+#define  BXT_DSIC_16X_MASK		(3 << 10)
 #define  BXT_DSIA_16X_BY2		(1 << 8)
 #define  BXT_DSIA_16X_BY3		(2 << 8)
 #define  BXT_DSIA_16X_BY4		(3 << 8)
+#define  BXT_DSIA_16X_MASK		(3 << 8)
 #define  BXT_DSI_FREQ_SEL_SHIFT		8
 #define  BXT_DSI_FREQ_SEL_MASK		(0xF << BXT_DSI_FREQ_SEL_SHIFT)
 
@@ -7887,8 +8017,8 @@
 #define  VID_MODE_FORMAT_MASK				(0xf << 7)
 #define  VID_MODE_NOT_SUPPORTED				(0 << 7)
 #define  VID_MODE_FORMAT_RGB565				(1 << 7)
-#define  VID_MODE_FORMAT_RGB666				(2 << 7)
-#define  VID_MODE_FORMAT_RGB666_LOOSE			(3 << 7)
+#define  VID_MODE_FORMAT_RGB666_PACKED			(2 << 7)
+#define  VID_MODE_FORMAT_RGB666				(3 << 7)
 #define  VID_MODE_FORMAT_RGB888				(4 << 7)
 #define  CMD_MODE_CHANNEL_NUMBER_SHIFT			5
 #define  CMD_MODE_CHANNEL_NUMBER_MASK			(3 << 5)
@@ -8144,6 +8274,7 @@
 #define  READ_REQUEST_PRIORITY_HIGH			(3 << 3)
 #define  RGB_FLIP_TO_BGR				(1 << 2)
 
+#define  BXT_PIPE_SELECT_SHIFT				7
 #define  BXT_PIPE_SELECT_MASK				(7 << 7)
 #define  BXT_PIPE_SELECT(pipe)				((pipe) << 7)
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c6188dd..2d576b7 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -370,6 +370,8 @@
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
+	intel_runtime_pm_get(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	val = intel_freq_opcode(dev_priv, val);
@@ -378,6 +380,7 @@
 	    val > dev_priv->rps.max_freq ||
 	    val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
+		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
@@ -398,6 +401,8 @@
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return count;
 }
 
@@ -433,6 +438,8 @@
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
+	intel_runtime_pm_get(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	val = intel_freq_opcode(dev_priv, val);
@@ -441,6 +448,7 @@
 	    val > dev_priv->rps.max_freq ||
 	    val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
+		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
@@ -457,6 +465,8 @@
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return count;
 
 }
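
The i915_sysfs.c changes above take a runtime-PM reference around the frequency-limit writes so the hardware stays awake while rps.hw_lock is held, and the added intel_runtime_pm_put() before the -EINVAL return keeps that reference balanced on the early-error path. A simplified standalone sketch of the get/put discipline follows; the helpers are invented stand-ins for the real runtime-PM and locking calls.

/*
 * Standalone sketch (not driver code): wakeref held around hardware access,
 * balanced on every exit path. Helpers below are made up.
 */
#include <errno.h>

struct dev_state {
	int wakerefs;
	int hw_locked;
};

static void pm_get(struct dev_state *d)    { d->wakerefs++; }	/* keep the device awake */
static void pm_put(struct dev_state *d)    { d->wakerefs--; }
static void hw_lock(struct dev_state *d)   { d->hw_locked = 1; }
static void hw_unlock(struct dev_state *d) { d->hw_locked = 0; }

static int set_freq_limit(struct dev_state *d, int val, int min, int max)
{
	int ret = 0;

	pm_get(d);		/* the device may be runtime-suspended; wake it first */
	hw_lock(d);

	if (val < min || val > max) {
		ret = -EINVAL;	/* early error: still unlock and drop the wakeref */
		goto out;
	}

	/* ... program the new frequency limit into the hardware here ... */

out:
	hw_unlock(d);
	pm_put(d);		/* balanced on every exit path */
	return ret;
}
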
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fa09e55..dc0def2 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -464,7 +464,7 @@
 	    TP_fast_assign(
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->ring->id;
+			   __entry->sync_to = to_req->engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -486,13 +486,13 @@
 			     ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   struct intel_engine_cs *engine =
+						i915_gem_request_get_engine(req);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   __entry->flags = flags;
-			   i915_trace_irq_get(ring, req);
+			   i915_trace_irq_get(engine, req);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,8 +511,8 @@
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = req->ring->dev->primary->index;
-			   __entry->ring = req->ring->id;
+			   __entry->dev = req->engine->dev->primary->index;
+			   __entry->ring = req->engine->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
 			   ),
@@ -533,10 +533,10 @@
 			     ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   struct intel_engine_cs *engine =
+						i915_gem_request_get_engine(req);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -550,8 +550,8 @@
 );
 
 TRACE_EVENT(i915_gem_request_notify,
-	    TP_PROTO(struct intel_engine_cs *ring),
-	    TP_ARGS(ring),
+	    TP_PROTO(struct intel_engine_cs *engine),
+	    TP_ARGS(engine),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -560,9 +560,9 @@
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   __entry->seqno = ring->get_seqno(ring, false);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
+			   __entry->seqno = engine->get_seqno(engine);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +597,13 @@
 	     * less desirable.
 	     */
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   struct intel_engine_cs *engine =
+						i915_gem_request_get_engine(req);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   __entry->blocking =
-				     mutex_is_locked(&ring->dev->struct_mutex);
+				     mutex_is_locked(&engine->dev->struct_mutex);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -777,9 +777,9 @@
  * called only if full ppgtt is enabled.
  */
 TRACE_EVENT(switch_mm,
-	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+	TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
 
-	TP_ARGS(ring, to),
+	TP_ARGS(engine, to),
 
 	TP_STRUCT__entry(
 			__field(u32, ring)
@@ -789,10 +789,10 @@
 	),
 
 	TP_fast_assign(
-			__entry->ring = ring->id;
+			__entry->ring = engine->id;
 			__entry->to = to;
 			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-			__entry->dev = ring->dev->primary->index;
+			__entry->dev = engine->dev->primary->index;
 	),
 
 	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dea7429..d02efb8 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -181,8 +181,8 @@
 int intel_vgt_balloon(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
-	unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
 
 	unsigned long mappable_base, mappable_size, mappable_end;
 	unsigned long unmappable_base, unmappable_size, unmappable_end;
@@ -202,19 +202,19 @@
 	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
 		 unmappable_base, unmappable_size / 1024);
 
-	if (mappable_base < ggtt_vm->start ||
-	    mappable_end > dev_priv->gtt.mappable_end ||
-	    unmappable_base < dev_priv->gtt.mappable_end ||
-	    unmappable_end > ggtt_vm_end) {
+	if (mappable_base < ggtt->base.start ||
+	    mappable_end > ggtt->mappable_end ||
+	    unmappable_base < ggtt->mappable_end ||
+	    unmappable_end > ggtt_end) {
 		DRM_ERROR("Invalid ballooning configuration!\n");
 		return -EINVAL;
 	}
 
 	/* Unmappable graphic memory ballooning */
-	if (unmappable_base > dev_priv->gtt.mappable_end) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (unmappable_base > ggtt->mappable_end) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
 					&bl_info.space[2],
-					dev_priv->gtt.mappable_end,
+					ggtt->mappable_end,
 					unmappable_base);
 
 		if (ret)
@@ -225,30 +225,30 @@
 	 * No need to partition out the last physical page,
 	 * because it is reserved to the guard page.
 	 */
-	if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (unmappable_end < ggtt_end - PAGE_SIZE) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
 					&bl_info.space[3],
 					unmappable_end,
-					ggtt_vm_end - PAGE_SIZE);
+					ggtt_end - PAGE_SIZE);
 		if (ret)
 			goto err;
 	}
 
 	/* Mappable graphic memory ballooning */
-	if (mappable_base > ggtt_vm->start) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (mappable_base > ggtt->base.start) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
 					&bl_info.space[0],
-					ggtt_vm->start, mappable_base);
+					ggtt->base.start, mappable_base);
 
 		if (ret)
 			goto err;
 	}
 
-	if (mappable_end < dev_priv->gtt.mappable_end) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (mappable_end < ggtt->mappable_end) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
 					&bl_info.space[1],
 					mappable_end,
-					dev_priv->gtt.mappable_end);
+					ggtt->mappable_end);
 
 		if (ret)
 			goto err;
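The ballooning code above reserves the GGTT ranges the host has not assigned to this vGPU: up to two holes below mappable_end and two above it, with the last page left for the guard page; the conversion simply switches from dev_priv->gtt to the new struct i915_ggtt. A condensed sketch of the layout check it performs, using plain scalars in place of the driver structures (the helper name is made up for illustration):

static bool example_balloon_layout_ok(unsigned long ggtt_start,
				      unsigned long ggtt_end,
				      unsigned long mappable_end,
				      unsigned long mappable_base,
				      unsigned long mappable_top,
				      unsigned long unmappable_base,
				      unsigned long unmappable_top)
{
	/* the mappable range must lie below mappable_end, the unmappable above it */
	return mappable_base >= ggtt_start &&
	       mappable_top <= mappable_end &&
	       unmappable_base >= mappable_end &&
	       unmappable_top <= ggtt_end;
}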
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8e579a8..50ff90a 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -96,8 +96,11 @@
 	crtc_state->update_pipe = false;
 	crtc_state->disable_lp_wm = false;
 	crtc_state->disable_cxsr = false;
-	crtc_state->wm_changed = false;
+	crtc_state->update_wm_pre = false;
+	crtc_state->update_wm_post = false;
 	crtc_state->fb_changed = false;
+	crtc_state->wm.need_postvbl_update = false;
+	crtc_state->fb_bits = 0;
 
 	return &crtc_state->base;
 }
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index e0b851a..7de7721 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -195,12 +195,10 @@
 	struct intel_plane_state *intel_state =
 		to_intel_plane_state(plane->state);
 	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-	struct drm_crtc_state *crtc_state =
-		drm_atomic_get_existing_crtc_state(old_state->state, crtc);
 
 	if (intel_state->visible)
 		intel_plane->update_plane(plane,
-					  to_intel_crtc_state(crtc_state),
+					  to_intel_crtc_state(crtc->state),
 					  intel_state);
 	else
 		intel_plane->disable_plane(plane, crtc);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 7d281b4..02a7527 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -372,7 +372,7 @@
 	if (WARN_ON(port == PORT_A))
 		return;
 
-	if (HAS_PCH_IBX(dev_priv->dev)) {
+	if (HAS_PCH_IBX(dev_priv)) {
 		aud_config = IBX_AUD_CFG(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -561,23 +561,21 @@
 }
 
 /**
- * intel_init_audio - Set up chip specific audio functions
- * @dev: drm device
+ * intel_init_audio_hooks - Set up chip specific audio hooks
+ * @dev_priv: device private
  */
-void intel_init_audio(struct drm_device *dev)
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev_priv)) {
 		dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
 		dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
 		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
-	} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+	} else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) {
 		dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
 		dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
 		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
 	}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index bf62a19..b235b6e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
-#include "intel_bios.h"
+
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
 
 /**
  * DOC: Video BIOS Table (VBT)
@@ -56,8 +58,6 @@
 #define	SLAVE_ADDR1	0x70
 #define	SLAVE_ADDR2	0x72
 
-static int panel_type;
-
 /* Get BDB block size given a pointer to Block ID. */
 static u32 _get_blocksize(const u8 *block_base)
 {
@@ -203,17 +203,32 @@
 	const struct lvds_dvo_timing *panel_dvo_timing;
 	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
+	int panel_type;
 	int drrs_mode;
+	int ret;
 
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
 		return;
 
 	dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
-	if (lvds_options->panel_type == 0xff)
-		return;
 
-	panel_type = lvds_options->panel_type;
+	ret = intel_opregion_get_panel_type(dev_priv->dev);
+	if (ret >= 0) {
+		WARN_ON(ret > 0xf);
+		panel_type = ret;
+		DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+	} else {
+		if (lvds_options->panel_type > 0xf) {
+			DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
+				      lvds_options->panel_type);
+			return;
+		}
+		panel_type = lvds_options->panel_type;
+		DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+	}
+
+	dev_priv->vbt.panel_type = panel_type;
 
 	drrs_mode = (lvds_options->dps_panel_type_bits
 				>> (panel_type * 2)) & MODE_MASK;
@@ -249,7 +264,7 @@
 
 	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
 					       lvds_lfp_data_ptrs,
-					       lvds_options->panel_type);
+					       panel_type);
 
 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
 	if (!panel_fixed_mode)
@@ -264,7 +279,7 @@
 
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
 				       lvds_lfp_data_ptrs,
-				       lvds_options->panel_type);
+				       panel_type);
 	if (fp_timing) {
 		/* check the resolution, just to be sure */
 		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
@@ -282,6 +297,7 @@
 {
 	const struct bdb_lfp_backlight_data *backlight_data;
 	const struct bdb_lfp_backlight_data_entry *entry;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
 	if (!backlight_data)
@@ -480,7 +496,7 @@
 			      child->slave_addr,
 			      (child->dvo_port == DEVICE_PORT_DVOB) ?
 			      "SDVOB" : "SDVOC");
-		p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
+		p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
 		if (!p_mapping->initialized) {
 			p_mapping->dvo_port = child->dvo_port;
 			p_mapping->slave_addr = child->slave_addr;
@@ -525,10 +541,7 @@
 		return;
 
 	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
-		dev_priv->vbt.edp_support = 1;
-
-	if (driver->dual_frequency)
-		dev_priv->render_reclock_avail = true;
+		dev_priv->vbt.edp.support = 1;
 
 	DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
 	/*
@@ -547,23 +560,24 @@
 	const struct bdb_edp *edp;
 	const struct edp_power_seq *edp_pps;
 	const struct edp_link_params *edp_link_params;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
-		if (dev_priv->vbt.edp_support)
+		if (dev_priv->vbt.edp.support)
 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
 		return;
 	}
 
 	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
 	case EDP_18BPP:
-		dev_priv->vbt.edp_bpp = 18;
+		dev_priv->vbt.edp.bpp = 18;
 		break;
 	case EDP_24BPP:
-		dev_priv->vbt.edp_bpp = 24;
+		dev_priv->vbt.edp.bpp = 24;
 		break;
 	case EDP_30BPP:
-		dev_priv->vbt.edp_bpp = 30;
+		dev_priv->vbt.edp.bpp = 30;
 		break;
 	}
 
@@ -571,14 +585,14 @@
 	edp_pps = &edp->power_seqs[panel_type];
 	edp_link_params = &edp->link_params[panel_type];
 
-	dev_priv->vbt.edp_pps = *edp_pps;
+	dev_priv->vbt.edp.pps = *edp_pps;
 
 	switch (edp_link_params->rate) {
 	case EDP_RATE_1_62:
-		dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+		dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
 		break;
 	case EDP_RATE_2_7:
-		dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+		dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
 		break;
 	default:
 		DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
@@ -588,13 +602,13 @@
 
 	switch (edp_link_params->lanes) {
 	case EDP_LANE_1:
-		dev_priv->vbt.edp_lanes = 1;
+		dev_priv->vbt.edp.lanes = 1;
 		break;
 	case EDP_LANE_2:
-		dev_priv->vbt.edp_lanes = 2;
+		dev_priv->vbt.edp.lanes = 2;
 		break;
 	case EDP_LANE_4:
-		dev_priv->vbt.edp_lanes = 4;
+		dev_priv->vbt.edp.lanes = 4;
 		break;
 	default:
 		DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
@@ -604,16 +618,16 @@
 
 	switch (edp_link_params->preemphasis) {
 	case EDP_PREEMPHASIS_NONE:
-		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
 		break;
 	case EDP_PREEMPHASIS_3_5dB:
-		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
 		break;
 	case EDP_PREEMPHASIS_6dB:
-		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
 		break;
 	case EDP_PREEMPHASIS_9_5dB:
-		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
 		break;
 	default:
 		DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
@@ -623,16 +637,16 @@
 
 	switch (edp_link_params->vswing) {
 	case EDP_VSWING_0_4V:
-		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 		break;
 	case EDP_VSWING_0_6V:
-		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
 		break;
 	case EDP_VSWING_0_8V:
-		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 		break;
 	case EDP_VSWING_1_2V:
-		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 		break;
 	default:
 		DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@ -645,10 +659,10 @@
 
 		/* Don't read from VBT if module parameter has valid value*/
 		if (i915.edp_vswing) {
-			dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+			dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
 		} else {
 			vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
-			dev_priv->edp_low_vswing = vswing == 0;
+			dev_priv->vbt.edp.low_vswing = vswing == 0;
 		}
 	}
 }
@@ -658,6 +672,7 @@
 {
 	const struct bdb_psr *psr;
 	const struct psr_table *psr_table;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	psr = find_section(bdb, BDB_PSR);
 	if (!psr) {
@@ -704,9 +719,10 @@
 	const struct bdb_mipi_config *start;
 	const struct mipi_config *config;
 	const struct mipi_pps_data *pps;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	/* parse MIPI blocks only if LFP type is MIPI */
-	if (!dev_priv->vbt.has_mipi)
+	if (!intel_bios_is_dsi_present(dev_priv, NULL))
 		return;
 
 	/* Initialize this to undefined indicating no generic MIPI support */
@@ -911,6 +927,7 @@
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
 		    const struct bdb_header *bdb)
 {
+	int panel_type = dev_priv->vbt.panel_type;
 	const struct bdb_mipi_sequence *sequence;
 	const u8 *seq_data;
 	u32 seq_size;
@@ -1124,7 +1141,7 @@
 	}
 
 	/* Parse the I_boost config for SKL and above */
-	if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+	if (bdb->version >= 196 && child->common.iboost) {
 		info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
 		DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
 			      port_name(port), info->dp_boost_level);
@@ -1232,14 +1249,6 @@
 			continue;
 		}
 
-		if (p_child->common.dvo_port >= DVO_PORT_MIPIA
-		    && p_child->common.dvo_port <= DVO_PORT_MIPID
-		    &&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
-			DRM_DEBUG_KMS("Found MIPI as LFP\n");
-			dev_priv->vbt.has_mipi = 1;
-			dev_priv->vbt.dsi.port = p_child->common.dvo_port;
-		}
-
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
 
@@ -1250,6 +1259,19 @@
 		 */
 		memcpy(child_dev_ptr, p_child,
 		       min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
+
+		/*
+		 * Copied the full block; now initialize fields that are
+		 * not available in this BDB version.
+		 */
+		if (bdb->version < 196) {
+			/* Set default values for bits added from v196 */
+			child_dev_ptr->common.iboost = 0;
+			child_dev_ptr->common.hpd_invert = 0;
+		}
+
+		if (bdb->version < 192)
+			child_dev_ptr->common.lspcon = 0;
 	}
 	return;
 }
@@ -1431,3 +1453,246 @@
 
 	return 0;
 }
+
+/**
+ * intel_bios_is_tv_present - is integrated TV present in VBT
+ * @dev_priv:	i915 device instance
+ *
+ * Return true if TV is present. If no child devices were parsed from VBT,
+ * assume TV is present.
+ */
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+{
+	union child_device_config *p_child;
+	int i;
+
+	if (!dev_priv->vbt.int_tv_support)
+		return false;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return true;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
+		/*
+		 * If the device type is not TV, continue.
+		 */
+		switch (p_child->old.device_type) {
+		case DEVICE_TYPE_INT_TV:
+		case DEVICE_TYPE_TV:
+		case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+			break;
+		default:
+			continue;
+		}
+		/* The device is regarded as present only when the
+		 * addin_offset is non-zero.
+		 */
+		if (p_child->old.addin_offset)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * intel_bios_is_lvds_present - is LVDS present in VBT
+ * @dev_priv:	i915 device instance
+ * @i2c_pin:	i2c pin for LVDS if present
+ *
+ * Return true if LVDS is present. If no child devices were parsed from VBT,
+ * assume LVDS is present.
+ */
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+{
+	int i;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return true;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+		struct old_child_dev_config *child = &uchild->old;
+
+		/* If the device type is not LFP, continue.
+		 * We have to check both the new identifiers and the old
+		 * ones for compatibility with some BIOSes.
+		 */
+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
+		    child->device_type != DEVICE_TYPE_LFP)
+			continue;
+
+		if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+			*i2c_pin = child->i2c_pin;
+
+		/* However, we cannot trust the BIOS writers to populate
+		 * the VBT correctly.  Since LVDS requires additional
+		 * information from AIM blocks, a non-zero addin offset is
+		 * a good indicator that the LVDS is actually present.
+		 */
+		if (child->addin_offset)
+			return true;
+
+		/* But even then some BIOS writers perform some black magic
+		 * and instantiate the device without reference to any
+		 * additional data.  Trust that if the VBT was written into
+		 * the OpRegion then they have validated the LVDS's existence.
+		 */
+		if (dev_priv->opregion.vbt)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * intel_bios_is_port_edp - is the device in given port eDP
+ * @dev_priv:	i915 device instance
+ * @port:	port to check
+ *
+ * Return true if the device in %port is eDP.
+ */
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+{
+	union child_device_config *p_child;
+	static const short port_mapping[] = {
+		[PORT_B] = DVO_PORT_DPB,
+		[PORT_C] = DVO_PORT_DPC,
+		[PORT_D] = DVO_PORT_DPD,
+		[PORT_E] = DVO_PORT_DPE,
+	};
+	int i;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
+
+		if (p_child->common.dvo_port == port_mapping[port] &&
+		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+			return true;
+	}
+
+	return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+{
+	static const struct {
+		u16 dp, hdmi;
+	} port_mapping[] = {
+		/*
+		 * Buggy VBTs may declare DP ports as having
+		 * HDMI type dvo_port :( So let's check both.
+		 */
+		[PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+		[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+		[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+	};
+	int i;
+
+	if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+		return false;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		const union child_device_config *p_child =
+			&dev_priv->vbt.child_dev[i];
+
+		if ((p_child->common.dvo_port == port_mapping[port].dp ||
+		     p_child->common.dvo_port == port_mapping[port].hdmi) &&
+		    (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+		    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * intel_bios_is_dsi_present - is DSI present in VBT
+ * @dev_priv:	i915 device instance
+ * @port:	port for DSI if present
+ *
+ * Return true if DSI is present, and return the port in %port.
+ */
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
+			       enum port *port)
+{
+	union child_device_config *p_child;
+	u8 dvo_port;
+	int i;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
+
+		if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
+			continue;
+
+		dvo_port = p_child->common.dvo_port;
+
+		switch (dvo_port) {
+		case DVO_PORT_MIPIA:
+		case DVO_PORT_MIPIC:
+			if (port)
+				*port = dvo_port - DVO_PORT_MIPIA;
+			return true;
+		case DVO_PORT_MIPIB:
+		case DVO_PORT_MIPID:
+			DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
+				      port_name(dvo_port - DVO_PORT_MIPIA));
+			break;
+		}
+	}
+
+	return false;
+}
+
+/**
+ * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
+ * @dev_priv:	i915 device instance
+ * @port:	port to check
+ *
+ * Return true if HPD should be inverted for %port.
+ */
+bool
+intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+				enum port port)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+			continue;
+
+		switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+		case DVO_PORT_DPA:
+		case DVO_PORT_HDMIA:
+			if (port == PORT_A)
+				return true;
+			break;
+		case DVO_PORT_DPB:
+		case DVO_PORT_HDMIB:
+			if (port == PORT_B)
+				return true;
+			break;
+		case DVO_PORT_DPC:
+		case DVO_PORT_HDMIC:
+			if (port == PORT_C)
+				return true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return false;
+}
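With the raw child-device walking now concentrated in intel_bios.c, the rest of the driver can query the VBT through the new intel_bios_is_*() helpers instead of caching flags such as has_mipi. A hedged usage sketch for the DSI helper, with a hypothetical caller:

static void example_probe_dsi(struct drm_i915_private *dev_priv)
{
	enum port port;

	if (!intel_bios_is_dsi_present(dev_priv, &port))
		return;

	DRM_DEBUG_KMS("VBT reports a DSI panel on port %c\n", port_name(port));
	/* ... go on to register the DSI output on 'port' ... */
}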
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 350d4e0..ab0ea31 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2006 Intel Corporation
+ * Copyright © 2016 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,544 +19,17 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <eric@anholt.net>
- *
+ */
+
+/*
+ * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
+ * the VBT from the rest of the driver. Add the parsed, clean data to struct
+ * intel_vbt_data within struct drm_i915_private.
  */
 
 #ifndef _INTEL_BIOS_H_
 #define _INTEL_BIOS_H_
 
-/**
- * struct vbt_header - VBT Header structure
- * @signature:		VBT signature, always starts with "$VBT"
- * @version:		Version of this structure
- * @header_size:	Size of this structure
- * @vbt_size:		Size of VBT (VBT Header, BDB Header and data blocks)
- * @vbt_checksum:	Checksum
- * @reserved0:		Reserved
- * @bdb_offset:		Offset of &struct bdb_header from beginning of VBT
- * @aim_offset:		Offsets of add-in data blocks from beginning of VBT
- */
-struct vbt_header {
-	u8 signature[20];
-	u16 version;
-	u16 header_size;
-	u16 vbt_size;
-	u8 vbt_checksum;
-	u8 reserved0;
-	u32 bdb_offset;
-	u32 aim_offset[4];
-} __packed;
-
-/**
- * struct bdb_header - BDB Header structure
- * @signature:		BDB signature "BIOS_DATA_BLOCK"
- * @version:		Version of the data block definitions
- * @header_size:	Size of this structure
- * @bdb_size:		Size of BDB (BDB Header and data blocks)
- */
-struct bdb_header {
-	u8 signature[16];
-	u16 version;
-	u16 header_size;
-	u16 bdb_size;
-} __packed;
-
-/* strictly speaking, this is a "skip" block, but it has interesting info */
-struct vbios_data {
-	u8 type; /* 0 == desktop, 1 == mobile */
-	u8 relstage;
-	u8 chipset;
-	u8 lvds_present:1;
-	u8 tv_present:1;
-	u8 rsvd2:6; /* finish byte */
-	u8 rsvd3[4];
-	u8 signon[155];
-	u8 copyright[61];
-	u16 code_segment;
-	u8 dos_boot_mode;
-	u8 bandwidth_percent;
-	u8 rsvd4; /* popup memory size */
-	u8 resize_pci_bios;
-	u8 rsvd5; /* is crt already on ddc2 */
-} __packed;
-
-/*
- * There are several types of BIOS data blocks (BDBs), each block has
- * an ID and size in the first 3 bytes (ID in first, size in next 2).
- * Known types are listed below.
- */
-#define BDB_GENERAL_FEATURES	  1
-#define BDB_GENERAL_DEFINITIONS	  2
-#define BDB_OLD_TOGGLE_LIST	  3
-#define BDB_MODE_SUPPORT_LIST	  4
-#define BDB_GENERIC_MODE_TABLE	  5
-#define BDB_EXT_MMIO_REGS	  6
-#define BDB_SWF_IO		  7
-#define BDB_SWF_MMIO		  8
-#define BDB_PSR			  9
-#define BDB_MODE_REMOVAL_TABLE	 10
-#define BDB_CHILD_DEVICE_TABLE	 11
-#define BDB_DRIVER_FEATURES	 12
-#define BDB_DRIVER_PERSISTENCE	 13
-#define BDB_EXT_TABLE_PTRS	 14
-#define BDB_DOT_CLOCK_OVERRIDE	 15
-#define BDB_DISPLAY_SELECT	 16
-/* 17 rsvd */
-#define BDB_DRIVER_ROTATION	 18
-#define BDB_DISPLAY_REMOVE	 19
-#define BDB_OEM_CUSTOM		 20
-#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
-#define BDB_SDVO_LVDS_OPTIONS	 22
-#define BDB_SDVO_PANEL_DTDS	 23
-#define BDB_SDVO_LVDS_PNP_IDS	 24
-#define BDB_SDVO_LVDS_POWER_SEQ	 25
-#define BDB_TV_OPTIONS		 26
-#define BDB_EDP			 27
-#define BDB_LVDS_OPTIONS	 40
-#define BDB_LVDS_LFP_DATA_PTRS	 41
-#define BDB_LVDS_LFP_DATA	 42
-#define BDB_LVDS_BACKLIGHT	 43
-#define BDB_LVDS_POWER		 44
-#define BDB_MIPI_CONFIG		 52
-#define BDB_MIPI_SEQUENCE	 53
-#define BDB_SKIP		254 /* VBIOS private block, ignore */
-
-struct bdb_general_features {
-        /* bits 1 */
-	u8 panel_fitting:2;
-	u8 flexaim:1;
-	u8 msg_enable:1;
-	u8 clear_screen:3;
-	u8 color_flip:1;
-
-        /* bits 2 */
-	u8 download_ext_vbt:1;
-	u8 enable_ssc:1;
-	u8 ssc_freq:1;
-	u8 enable_lfp_on_override:1;
-	u8 disable_ssc_ddt:1;
-	u8 rsvd7:1;
-	u8 display_clock_mode:1;
-	u8 rsvd8:1; /* finish byte */
-
-        /* bits 3 */
-	u8 disable_smooth_vision:1;
-	u8 single_dvi:1;
-	u8 rsvd9:1;
-	u8 fdi_rx_polarity_inverted:1;
-	u8 rsvd10:4; /* finish byte */
-
-        /* bits 4 */
-	u8 legacy_monitor_detect;
-
-        /* bits 5 */
-	u8 int_crt_support:1;
-	u8 int_tv_support:1;
-	u8 int_efp_support:1;
-	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
-	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
-	u8 rsvd11:3; /* finish byte */
-} __packed;
-
-/* pre-915 */
-#define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
-#define GPIO_PIN_ADD_I2C	0x05 /* "ADDCARD I2C GPIO pins" */
-#define GPIO_PIN_ADD_DDC	0x04 /* "ADDCARD DDC GPIO pins" */
-#define GPIO_PIN_ADD_DDC_I2C	0x06 /* "ADDCARD DDC/I2C GPIO pins" */
-
-/* Pre 915 */
-#define DEVICE_TYPE_NONE	0x00
-#define DEVICE_TYPE_CRT		0x01
-#define DEVICE_TYPE_TV		0x09
-#define DEVICE_TYPE_EFP		0x12
-#define DEVICE_TYPE_LFP		0x22
-/* On 915+ */
-#define DEVICE_TYPE_CRT_DPMS		0x6001
-#define DEVICE_TYPE_CRT_DPMS_HOTPLUG	0x4001
-#define DEVICE_TYPE_TV_COMPOSITE	0x0209
-#define DEVICE_TYPE_TV_MACROVISION	0x0289
-#define DEVICE_TYPE_TV_RF_COMPOSITE	0x020c
-#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE	0x0609
-#define DEVICE_TYPE_TV_SCART		0x0209
-#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
-#define DEVICE_TYPE_EFP_HOTPLUG_PWR	0x6012
-#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR	0x6052
-#define DEVICE_TYPE_EFP_DVI_I		0x6053
-#define DEVICE_TYPE_EFP_DVI_D_DUAL	0x6152
-#define DEVICE_TYPE_EFP_DVI_D_HDCP	0x60d2
-#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR	0x6062
-#define DEVICE_TYPE_OPENLDI_DUALPIX	0x6162
-#define DEVICE_TYPE_LFP_PANELLINK	0x5012
-#define DEVICE_TYPE_LFP_CMOS_PWR	0x5042
-#define DEVICE_TYPE_LFP_LVDS_PWR	0x5062
-#define DEVICE_TYPE_LFP_LVDS_DUAL	0x5162
-#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP	0x51e2
-
-#define DEVICE_CFG_NONE		0x00
-#define DEVICE_CFG_12BIT_DVOB	0x01
-#define DEVICE_CFG_12BIT_DVOC	0x02
-#define DEVICE_CFG_24BIT_DVOBC	0x09
-#define DEVICE_CFG_24BIT_DVOCB	0x0a
-#define DEVICE_CFG_DUAL_DVOB	0x11
-#define DEVICE_CFG_DUAL_DVOC	0x12
-#define DEVICE_CFG_DUAL_DVOBC	0x13
-#define DEVICE_CFG_DUAL_LINK_DVOBC	0x19
-#define DEVICE_CFG_DUAL_LINK_DVOCB	0x1a
-
-#define DEVICE_WIRE_NONE	0x00
-#define DEVICE_WIRE_DVOB	0x01
-#define DEVICE_WIRE_DVOC	0x02
-#define DEVICE_WIRE_DVOBC	0x03
-#define DEVICE_WIRE_DVOBB	0x05
-#define DEVICE_WIRE_DVOCC	0x06
-#define DEVICE_WIRE_DVOB_MASTER 0x0d
-#define DEVICE_WIRE_DVOC_MASTER 0x0e
-
-#define DEVICE_PORT_DVOA	0x00 /* none on 845+ */
-#define DEVICE_PORT_DVOB	0x01
-#define DEVICE_PORT_DVOC	0x02
-
-/*
- * We used to keep this struct but without any version control. We should avoid
- * using it in the future, but it should be safe to keep using it in the old
- * code. Do not change; we rely on its size.
- */
-struct old_child_dev_config {
-	u16 handle;
-	u16 device_type;
-	u8  device_id[10]; /* ascii string */
-	u16 addin_offset;
-	u8  dvo_port; /* See Device_PORT_* above */
-	u8  i2c_pin;
-	u8  slave_addr;
-	u8  ddc_pin;
-	u16 edid_ptr;
-	u8  dvo_cfg; /* See DEVICE_CFG_* above */
-	u8  dvo2_port;
-	u8  i2c2_pin;
-	u8  slave2_addr;
-	u8  ddc2_pin;
-	u8  capabilities;
-	u8  dvo_wiring;/* See DEVICE_WIRE_* above */
-	u8  dvo2_wiring;
-	u16 extended_type;
-	u8  dvo_function;
-} __packed;
-
-/* This one contains field offsets that are known to be common for all BDB
- * versions. Notice that the meaning of the contents contents may still change,
- * but at least the offsets are consistent. */
-
-/* Definitions for flags_1 */
-#define IBOOST_ENABLE (1<<3)
-
-struct common_child_dev_config {
-	u16 handle;
-	u16 device_type;
-	u8 not_common1[12];
-	u8 dvo_port;
-	u8 not_common2[2];
-	u8 ddc_pin;
-	u16 edid_ptr;
-	u8 obsolete;
-	u8 flags_1;
-	u8 not_common3[13];
-	u8 iboost_level;
-} __packed;
-
-
-/* This field changes depending on the BDB version, so the most reliable way to
- * read it is by checking the BDB version and reading the raw pointer. */
-union child_device_config {
-	/* This one is safe to be used anywhere, but the code should still check
-	 * the BDB version. */
-	u8 raw[33];
-	/* This one should only be kept for legacy code. */
-	struct old_child_dev_config old;
-	/* This one should also be safe to use anywhere, even without version
-	 * checks. */
-	struct common_child_dev_config common;
-} __packed;
-
-struct bdb_general_definitions {
-	/* DDC GPIO */
-	u8 crt_ddc_gmbus_pin;
-
-	/* DPMS bits */
-	u8 dpms_acpi:1;
-	u8 skip_boot_crt_detect:1;
-	u8 dpms_aim:1;
-	u8 rsvd1:5; /* finish byte */
-
-	/* boot device bits */
-	u8 boot_display[2];
-	u8 child_dev_size;
-
-	/*
-	 * Device info:
-	 * If TV is present, it'll be at devices[0].
-	 * LVDS will be next, either devices[0] or [1], if present.
-	 * On some platforms the number of device is 6. But could be as few as
-	 * 4 if both TV and LVDS are missing.
-	 * And the device num is related with the size of general definition
-	 * block. It is obtained by using the following formula:
-	 * number = (block_size - sizeof(bdb_general_definitions))/
-	 *	     defs->child_dev_size;
-	 */
-	uint8_t devices[0];
-} __packed;
-
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK		0x3
-
-struct bdb_lvds_options {
-	u8 panel_type;
-	u8 rsvd1;
-	/* LVDS capabilities, stored in a dword */
-	u8 pfit_mode:2;
-	u8 pfit_text_mode_enhanced:1;
-	u8 pfit_gfx_mode_enhanced:1;
-	u8 pfit_ratio_auto:1;
-	u8 pixel_dither:1;
-	u8 lvds_edid:1;
-	u8 rsvd2:1;
-	u8 rsvd4;
-	/* LVDS Panel channel bits stored here */
-	u32 lvds_panel_channel_bits;
-	/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
-	u16 ssc_bits;
-	u16 ssc_freq;
-	u16 ssc_ddt;
-	/* Panel color depth defined here */
-	u16 panel_color_depth;
-	/* LVDS panel type bits stored here */
-	u32 dps_panel_type_bits;
-	/* LVDS backlight control type bits stored here */
-	u32 blt_control_type_bits;
-} __packed;
-
-/* LFP pointer table contains entries to the struct below */
-struct bdb_lvds_lfp_data_ptr {
-	u16 fp_timing_offset; /* offsets are from start of bdb */
-	u8 fp_table_size;
-	u16 dvo_timing_offset;
-	u8 dvo_table_size;
-	u16 panel_pnp_id_offset;
-	u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
-	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
-	struct bdb_lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
-	u16 x_res;
-	u16 y_res;
-	u32 lvds_reg;
-	u32 lvds_reg_val;
-	u32 pp_on_reg;
-	u32 pp_on_reg_val;
-	u32 pp_off_reg;
-	u32 pp_off_reg_val;
-	u32 pp_cycle_reg;
-	u32 pp_cycle_reg_val;
-	u32 pfit_reg;
-	u32 pfit_reg_val;
-	u16 terminator;
-} __packed;
-
-struct lvds_dvo_timing {
-	u16 clock;		/**< In 10khz */
-	u8 hactive_lo;
-	u8 hblank_lo;
-	u8 hblank_hi:4;
-	u8 hactive_hi:4;
-	u8 vactive_lo;
-	u8 vblank_lo;
-	u8 vblank_hi:4;
-	u8 vactive_hi:4;
-	u8 hsync_off_lo;
-	u8 hsync_pulse_width;
-	u8 vsync_pulse_width:4;
-	u8 vsync_off:4;
-	u8 rsvd0:6;
-	u8 hsync_off_hi:2;
-	u8 h_image;
-	u8 v_image;
-	u8 max_hv;
-	u8 h_border;
-	u8 v_border;
-	u8 rsvd1:3;
-	u8 digital:2;
-	u8 vsync_positive:1;
-	u8 hsync_positive:1;
-	u8 rsvd2:1;
-} __packed;
-
-struct lvds_pnp_id {
-	u16 mfg_name;
-	u16 product_code;
-	u32 serial;
-	u8 mfg_week;
-	u8 mfg_year;
-} __packed;
-
-struct bdb_lvds_lfp_data_entry {
-	struct lvds_fp_timing fp_timing;
-	struct lvds_dvo_timing dvo_timing;
-	struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
-	struct bdb_lvds_lfp_data_entry data[16];
-} __packed;
-
-#define BDB_BACKLIGHT_TYPE_NONE	0
-#define BDB_BACKLIGHT_TYPE_PWM	2
-
-struct bdb_lfp_backlight_data_entry {
-	u8 type:2;
-	u8 active_low_pwm:1;
-	u8 obsolete1:5;
-	u16 pwm_freq_hz;
-	u8 min_brightness;
-	u8 obsolete2;
-	u8 obsolete3;
-} __packed;
-
-struct bdb_lfp_backlight_data {
-	u8 entry_size;
-	struct bdb_lfp_backlight_data_entry data[16];
-	u8 level[16];
-} __packed;
-
-struct aimdb_header {
-	char signature[16];
-	char oem_device[20];
-	u16 aimdb_version;
-	u16 aimdb_header_size;
-	u16 aimdb_size;
-} __packed;
-
-struct aimdb_block {
-	u8 aimdb_id;
-	u16 aimdb_size;
-} __packed;
-
-struct vch_panel_data {
-	u16 fp_timing_offset;
-	u8 fp_timing_size;
-	u16 dvo_timing_offset;
-	u8 dvo_timing_size;
-	u16 text_fitting_offset;
-	u8 text_fitting_size;
-	u16 graphics_fitting_offset;
-	u8 graphics_fitting_size;
-} __packed;
-
-struct vch_bdb_22 {
-	struct aimdb_block aimdb_block;
-	struct vch_panel_data panels[16];
-} __packed;
-
-struct bdb_sdvo_lvds_options {
-	u8 panel_backlight;
-	u8 h40_set_panel_type;
-	u8 panel_type;
-	u8 ssc_clk_freq;
-	u16 als_low_trip;
-	u16 als_high_trip;
-	u8 sclalarcoeff_tab_row_num;
-	u8 sclalarcoeff_tab_row_size;
-	u8 coefficient[8];
-	u8 panel_misc_bits_1;
-	u8 panel_misc_bits_2;
-	u8 panel_misc_bits_3;
-	u8 panel_misc_bits_4;
-} __packed;
-
-
-#define BDB_DRIVER_FEATURE_NO_LVDS		0
-#define BDB_DRIVER_FEATURE_INT_LVDS		1
-#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
-#define BDB_DRIVER_FEATURE_EDP			3
-
-struct bdb_driver_features {
-	u8 boot_dev_algorithm:1;
-	u8 block_display_switch:1;
-	u8 allow_display_switch:1;
-	u8 hotplug_dvo:1;
-	u8 dual_view_zoom:1;
-	u8 int15h_hook:1;
-	u8 sprite_in_clone:1;
-	u8 primary_lfp_id:1;
-
-	u16 boot_mode_x;
-	u16 boot_mode_y;
-	u8 boot_mode_bpp;
-	u8 boot_mode_refresh;
-
-	u16 enable_lfp_primary:1;
-	u16 selective_mode_pruning:1;
-	u16 dual_frequency:1;
-	u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
-	u16 nt_clone_support:1;
-	u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
-	u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
-	u16 cui_aspect_scaling:1;
-	u16 preserve_aspect_ratio:1;
-	u16 sdvo_device_power_down:1;
-	u16 crt_hotplug:1;
-	u16 lvds_config:2;
-	u16 tv_hotplug:1;
-	u16 hdmi_config:2;
-
-	u8 static_display:1;
-	u8 reserved2:7;
-	u16 legacy_crt_max_x;
-	u16 legacy_crt_max_y;
-	u8 legacy_crt_max_refresh;
-
-	u8 hdmi_termination;
-	u8 custom_vbt_version;
-	/* Driver features data block */
-	u16 rmpm_enabled:1;
-	u16 s2ddt_enabled:1;
-	u16 dpst_enabled:1;
-	u16 bltclt_enabled:1;
-	u16 adb_enabled:1;
-	u16 drrs_enabled:1;
-	u16 grs_enabled:1;
-	u16 gpmt_enabled:1;
-	u16 tbt_enabled:1;
-	u16 psr_enabled:1;
-	u16 ips_enabled:1;
-	u16 reserved3:4;
-	u16 pc_feature_valid:1;
-} __packed;
-
-#define EDP_18BPP	0
-#define EDP_24BPP	1
-#define EDP_30BPP	2
-#define EDP_RATE_1_62	0
-#define EDP_RATE_2_7	1
-#define EDP_LANE_1	0
-#define EDP_LANE_2	1
-#define EDP_LANE_4	3
-#define EDP_PREEMPHASIS_NONE	0
-#define EDP_PREEMPHASIS_3_5dB	1
-#define EDP_PREEMPHASIS_6dB	2
-#define EDP_PREEMPHASIS_9_5dB	3
-#define EDP_VSWING_0_4V		0
-#define EDP_VSWING_0_6V		1
-#define EDP_VSWING_0_8V		2
-#define EDP_VSWING_1_2V		3
-
 struct edp_power_seq {
 	u16 t1_t3;
 	u16 t8;
@@ -565,245 +38,37 @@
 	u16 t11_t12;
 } __packed;
 
-struct edp_link_params {
-	u8 rate:4;
-	u8 lanes:4;
-	u8 preemphasis:4;
-	u8 vswing:4;
-} __packed;
+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+	MIPI_SEQ_END = 0,
+	MIPI_SEQ_ASSERT_RESET,
+	MIPI_SEQ_INIT_OTP,
+	MIPI_SEQ_DISPLAY_ON,
+	MIPI_SEQ_DISPLAY_OFF,
+	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_BACKLIGHT_ON,		/* sequence block v2+ */
+	MIPI_SEQ_BACKLIGHT_OFF,		/* sequence block v2+ */
+	MIPI_SEQ_TEAR_ON,		/* sequence block v2+ */
+	MIPI_SEQ_TEAR_OFF,		/* sequence block v3+ */
+	MIPI_SEQ_POWER_ON,		/* sequence block v3+ */
+	MIPI_SEQ_POWER_OFF,		/* sequence block v3+ */
+	MIPI_SEQ_MAX
+};
 
-struct bdb_edp {
-	struct edp_power_seq power_seqs[16];
-	u32 color_depth;
-	struct edp_link_params link_params[16];
-	u32 sdrrs_msa_timing_delay;
-
-	/* ith bit indicates enabled/disabled for (i+1)th panel */
-	u16 edp_s3d_feature;
-	u16 edp_t3_optimization;
-	u64 edp_vswing_preemph;		/* v173 */
-} __packed;
-
-struct psr_table {
-	/* Feature bits */
-	u8 full_link:1;
-	u8 require_aux_to_wakeup:1;
-	u8 feature_bits_rsvd:6;
-
-	/* Wait times */
-	u8 idle_frames:4;
-	u8 lines_to_wait:3;
-	u8 wait_times_rsvd:1;
-
-	/* TP wake up time in multiple of 100 */
-	u16 tp1_wakeup_time;
-	u16 tp2_tp3_wakeup_time;
-} __packed;
-
-struct bdb_psr {
-	struct psr_table psr_table[16];
-} __packed;
-
-/*
- * Driver<->VBIOS interaction occurs through scratch bits in
- * GR18 & SWF*.
- */
-
-/* GR18 bits are set on display switch and hotkey events */
-#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
-#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
-#define   GR18_HK_NONE		(0x0<<3)
-#define   GR18_HK_LFP_STRETCH	(0x1<<3)
-#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
-#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
-#define   GR18_HK_POPUP_DISABLED (0x6<<3)
-#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
-#define   GR18_HK_PFIT		(0x8<<3)
-#define   GR18_HK_APM_CHANGE	(0xa<<3)
-#define   GR18_HK_MULTIPLE	(0xc<<3)
-#define GR18_USER_INT_EN	(1<<2)
-#define GR18_A0000_FLUSH_EN	(1<<1)
-#define GR18_SMM_EN		(1<<0)
-
-/* Set by driver, cleared by VBIOS */
-#define SWF00_YRES_SHIFT	16
-#define SWF00_XRES_SHIFT	0
-#define SWF00_RES_MASK		0xffff
-
-/* Set by VBIOS at boot time and driver at runtime */
-#define SWF01_TV2_FORMAT_SHIFT	8
-#define SWF01_TV1_FORMAT_SHIFT	0
-#define SWF01_TV_FORMAT_MASK	0xffff
-
-#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
-#define SWF10_GTT_OVERRIDE_EN	(1<<28)
-#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
-#define   SWF10_OLD_TOGGLE	0x0
-#define   SWF10_TOGGLE_LIST_1	0x1
-#define   SWF10_TOGGLE_LIST_2	0x2
-#define   SWF10_TOGGLE_LIST_3	0x3
-#define   SWF10_TOGGLE_LIST_4	0x4
-#define SWF10_PANNING_EN	(1<<23)
-#define SWF10_DRIVER_LOADED	(1<<22)
-#define SWF10_EXTENDED_DESKTOP	(1<<21)
-#define SWF10_EXCLUSIVE_MODE	(1<<20)
-#define SWF10_OVERLAY_EN	(1<<19)
-#define SWF10_PLANEB_HOLDOFF	(1<<18)
-#define SWF10_PLANEA_HOLDOFF	(1<<17)
-#define SWF10_VGA_HOLDOFF	(1<<16)
-#define SWF10_ACTIVE_DISP_MASK	0xffff
-#define   SWF10_PIPEB_LFP2	(1<<15)
-#define   SWF10_PIPEB_EFP2	(1<<14)
-#define   SWF10_PIPEB_TV2	(1<<13)
-#define   SWF10_PIPEB_CRT2	(1<<12)
-#define   SWF10_PIPEB_LFP	(1<<11)
-#define   SWF10_PIPEB_EFP	(1<<10)
-#define   SWF10_PIPEB_TV	(1<<9)
-#define   SWF10_PIPEB_CRT	(1<<8)
-#define   SWF10_PIPEA_LFP2	(1<<7)
-#define   SWF10_PIPEA_EFP2	(1<<6)
-#define   SWF10_PIPEA_TV2	(1<<5)
-#define   SWF10_PIPEA_CRT2	(1<<4)
-#define   SWF10_PIPEA_LFP	(1<<3)
-#define   SWF10_PIPEA_EFP	(1<<2)
-#define   SWF10_PIPEA_TV	(1<<1)
-#define   SWF10_PIPEA_CRT	(1<<0)
-
-#define SWF11_MEMORY_SIZE_SHIFT	16
-#define SWF11_SV_TEST_EN	(1<<15)
-#define SWF11_IS_AGP		(1<<14)
-#define SWF11_DISPLAY_HOLDOFF	(1<<13)
-#define SWF11_DPMS_REDUCED	(1<<12)
-#define SWF11_IS_VBE_MODE	(1<<11)
-#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
-#define SWF11_DPMS_MASK		0x07
-#define   SWF11_DPMS_OFF	(1<<2)
-#define   SWF11_DPMS_SUSPEND	(1<<1)
-#define   SWF11_DPMS_STANDBY	(1<<0)
-#define   SWF11_DPMS_ON		0
-
-#define SWF14_GFX_PFIT_EN	(1<<31)
-#define SWF14_TEXT_PFIT_EN	(1<<30)
-#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
-#define SWF14_POPUP_EN		(1<<28)
-#define SWF14_DISPLAY_HOLDOFF	(1<<27)
-#define SWF14_DISP_DETECT_EN	(1<<26)
-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
-#define SWF14_DRIVER_STATUS	(1<<24)
-#define SWF14_OS_TYPE_WIN9X	(1<<23)
-#define SWF14_OS_TYPE_WINNT	(1<<22)
-/* 21:19 rsvd */
-#define SWF14_PM_TYPE_MASK	0x00070000
-#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
-#define   SWF14_PM_ACPI		(0x3 << 16)
-#define   SWF14_PM_APM_12	(0x2 << 16)
-#define   SWF14_PM_APM_11	(0x1 << 16)
-#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
-          /* if GR18 indicates a display switch */
-#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
-#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
-#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
-#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
-#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
-#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
-#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
-#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
-#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
-#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
-#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
-#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
-#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
-#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
-#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
-#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
-          /* if GR18 indicates a panel fitting request */
-#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
-          /* if GR18 indicates an APM change request */
-#define   SWF14_APM_HIBERNATE	0x4
-#define   SWF14_APM_SUSPEND	0x3
-#define   SWF14_APM_STANDBY	0x1
-#define   SWF14_APM_RESTORE	0x0
-
-/* Add the device class for LFP, TV, HDMI */
-#define	 DEVICE_TYPE_INT_LFP	0x1022
-#define	 DEVICE_TYPE_INT_TV	0x1009
-#define	 DEVICE_TYPE_HDMI	0x60D2
-#define	 DEVICE_TYPE_DP		0x68C6
-#define	 DEVICE_TYPE_eDP	0x78C6
-
-#define  DEVICE_TYPE_CLASS_EXTENSION	(1 << 15)
-#define  DEVICE_TYPE_POWER_MANAGEMENT	(1 << 14)
-#define  DEVICE_TYPE_HOTPLUG_SIGNALING	(1 << 13)
-#define  DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
-#define  DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
-#define  DEVICE_TYPE_MIPI_OUTPUT	(1 << 10)
-#define  DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
-#define  DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
-#define  DEVICE_TYPE_HIGH_SPEED_LINK	(1 << 6)
-#define  DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
-#define  DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
-#define  DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
-#define  DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
-#define  DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
-#define  DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP
- * Depending on the system, the other bits may or may not
- * be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
-	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
-	 DEVICE_TYPE_MIPI_OUTPUT | \
-	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
-	 DEVICE_TYPE_DUAL_CHANNEL | \
-	 DEVICE_TYPE_LVDS_SINGALING | \
-	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
-	 DEVICE_TYPE_VIDEO_SIGNALING | \
-	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
-	 DEVICE_TYPE_ANALOG_OUTPUT)
-
-/* define the DVO port for HDMI output type */
-#define		DVO_B		1
-#define		DVO_C		2
-#define		DVO_D		3
-
-/* Possible values for the "DVO Port" field for versions >= 155: */
-#define DVO_PORT_HDMIA	0
-#define DVO_PORT_HDMIB	1
-#define DVO_PORT_HDMIC	2
-#define DVO_PORT_HDMID	3
-#define DVO_PORT_LVDS	4
-#define DVO_PORT_TV	5
-#define DVO_PORT_CRT	6
-#define DVO_PORT_DPB	7
-#define DVO_PORT_DPC	8
-#define DVO_PORT_DPD	9
-#define DVO_PORT_DPA	10
-#define DVO_PORT_DPE	11
-#define DVO_PORT_HDMIE	12
-#define DVO_PORT_MIPIA	21
-#define DVO_PORT_MIPIB	22
-#define DVO_PORT_MIPIC	23
-#define DVO_PORT_MIPID	24
-
-/* Block 52 contains MIPI Panel info
- * 6 such enteries will there. Index into correct
- * entery is based on the panel_index in #40 LFP
- */
-#define MAX_MIPI_CONFIGURATIONS	6
+enum mipi_seq_element {
+	MIPI_SEQ_ELEM_END = 0,
+	MIPI_SEQ_ELEM_SEND_PKT,
+	MIPI_SEQ_ELEM_DELAY,
+	MIPI_SEQ_ELEM_GPIO,
+	MIPI_SEQ_ELEM_I2C,		/* sequence block v2+ */
+	MIPI_SEQ_ELEM_SPI,		/* sequence block v3+ */
+	MIPI_SEQ_ELEM_PMIC,		/* sequence block v3+ */
+	MIPI_SEQ_ELEM_MAX
+};
 
 #define MIPI_DSI_UNDEFINED_PANEL_ID	0
 #define MIPI_DSI_GENERIC_PANEL_ID	1
 
-/*
- * PMIC vs SoC Backlight support specified in pwm_blc
- * field in mipi_config block below.
-*/
-#define PPS_BLC_PMIC   0
-#define PPS_BLC_SOC    1
-
 struct mipi_config {
 	u16 panel_id;
 
@@ -821,6 +86,8 @@
 	u32 video_transfer_mode:2;
 
 	u32 cabc_supported:1;
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
 	u32 pwm_blc:1;
 
 	/* Bit 13:10 */
@@ -924,12 +191,7 @@
 
 } __packed;
 
-/* Block 52 contains MIPI configuration block
- * 6 * bdb_mipi_config, followed by 6 pps data
- * block below
- *
- * all delays has a unit of 100us
- */
+/* all delays have a unit of 100us */
 struct mipi_pps_data {
 	u16 panel_on_delay;
 	u16 bl_enable_delay;
@@ -938,57 +200,4 @@
 	u16 panel_power_cycle_delay;
 } __packed;
 
-struct bdb_mipi_config {
-	struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
-	struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-} __packed;
-
-/* Block 53 contains MIPI sequences as needed by the panel
- * for enabling it. This block can be variable in size and
- * can be maximum of 6 blocks
- */
-struct bdb_mipi_sequence {
-	u8 version;
-	u8 data[0];
-} __packed;
-
-/* MIPI Sequnece Block definitions */
-enum mipi_seq {
-	MIPI_SEQ_END = 0,
-	MIPI_SEQ_ASSERT_RESET,
-	MIPI_SEQ_INIT_OTP,
-	MIPI_SEQ_DISPLAY_ON,
-	MIPI_SEQ_DISPLAY_OFF,
-	MIPI_SEQ_DEASSERT_RESET,
-	MIPI_SEQ_BACKLIGHT_ON,		/* sequence block v2+ */
-	MIPI_SEQ_BACKLIGHT_OFF,		/* sequence block v2+ */
-	MIPI_SEQ_TEAR_ON,		/* sequence block v2+ */
-	MIPI_SEQ_TEAR_OFF,		/* sequence block v3+ */
-	MIPI_SEQ_POWER_ON,		/* sequence block v3+ */
-	MIPI_SEQ_POWER_OFF,		/* sequence block v3+ */
-	MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
-	MIPI_SEQ_ELEM_END = 0,
-	MIPI_SEQ_ELEM_SEND_PKT,
-	MIPI_SEQ_ELEM_DELAY,
-	MIPI_SEQ_ELEM_GPIO,
-	MIPI_SEQ_ELEM_I2C,		/* sequence block v2+ */
-	MIPI_SEQ_ELEM_SPI,		/* sequence block v3+ */
-	MIPI_SEQ_ELEM_PMIC,		/* sequence block v3+ */
-	MIPI_SEQ_ELEM_MAX
-};
-
-enum mipi_gpio_pin_index {
-	MIPI_GPIO_UNDEFINED = 0,
-	MIPI_GPIO_PANEL_ENABLE,
-	MIPI_GPIO_BL_ENABLE,
-	MIPI_GPIO_PWM_ENABLE,
-	MIPI_GPIO_RESET_N,
-	MIPI_GPIO_PWR_DOWN_R,
-	MIPI_GPIO_STDBY_RST_N,
-	MIPI_GPIO_MAX
-};
-
 #endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
new file mode 100644
index 0000000..1b3f974
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+#define CTM_COEFF_SIGN	(1ULL << 63)
+
+#define CTM_COEFF_1_0	(1ULL << 32)
+#define CTM_COEFF_2_0	(CTM_COEFF_1_0 << 1)
+#define CTM_COEFF_4_0	(CTM_COEFF_2_0 << 1)
+#define CTM_COEFF_8_0	(CTM_COEFF_4_0 << 1)
+#define CTM_COEFF_0_5	(CTM_COEFF_1_0 >> 1)
+#define CTM_COEFF_0_25	(CTM_COEFF_0_5 >> 1)
+#define CTM_COEFF_0_125	(CTM_COEFF_0_25 >> 1)
+
+#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
+
+#define CTM_COEFF_NEGATIVE(coeff)	(((coeff) & CTM_COEFF_SIGN) != 0)
+#define CTM_COEFF_ABS(coeff)		((coeff) & (CTM_COEFF_SIGN - 1))
+
+#define LEGACY_LUT_LENGTH		(sizeof(struct drm_color_lut) * 256)
+
+/*
+ * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
+ * format). This macro takes the coefficient we want transformed and the
+ * number of fractional bits.
+ *
+ * We only have a 9-bit precision window, which slides depending on the value
+ * of the CTM coefficient, and we write the value starting at bit 3. We also
+ * round the value.
+ */
+#define I9XX_CSC_COEFF_FP(coeff, fbits)	\
+	(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
+
+#define I9XX_CSC_COEFF_LIMITED_RANGE	\
+	I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
+#define I9XX_CSC_COEFF_1_0		\
+	((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
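+
+/*
+ * For example, CTM_COEFF_1_0 is 1ULL << 32, so I9XX_CSC_COEFF_FP() with 8
+ * fractional bits shifts it right by 32 - 8 - 3 = 21 bits, giving 0x800;
+ * the +4 rounding and the 0xff8 mask leave that unchanged, so
+ * I9XX_CSC_COEFF_1_0 evaluates to (7 << 12) | 0x800 = 0x7800.
+ */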
+
+static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+{
+	return !state->degamma_lut &&
+		!state->ctm &&
+		state->gamma_lut &&
+		state->gamma_lut->length == LEGACY_LUT_LENGTH;
+}
+
+/*
+ * When using limited range, multiply the matrix given by userspace by
+ * the matrix that we would use for the limited range. We do the
+ * multiplication in U2.30 format.
+ */
+static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+{
+	int i;
+
+	for (i = 0; i < 9; i++)
+		result[i] = 0;
+
+	for (i = 0; i < 3; i++) {
+		int64_t user_coeff = input[i * 3 + i];
+		uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
+		uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
+					       0,
+					       CTM_COEFF_4_0 - 1) >> 2;
+
+		result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
+		if (CTM_COEFF_NEGATIVE(user_coeff))
+			result[i * 3 + i] |= CTM_COEFF_SIGN;
+	}
+}
+
+/* Set up the pipe CSC unit. */
+static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
+{
+	struct drm_crtc *crtc = crtc_state->crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int i, pipe = intel_crtc->pipe;
+	uint16_t coeffs[9] = { 0, };
+
+	if (crtc_state->ctm) {
+		struct drm_color_ctm *ctm =
+			(struct drm_color_ctm *)crtc_state->ctm->data;
+		uint64_t input[9] = { 0, };
+
+		if (intel_crtc->config->limited_color_range) {
+			ctm_mult_by_limited(input, ctm->matrix);
+		} else {
+			for (i = 0; i < ARRAY_SIZE(input); i++)
+				input[i] = ctm->matrix[i];
+		}
+
+		/*
+		 * Convert fixed point S31.32 input to format supported by the
+		 * hardware.
+		 */
+		for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+			uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
+
+			/*
+			 * Clamp input value to min/max supported by
+			 * hardware.
+			 */
+			abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+			/* sign bit */
+			if (CTM_COEFF_NEGATIVE(input[i]))
+				coeffs[i] |= 1 << 15;
+
+			if (abs_coeff < CTM_COEFF_0_125)
+				coeffs[i] |= (3 << 12) |
+					I9XX_CSC_COEFF_FP(abs_coeff, 12);
+			else if (abs_coeff < CTM_COEFF_0_25)
+				coeffs[i] |= (2 << 12) |
+					I9XX_CSC_COEFF_FP(abs_coeff, 11);
+			else if (abs_coeff < CTM_COEFF_0_5)
+				coeffs[i] |= (1 << 12) |
+					I9XX_CSC_COEFF_FP(abs_coeff, 10);
+			else if (abs_coeff < CTM_COEFF_1_0)
+				coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
+			else if (abs_coeff < CTM_COEFF_2_0)
+				coeffs[i] |= (7 << 12) |
+					I9XX_CSC_COEFF_FP(abs_coeff, 8);
+			else
+				coeffs[i] |= (6 << 12) |
+					I9XX_CSC_COEFF_FP(abs_coeff, 7);
+		}
+	} else {
+		/*
+		 * Load an identity matrix if no coefficients are provided.
+		 *
+		 * TODO: Check what kind of values actually come out of the
+		 * pipe with these coeff/postoff values and adjust to get the
+		 * best accuracy. Perhaps we even need to take the bpc value
+		 * into consideration.
+		 */
+		for (i = 0; i < 3; i++) {
+			if (intel_crtc->config->limited_color_range)
+				coeffs[i * 3 + i] =
+					I9XX_CSC_COEFF_LIMITED_RANGE;
+			else
+				coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
+		}
+	}
+
+	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
+	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
+
+	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
+	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
+
+	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
+	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
+
+	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+	if (INTEL_INFO(dev)->gen > 6) {
+		uint16_t postoff = 0;
+
+		if (intel_crtc->config->limited_color_range)
+			postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+	} else {
+		uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+		if (intel_crtc->config->limited_color_range)
+			mode |= CSC_BLACK_SCREEN_OFFSET;
+
+		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+	}
+}
+
+/*
+ * Set up the pipe CSC unit on CherryView.
+ */
+static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = to_intel_crtc(crtc)->pipe;
+	uint32_t mode;
+
+	if (state->ctm) {
+		struct drm_color_ctm *ctm =
+			(struct drm_color_ctm *) state->ctm->data;
+		uint16_t coeffs[9] = { 0, };
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+			uint64_t abs_coeff =
+				((1ULL << 63) - 1) & ctm->matrix[i];
+
+			/* Round coefficient. */
+			abs_coeff += 1 << (32 - 13);
+			/* Clamp to hardware limits. */
+			abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+			/* Write coefficients in S3.12 format. */
+			if (ctm->matrix[i] & (1ULL << 63))
+				coeffs[i] = 1 << 15;
+			coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+			coeffs[i] |= (abs_coeff >> 20) & 0xfff;
+		}
+
+		I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
+			   coeffs[1] << 16 | coeffs[0]);
+		I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
+			   coeffs[3] << 16 | coeffs[2]);
+		I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
+			   coeffs[5] << 16 | coeffs[4]);
+		I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
+			   coeffs[7] << 16 | coeffs[6]);
+		I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+	}
+
+	mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
+	if (!crtc_state_is_legacy(state)) {
+		mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+			(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+	}
+	I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+}
+
+void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc_state->crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.load_csc_matrix)
+		dev_priv->display.load_csc_matrix(crtc_state);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC. */
+static void i9xx_load_luts_internal(struct drm_crtc *crtc,
+				    struct drm_property_blob *blob)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	int i;
+
+	if (HAS_GMCH_DISPLAY(dev)) {
+		if (intel_crtc->config->has_dsi_encoder)
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
+	}
+
+	if (blob) {
+		struct drm_color_lut *lut = (struct drm_color_lut *) blob->data;
+		for (i = 0; i < 256; i++) {
+			uint32_t word =
+				(drm_color_lut_extract(lut[i].red, 8) << 16) |
+				(drm_color_lut_extract(lut[i].green, 8) << 8) |
+				drm_color_lut_extract(lut[i].blue, 8);
+
+			if (HAS_GMCH_DISPLAY(dev))
+				I915_WRITE(PALETTE(pipe, i), word);
+			else
+				I915_WRITE(LGC_PALETTE(pipe, i), word);
+		}
+	} else {
+		for (i = 0; i < 256; i++) {
+			uint32_t word = (i << 16) | (i << 8) | i;
+
+			if (HAS_GMCH_DISPLAY(dev))
+				I915_WRITE(PALETTE(pipe, i), word);
+			else
+				I915_WRITE(LGC_PALETTE(pipe, i), word);
+		}
+	}
+}
+
+static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+{
+	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
+static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+{
+	struct drm_crtc *crtc = crtc_state->crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *intel_crtc_state =
+		to_intel_crtc_state(crtc_state);
+	bool reenable_ips = false;
+
+	/*
+	 * Workaround: Do not read or write the pipe palette/gamma data while
+	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+	 */
+	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(intel_crtc);
+		reenable_ips = true;
+	}
+
+	intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+
+	i9xx_load_luts(crtc_state);
+
+	if (reenable_ips)
+		hsw_enable_ips(intel_crtc);
+}
+
+/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
+static void broadwell_load_luts(struct drm_crtc_state *state)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+	uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+
+	if (crtc_state_is_legacy(state)) {
+		haswell_load_luts(state);
+		return;
+	}
+
+	I915_WRITE(PREC_PAL_INDEX(pipe),
+		   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
+
+	if (state->degamma_lut) {
+		struct drm_color_lut *lut =
+			(struct drm_color_lut *) state->degamma_lut->data;
+
+		for (i = 0; i < lut_size; i++) {
+			uint32_t word =
+			drm_color_lut_extract(lut[i].red, 10) << 20 |
+			drm_color_lut_extract(lut[i].green, 10) << 10 |
+			drm_color_lut_extract(lut[i].blue, 10);
+
+			I915_WRITE(PREC_PAL_DATA(pipe), word);
+		}
+	} else {
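+		/* No degamma LUT provided: program a linear 10-bit ramp. */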
+		for (i = 0; i < lut_size; i++) {
+			uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+
+			I915_WRITE(PREC_PAL_DATA(pipe),
+				   (v << 20) | (v << 10) | v);
+		}
+	}
+
+	if (state->gamma_lut) {
+		struct drm_color_lut *lut =
+			(struct drm_color_lut *) state->gamma_lut->data;
+
+		for (i = 0; i < lut_size; i++) {
+			uint32_t word =
+			(drm_color_lut_extract(lut[i].red, 10) << 20) |
+			(drm_color_lut_extract(lut[i].green, 10) << 10) |
+			drm_color_lut_extract(lut[i].blue, 10);
+
+			I915_WRITE(PREC_PAL_DATA(pipe), word);
+		}
+
+		/* Program the max register to clamp values > 1.0. */
+		i = lut_size - 1;
+		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
+			   drm_color_lut_extract(lut[i].red, 16));
+		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
+			   drm_color_lut_extract(lut[i].green, 16));
+		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
+			   drm_color_lut_extract(lut[i].blue, 16));
+	} else {
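+		/* No gamma LUT provided: linear ramp and full-range max. */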
+		for (i = 0; i < lut_size; i++) {
+			uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+
+			I915_WRITE(PREC_PAL_DATA(pipe),
+				   (v << 20) | (v << 10) | v);
+		}
+
+		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
+		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
+		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+	}
+
+	intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+	I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
+	POSTING_READ(GAMMA_MODE(pipe));
+
+	/*
+	 * Reset the index, otherwise it prevents the legacy palette from
+	 * being written properly.
+	 */
+	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
+
+/* Loads the palette/gamma unit for the CRTC on CherryView. */
+static void cherryview_load_luts(struct drm_crtc_state *state)
+{
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
+	struct drm_color_lut *lut;
+	uint32_t i, lut_size;
+	uint32_t word0, word1;
+
+	if (crtc_state_is_legacy(state)) {
+		/* Turn off degamma/gamma on CGM block. */
+		I915_WRITE(CGM_PIPE_MODE(pipe),
+			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
+		i9xx_load_luts_internal(crtc, state->gamma_lut);
+		return;
+	}
+
+	if (state->degamma_lut) {
+		lut = (struct drm_color_lut *) state->degamma_lut->data;
+		lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+		for (i = 0; i < lut_size; i++) {
+			/* Write LUT in U0.14 format. */
+			word0 =
+			(drm_color_lut_extract(lut[i].green, 14) << 16) |
+			drm_color_lut_extract(lut[i].blue, 14);
+			word1 = drm_color_lut_extract(lut[i].red, 14);
+
+			I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0);
+			I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1);
+		}
+	}
+
+	if (state->gamma_lut) {
+		lut = (struct drm_color_lut *) state->gamma_lut->data;
+		lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+		for (i = 0; i < lut_size; i++) {
+			/* Write LUT in U0.10 format. */
+			word0 =
+			(drm_color_lut_extract(lut[i].green, 10) << 16) |
+			drm_color_lut_extract(lut[i].blue, 10);
+			word1 = drm_color_lut_extract(lut[i].red, 10);
+
+			I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0);
+			I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
+		}
+	}
+
+	I915_WRITE(CGM_PIPE_MODE(pipe),
+		   (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
+		   (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+		   (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+
+	/*
+	 * Also program a linear LUT in the legacy block (behind the
+	 * CGM block).
+	 */
+	i9xx_load_luts_internal(crtc, NULL);
+}
+
+void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc_state->crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->display.load_luts(crtc_state);
+}
+
+int intel_color_check(struct drm_crtc *crtc,
+		      struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	size_t gamma_length, degamma_length;
+
+	degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+		sizeof(struct drm_color_lut);
+	gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+		sizeof(struct drm_color_lut);
+
+	/*
+	 * The degamma and gamma LUTs are each allowed to be either NULL or
+	 * exactly the expected size.
+	 */
+	if ((!crtc_state->degamma_lut ||
+	     crtc_state->degamma_lut->length == degamma_length) &&
+	    (!crtc_state->gamma_lut ||
+	     crtc_state->gamma_lut->length == gamma_length))
+		return 0;
+
+	/*
+	 * We also allow no degamma lut and a gamma lut at the legacy
+	 * size (256 entries).
+	 */
+	if (!crtc_state->degamma_lut &&
+	    crtc_state->gamma_lut &&
+	    crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+		return 0;
+
+	return -EINVAL;
+}
+
+void intel_color_init(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
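+	/* Expose a 256 entry table to the legacy gamma ioctl. */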
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	if (IS_CHERRYVIEW(dev)) {
+		dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
+		dev_priv->display.load_luts = cherryview_load_luts;
+	} else if (IS_HASWELL(dev)) {
+		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+		dev_priv->display.load_luts = haswell_load_luts;
+	} else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
+		   IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+		dev_priv->display.load_luts = broadwell_load_luts;
+	} else {
+		dev_priv->display.load_luts = i9xx_load_luts;
+	}
+
+	/* Enable color management support when we have degamma & gamma LUTs. */
+	if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
+	    INTEL_INFO(dev)->color.gamma_lut_size != 0)
+		drm_helper_crtc_enable_color_mgmt(crtc,
+					INTEL_INFO(dev)->color.degamma_lut_size,
+					INTEL_INFO(dev)->color.gamma_lut_size);
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0364292..3fbb6fc 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -120,22 +120,16 @@
 static void intel_crt_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->base.dev;
-	int dotclock;
-
 	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 
-	dotclock = pipe_config->port_clock;
-
-	if (HAS_PCH_SPLIT(dev))
-		ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
-	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void hsw_crt_get_config(struct intel_encoder *encoder,
 			       struct intel_crtc_state *pipe_config)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
 	intel_ddi_get_config(encoder, pipe_config);
 
 	pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
@@ -143,6 +137,8 @@
 					      DRM_MODE_FLAG_PVSYNC |
 					      DRM_MODE_FLAG_NVSYNC);
 	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+	pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
 }
 
 /* Note: The caller is required to filter out dpms modes not supported by the
@@ -222,18 +218,26 @@
 {
 	struct drm_device *dev = connector->dev;
 	int max_dotclk = to_i915(dev)->max_dotclk_freq;
+	int max_clock;
 
-	int max_clock = 0;
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
 	if (mode->clock < 25000)
 		return MODE_CLOCK_LOW;
 
-	if (IS_GEN2(dev))
-		max_clock = 350000;
-	else
+	if (HAS_PCH_LPT(dev))
+		max_clock = 180000;
+	else if (IS_VALLEYVIEW(dev))
+		/*
+		 * 270 MHz due to current DPLL limits,
+		 * DAC limit supposedly 355 MHz.
+		 */
+		max_clock = 270000;
+	else if (IS_GEN3(dev) || IS_GEN4(dev))
 		max_clock = 400000;
+	else
+		max_clock = 350000;
 	if (mode->clock > max_clock)
 		return MODE_CLOCK_HIGH;
 
@@ -267,15 +271,9 @@
 	}
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev)) {
-		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+	if (HAS_DDI(dev))
 		pipe_config->port_clock = 135000 * 2;
 
-		pipe_config->dpll_hw_state.wrpll = 0;
-		pipe_config->dpll_hw_state.spll =
-			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
-	}
-
 	return true;
 }
 
@@ -658,6 +656,8 @@
 		else if (INTEL_INFO(dev)->gen < 4)
 			status = intel_crt_load_detect(crt,
 				to_intel_crtc(connector->state->crtc)->pipe);
+		else if (i915.load_detect_test)
+			status = connector_status_disconnected;
 		else
 			status = connector_status_unknown;
 		intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 902054e..a34c23e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -50,6 +50,7 @@
 MODULE_FIRMWARE(I915_CSR_BXT);
 
 #define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -188,28 +189,49 @@
 	{'B', '0'}, {'B', '1'}, {'B', '2'}
 };
 
-static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
+static const struct stepping_info no_stepping_info = { '*', '*' };
+
+static const struct stepping_info *
+intel_get_stepping_info(struct drm_i915_private *dev_priv)
 {
 	const struct stepping_info *si;
 	unsigned int size;
 
-	if (IS_KABYLAKE(dev)) {
+	if (IS_KABYLAKE(dev_priv)) {
 		size = ARRAY_SIZE(kbl_stepping_info);
 		si = kbl_stepping_info;
-	} else if (IS_SKYLAKE(dev)) {
+	} else if (IS_SKYLAKE(dev_priv)) {
 		size = ARRAY_SIZE(skl_stepping_info);
 		si = skl_stepping_info;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		size = ARRAY_SIZE(bxt_stepping_info);
 		si = bxt_stepping_info;
 	} else {
-		return NULL;
+		size = 0;
 	}
 
-	if (INTEL_REVID(dev) < size)
-		return si + INTEL_REVID(dev);
+	if (INTEL_REVID(dev_priv) < size)
+		return si + INTEL_REVID(dev_priv);
 
-	return NULL;
+	return &no_stepping_info;
+}
+
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+{
+	uint32_t val, mask;
+
+	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+	if (IS_BROXTON(dev_priv))
+		mask |= DC_STATE_DEBUG_MASK_CORES;
+
+	/* The bits set below never need to be cleared afterwards. */
+	val = I915_READ(DC_STATE_DEBUG);
+	if ((val & mask) != mask) {
+		val |= mask;
+		I915_WRITE(DC_STATE_DEBUG, val);
+		POSTING_READ(DC_STATE_DEBUG);
+	}
 }
 
 /**
@@ -220,19 +242,19 @@
  * Everytime display comes back from low power state this function is called to
  * copy the firmware from internal memory to registers.
  */
-bool intel_csr_load_program(struct drm_i915_private *dev_priv)
+void intel_csr_load_program(struct drm_i915_private *dev_priv)
 {
 	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;
 
 	if (!IS_GEN9(dev_priv)) {
 		DRM_ERROR("No CSR support available for this platform\n");
-		return false;
+		return;
 	}
 
 	if (!dev_priv->csr.dmc_payload) {
 		DRM_ERROR("Tried to program CSR with empty payload\n");
-		return false;
+		return;
 	}
 
 	fw_size = dev_priv->csr.dmc_fw_size;
@@ -246,34 +268,25 @@
 
 	dev_priv->csr.dc_state = 0;
 
-	return true;
+	gen9_set_dc_state_debugmask(dev_priv);
 }
 
 static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 			      const struct firmware *fw)
 {
-	struct drm_device *dev = dev_priv->dev;
 	struct intel_css_header *css_header;
 	struct intel_package_header *package_header;
 	struct intel_dmc_header *dmc_header;
 	struct intel_csr *csr = &dev_priv->csr;
-	const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
-	char stepping, substepping;
+	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
 	uint32_t *dmc_payload;
+	uint32_t required_min_version;
 
 	if (!fw)
 		return NULL;
 
-	if (!stepping_info) {
-		DRM_ERROR("Unknown stepping info, firmware loading failed\n");
-		return NULL;
-	}
-
-	stepping = stepping_info->stepping;
-	substepping = stepping_info->substepping;
-
 	/* Extract CSS Header information*/
 	css_header = (struct intel_css_header *)fw->data;
 	if (sizeof(struct intel_css_header) !=
@@ -285,15 +298,23 @@
 
 	csr->version = css_header->version;
 
-	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
-	    csr->version < SKL_CSR_VERSION_REQUIRED) {
-		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		required_min_version = SKL_CSR_VERSION_REQUIRED;
+	} else if (IS_BROXTON(dev_priv)) {
+		required_min_version = BXT_CSR_VERSION_REQUIRED;
+	} else {
+		MISSING_CASE(INTEL_REVID(dev_priv));
+		required_min_version = 0;
+	}
+
+	if (csr->version < required_min_version) {
+		DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
 			 " please upgrade to v%u.%u or later"
 			   " [" FIRMWARE_URL "].\n",
 			 CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version),
-			 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
-			 CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+			 CSR_VERSION_MAJOR(required_min_version),
+			 CSR_VERSION_MINOR(required_min_version));
 		return NULL;
 	}
 
@@ -313,11 +334,11 @@
 	/* Search for dmc_offset to find firware binary. */
 	for (i = 0; i < package_header->num_entries; i++) {
 		if (package_header->fw_info[i].substepping == '*' &&
-		    stepping == package_header->fw_info[i].stepping) {
+		    si->stepping == package_header->fw_info[i].stepping) {
 			dmc_offset = package_header->fw_info[i].offset;
 			break;
-		} else if (stepping == package_header->fw_info[i].stepping &&
-			substepping == package_header->fw_info[i].substepping) {
+		} else if (si->stepping == package_header->fw_info[i].stepping &&
+			   si->substepping == package_header->fw_info[i].substepping) {
 			dmc_offset = package_header->fw_info[i].offset;
 			break;
 		} else if (package_header->fw_info[i].stepping == '*' &&
@@ -325,7 +346,8 @@
 			dmc_offset = package_header->fw_info[i].offset;
 	}
 	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
-		DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
+		DRM_ERROR("Firmware not supported for %c stepping\n",
+			  si->stepping);
 		return NULL;
 	}
 	readcount += dmc_offset;
@@ -371,9 +393,7 @@
 		return NULL;
 	}
 
-	memcpy(dmc_payload, &fw->data[readcount], nbytes);
-
-	return dmc_payload;
+	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
 }
 
 static void csr_load_work_fn(struct work_struct *work)
@@ -388,18 +408,12 @@
 
 	ret = request_firmware(&fw, dev_priv->csr.fw_path,
 			       &dev_priv->dev->pdev->dev);
-	if (!fw)
-		goto out;
+	if (fw)
+		dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
 
-	dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
-	if (!dev_priv->csr.dmc_payload)
-		goto out;
-
-	/* load csr program during system boot, as needed for DC states */
-	intel_csr_load_program(dev_priv);
-
-out:
 	if (dev_priv->csr.dmc_payload) {
+		intel_csr_load_program(dev_priv);
+
 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
 		DRM_INFO("Finished loading %s (v%u.%u)\n",
@@ -453,10 +467,50 @@
 }
 
 /**
+ * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * @dev_priv: i915 drm device
+ *
+ * Prepare the DMC firmware before entering system suspend. This includes
+ * flushing pending work items and releasing any resources acquired during
+ * init.
+ */
+void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+{
+	if (!HAS_CSR(dev_priv))
+		return;
+
+	flush_work(&dev_priv->csr.work);
+
+	/* Drop the reference still held if the DMC firmware never loaded. */
+	if (!dev_priv->csr.dmc_payload)
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
+ * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * @dev_priv: i915 drm device
+ *
+ * Reinitialize the DMC firmware during system resume, reacquiring any
+ * resources released in intel_csr_ucode_suspend().
+ */
+void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+{
+	if (!HAS_CSR(dev_priv))
+		return;
+
+	/*
+	 * Reacquire the reference to keep RPM disabled in case DMC isn't
+	 * loaded.
+	 */
+	if (!dev_priv->csr.dmc_payload)
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
  * intel_csr_ucode_fini() - unload the CSR firmware.
  * @dev_priv: i915 drm device.
  *
- * Firmmware unloading includes freeing the internal momory and reset the
+ * Firmware unloading includes freeing the internal memory and resetting the
  * firmware loading status.
  */
 void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
@@ -464,7 +518,7 @@
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	flush_work(&dev_priv->csr.work);
+	intel_csr_ucode_suspend(dev_priv);
 
 	kfree(dev_priv->csr.dmc_payload);
 }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 96ffcc5..01e523d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -315,6 +315,9 @@
 		*dig_port = enc_to_mst(encoder)->primary;
 		*port = (*dig_port)->port;
 		break;
+	default:
+		WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
+		/* fallthrough and treat as unknown */
 	case INTEL_OUTPUT_DISPLAYPORT:
 	case INTEL_OUTPUT_EDP:
 	case INTEL_OUTPUT_HDMI:
@@ -326,9 +329,6 @@
 		*dig_port = NULL;
 		*port = PORT_E;
 		break;
-	default:
-		WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
-		break;
 	}
 }
 
@@ -360,7 +360,7 @@
 static const struct ddi_buf_trans *
 skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	if (dev_priv->edp_low_vswing) {
+	if (dev_priv->vbt.edp.low_vswing) {
 		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
 			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
 			return skl_y_ddi_translations_edp;
@@ -444,7 +444,7 @@
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
 
-		if (dev_priv->edp_low_vswing) {
+		if (dev_priv->vbt.edp.low_vswing) {
 			ddi_translations_edp = bdw_ddi_translations_edp;
 			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
 		} else {
@@ -637,6 +637,10 @@
 			break;
 		}
 
+		rx_ctl_val &= ~FDI_RX_ENABLE;
+		I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+		POSTING_READ(FDI_RX_CTL(PIPE_A));
+
 		temp = I915_READ(DDI_BUF_CTL(PORT_E));
 		temp &= ~DDI_BUF_CTL_ENABLE;
 		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
@@ -651,10 +655,6 @@
 
 		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 
-		rx_ctl_val &= ~FDI_RX_ENABLE;
-		I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
-		POSTING_READ(FDI_RX_CTL(PIPE_A));
-
 		/* Reset FDI_RX_MISC pwrdn lanes */
 		temp = I915_READ(FDI_RX_MISC(PIPE_A));
 		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -732,160 +732,6 @@
 }
 
 #define LC_FREQ 2700
-#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
-
-#define P_MIN 2
-#define P_MAX 64
-#define P_INC 2
-
-/* Constraints for PLL good behavior */
-#define REF_MIN 48
-#define REF_MAX 400
-#define VCO_MIN 2400
-#define VCO_MAX 4800
-
-#define abs_diff(a, b) ({			\
-	typeof(a) __a = (a);			\
-	typeof(b) __b = (b);			\
-	(void) (&__a == &__b);			\
-	__a > __b ? (__a - __b) : (__b - __a); })
-
-struct hsw_wrpll_rnp {
-	unsigned p, n2, r2;
-};
-
-static unsigned hsw_wrpll_get_budget_for_freq(int clock)
-{
-	unsigned budget;
-
-	switch (clock) {
-	case 25175000:
-	case 25200000:
-	case 27000000:
-	case 27027000:
-	case 37762500:
-	case 37800000:
-	case 40500000:
-	case 40541000:
-	case 54000000:
-	case 54054000:
-	case 59341000:
-	case 59400000:
-	case 72000000:
-	case 74176000:
-	case 74250000:
-	case 81000000:
-	case 81081000:
-	case 89012000:
-	case 89100000:
-	case 108000000:
-	case 108108000:
-	case 111264000:
-	case 111375000:
-	case 148352000:
-	case 148500000:
-	case 162000000:
-	case 162162000:
-	case 222525000:
-	case 222750000:
-	case 296703000:
-	case 297000000:
-		budget = 0;
-		break;
-	case 233500000:
-	case 245250000:
-	case 247750000:
-	case 253250000:
-	case 298000000:
-		budget = 1500;
-		break;
-	case 169128000:
-	case 169500000:
-	case 179500000:
-	case 202000000:
-		budget = 2000;
-		break;
-	case 256250000:
-	case 262500000:
-	case 270000000:
-	case 272500000:
-	case 273750000:
-	case 280750000:
-	case 281250000:
-	case 286000000:
-	case 291750000:
-		budget = 4000;
-		break;
-	case 267250000:
-	case 268500000:
-		budget = 5000;
-		break;
-	default:
-		budget = 1000;
-		break;
-	}
-
-	return budget;
-}
-
-static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
-				 unsigned r2, unsigned n2, unsigned p,
-				 struct hsw_wrpll_rnp *best)
-{
-	uint64_t a, b, c, d, diff, diff_best;
-
-	/* No best (r,n,p) yet */
-	if (best->p == 0) {
-		best->p = p;
-		best->n2 = n2;
-		best->r2 = r2;
-		return;
-	}
-
-	/*
-	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
-	 * freq2k.
-	 *
-	 * delta = 1e6 *
-	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
-	 *	   freq2k;
-	 *
-	 * and we would like delta <= budget.
-	 *
-	 * If the discrepancy is above the PPM-based budget, always prefer to
-	 * improve upon the previous solution.  However, if you're within the
-	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
-	 */
-	a = freq2k * budget * p * r2;
-	b = freq2k * budget * best->p * best->r2;
-	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
-	diff_best = abs_diff(freq2k * best->p * best->r2,
-			     LC_FREQ_2K * best->n2);
-	c = 1000000 * diff;
-	d = 1000000 * diff_best;
-
-	if (a < c && b < d) {
-		/* If both are above the budget, pick the closer */
-		if (best->p * best->r2 * diff < p * r2 * diff_best) {
-			best->p = p;
-			best->n2 = n2;
-			best->r2 = r2;
-		}
-	} else if (a >= c && b < d) {
-		/* If A is below the threshold but B is above it?  Update. */
-		best->p = p;
-		best->n2 = n2;
-		best->r2 = r2;
-	} else if (a >= c && b >= d) {
-		/* Both are below the limit, so pick the higher n2/(r2*r2) */
-		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
-			best->p = p;
-			best->n2 = n2;
-			best->r2 = r2;
-		}
-	}
-	/* Otherwise a < c && b >= d, do nothing */
-}
 
 static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 				   i915_reg_t reg)
@@ -1147,363 +993,20 @@
 		bxt_ddi_clock_get(encoder, pipe_config);
 }
 
-static void
-hsw_ddi_calculate_wrpll(int clock /* in Hz */,
-			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
-{
-	uint64_t freq2k;
-	unsigned p, n2, r2;
-	struct hsw_wrpll_rnp best = { 0, 0, 0 };
-	unsigned budget;
-
-	freq2k = clock / 100;
-
-	budget = hsw_wrpll_get_budget_for_freq(clock);
-
-	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
-	 * and directly pass the LC PLL to it. */
-	if (freq2k == 5400000) {
-		*n2_out = 2;
-		*p_out = 1;
-		*r2_out = 2;
-		return;
-	}
-
-	/*
-	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
-	 * the WR PLL.
-	 *
-	 * We want R so that REF_MIN <= Ref <= REF_MAX.
-	 * Injecting R2 = 2 * R gives:
-	 *   REF_MAX * r2 > LC_FREQ * 2 and
-	 *   REF_MIN * r2 < LC_FREQ * 2
-	 *
-	 * Which means the desired boundaries for r2 are:
-	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
-	 *
-	 */
-	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
-	     r2 <= LC_FREQ * 2 / REF_MIN;
-	     r2++) {
-
-		/*
-		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
-		 *
-		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
-		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
-		 *   VCO_MAX * r2 > n2 * LC_FREQ and
-		 *   VCO_MIN * r2 < n2 * LC_FREQ)
-		 *
-		 * Which means the desired boundaries for n2 are:
-		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
-		 */
-		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
-		     n2 <= VCO_MAX * r2 / LC_FREQ;
-		     n2++) {
-
-			for (p = P_MIN; p <= P_MAX; p += P_INC)
-				hsw_wrpll_update_rnp(freq2k, budget,
-						     r2, n2, p, &best);
-		}
-	}
-
-	*n2_out = best.n2;
-	*p_out = best.p;
-	*r2_out = best.r2;
-}
-
 static bool
 hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		   struct intel_crtc_state *crtc_state,
 		   struct intel_encoder *intel_encoder)
 {
-	int clock = crtc_state->port_clock;
+	struct intel_shared_dpll *pll;
 
-	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
-		struct intel_shared_dpll *pll;
-		uint32_t val;
-		unsigned p, n2, r2;
+	pll = intel_get_shared_dpll(intel_crtc, crtc_state,
+				    intel_encoder);
+	if (!pll)
+		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+				 pipe_name(intel_crtc->pipe));
 
-		hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
-		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-		      WRPLL_DIVIDER_POST(p);
-
-		memset(&crtc_state->dpll_hw_state, 0,
-		       sizeof(crtc_state->dpll_hw_state));
-
-		crtc_state->dpll_hw_state.wrpll = val;
-
-		pll = intel_get_shared_dpll(intel_crtc, crtc_state);
-		if (pll == NULL) {
-			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
-					 pipe_name(intel_crtc->pipe));
-			return false;
-		}
-
-		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
-	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
-		struct drm_atomic_state *state = crtc_state->base.state;
-		struct intel_shared_dpll_config *spll =
-			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
-
-		if (spll->crtc_mask &&
-		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
-			return false;
-
-		crtc_state->shared_dpll = DPLL_ID_SPLL;
-		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
-		spll->crtc_mask |= 1 << intel_crtc->pipe;
-	}
-
-	return true;
-}
-
-struct skl_wrpll_context {
-	uint64_t min_deviation;		/* current minimal deviation */
-	uint64_t central_freq;		/* chosen central freq */
-	uint64_t dco_freq;		/* chosen dco freq */
-	unsigned int p;			/* chosen divider */
-};
-
-static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
-{
-	memset(ctx, 0, sizeof(*ctx));
-
-	ctx->min_deviation = U64_MAX;
-}
-
-/* DCO freq must be within +1%/-6%  of the DCO central freq */
-#define SKL_DCO_MAX_PDEVIATION	100
-#define SKL_DCO_MAX_NDEVIATION	600
-
-static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
-				  uint64_t central_freq,
-				  uint64_t dco_freq,
-				  unsigned int divider)
-{
-	uint64_t deviation;
-
-	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
-			      central_freq);
-
-	/* positive deviation */
-	if (dco_freq >= central_freq) {
-		if (deviation < SKL_DCO_MAX_PDEVIATION &&
-		    deviation < ctx->min_deviation) {
-			ctx->min_deviation = deviation;
-			ctx->central_freq = central_freq;
-			ctx->dco_freq = dco_freq;
-			ctx->p = divider;
-		}
-	/* negative deviation */
-	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
-		   deviation < ctx->min_deviation) {
-		ctx->min_deviation = deviation;
-		ctx->central_freq = central_freq;
-		ctx->dco_freq = dco_freq;
-		ctx->p = divider;
-	}
-}
-
-static void skl_wrpll_get_multipliers(unsigned int p,
-				      unsigned int *p0 /* out */,
-				      unsigned int *p1 /* out */,
-				      unsigned int *p2 /* out */)
-{
-	/* even dividers */
-	if (p % 2 == 0) {
-		unsigned int half = p / 2;
-
-		if (half == 1 || half == 2 || half == 3 || half == 5) {
-			*p0 = 2;
-			*p1 = 1;
-			*p2 = half;
-		} else if (half % 2 == 0) {
-			*p0 = 2;
-			*p1 = half / 2;
-			*p2 = 2;
-		} else if (half % 3 == 0) {
-			*p0 = 3;
-			*p1 = half / 3;
-			*p2 = 2;
-		} else if (half % 7 == 0) {
-			*p0 = 7;
-			*p1 = half / 7;
-			*p2 = 2;
-		}
-	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
-		*p0 = 3;
-		*p1 = 1;
-		*p2 = p / 3;
-	} else if (p == 5 || p == 7) {
-		*p0 = p;
-		*p1 = 1;
-		*p2 = 1;
-	} else if (p == 15) {
-		*p0 = 3;
-		*p1 = 1;
-		*p2 = 5;
-	} else if (p == 21) {
-		*p0 = 7;
-		*p1 = 1;
-		*p2 = 3;
-	} else if (p == 35) {
-		*p0 = 7;
-		*p1 = 1;
-		*p2 = 5;
-	}
-}
-
-struct skl_wrpll_params {
-	uint32_t        dco_fraction;
-	uint32_t        dco_integer;
-	uint32_t        qdiv_ratio;
-	uint32_t        qdiv_mode;
-	uint32_t        kdiv;
-	uint32_t        pdiv;
-	uint32_t        central_freq;
-};
-
-static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
-				      uint64_t afe_clock,
-				      uint64_t central_freq,
-				      uint32_t p0, uint32_t p1, uint32_t p2)
-{
-	uint64_t dco_freq;
-
-	switch (central_freq) {
-	case 9600000000ULL:
-		params->central_freq = 0;
-		break;
-	case 9000000000ULL:
-		params->central_freq = 1;
-		break;
-	case 8400000000ULL:
-		params->central_freq = 3;
-	}
-
-	switch (p0) {
-	case 1:
-		params->pdiv = 0;
-		break;
-	case 2:
-		params->pdiv = 1;
-		break;
-	case 3:
-		params->pdiv = 2;
-		break;
-	case 7:
-		params->pdiv = 4;
-		break;
-	default:
-		WARN(1, "Incorrect PDiv\n");
-	}
-
-	switch (p2) {
-	case 5:
-		params->kdiv = 0;
-		break;
-	case 2:
-		params->kdiv = 1;
-		break;
-	case 3:
-		params->kdiv = 2;
-		break;
-	case 1:
-		params->kdiv = 3;
-		break;
-	default:
-		WARN(1, "Incorrect KDiv\n");
-	}
-
-	params->qdiv_ratio = p1;
-	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
-
-	dco_freq = p0 * p1 * p2 * afe_clock;
-
-	/*
-	 * Intermediate values are in Hz.
-	 * Divide by MHz to match bsepc
-	 */
-	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
-	params->dco_fraction =
-		div_u64((div_u64(dco_freq, 24) -
-			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
-}
-
-static bool
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
-			struct skl_wrpll_params *wrpll_params)
-{
-	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
-	uint64_t dco_central_freq[3] = {8400000000ULL,
-					9000000000ULL,
-					9600000000ULL};
-	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
-					     24, 28, 30, 32, 36, 40, 42, 44,
-					     48, 52, 54, 56, 60, 64, 66, 68,
-					     70, 72, 76, 78, 80, 84, 88, 90,
-					     92, 96, 98 };
-	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
-	static const struct {
-		const int *list;
-		int n_dividers;
-	} dividers[] = {
-		{ even_dividers, ARRAY_SIZE(even_dividers) },
-		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
-	};
-	struct skl_wrpll_context ctx;
-	unsigned int dco, d, i;
-	unsigned int p0, p1, p2;
-
-	skl_wrpll_context_init(&ctx);
-
-	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
-		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
-			for (i = 0; i < dividers[d].n_dividers; i++) {
-				unsigned int p = dividers[d].list[i];
-				uint64_t dco_freq = p * afe_clock;
-
-				skl_wrpll_try_divider(&ctx,
-						      dco_central_freq[dco],
-						      dco_freq,
-						      p);
-				/*
-				 * Skip the remaining dividers if we're sure to
-				 * have found the definitive divider, we can't
-				 * improve a 0 deviation.
-				 */
-				if (ctx.min_deviation == 0)
-					goto skip_remaining_dividers;
-			}
-		}
-
-skip_remaining_dividers:
-		/*
-		 * If a solution is found with an even divider, prefer
-		 * this one.
-		 */
-		if (d == 0 && ctx.p)
-			break;
-	}
-
-	if (!ctx.p) {
-		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
-		return false;
-	}
-
-	/*
-	 * gcc incorrectly analyses that these can be used without being
-	 * initialized. To be fair, it's hard to guess.
-	 */
-	p0 = p1 = p2 = 0;
-	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
-	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
-				  p0, p1, p2);
-
-	return true;
+	return pll;
 }
 
 static bool
@@ -1512,218 +1015,23 @@
 		   struct intel_encoder *intel_encoder)
 {
 	struct intel_shared_dpll *pll;
-	uint32_t ctrl1, cfgcr1, cfgcr2;
-	int clock = crtc_state->port_clock;
 
-	/*
-	 * See comment in intel_dpll_hw_state to understand why we always use 0
-	 * as the DPLL id in this function.
-	 */
-
-	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
-	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
-		struct skl_wrpll_params wrpll_params = { 0, };
-
-		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
-		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
-			return false;
-
-		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
-			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
-			 wrpll_params.dco_integer;
-
-		cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-			 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
-			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
-			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
-			 wrpll_params.central_freq;
-	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-		   intel_encoder->type == INTEL_OUTPUT_DP_MST) {
-		switch (crtc_state->port_clock / 2) {
-		case 81000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
-			break;
-		case 135000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
-			break;
-		case 270000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
-			break;
-		}
-
-		cfgcr1 = cfgcr2 = 0;
-	} else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-		return true;
-	} else
-		return false;
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
-	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
-	pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+	pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
 	if (pll == NULL) {
 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
 				 pipe_name(intel_crtc->pipe));
 		return false;
 	}
 
-	/* shared DPLL id 0 is DPLL 1 */
-	crtc_state->ddi_pll_sel = pll->id + 1;
-
 	return true;
 }
 
-/* bxt clock parameters */
-struct bxt_clk_div {
-	int clock;
-	uint32_t p1;
-	uint32_t p2;
-	uint32_t m2_int;
-	uint32_t m2_frac;
-	bool m2_frac_en;
-	uint32_t n;
-};
-
-/* pre-calculated values for DP linkrates */
-static const struct bxt_clk_div bxt_dp_clk_val[] = {
-	{162000, 4, 2, 32, 1677722, 1, 1},
-	{270000, 4, 1, 27,       0, 0, 1},
-	{540000, 2, 1, 27,       0, 0, 1},
-	{216000, 3, 2, 32, 1677722, 1, 1},
-	{243000, 4, 1, 24, 1258291, 1, 1},
-	{324000, 4, 1, 32, 1677722, 1, 1},
-	{432000, 3, 1, 32, 1677722, 1, 1}
-};
-
 static bool
 bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 		   struct intel_crtc_state *crtc_state,
 		   struct intel_encoder *intel_encoder)
 {
-	struct intel_shared_dpll *pll;
-	struct bxt_clk_div clk_div = {0};
-	int vco = 0;
-	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-	uint32_t lanestagger;
-	int clock = crtc_state->port_clock;
-
-	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
-		intel_clock_t best_clock;
-
-		/* Calculate HDMI div */
-		/*
-		 * FIXME: tie the following calculation into
-		 * i9xx_crtc_compute_clock
-		 */
-		if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
-			DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-					 clock, pipe_name(intel_crtc->pipe));
-			return false;
-		}
-
-		clk_div.p1 = best_clock.p1;
-		clk_div.p2 = best_clock.p2;
-		WARN_ON(best_clock.m1 != 2);
-		clk_div.n = best_clock.n;
-		clk_div.m2_int = best_clock.m2 >> 22;
-		clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
-		clk_div.m2_frac_en = clk_div.m2_frac != 0;
-
-		vco = best_clock.vco;
-	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-			intel_encoder->type == INTEL_OUTPUT_EDP) {
-		int i;
-
-		clk_div = bxt_dp_clk_val[0];
-		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
-			if (bxt_dp_clk_val[i].clock == clock) {
-				clk_div = bxt_dp_clk_val[i];
-				break;
-			}
-		}
-		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
-	}
-
-	if (vco >= 6200000 && vco <= 6700000) {
-		prop_coef = 4;
-		int_coef = 9;
-		gain_ctl = 3;
-		targ_cnt = 8;
-	} else if ((vco > 5400000 && vco < 6200000) ||
-			(vco >= 4800000 && vco < 5400000)) {
-		prop_coef = 5;
-		int_coef = 11;
-		gain_ctl = 3;
-		targ_cnt = 9;
-	} else if (vco == 5400000) {
-		prop_coef = 3;
-		int_coef = 8;
-		gain_ctl = 1;
-		targ_cnt = 9;
-	} else {
-		DRM_ERROR("Invalid VCO\n");
-		return false;
-	}
-
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	if (clock > 270000)
-		lanestagger = 0x18;
-	else if (clock > 135000)
-		lanestagger = 0x0d;
-	else if (clock > 67000)
-		lanestagger = 0x07;
-	else if (clock > 33000)
-		lanestagger = 0x04;
-	else
-		lanestagger = 0x02;
-
-	crtc_state->dpll_hw_state.ebb0 =
-		PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
-	crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
-	crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
-	crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
-
-	if (clk_div.m2_frac_en)
-		crtc_state->dpll_hw_state.pll3 =
-			PORT_PLL_M2_FRAC_ENABLE;
-
-	crtc_state->dpll_hw_state.pll6 =
-		prop_coef | PORT_PLL_INT_COEFF(int_coef);
-	crtc_state->dpll_hw_state.pll6 |=
-		PORT_PLL_GAIN_CTL(gain_ctl);
-
-	crtc_state->dpll_hw_state.pll8 = targ_cnt;
-
-	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
-
-	crtc_state->dpll_hw_state.pll10 =
-		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
-		| PORT_PLL_DCO_AMP_OVR_EN_H;
-
-	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
-
-	crtc_state->dpll_hw_state.pcsdw12 =
-		LANESTAGGER_STRAP_OVRD | lanestagger;
-
-	pll = intel_get_shared_dpll(intel_crtc, crtc_state);
-	if (pll == NULL) {
-		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
-			pipe_name(intel_crtc->pipe));
-		return false;
-	}
-
-	/* shared DPLL id 0 is DPLL A */
-	crtc_state->ddi_pll_sel = pll->id;
-
-	return true;
+	return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
 }
 
 /*
@@ -1761,6 +1069,8 @@
 	uint32_t temp;
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+		WARN_ON(transcoder_is_dsi(cpu_transcoder));
+
 		temp = TRANS_MSA_SYNC_CLK;
 		switch (intel_crtc->config->pipe_bpp) {
 		case 18:
@@ -2129,7 +1439,7 @@
 	u32 n_entries, i;
 	uint32_t val;
 
-	if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
+	if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
 		n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
 		ddi_translations = bxt_ddi_translations_edp;
 	} else if (type == INTEL_OUTPUT_DISPLAYPORT
@@ -2267,24 +1577,6 @@
 		uint32_t dpll = pipe_config->ddi_pll_sel;
 		uint32_t val;
 
-		/*
-		 * DPLL0 is used for eDP and is the only "private" DPLL (as
-		 * opposed to shared) on SKL
-		 */
-		if (encoder->type == INTEL_OUTPUT_EDP) {
-			WARN_ON(dpll != SKL_DPLL0);
-
-			val = I915_READ(DPLL_CTRL1);
-
-			val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
-				 DPLL_CTRL1_SSC(dpll) |
-				 DPLL_CTRL1_LINK_RATE_MASK(dpll));
-			val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
-
-			I915_WRITE(DPLL_CTRL1, val);
-			POSTING_READ(DPLL_CTRL1);
-		}
-
 		/* DDI -> PLL mapping  */
 		val = I915_READ(DPLL_CTRL2);
 
@@ -2309,6 +1601,12 @@
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
 
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+	}
+
 	intel_prepare_ddi_buffer(intel_encoder);
 
 	if (type == INTEL_OUTPUT_EDP) {
@@ -2375,6 +1673,12 @@
 					DPLL_CTRL2_DDI_CLK_OFF(port)));
 	else if (INTEL_INFO(dev)->gen < 9)
 		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+	}
 }
 
 static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -2438,251 +1742,101 @@
 	}
 }
 
-static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
-			       struct intel_shared_dpll *pll)
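+/*
+ * Returns true only if the PHY is powered on with its power good, out of
+ * common lane reset and, for PHY1, with GRC calibration done.
+ */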
+static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
+				   enum dpio_phy phy)
 {
-	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
-	POSTING_READ(WRPLL_CTL(pll->id));
-	udelay(20);
-}
-
-static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
-				struct intel_shared_dpll *pll)
-{
-	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
-	POSTING_READ(SPLL_CTL);
-	udelay(20);
-}
-
-static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
-				  struct intel_shared_dpll *pll)
-{
-	uint32_t val;
-
-	val = I915_READ(WRPLL_CTL(pll->id));
-	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
-	POSTING_READ(WRPLL_CTL(pll->id));
-}
-
-static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
-				 struct intel_shared_dpll *pll)
-{
-	uint32_t val;
-
-	val = I915_READ(SPLL_CTL);
-	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-	POSTING_READ(SPLL_CTL);
-}
-
-static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
-				       struct intel_shared_dpll *pll,
-				       struct intel_dpll_hw_state *hw_state)
-{
-	uint32_t val;
-
-	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
 		return false;
 
-	val = I915_READ(WRPLL_CTL(pll->id));
-	hw_state->wrpll = val;
+	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+				 phy);
 
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
-	return val & WRPLL_PLL_ENABLE;
-}
-
-static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
-				      struct intel_shared_dpll *pll,
-				      struct intel_dpll_hw_state *hw_state)
-{
-	uint32_t val;
-
-	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;
-
-	val = I915_READ(SPLL_CTL);
-	hw_state->spll = val;
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
-	return val & SPLL_PLL_ENABLE;
-}
-
-
-static const char * const hsw_ddi_pll_names[] = {
-	"WRPLL 1",
-	"WRPLL 2",
-	"SPLL"
-};
-
-static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
-	int i;
-
-	dev_priv->num_shared_dpll = 3;
-
-	for (i = 0; i < 2; i++) {
-		dev_priv->shared_dplls[i].id = i;
-		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
-		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
-		dev_priv->shared_dplls[i].get_hw_state =
-			hsw_ddi_wrpll_get_hw_state;
 	}
 
-	/* SPLL is special, but needs to be initialized anyway.. */
-	dev_priv->shared_dplls[i].id = i;
-	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
-	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
-	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+	if (phy == DPIO_PHY1 &&
+	    !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
+		DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
 
-}
-
-static const char * const skl_ddi_pll_names[] = {
-	"DPLL 1",
-	"DPLL 2",
-	"DPLL 3",
-};
-
-struct skl_dpll_regs {
-	i915_reg_t ctl, cfgcr1, cfgcr2;
-};
-
-/* this array is indexed by the *shared* pll id */
-static const struct skl_dpll_regs skl_dpll_regs[3] = {
-	{
-		/* DPLL 1 */
-		.ctl = LCPLL2_CTL,
-		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
-		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
-	},
-	{
-		/* DPLL 2 */
-		.ctl = WRPLL_CTL(0),
-		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
-		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
-	},
-	{
-		/* DPLL 3 */
-		.ctl = WRPLL_CTL(1),
-		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
-		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
-	},
-};
-
-static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
-			       struct intel_shared_dpll *pll)
-{
-	uint32_t val;
-	unsigned int dpll;
-	const struct skl_dpll_regs *regs = skl_dpll_regs;
-
-	/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
-	dpll = pll->id + 1;
-
-	val = I915_READ(DPLL_CTRL1);
-
-	val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
-		 DPLL_CTRL1_LINK_RATE_MASK(dpll));
-	val |= pll->config.hw_state.ctrl1 << (dpll * 6);
-
-	I915_WRITE(DPLL_CTRL1, val);
-	POSTING_READ(DPLL_CTRL1);
-
-	I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
-	I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
-	POSTING_READ(regs[pll->id].cfgcr1);
-	POSTING_READ(regs[pll->id].cfgcr2);
-
-	/* the enable bit is always bit 31 */
-	I915_WRITE(regs[pll->id].ctl,
-		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
-
-	if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
-		DRM_ERROR("DPLL %d not locked\n", dpll);
-}
-
-static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
-				struct intel_shared_dpll *pll)
-{
-	const struct skl_dpll_regs *regs = skl_dpll_regs;
-
-	/* the enable bit is always bit 31 */
-	I915_WRITE(regs[pll->id].ctl,
-		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
-	POSTING_READ(regs[pll->id].ctl);
-}
-
-static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-				     struct intel_shared_dpll *pll,
-				     struct intel_dpll_hw_state *hw_state)
-{
-	uint32_t val;
-	unsigned int dpll;
-	const struct skl_dpll_regs *regs = skl_dpll_regs;
-	bool ret;
-
-	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;
-
-	ret = false;
-
-	/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
-	dpll = pll->id + 1;
-
-	val = I915_READ(regs[pll->id].ctl);
-	if (!(val & LCPLL_PLL_ENABLE))
-		goto out;
-
-	val = I915_READ(DPLL_CTRL1);
-	hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
-
-	/* avoid reading back stale values if HDMI mode is not enabled */
-	if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
-		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
-		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
 	}
-	ret = true;
 
-out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+				 phy);
 
-	return ret;
+		return false;
+	}
+
+	return true;
 }
 
-static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
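+/* Read back the GRC calibration code currently held in the given PHY. */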
+static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
-	int i;
+	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
 
-	dev_priv->num_shared_dpll = 3;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		dev_priv->shared_dplls[i].id = i;
-		dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
-		dev_priv->shared_dplls[i].get_hw_state =
-			skl_ddi_pll_get_hw_state;
-	}
+	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
 }
 
+static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+				      enum dpio_phy phy)
+{
+	if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+				     enum dpio_phy phy);
+
 static void broxton_phy_init(struct drm_i915_private *dev_priv,
 			     enum dpio_phy phy)
 {
 	enum port port;
-	uint32_t val;
+	u32 ports, val;
+
+	if (broxton_phy_is_enabled(dev_priv, phy)) {
+		/* Still read out the GRC value for state verification */
+		if (phy == DPIO_PHY0)
+			dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
+
+		if (broxton_phy_verify_state(dev_priv, phy)) {
+			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+					 "won't reprogram it\n", phy);
+
+			return;
+		}
+
+		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+				 "force reprogramming it\n", phy);
+	} else {
+		DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
+	}
 
 	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
 	val |= GT_DISPLAY_POWER_ON(phy);
 	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 
-	/* Considering 10ms timeout until BSpec is updated */
-	if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
+	/*
+	 * The PHY registers start out inaccessible and respond to reads with
+	 * all 1s.  Eventually they become accessible as they power up, then
+	 * the reserved bit will give the default 0.  Poll on the reserved bit
+	 * becoming 0 to find when the PHY is accessible.
+	 * The HW team confirmed that the time to reach the PHY power good
+	 * status is anywhere between 50 us and 100 us.
+	 */
+	if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+		(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
 		DRM_ERROR("timeout during PHY%d power on\n", phy);
+	}
 
-	for (port =  (phy == DPIO_PHY0 ? PORT_B : PORT_A);
-	     port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
+	if (phy == DPIO_PHY0)
+		ports = BIT(PORT_B) | BIT(PORT_C);
+	else
+		ports = BIT(PORT_A);
+
+	for_each_port_masked(port, ports) {
 		int lane;
 
 		for (lane = 0; lane < 4; lane++) {
@@ -2730,6 +1884,9 @@
 	 * enabled.
 	 * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
 	 * power down the second channel on PHY0 as well.
+	 *
+	 * FIXME: Clarify the programming of the following; the register is
+	 * read-only with bit 6 fixed at 0, at least in stepping A.
 	 */
 	if (phy == DPIO_PHY1)
 		val |= OCL2_LDOFUSE_PWR_DIS;
@@ -2742,12 +1899,10 @@
 		 * the corresponding calibrated value from PHY1, and disable
 		 * the automatic calibration on PHY0.
 		 */
-		if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
-			     10))
-			DRM_ERROR("timeout waiting for PHY1 GRC\n");
+		broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
 
-		val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
-		val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+		val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
+							      DPIO_PHY1);
 		grc_code = val << GRC_CODE_FAST_SHIFT |
 			   val << GRC_CODE_SLOW_SHIFT |
 			   val;
@@ -2757,17 +1912,27 @@
 		val |= GRC_DIS | GRC_RDY_OVRD;
 		I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
 	}
+	/*
+	 * During PHY1 init, delay waiting for the GRC calibration to finish,
+	 * since it can happen in parallel with the subsequent PHY0 init.
+	 */
 
 	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
 	val |= COMMON_RESET_DIS;
 	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
 }
 
-void broxton_ddi_phy_init(struct drm_device *dev)
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
 {
 	/* Enable PHY1 first since it provides Rcomp for PHY0 */
-	broxton_phy_init(dev->dev_private, DPIO_PHY1);
-	broxton_phy_init(dev->dev_private, DPIO_PHY0);
+	broxton_phy_init(dev_priv, DPIO_PHY1);
+	broxton_phy_init(dev_priv, DPIO_PHY0);
+
+	/*
+	 * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
+	 * PHY1 GRC calibration to finish, so wait for it here.
+	 */
+	broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
 }
 
 static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
@@ -2778,260 +1943,126 @@
 	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
 	val &= ~COMMON_RESET_DIS;
 	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val &= ~GT_DISPLAY_POWER_ON(phy);
+	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 }
 
-void broxton_ddi_phy_uninit(struct drm_device *dev)
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	broxton_phy_uninit(dev_priv, DPIO_PHY1);
 	broxton_phy_uninit(dev_priv, DPIO_PHY0);
-
-	/* FIXME: do this in broxton_phy_uninit per phy */
-	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
 }
 
-static const char * const bxt_ddi_pll_names[] = {
-	"PORT PLL A",
-	"PORT PLL B",
-	"PORT PLL C",
-};
-
-static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
-				struct intel_shared_dpll *pll)
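+/*
+ * Check that the masked bits of a PHY register match the expected value,
+ * logging the mismatch and returning false otherwise.
+ */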
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+		       i915_reg_t reg, u32 mask, u32 expected,
+		       const char *reg_fmt, ...)
 {
-	uint32_t temp;
-	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	struct va_format vaf;
+	va_list args;
+	u32 val;
 
-	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-	temp &= ~PORT_PLL_REF_SEL;
-	/* Non-SSC reference */
-	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+	val = I915_READ(reg);
+	if ((val & mask) == expected)
+		return true;
 
-	/* Disable 10 bit clock */
-	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
-	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+	va_start(args, reg_fmt);
+	vaf.fmt = reg_fmt;
+	vaf.va = &args;
 
-	/* Write P1 & P2 */
-	temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
-	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
-	temp |= pll->config.hw_state.ebb0;
-	I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+			 "current %08x, expected %08x (mask %08x)\n",
+			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+			 mask);
 
-	/* Write M2 integer */
-	temp = I915_READ(BXT_PORT_PLL(port, 0));
-	temp &= ~PORT_PLL_M2_MASK;
-	temp |= pll->config.hw_state.pll0;
-	I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+	va_end(args);
 
-	/* Write N */
-	temp = I915_READ(BXT_PORT_PLL(port, 1));
-	temp &= ~PORT_PLL_N_MASK;
-	temp |= pll->config.hw_state.pll1;
-	I915_WRITE(BXT_PORT_PLL(port, 1), temp);
-
-	/* Write M2 fraction */
-	temp = I915_READ(BXT_PORT_PLL(port, 2));
-	temp &= ~PORT_PLL_M2_FRAC_MASK;
-	temp |= pll->config.hw_state.pll2;
-	I915_WRITE(BXT_PORT_PLL(port, 2), temp);
-
-	/* Write M2 fraction enable */
-	temp = I915_READ(BXT_PORT_PLL(port, 3));
-	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
-	temp |= pll->config.hw_state.pll3;
-	I915_WRITE(BXT_PORT_PLL(port, 3), temp);
-
-	/* Write coeff */
-	temp = I915_READ(BXT_PORT_PLL(port, 6));
-	temp &= ~PORT_PLL_PROP_COEFF_MASK;
-	temp &= ~PORT_PLL_INT_COEFF_MASK;
-	temp &= ~PORT_PLL_GAIN_CTL_MASK;
-	temp |= pll->config.hw_state.pll6;
-	I915_WRITE(BXT_PORT_PLL(port, 6), temp);
-
-	/* Write calibration val */
-	temp = I915_READ(BXT_PORT_PLL(port, 8));
-	temp &= ~PORT_PLL_TARGET_CNT_MASK;
-	temp |= pll->config.hw_state.pll8;
-	I915_WRITE(BXT_PORT_PLL(port, 8), temp);
-
-	temp = I915_READ(BXT_PORT_PLL(port, 9));
-	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-	temp |= pll->config.hw_state.pll9;
-	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
-
-	temp = I915_READ(BXT_PORT_PLL(port, 10));
-	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
-	temp &= ~PORT_PLL_DCO_AMP_MASK;
-	temp |= pll->config.hw_state.pll10;
-	I915_WRITE(BXT_PORT_PLL(port, 10), temp);
-
-	/* Recalibrate with new settings */
-	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
-	temp |= PORT_PLL_RECALIBRATE;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-	temp |= pll->config.hw_state.ebb4;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-
-	/* Enable PLL */
-	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-	temp |= PORT_PLL_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-
-	if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-			PORT_PLL_LOCK), 200))
-		DRM_ERROR("PLL %d not locked\n", port);
-
-	/*
-	 * While we write to the group register to program all lanes at once we
-	 * can read only lane registers and we pick lanes 0/1 for that.
-	 */
-	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
-	temp &= ~LANE_STAGGER_MASK;
-	temp &= ~LANESTAGGER_STRAP_OVRD;
-	temp |= pll->config.hw_state.pcsdw12;
-	I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+	return false;
 }
 
-static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
-					struct intel_shared_dpll *pll)
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+				     enum dpio_phy phy)
 {
-	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
-	uint32_t temp;
+	enum port port;
+	u32 ports;
+	uint32_t mask;
+	bool ok;
 
-	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-	temp &= ~PORT_PLL_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-}
+#define _CHK(reg, mask, exp, fmt, ...)					\
+	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,	\
+			       ## __VA_ARGS__)
 
-static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-					struct intel_shared_dpll *pll,
-					struct intel_dpll_hw_state *hw_state)
-{
-	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
-	uint32_t val;
-	bool ret;
-
-	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+	/* We expect the PHY to be always enabled */
+	if (!broxton_phy_is_enabled(dev_priv, phy))
 		return false;
 
-	ret = false;
+	ok = true;
 
-	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
-	if (!(val & PORT_PLL_ENABLE))
-		goto out;
+	if (phy == DPIO_PHY0)
+		ports = BIT(PORT_B) | BIT(PORT_C);
+	else
+		ports = BIT(PORT_A);
 
-	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
-	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+	for_each_port_masked(port, ports) {
+		int lane;
 
-	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
-	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+		for (lane = 0; lane < 4; lane++)
+			ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
+				    LATENCY_OPTIM,
+				    lane != 1 ? LATENCY_OPTIM : 0,
+				    "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
+	}
 
-	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
-	hw_state->pll0 &= PORT_PLL_M2_MASK;
+	/* PLL Rcomp code offset */
+	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+		    IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+		    "BXT_PORT_CL1CM_DW9(%d)", phy);
+	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+		    IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+		    "BXT_PORT_CL1CM_DW10(%d)", phy);
 
-	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
-	hw_state->pll1 &= PORT_PLL_N_MASK;
+	/* Power gating */
+	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+		    "BXT_PORT_CL1CM_DW28(%d)", phy);
 
-	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
-	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
-
-	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
-	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
-
-	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
-	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
-			  PORT_PLL_INT_COEFF_MASK |
-			  PORT_PLL_GAIN_CTL_MASK;
-
-	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
-	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
-
-	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
-	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
-
-	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
-	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
-			   PORT_PLL_DCO_AMP_MASK;
+	if (phy == DPIO_PHY0)
+		ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
+			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+			   "BXT_PORT_CL2CM_DW6_BC");
 
 	/*
-	 * While we write to the group register to program all lanes at once we
-	 * can read only lane registers. We configure all lanes the same way, so
-	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
+	 * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS;
+	 * at least on stepping A this bit is read-only and fixed at 0.
 	 */
-	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
-	if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
-		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
-				 hw_state->pcsdw12,
-				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
-	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
-	ret = true;
+	if (phy == DPIO_PHY0) {
+		u32 grc_code = dev_priv->bxt_phy_grc;
 
-out:
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+			   grc_code << GRC_CODE_SLOW_SHIFT |
+			   grc_code;
+		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+		       GRC_CODE_NOM_MASK;
+		ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
+			    "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
 
-	return ret;
+		mask = GRC_DIS | GRC_RDY_OVRD;
+		ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
+			    "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
+	}
+
+	return ok;
+#undef _CHK
 }
 
-static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
 {
-	int i;
-
-	dev_priv->num_shared_dpll = 3;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		dev_priv->shared_dplls[i].id = i;
-		dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable;
-		dev_priv->shared_dplls[i].get_hw_state =
-			bxt_ddi_pll_get_hw_state;
-	}
-}
-
-void intel_ddi_pll_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val = I915_READ(LCPLL_CTL);
-
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-		skl_shared_dplls_init(dev_priv);
-	else if (IS_BROXTON(dev))
-		bxt_shared_dplls_init(dev_priv);
-	else
-		hsw_shared_dplls_init(dev_priv);
-
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-		int cdclk_freq;
-
-		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-		dev_priv->skl_boot_cdclk = cdclk_freq;
-		if (skl_sanitize_cdclk(dev_priv))
-			DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
-		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
-			DRM_ERROR("LCPLL1 is disabled\n");
-	} else if (IS_BROXTON(dev)) {
-		broxton_init_cdclk(dev);
-		broxton_ddi_phy_init(dev);
-	} else {
-		/*
-		 * The LCPLL register should be turned on by the BIOS. For now
-		 * let's just check its state and print errors in case
-		 * something is wrong.  Don't even try to turn it on.
-		 */
-
-		if (val & LCPLL_CD_SOURCE_FCLK)
-			DRM_ERROR("CDCLK source is not LCPLL\n");
-
-		if (val & LCPLL_PLL_DISABLE)
-			DRM_ERROR("LCPLL is disabled\n");
-	}
+	if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
+	    !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
+		i915_report_error(dev_priv, "DDI PHY state mismatch\n");
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3086,12 +2117,18 @@
 	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
 	uint32_t val;
 
-	intel_ddi_post_disable(intel_encoder);
-
+	/*
+	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+	 * step 13 is the correct place for it. Step 18 is where it was
+	 * originally before the BUN.
+	 */
 	val = I915_READ(FDI_RX_CTL(PIPE_A));
 	val &= ~FDI_RX_ENABLE;
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
+	intel_ddi_post_disable(intel_encoder);
+
 	val = I915_READ(FDI_RX_MISC(PIPE_A));
 	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
 	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
@@ -3115,6 +2152,10 @@
 	struct intel_hdmi *intel_hdmi;
 	u32 temp, flags = 0;
 
+	/* XXX: DSI transcoder paranoia */
+	if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+		return;
+
 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (temp & TRANS_DDI_PHSYNC)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3151,8 +2192,10 @@
 
 		if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
 			pipe_config->has_infoframe = true;
-		break;
+		/* fall through */
 	case TRANS_DDI_MODE_SELECT_DVI:
+		pipe_config->lane_count = 4;
+		break;
 	case TRANS_DDI_MODE_SELECT_FDI:
 		break;
 	case TRANS_DDI_MODE_SELECT_DP_SST:
@@ -3172,8 +2215,8 @@
 			pipe_config->has_audio = true;
 	}
 
-	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
-	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
+	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
 		/*
 		 * This is a big fat ugly hack.
 		 *
@@ -3188,8 +2231,8 @@
 		 * load.
 		 */
 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
-			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
-		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
 	}
 
 	intel_ddi_clock_get(encoder, pipe_config);
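
A minimal sketch of the GRC calibration code packing used by broxton_phy_init() and re-checked in broxton_phy_verify_state() above; bxt_pack_grc_code() is a hypothetical helper name, not a function introduced by this patch, and it assumes the GRC_CODE_FAST_SHIFT/GRC_CODE_SLOW_SHIFT definitions referenced in the hunks:

static u32 bxt_pack_grc_code(u32 val)
{
	/*
	 * Replicate the single calibration value into the fast, slow and
	 * nominal fields of BXT_PORT_REF_DW6, exactly as the init and
	 * verify paths do.
	 */
	return val << GRC_CODE_FAST_SHIFT |
	       val << GRC_CODE_SLOW_SHIFT |
	       val;
}

The verify path then masks BXT_PORT_REF_DW6 with GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK | GRC_CODE_NOM_MASK and compares the result against this packed value.
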
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0104a06..2113f40 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "intel_dsi.h"
 #include "i915_trace.h"
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -96,12 +97,13 @@
 				  struct drm_i915_gem_object *obj);
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 					 struct intel_link_m_n *m_n,
 					 struct intel_link_m_n *m2_n2);
 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void haswell_set_pipemisc(struct drm_crtc *crtc);
 static void vlv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -110,13 +112,11 @@
 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
 	struct intel_crtc_state *crtc_state);
-static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
-			   int num_connectors);
 static void skylake_pfit_enable(struct intel_crtc *crtc);
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
-static void intel_pre_disable_primary(struct drm_crtc *crtc);
+static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 
 typedef struct {
 	int	min, max;
@@ -147,15 +147,12 @@
 	return vco_freq[hpll_freq] * 1000;
 }
 
-static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
-				  const char *name, u32 reg)
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+		      const char *name, u32 reg, int ref_freq)
 {
 	u32 val;
 	int divider;
 
-	if (dev_priv->hpll_freq == 0)
-		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
-
 	mutex_lock(&dev_priv->sb_lock);
 	val = vlv_cck_read(dev_priv, reg);
 	mutex_unlock(&dev_priv->sb_lock);
@@ -166,52 +163,75 @@
 	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
 	     "%s change in progress\n", name);
 
-	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
+	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
 }
 
-int
-intel_pch_rawclk(struct drm_device *dev)
+static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+				  const char *name, u32 reg)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (dev_priv->hpll_freq == 0)
+		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
 
-	WARN_ON(!HAS_PCH_SPLIT(dev));
-
-	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+	return vlv_get_cck_clock(dev_priv, name, reg,
+				 dev_priv->hpll_freq);
 }
 
-/* hrawclock is 1/4 the FSB frequency */
-int intel_hrawclk(struct drm_device *dev)
+static int
+intel_pch_rawclk(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+}
+
+static int
+intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
+{
+	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+				      CCK_DISPLAY_REF_CLOCK_CONTROL);
+}
+
+static int
+intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
+{
 	uint32_t clkcfg;
 
-	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		return 200;
-
+	/* hrawclock is 1/4 the FSB frequency */
 	clkcfg = I915_READ(CLKCFG);
 	switch (clkcfg & CLKCFG_FSB_MASK) {
 	case CLKCFG_FSB_400:
-		return 100;
+		return 100000;
 	case CLKCFG_FSB_533:
-		return 133;
+		return 133333;
 	case CLKCFG_FSB_667:
-		return 166;
+		return 166667;
 	case CLKCFG_FSB_800:
-		return 200;
+		return 200000;
 	case CLKCFG_FSB_1067:
-		return 266;
+		return 266667;
 	case CLKCFG_FSB_1333:
-		return 333;
+		return 333333;
 	/* these two are just a guess; one of them might be right */
 	case CLKCFG_FSB_1600:
 	case CLKCFG_FSB_1600_ALT:
-		return 400;
+		return 400000;
 	default:
-		return 133;
+		return 133333;
 	}
 }
 
+static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+{
+	if (HAS_PCH_SPLIT(dev_priv))
+		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
+	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
+		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
+	else
+		return; /* no rawclk on other platforms, or no need to know it */
+
+	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+}
+
 static void intel_update_czclk(struct drm_i915_private *dev_priv)
 {
 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
@@ -224,13 +244,15 @@
 }
 
 static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_device *dev)
+intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+		    const struct intel_crtc_state *pipe_config)
 {
-	if (IS_GEN5(dev)) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
-		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
-	} else
-		return 27;
+	if (HAS_DDI(dev_priv))
+		return pipe_config->port_clock; /* SPLL */
+	else if (IS_GEN5(dev_priv))
+		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
+	else
+		return 270000;
 }
 
 static const intel_limit_t intel_limits_i8xx_dac = {
@@ -550,89 +572,6 @@
 	return false;
 }
 
-static const intel_limit_t *
-intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
-{
-	struct drm_device *dev = crtc_state->base.crtc->dev;
-	const intel_limit_t *limit;
-
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_is_dual_link_lvds(dev)) {
-			if (refclk == 100000)
-				limit = &intel_limits_ironlake_dual_lvds_100m;
-			else
-				limit = &intel_limits_ironlake_dual_lvds;
-		} else {
-			if (refclk == 100000)
-				limit = &intel_limits_ironlake_single_lvds_100m;
-			else
-				limit = &intel_limits_ironlake_single_lvds;
-		}
-	} else
-		limit = &intel_limits_ironlake_dac;
-
-	return limit;
-}
-
-static const intel_limit_t *
-intel_g4x_limit(struct intel_crtc_state *crtc_state)
-{
-	struct drm_device *dev = crtc_state->base.crtc->dev;
-	const intel_limit_t *limit;
-
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_is_dual_link_lvds(dev))
-			limit = &intel_limits_g4x_dual_channel_lvds;
-		else
-			limit = &intel_limits_g4x_single_channel_lvds;
-	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
-		limit = &intel_limits_g4x_hdmi;
-	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
-		limit = &intel_limits_g4x_sdvo;
-	} else /* The option is for other outputs */
-		limit = &intel_limits_i9xx_sdvo;
-
-	return limit;
-}
-
-static const intel_limit_t *
-intel_limit(struct intel_crtc_state *crtc_state, int refclk)
-{
-	struct drm_device *dev = crtc_state->base.crtc->dev;
-	const intel_limit_t *limit;
-
-	if (IS_BROXTON(dev))
-		limit = &intel_limits_bxt;
-	else if (HAS_PCH_SPLIT(dev))
-		limit = intel_ironlake_limit(crtc_state, refclk);
-	else if (IS_G4X(dev)) {
-		limit = intel_g4x_limit(crtc_state);
-	} else if (IS_PINEVIEW(dev)) {
-		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
-			limit = &intel_limits_pineview_lvds;
-		else
-			limit = &intel_limits_pineview_sdvo;
-	} else if (IS_CHERRYVIEW(dev)) {
-		limit = &intel_limits_chv;
-	} else if (IS_VALLEYVIEW(dev)) {
-		limit = &intel_limits_vlv;
-	} else if (!IS_GEN2(dev)) {
-		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
-			limit = &intel_limits_i9xx_lvds;
-		else
-			limit = &intel_limits_i9xx_sdvo;
-	} else {
-		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
-			limit = &intel_limits_i8xx_lvds;
-		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
-			limit = &intel_limits_i8xx_dvo;
-		else
-			limit = &intel_limits_i8xx_dac;
-	}
-	return limit;
-}
-
 /*
  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -763,6 +702,16 @@
 	}
 }
 
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
 static bool
 i9xx_find_best_dpll(const intel_limit_t *limit,
 		    struct intel_crtc_state *crtc_state,
@@ -810,6 +759,16 @@
 	return (err != target);
 }
 
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
 static bool
 pnv_find_best_dpll(const intel_limit_t *limit,
 		   struct intel_crtc_state *crtc_state,
@@ -855,6 +814,16 @@
 	return (err != target);
 }
 
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
 static bool
 g4x_find_best_dpll(const intel_limit_t *limit,
 		   struct intel_crtc_state *crtc_state,
@@ -943,6 +912,11 @@
 	return *error_ppm + 10 < best_error_ppm;
 }
 
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
 static bool
 vlv_find_best_dpll(const intel_limit_t *limit,
 		   struct intel_crtc_state *crtc_state,
@@ -997,6 +971,11 @@
 	return found;
 }
 
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
 static bool
 chv_find_best_dpll(const intel_limit_t *limit,
 		   struct intel_crtc_state *crtc_state,
@@ -1058,9 +1037,10 @@
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			intel_clock_t *best_clock)
 {
-	int refclk = i9xx_get_refclk(crtc_state, 0);
+	int refclk = 100000;
+	const intel_limit_t *limit = &intel_limits_bxt;
 
-	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
+	return chv_find_best_dpll(limit, crtc_state,
 				  target_clock, refclk, NULL, best_clock);
 }
 
@@ -1165,7 +1145,7 @@
 }
 
 /* XXX: the dsi pll is shared between MIPI DSI ports */
-static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
 {
 	u32 val;
 	bool cur_state;
@@ -1179,36 +1159,6 @@
 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
 			onoff(state), onoff(cur_state));
 }
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
-	if (crtc->config->shared_dpll < 0)
-		return NULL;
-
-	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
-}
-
-/* For ILK+ */
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
-			struct intel_shared_dpll *pll,
-			bool state)
-{
-	bool cur_state;
-	struct intel_dpll_hw_state hw_state;
-
-	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
-		return;
-
-	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
-	I915_STATE_WARN(cur_state != state,
-	     "%s assertion failure (expected %s, current %s)\n",
-			pll->name, onoff(state), onoff(cur_state));
-}
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 			  enum pipe pipe, bool state)
@@ -1217,7 +1167,7 @@
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
 
-	if (HAS_DDI(dev_priv->dev)) {
+	if (HAS_DDI(dev_priv)) {
 		/* DDI does not have a specific FDI_TX register */
 		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1253,11 +1203,11 @@
 	u32 val;
 
 	/* ILK FDI PLL is always enabled */
-	if (INTEL_INFO(dev_priv->dev)->gen == 5)
+	if (INTEL_INFO(dev_priv)->gen == 5)
 		return;
 
 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
-	if (HAS_DDI(dev_priv->dev))
+	if (HAS_DDI(dev_priv))
 		return;
 
 	val = I915_READ(FDI_TX_CTL(pipe));
@@ -1446,21 +1396,8 @@
 		drm_crtc_vblank_put(crtc);
 }
 
-static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-	bool enabled;
-
-	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
-
-	val = I915_READ(PCH_DREF_CONTROL);
-	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
-			    DREF_SUPERSPREAD_SOURCE_MASK));
-	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
-}
-
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
-					   enum pipe pipe)
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+				    enum pipe pipe)
 {
 	u32 val;
 	bool enabled;
@@ -1478,11 +1415,11 @@
 	if ((val & DP_PORT_EN) == 0)
 		return false;
 
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
 			return false;
-	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
 			return false;
 	} else {
@@ -1498,10 +1435,10 @@
 	if ((val & SDVO_ENABLE) == 0)
 		return false;
 
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
 			return false;
-	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
 			return false;
 	} else {
@@ -1517,7 +1454,7 @@
 	if ((val & LVDS_PORT_EN) == 0)
 		return false;
 
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
 			return false;
 	} else {
@@ -1532,7 +1469,7 @@
 {
 	if ((val & ADPA_DAC_ENABLE) == 0)
 		return false;
-	if (HAS_PCH_CPT(dev_priv->dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
 			return false;
 	} else {
@@ -1551,7 +1488,7 @@
 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
 	     i915_mmio_reg_offset(reg), pipe_name(pipe));
 
-	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
 	     && (val & DP_PIPEB_SELECT),
 	     "IBX PCH dp port still using transcoder B\n");
 }
@@ -1564,7 +1501,7 @@
 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
 	     i915_mmio_reg_offset(reg), pipe_name(pipe));
 
-	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
 	     && (val & SDVO_PIPE_B_SELECT),
 	     "IBX PCH hdmi port still using transcoder B\n");
 }
@@ -1593,53 +1530,47 @@
 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+		DRM_ERROR("DPLL %d failed to lock\n", pipe);
+}
+
 static void vlv_enable_pll(struct intel_crtc *crtc,
 			   const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	i915_reg_t reg = DPLL(crtc->pipe);
-	u32 dpll = pipe_config->dpll_hw_state.dpll;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	assert_pipe_disabled(dev_priv, pipe);
 
 	/* PLL is protected by panel, make sure we can write it */
-	if (IS_MOBILE(dev_priv->dev))
-		assert_panel_unlocked(dev_priv, crtc->pipe);
+	assert_panel_unlocked(dev_priv, pipe);
 
-	I915_WRITE(reg, dpll);
-	POSTING_READ(reg);
-	udelay(150);
+	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+		_vlv_enable_pll(crtc, pipe_config);
 
-	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
-		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
-
-	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
-	POSTING_READ(DPLL_MD(crtc->pipe));
-
-	/* We do this three times for luck */
-	I915_WRITE(reg, dpll);
-	POSTING_READ(reg);
-	udelay(150); /* wait for warmup */
-	I915_WRITE(reg, dpll);
-	POSTING_READ(reg);
-	udelay(150); /* wait for warmup */
-	I915_WRITE(reg, dpll);
-	POSTING_READ(reg);
-	udelay(150); /* wait for warmup */
+	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+	POSTING_READ(DPLL_MD(pipe));
 }
 
-static void chv_enable_pll(struct intel_crtc *crtc,
-			   const struct intel_crtc_state *pipe_config)
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+			    const struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
 	u32 tmp;
 
-	assert_pipe_disabled(dev_priv, crtc->pipe);
-
 	mutex_lock(&dev_priv->sb_lock);
 
 	/* Enable back the 10bit clock to display controller */
@@ -1660,10 +1591,43 @@
 	/* Check PLL is locked */
 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
 		DRM_ERROR("PLL %d failed to lock\n", pipe);
+}
 
-	/* not sure when this should be written */
-	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
-	POSTING_READ(DPLL_MD(pipe));
+static void chv_enable_pll(struct intel_crtc *crtc,
+			   const struct intel_crtc_state *pipe_config)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	assert_pipe_disabled(dev_priv, pipe);
+
+	/* PLL is protected by panel, make sure we can write it */
+	assert_panel_unlocked(dev_priv, pipe);
+
+	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+		_chv_enable_pll(crtc, pipe_config);
+
+	if (pipe != PIPE_A) {
+		/*
+		 * WaPixelRepeatModeFixForC0:chv
+		 *
+		 * DPLLCMD is AWOL. Use chicken bits to propagate
+		 * the value from DPLLBMD to either pipe B or C.
+		 */
+		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
+		I915_WRITE(CBR4_VLV, 0);
+		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+		/*
+		 * DPLLB VGA mode also seems to cause problems.
+		 * We should always have it disabled.
+		 */
+		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+	} else {
+		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+		POSTING_READ(DPLL_MD(pipe));
+	}
 }
 
 static int intel_num_dvo_pipes(struct drm_device *dev)
@@ -1687,9 +1651,6 @@
 
 	assert_pipe_disabled(dev_priv, crtc->pipe);
 
-	/* No really, not for ILK+ */
-	BUG_ON(INTEL_INFO(dev)->gen >= 5);
-
 	/* PLL is protected by panel, make sure we can write it */
 	if (IS_MOBILE(dev) && !IS_I830(dev))
 		assert_panel_unlocked(dev_priv, crtc->pipe);
@@ -1788,16 +1749,13 @@
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	/*
-	 * Leave integrated clock source and reference clock enabled for pipe B.
-	 * The latter is needed for VGA hotplug / manual detection.
-	 */
-	val = DPLL_VGA_MODE_DIS;
-	if (pipe == PIPE_B)
-		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
+	val = DPLL_INTEGRATED_REF_CLK_VLV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+	if (pipe != PIPE_A)
+		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
 	I915_WRITE(DPLL(pipe), val);
 	POSTING_READ(DPLL(pipe));
-
 }
 
 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1808,11 +1766,11 @@
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	/* Set PLL en = 0 */
 	val = DPLL_SSC_REF_CLK_CHV |
 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 	if (pipe != PIPE_A)
 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
 	I915_WRITE(DPLL(pipe), val);
 	POSTING_READ(DPLL(pipe));
 
@@ -1856,100 +1814,6 @@
 		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
 }
 
-static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-	if (WARN_ON(pll == NULL))
-		return;
-
-	WARN_ON(!pll->config.crtc_mask);
-	if (pll->active == 0) {
-		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
-		WARN_ON(pll->on);
-		assert_shared_dpll_disabled(dev_priv, pll);
-
-		pll->mode_set(dev_priv, pll);
-	}
-}
-
-/**
- * intel_enable_shared_dpll - enable PCH PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * The PCH PLL needs to be enabled before the PCH transcoder, since it
- * drives the transcoder clock.
- */
-static void intel_enable_shared_dpll(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-	if (WARN_ON(pll == NULL))
-		return;
-
-	if (WARN_ON(pll->config.crtc_mask == 0))
-		return;
-
-	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
-		      pll->name, pll->active, pll->on,
-		      crtc->base.base.id);
-
-	if (pll->active++) {
-		WARN_ON(!pll->on);
-		assert_shared_dpll_enabled(dev_priv, pll);
-		return;
-	}
-	WARN_ON(pll->on);
-
-	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
-
-	DRM_DEBUG_KMS("enabling %s\n", pll->name);
-	pll->enable(dev_priv, pll);
-	pll->on = true;
-}
-
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-	/* PCH only available on ILK+ */
-	if (INTEL_INFO(dev)->gen < 5)
-		return;
-
-	if (pll == NULL)
-		return;
-
-	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
-		return;
-
-	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
-		      pll->name, pll->active, pll->on,
-		      crtc->base.base.id);
-
-	if (WARN_ON(pll->active == 0)) {
-		assert_shared_dpll_disabled(dev_priv, pll);
-		return;
-	}
-
-	assert_shared_dpll_enabled(dev_priv, pll);
-	WARN_ON(!pll->on);
-	if (--pll->active)
-		return;
-
-	DRM_DEBUG_KMS("disabling %s\n", pll->name);
-	pll->disable(dev_priv, pll);
-	pll->on = false;
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-}
-
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 					   enum pipe pipe)
 {
@@ -1959,12 +1823,8 @@
 	i915_reg_t reg;
 	uint32_t val, pipeconf_val;
 
-	/* PCH only available on ILK+ */
-	BUG_ON(!HAS_PCH_SPLIT(dev));
-
 	/* Make sure PCH DPLL is enabled */
-	assert_shared_dpll_enabled(dev_priv,
-				   intel_crtc_to_shared_dpll(intel_crtc));
+	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
 
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1983,7 +1843,7 @@
 	val = I915_READ(reg);
 	pipeconf_val = I915_READ(PIPECONF(pipe));
 
-	if (HAS_PCH_IBX(dev_priv->dev)) {
+	if (HAS_PCH_IBX(dev_priv)) {
 		/*
 		 * Make the BPC in transcoder be consistent with
 		 * that in pipeconf reg. For HDMI we must use 8bpc
@@ -1998,7 +1858,7 @@
 
 	val &= ~TRANS_INTERLACE_MASK;
 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
-		if (HAS_PCH_IBX(dev_priv->dev) &&
+		if (HAS_PCH_IBX(dev_priv) &&
 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
 			val |= TRANS_LEGACY_INTERLACED_ILK;
 		else
@@ -2016,9 +1876,6 @@
 {
 	u32 val, pipeconf_val;
 
-	/* PCH only available on ILK+ */
-	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
-
 	/* FDI must be feeding us bits for PCH ports */
 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
@@ -2113,7 +1970,7 @@
 	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
 
-	if (HAS_PCH_LPT(dev_priv->dev))
+	if (HAS_PCH_LPT(dev_priv))
 		pch_transcoder = TRANSCODER_A;
 	else
 		pch_transcoder = pipe;
@@ -2123,7 +1980,7 @@
 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
 	 * need the check.
 	 */
-	if (HAS_GMCH_DISPLAY(dev_priv->dev))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		if (crtc->config->has_dsi_encoder)
 			assert_dsi_pll_enabled(dev_priv);
 		else
@@ -2225,8 +2082,8 @@
 	return IS_GEN2(dev_priv) ? 2048 : 4096;
 }
 
-static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
-				     uint64_t fb_modifier, unsigned int cpp)
+static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
+					   uint64_t fb_modifier, unsigned int cpp)
 {
 	switch (fb_modifier) {
 	case DRM_FORMAT_MOD_NONE:
@@ -2269,7 +2126,21 @@
 		return 1;
 	else
 		return intel_tile_size(dev_priv) /
-			intel_tile_width(dev_priv, fb_modifier, cpp);
+			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+}
+
+/* Return the tile dimensions in pixel units */
+static void intel_tile_dims(const struct drm_i915_private *dev_priv,
+			    unsigned int *tile_width,
+			    unsigned int *tile_height,
+			    uint64_t fb_modifier,
+			    unsigned int cpp)
+{
+	unsigned int tile_width_bytes =
+		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+
+	*tile_width = tile_width_bytes / cpp;
+	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
 }
 
 unsigned int
@@ -2282,48 +2153,54 @@
 	return ALIGN(height, tile_height);
 }
 
-static void
-intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
-			const struct drm_plane_state *plane_state)
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
 {
-	struct drm_i915_private *dev_priv = to_i915(fb->dev);
-	struct intel_rotation_info *info = &view->params.rotated;
+	unsigned int size = 0;
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
+		size += rot_info->plane[i].width * rot_info->plane[i].height;
+
+	return size;
+}
+
+static void
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
+			const struct drm_framebuffer *fb,
+			unsigned int rotation)
+{
+	if (intel_rotation_90_or_270(rotation)) {
+		*view = i915_ggtt_view_rotated;
+		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
+	} else {
+		*view = i915_ggtt_view_normal;
+	}
+}
+
+static void
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+		   struct drm_framebuffer *fb)
+{
+	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
 	unsigned int tile_size, tile_width, tile_height, cpp;
 
-	*view = i915_ggtt_view_normal;
-
-	if (!plane_state)
-		return;
-
-	if (!intel_rotation_90_or_270(plane_state->rotation))
-		return;
-
-	*view = i915_ggtt_view_rotated;
-
-	info->height = fb->height;
-	info->pixel_format = fb->pixel_format;
-	info->pitch = fb->pitches[0];
-	info->uv_offset = fb->offsets[1];
-	info->fb_modifier = fb->modifier[0];
-
 	tile_size = intel_tile_size(dev_priv);
 
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-	tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
-	tile_height = tile_size / tile_width;
+	intel_tile_dims(dev_priv, &tile_width, &tile_height,
+			fb->modifier[0], cpp);
 
-	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
-	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
-	info->size = info->width_pages * info->height_pages * tile_size;
+	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
+	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
 
 	if (info->pixel_format == DRM_FORMAT_NV12) {
 		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
-		tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
-		tile_height = tile_size / tile_width;
+		intel_tile_dims(dev_priv, &tile_width, &tile_height,
+				fb->modifier[1], cpp);
 
-		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
-		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
-		info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
+		info->uv_offset = fb->offsets[1];
+		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
+		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
 	}
 }
 
@@ -2360,9 +2237,8 @@
 }
 
 int
-intel_pin_and_fence_fb_obj(struct drm_plane *plane,
-			   struct drm_framebuffer *fb,
-			   const struct drm_plane_state *plane_state)
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+			   unsigned int rotation)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2375,7 +2251,7 @@
 
 	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
 
-	intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	intel_fill_fb_ggtt_view(&view, fb, rotation);
 
 	/* Note that the w/a also requires 64 PTE of padding following the
 	 * bo. We currently fill all unused PTE with the shadow page and so
@@ -2433,15 +2309,14 @@
 	return ret;
 }
 
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct i915_ggtt_view view;
 
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
-	intel_fill_fb_ggtt_view(&view, fb, plane_state);
+	intel_fill_fb_ggtt_view(&view, fb, rotation);
 
 	if (view.type == I915_GGTT_VIEW_NORMAL)
 		i915_gem_object_unpin_fence(obj);
@@ -2449,38 +2324,93 @@
 	i915_gem_object_unpin_from_display_plane(obj, &view);
 }
 
-/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
- * is assumed to be a power-of-two. */
-u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
-			      int *x, int *y,
-			      uint64_t fb_modifier,
-			      unsigned int cpp,
-			      unsigned int pitch)
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ *
+ * Input tile dimensions and pitch must already be
+ * rotated to match x and y, and in pixel units.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+				    unsigned int tile_width,
+				    unsigned int tile_height,
+				    unsigned int tile_size,
+				    unsigned int pitch_tiles,
+				    u32 old_offset,
+				    u32 new_offset)
 {
+	unsigned int tiles;
+
+	WARN_ON(old_offset & (tile_size - 1));
+	WARN_ON(new_offset & (tile_size - 1));
+	WARN_ON(new_offset > old_offset);
+
+	tiles = (old_offset - new_offset) / tile_size;
+
+	*y += tiles / pitch_tiles * tile_height;
+	*x += tiles % pitch_tiles * tile_width;
+
+	return new_offset;
+}
+
+/*
+ * Computes the linear offset to the base tile and adjusts
+ * x, y. bytes per pixel is assumed to be a power-of-two.
+ *
+ * In the 90/270 rotated case, x and y are assumed
+ * to be already rotated to match the rotated GTT view, and
+ * pitch is the tile_height aligned framebuffer height.
+ */
+u32 intel_compute_tile_offset(int *x, int *y,
+			      const struct drm_framebuffer *fb, int plane,
+			      unsigned int pitch,
+			      unsigned int rotation)
+{
+	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	uint64_t fb_modifier = fb->modifier[plane];
+	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+	u32 offset, offset_aligned, alignment;
+
+	alignment = intel_surf_alignment(dev_priv, fb_modifier);
+	if (alignment)
+		alignment--;
+
 	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
 		unsigned int tile_size, tile_width, tile_height;
-		unsigned int tile_rows, tiles;
+		unsigned int tile_rows, tiles, pitch_tiles;
 
 		tile_size = intel_tile_size(dev_priv);
-		tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
-		tile_height = tile_size / tile_width;
+		intel_tile_dims(dev_priv, &tile_width, &tile_height,
+				fb_modifier, cpp);
+
+		if (intel_rotation_90_or_270(rotation)) {
+			pitch_tiles = pitch / tile_height;
+			swap(tile_width, tile_height);
+		} else {
+			pitch_tiles = pitch / (tile_width * cpp);
+		}
 
 		tile_rows = *y / tile_height;
 		*y %= tile_height;
 
-		tiles = *x / (tile_width/cpp);
-		*x %= tile_width/cpp;
+		tiles = *x / tile_width;
+		*x %= tile_width;
 
-		return tile_rows * pitch * tile_height + tiles * tile_size;
+		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
+		offset_aligned = offset & ~alignment;
+
+		intel_adjust_tile_offset(x, y, tile_width, tile_height,
+					 tile_size, pitch_tiles,
+					 offset, offset_aligned);
 	} else {
-		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
-		unsigned int offset;
-
 		offset = *y * pitch + *x * cpp;
+		offset_aligned = offset & ~alignment;
+
 		*y = (offset & alignment) / pitch;
 		*x = ((offset & alignment) - *y * pitch) / cpp;
-		return offset & ~alignment;
 	}
+
+	return offset_aligned;
 }
 
 static int i9xx_format_to_fourcc(int format)
@@ -2536,6 +2466,7 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2551,7 +2482,7 @@
 	/* If the FB is too big, just don't use it since fbdev is not very
 	 * important and we should probably use that space with FBC or other
 	 * features. */
-	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
+	if (size_aligned * 2 > ggtt->stolen_usable_size)
 		return false;
 
 	mutex_lock(&dev->struct_mutex);
@@ -2667,7 +2598,7 @@
 	 */
 	to_intel_plane_state(plane_state)->visible = false;
 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
-	intel_pre_disable_primary(&intel_crtc->base);
+	intel_pre_disable_primary_noatomic(&intel_crtc->base);
 	intel_plane->disable_plane(primary, &intel_crtc->base);
 
 	return;
@@ -2716,6 +2647,7 @@
 	u32 linear_offset;
 	u32 dspcntr;
 	i915_reg_t reg = DSPCNTR(plane);
+	unsigned int rotation = plane_state->base.rotation;
 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	int x = plane_state->src.x1 >> 16;
 	int y = plane_state->src.y1 >> 16;
@@ -2780,15 +2712,14 @@
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		intel_crtc->dspaddr_offset =
-			intel_compute_tile_offset(dev_priv, &x, &y,
-						  fb->modifier[0], cpp,
-						  fb->pitches[0]);
+			intel_compute_tile_offset(&x, &y, fb, 0,
+						  fb->pitches[0], rotation);
 		linear_offset -= intel_crtc->dspaddr_offset;
 	} else {
 		intel_crtc->dspaddr_offset = linear_offset;
 	}
 
-	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == BIT(DRM_ROTATE_180)) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
 		x += (crtc_state->pipe_src_w - 1);
@@ -2846,6 +2777,7 @@
 	u32 linear_offset;
 	u32 dspcntr;
 	i915_reg_t reg = DSPCNTR(plane);
+	unsigned int rotation = plane_state->base.rotation;
 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	int x = plane_state->src.x1 >> 16;
 	int y = plane_state->src.y1 >> 16;
@@ -2887,11 +2819,10 @@
 
 	linear_offset = y * fb->pitches[0] + x * cpp;
 	intel_crtc->dspaddr_offset =
-		intel_compute_tile_offset(dev_priv, &x, &y,
-					  fb->modifier[0], cpp,
-					  fb->pitches[0]);
+		intel_compute_tile_offset(&x, &y, fb, 0,
+					  fb->pitches[0], rotation);
 	linear_offset -= intel_crtc->dspaddr_offset;
-	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == BIT(DRM_ROTATE_180)) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
@@ -2931,7 +2862,7 @@
 	} else {
 		int cpp = drm_format_plane_cpp(pixel_format, 0);
 
-		return intel_tile_width(dev_priv, fb_modifier, cpp);
+		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
 	}
 }
 
@@ -2944,7 +2875,7 @@
 	u64 offset;
 
 	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
-				intel_plane->base.state);
+				intel_plane->base.state->rotation);
 
 	vma = i915_gem_obj_to_ggtt_view(obj, &view);
 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
@@ -3284,12 +3215,12 @@
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned reset_counter;
 	bool pending;
 
-	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
+	if (intel_crtc->reset_counter != reset_counter)
 		return false;
 
 	spin_lock_irq(&dev->event_lock);
@@ -3314,9 +3245,6 @@
 		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
 
-	if (HAS_DDI(dev))
-		intel_set_pipe_csc(&crtc->base);
-
 	/*
 	 * Update pipe size and adjust fitter if needed: the reason for this is
 	 * that in compute_mode_changes we check the native mode (not the pfit
@@ -3894,9 +3822,7 @@
 	intel_crtc->unpin_work = NULL;
 
 	if (work->event)
-		drm_send_vblank_event(intel_crtc->base.dev,
-				      intel_crtc->pipe,
-				      work->event);
+		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
 
 	drm_crtc_vblank_put(&intel_crtc->base);
 
@@ -3955,37 +3881,35 @@
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
 	u32 temp;
 
 	lpt_disable_iclkip(dev_priv);
 
-	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
-	if (clock == 20000) {
-		auxdiv = 1;
-		divsel = 0x41;
-		phaseinc = 0x20;
-	} else {
-		/* The iCLK virtual clock root frequency is in MHz,
-		 * but the adjusted_mode->crtc_clock in in KHz. To get the
-		 * divisors, it is necessary to divide one by another, so we
-		 * convert the virtual clock precision to KHz here for higher
-		 * precision.
-		 */
+	/* The iCLK virtual clock root frequency is in MHz,
+	 * but the adjusted_mode->crtc_clock is in KHz. To get the
+	 * divisors, it is necessary to divide one by another, so we
+	 * convert the virtual clock precision to KHz here for higher
+	 * precision.
+	 */
+	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
 		u32 iclk_virtual_root_freq = 172800 * 1000;
 		u32 iclk_pi_range = 64;
-		u32 desired_divisor, msb_divisor_value, pi_value;
+		u32 desired_divisor;
 
-		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
-		msb_divisor_value = desired_divisor / iclk_pi_range;
-		pi_value = desired_divisor % iclk_pi_range;
+		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+						    clock << auxdiv);
+		divsel = (desired_divisor / iclk_pi_range) - 2;
+		phaseinc = desired_divisor % iclk_pi_range;
 
-		auxdiv = 0;
-		divsel = msb_divisor_value - 2;
-		phaseinc = pi_value;
+		/*
+		 * Near 20MHz is a corner case which is
+		 * out of range for the 7-bit divisor
+		 */
+		if (divsel <= 0x7f)
+			break;
 	}
 
 	/* This should not happen with any sane values */
@@ -4032,6 +3956,43 @@
 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
 }
 
+int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+{
+	u32 divsel, phaseinc, auxdiv;
+	u32 iclk_virtual_root_freq = 172800 * 1000;
+	u32 iclk_pi_range = 64;
+	u32 desired_divisor;
+	u32 temp;
+
+	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+		return 0;
+
+	mutex_lock(&dev_priv->sb_lock);
+
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+	if (temp & SBI_SSCCTL_DISABLE) {
+		mutex_unlock(&dev_priv->sb_lock);
+		return 0;
+	}
+
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
+	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
+
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
+
+	mutex_unlock(&dev_priv->sb_lock);
+
+	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
+
+	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+				 desired_divisor << auxdiv);
+}
+
 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
 						enum pipe pch_transcoder)
 {
@@ -4142,12 +4103,6 @@
 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
-	/*
-	 * Sometimes spurious CPU pipe underruns happen during FDI
-	 * training, at least with VGA+HDMI cloning. Suppress them.
-	 */
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc);
 
@@ -4159,7 +4114,8 @@
 		temp = I915_READ(PCH_DPLL_SEL);
 		temp |= TRANS_DPLL_ENABLE(pipe);
 		sel = TRANS_DPLLB_SEL(pipe);
-		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
+		if (intel_crtc->config->shared_dpll ==
+		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
 			temp |= sel;
 		else
 			temp &= ~sel;
@@ -4181,8 +4137,6 @@
 
 	intel_fdi_normal_train(crtc);
 
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
 		const struct drm_display_mode *adjusted_mode =
@@ -4238,113 +4192,6 @@
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
-						struct intel_crtc_state *crtc_state)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct intel_shared_dpll *pll;
-	struct intel_shared_dpll_config *shared_dpll;
-	enum intel_dpll_id i;
-	int max = dev_priv->num_shared_dpll;
-
-	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
-
-	if (HAS_PCH_IBX(dev_priv->dev)) {
-		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
-		i = (enum intel_dpll_id) crtc->pipe;
-		pll = &dev_priv->shared_dplls[i];
-
-		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
-			      crtc->base.base.id, pll->name);
-
-		WARN_ON(shared_dpll[i].crtc_mask);
-
-		goto found;
-	}
-
-	if (IS_BROXTON(dev_priv->dev)) {
-		/* PLL is attached to port in bxt */
-		struct intel_encoder *encoder;
-		struct intel_digital_port *intel_dig_port;
-
-		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
-		if (WARN_ON(!encoder))
-			return NULL;
-
-		intel_dig_port = enc_to_dig_port(&encoder->base);
-		/* 1:1 mapping between ports and PLLs */
-		i = (enum intel_dpll_id)intel_dig_port->port;
-		pll = &dev_priv->shared_dplls[i];
-		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
-			crtc->base.base.id, pll->name);
-		WARN_ON(shared_dpll[i].crtc_mask);
-
-		goto found;
-	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
-		/* Do not consider SPLL */
-		max = 2;
-
-	for (i = 0; i < max; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		/* Only want to check enabled timings first */
-		if (shared_dpll[i].crtc_mask == 0)
-			continue;
-
-		if (memcmp(&crtc_state->dpll_hw_state,
-			   &shared_dpll[i].hw_state,
-			   sizeof(crtc_state->dpll_hw_state)) == 0) {
-			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
-				      crtc->base.base.id, pll->name,
-				      shared_dpll[i].crtc_mask,
-				      pll->active);
-			goto found;
-		}
-	}
-
-	/* Ok no matching timings, maybe there's a free one? */
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-		if (shared_dpll[i].crtc_mask == 0) {
-			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
-				      crtc->base.base.id, pll->name);
-			goto found;
-		}
-	}
-
-	return NULL;
-
-found:
-	if (shared_dpll[i].crtc_mask == 0)
-		shared_dpll[i].hw_state =
-			crtc_state->dpll_hw_state;
-
-	crtc_state->shared_dpll = i;
-	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
-			 pipe_name(crtc->pipe));
-
-	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
-
-	return pll;
-}
-
-static void intel_shared_dpll_commit(struct drm_atomic_state *state)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	struct intel_shared_dpll_config *shared_dpll;
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-
-	if (!to_intel_atomic_state(state)->dpll_set)
-		return;
-
-	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-		pll->config = shared_dpll[i];
-	}
-}
-
 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4576,8 +4423,11 @@
 	if (!crtc->config->ips_enabled)
 		return;
 
-	/* We can only enable IPS after we enable a plane and wait for a vblank */
-	intel_wait_for_vblank(dev, crtc->pipe);
+	/*
+	 * We can only enable IPS after we enable a plane and wait for a vblank.
+	 * This function is called from post_plane_update, which is run after
+	 * a vblank wait.
+	 */
 
 	assert_plane_enabled(dev_priv, crtc->plane);
 	if (IS_BROADWELL(dev)) {
@@ -4626,55 +4476,6 @@
 	intel_wait_for_vblank(dev, crtc->pipe);
 }
 
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	int i;
-	bool reenable_ips = false;
-
-	/* The clocks have to be on to load the palette. */
-	if (!crtc->state->active)
-		return;
-
-	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
-		if (intel_crtc->config->has_dsi_encoder)
-			assert_dsi_pll_enabled(dev_priv);
-		else
-			assert_pll_enabled(dev_priv, pipe);
-	}
-
-	/* Workaround : Do not read or write the pipe palette/gamma data while
-	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-	 */
-	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
-	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
-	     GAMMA_MODE_MODE_SPLIT)) {
-		hsw_disable_ips(intel_crtc);
-		reenable_ips = true;
-	}
-
-	for (i = 0; i < 256; i++) {
-		i915_reg_t palreg;
-
-		if (HAS_GMCH_DISPLAY(dev))
-			palreg = PALETTE(pipe, i);
-		else
-			palreg = LGC_PALETTE(pipe, i);
-
-		I915_WRITE(palreg,
-			   (intel_crtc->lut_r[i] << 16) |
-			   (intel_crtc->lut_g[i] << 8) |
-			   intel_crtc->lut_b[i]);
-	}
-
-	if (reenable_ips)
-		hsw_enable_ips(intel_crtc);
-}
-
 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
 {
 	if (intel_crtc->overlay) {
@@ -4734,16 +4535,7 @@
 	intel_check_pch_fifo_underruns(dev_priv);
 }
 
-/**
- * intel_pre_disable_primary - Perform operations before disabling primary plane
- * @crtc: the CRTC whose primary plane is to be disabled
- *
- * Performs potentially sleeping operations that must be done before the
- * primary plane is disabled, such as updating FBC and IPS.  Note that this may
- * be called due to an explicit primary plane update, or due to an implicit
- * disable that is caused when a sprite plane completely hides the primary
- * plane.
- */
+/* FIXME move all this to pre_plane_update() with proper state tracking */
 static void
 intel_pre_disable_primary(struct drm_crtc *crtc)
 {
@@ -4762,6 +4554,26 @@
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
 	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	hsw_disable_ips(intel_crtc);
+}
+
+/* FIXME get rid of this and use pre_plane_update */
+static void
+intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	intel_pre_disable_primary(crtc);
+
+	/*
 	 * Vblank time updates from the shadow to live plane control register
 	 * are blocked if the memory self-refresh mode is active at that
 	 * moment. So to make sure the plane gets truly disabled, disable
@@ -4775,37 +4587,39 @@
 		dev_priv->wm.vlv.cxsr = false;
 		intel_wait_for_vblank(dev, pipe);
 	}
-
-	/*
-	 * FIXME IPS should be fine as long as one plane is
-	 * enabled, but in practice it seems to have problems
-	 * when going from primary only to sprite only and vice
-	 * versa.
-	 */
-	hsw_disable_ips(intel_crtc);
 }
 
-static void intel_post_plane_update(struct intel_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 {
-	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->base.state);
 	struct drm_device *dev = crtc->base.dev;
+	struct drm_plane *primary = crtc->base.primary;
+	struct drm_plane_state *old_pri_state =
+		drm_atomic_get_existing_plane_state(old_state, primary);
 
-	intel_frontbuffer_flip(dev, atomic->fb_bits);
+	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
 
 	crtc->wm.cxsr_allowed = true;
 
-	if (pipe_config->wm_changed && pipe_config->base.active)
+	if (pipe_config->update_wm_post && pipe_config->base.active)
 		intel_update_watermarks(&crtc->base);
 
-	if (atomic->update_fbc)
+	if (old_pri_state) {
+		struct intel_plane_state *primary_state =
+			to_intel_plane_state(primary->state);
+		struct intel_plane_state *old_primary_state =
+			to_intel_plane_state(old_pri_state);
+
 		intel_fbc_post_update(crtc);
 
-	if (atomic->post_enable_primary)
-		intel_post_enable_primary(&crtc->base);
-
-	memset(atomic, 0, sizeof(*atomic));
+		if (primary_state->visible &&
+		    (needs_modeset(&pipe_config->base) ||
+		     !old_primary_state->visible))
+			intel_post_enable_primary(&crtc->base);
+	}
 }
 
 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
@@ -4813,7 +4627,6 @@
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->base.state);
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4822,15 +4635,14 @@
 		drm_atomic_get_existing_plane_state(old_state, primary);
 	bool modeset = needs_modeset(&pipe_config->base);
 
-	if (atomic->update_fbc)
-		intel_fbc_pre_update(crtc);
-
 	if (old_pri_state) {
 		struct intel_plane_state *primary_state =
 			to_intel_plane_state(primary->state);
 		struct intel_plane_state *old_primary_state =
 			to_intel_plane_state(old_pri_state);
 
+		intel_fbc_pre_update(crtc);
+
 		if (old_primary_state->visible &&
 		    (modeset || !primary_state->visible))
 			intel_pre_disable_primary(&crtc->base);
@@ -4839,11 +4651,58 @@
 	if (pipe_config->disable_cxsr) {
 		crtc->wm.cxsr_allowed = false;
 
-		if (old_crtc_state->base.active)
+		/*
+		 * Vblank time updates from the shadow to live plane control register
+		 * are blocked if the memory self-refresh mode is active at that
+		 * moment. So to make sure the plane gets truly disabled, disable
+		 * first the self-refresh mode. The self-refresh enable bit in turn
+		 * will be checked/applied by the HW only at the next frame start
+		 * event which is after the vblank start event, so we need to have a
+		 * wait-for-vblank between disabling the plane and the pipe.
+		 */
+		if (old_crtc_state->base.active) {
 			intel_set_memory_cxsr(dev_priv, false);
+			dev_priv->wm.vlv.cxsr = false;
+			intel_wait_for_vblank(dev, crtc->pipe);
+		}
 	}
 
-	if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
+	/*
+	 * IVB workaround: must disable low power watermarks for at least
+	 * one frame before enabling scaling.  LP watermarks can be re-enabled
+	 * when scaling is disabled.
+	 *
+	 * WaCxSRDisabledForSpriteScaling:ivb
+	 */
+	if (pipe_config->disable_lp_wm) {
+		ilk_disable_lp_wm(dev);
+		intel_wait_for_vblank(dev, crtc->pipe);
+	}
+
+	/*
+	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
+	 * watermark programming here.
+	 */
+	if (needs_modeset(&pipe_config->base))
+		return;
+
+	/*
+	 * For platforms that support atomic watermarks, program the
+	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
+	 * will be the intermediate values that are safe for both pre- and
+	 * post- vblank; when vblank happens, the 'active' values will be set
+	 * to the final 'target' values and we'll do this again to get the
+	 * optimal watermarks.  For gen9+ platforms, the values we program here
+	 * will be the final target values which will get automatically latched
+	 * at vblank time; no further programming will be necessary.
+	 *
+	 * If a platform hasn't been transitioned to atomic watermarks yet,
+	 * we'll continue to update watermarks the old way, if flags tell
+	 * us to.
+	 */
+	if (dev_priv->display.initial_watermarks != NULL)
+		dev_priv->display.initial_watermarks(pipe_config);
+	else if (pipe_config->update_wm_pre)
 		intel_update_watermarks(&crtc->base);
 }
 
@@ -4874,10 +4733,24 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
 
+	/*
+	 * Sometimes spurious CPU pipe underruns happen during FDI
+	 * training, at least with VGA+HDMI cloning. Suppress them.
+	 *
+	 * On ILK we get occasional spurious CPU pipe underruns
+	 * between eDP port A enable and vdd enable. Also PCH port
+	 * enable seems to result in the occasional CPU pipe underrun.
+	 *
+	 * Spurious PCH underruns also occur during PCH enabling.
+	 */
+	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
@@ -4888,6 +4761,7 @@
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
+	intel_set_pipe_src_size(intel_crtc);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
@@ -4898,8 +4772,6 @@
 
 	intel_crtc->active = true;
 
-	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
@@ -4920,9 +4792,10 @@
 	 * On ILK+ LUT must be loaded before the pipe is running but with
 	 * clocks enabled
 	 */
-	intel_crtc_load_lut(crtc);
+	intel_color_load_luts(&pipe_config->base);
 
-	intel_update_watermarks(crtc);
+	if (dev_priv->display.initial_watermarks != NULL)
+		dev_priv->display.initial_watermarks(intel_crtc->config);
 	intel_enable_pipe(intel_crtc);
 
 	if (intel_crtc->config->has_pch_encoder)
@@ -4940,6 +4813,7 @@
 	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
 	if (intel_crtc->config->has_pch_encoder)
 		intel_wait_for_vblank(dev, pipe);
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
@@ -4956,6 +4830,7 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->state);
 
@@ -4966,16 +4841,20 @@
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      false);
 
-	if (intel_crtc_to_shared_dpll(intel_crtc))
+	if (intel_crtc->config->shared_dpll)
 		intel_enable_shared_dpll(intel_crtc);
 
 	if (intel_crtc->config->has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
-	intel_set_pipe_timings(intel_crtc);
+	if (!intel_crtc->config->has_dsi_encoder)
+		intel_set_pipe_timings(intel_crtc);
 
-	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
-		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+	intel_set_pipe_src_size(intel_crtc);
+
+	if (cpu_transcoder != TRANSCODER_EDP &&
+	    !transcoder_is_dsi(cpu_transcoder)) {
+		I915_WRITE(PIPE_MULT(cpu_transcoder),
 			   intel_crtc->config->pixel_multiplier - 1);
 	}
 
@@ -4984,9 +4863,12 @@
 				     &intel_crtc->config->fdi_m_n, NULL);
 	}
 
-	haswell_set_pipeconf(crtc);
+	if (!intel_crtc->config->has_dsi_encoder)
+		haswell_set_pipeconf(crtc);
 
-	intel_set_pipe_csc(crtc);
+	haswell_set_pipemisc(crtc);
+
+	intel_color_set_csc(&pipe_config->base);
 
 	intel_crtc->active = true;
 
@@ -5015,14 +4897,20 @@
 	 * On ILK+ LUT must be loaded before the pipe is running but with
 	 * clocks enabled
 	 */
-	intel_crtc_load_lut(crtc);
+	intel_color_load_luts(&pipe_config->base);
 
 	intel_ddi_set_pipe_settings(crtc);
 	if (!intel_crtc->config->has_dsi_encoder)
 		intel_ddi_enable_transcoder_func(crtc);
 
-	intel_update_watermarks(crtc);
-	intel_enable_pipe(intel_crtc);
+	if (dev_priv->display.initial_watermarks != NULL)
+		dev_priv->display.initial_watermarks(pipe_config);
+	else
+		intel_update_watermarks(crtc);
+
+	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
+	if (!intel_crtc->config->has_dsi_encoder)
+		intel_enable_pipe(intel_crtc);
 
 	if (intel_crtc->config->has_pch_encoder)
 		lpt_pch_enable(crtc);
@@ -5078,8 +4966,15 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	if (intel_crtc->config->has_pch_encoder)
+	/*
+	 * Sometimes spurious CPU pipe underruns happen when the
+	 * pipe is already disabled, but FDI RX/TX is still enabled.
+	 * Happens at least with VGA+HDMI cloning. Suppress them.
+	 */
+	if (intel_crtc->config->has_pch_encoder) {
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+	}
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
@@ -5087,22 +4982,12 @@
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
 
-	/*
-	 * Sometimes spurious CPU pipe underruns happen when the
-	 * pipe is already disabled, but FDI RX/TX is still enabled.
-	 * Happens at least with VGA+HDMI cloning. Suppress them.
-	 */
-	if (intel_crtc->config->has_pch_encoder)
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
 	intel_disable_pipe(intel_crtc);
 
 	ironlake_pfit_disable(intel_crtc, false);
 
-	if (intel_crtc->config->has_pch_encoder) {
+	if (intel_crtc->config->has_pch_encoder)
 		ironlake_fdi_disable(crtc);
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-	}
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->post_disable)
@@ -5132,6 +5017,7 @@
 		ironlake_fdi_pll_disable(intel_crtc);
 	}
 
+	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
@@ -5155,7 +5041,9 @@
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
 
-	intel_disable_pipe(intel_crtc);
+	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
+	if (!intel_crtc->config->has_dsi_encoder)
+		intel_disable_pipe(intel_crtc);
 
 	if (intel_crtc->config->dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, false);
@@ -5330,6 +5218,9 @@
 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
 	}
 
+	if (crtc_state->shared_dpll)
+		mask |= BIT(POWER_DOMAIN_PLLS);
+
 	return mask;
 }
 
@@ -5393,6 +5284,8 @@
 			dev_priv->max_cdclk_freq = 450000;
 		else
 			dev_priv->max_cdclk_freq = 337500;
+	} else if (IS_BROXTON(dev)) {
+		dev_priv->max_cdclk_freq = 624000;
 	} else if (IS_BROADWELL(dev))  {
 		/*
 		 * FIXME with extra cooling we can allow
@@ -5452,9 +5345,8 @@
 		intel_update_max_cdclk(dev);
 }
 
-static void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t divider;
 	uint32_t ratio;
 	uint32_t current_freq;
@@ -5568,33 +5460,46 @@
 		return;
 	}
 
-	intel_update_cdclk(dev);
+	intel_update_cdclk(dev_priv->dev);
 }
 
-void broxton_init_cdclk(struct drm_device *dev)
+static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
+	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
+		return false;
 
-	/*
-	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
-	 * or else the reset will hang because there is no PCH to respond.
-	 * Move the handshake programming to initialization sequence.
-	 * Previously was left up to BIOS.
-	 */
-	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
-	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+	/* TODO: Check for a valid CDCLK rate */
 
-	/* Enable PG1 for cdclk */
-	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
+		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
 
+		return false;
+	}
+
+	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
+		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+
+		return false;
+	}
+
+	return true;
+}
+
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
+{
+	return broxton_cdclk_is_enabled(dev_priv);
+}
+
+void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+{
 	/* check if cd clock is enabled */
-	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
-		DRM_DEBUG_KMS("Display already initialized\n");
+	if (broxton_cdclk_is_enabled(dev_priv)) {
+		DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
 		return;
 	}
 
+	DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+
 	/*
 	 * FIXME:
 	 * - The initial CDCLK needs to be read from VBT.
@@ -5602,7 +5507,7 @@
 	 * - check if setting the max (or any) cdclk freq is really necessary
 	 *   here, it belongs to modeset time
 	 */
-	broxton_set_cdclk(dev, 624000);
+	broxton_set_cdclk(dev_priv, 624000);
 
 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
 	POSTING_READ(DBUF_CTL);
@@ -5613,10 +5518,8 @@
 		DRM_ERROR("DBuf power enable timeout!\n");
 }
 
-void broxton_uninit_cdclk(struct drm_device *dev)
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
 	POSTING_READ(DBUF_CTL);
 
@@ -5626,9 +5529,7 @@
 		DRM_ERROR("DBuf power disable timeout!\n");
 
 	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
-	broxton_set_cdclk(dev, 19200);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+	broxton_set_cdclk(dev_priv, 19200);
 }
 
 static const struct skl_cdclk_entry {
@@ -6165,6 +6066,8 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
 	int pipe = intel_crtc->pipe;
 
 	if (WARN_ON(intel_crtc->active))
@@ -6174,6 +6077,7 @@
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
+	intel_set_pipe_src_size(intel_crtc);
 
 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
 		struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6192,14 +6096,12 @@
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	if (!intel_crtc->config->has_dsi_encoder) {
-		if (IS_CHERRYVIEW(dev)) {
-			chv_prepare_pll(intel_crtc, intel_crtc->config);
-			chv_enable_pll(intel_crtc, intel_crtc->config);
-		} else {
-			vlv_prepare_pll(intel_crtc, intel_crtc->config);
-			vlv_enable_pll(intel_crtc, intel_crtc->config);
-		}
+	if (IS_CHERRYVIEW(dev)) {
+		chv_prepare_pll(intel_crtc, intel_crtc->config);
+		chv_enable_pll(intel_crtc, intel_crtc->config);
+	} else {
+		vlv_prepare_pll(intel_crtc, intel_crtc->config);
+		vlv_enable_pll(intel_crtc, intel_crtc->config);
 	}
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6208,8 +6110,9 @@
 
 	i9xx_pfit_enable(intel_crtc);
 
-	intel_crtc_load_lut(crtc);
+	intel_color_load_luts(&pipe_config->base);
 
+	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
 
 	assert_vblank_disabled(crtc);
@@ -6234,7 +6137,9 @@
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
+	enum pipe pipe = intel_crtc->pipe;
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -6245,6 +6150,7 @@
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
+	intel_set_pipe_src_size(intel_crtc);
 
 	i9xx_set_pipeconf(intel_crtc);
 
@@ -6261,7 +6167,7 @@
 
 	i9xx_pfit_enable(intel_crtc);
 
-	intel_crtc_load_lut(crtc);
+	intel_color_load_luts(&pipe_config->base);
 
 	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
@@ -6299,10 +6205,9 @@
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
-	 * We also need to wait on all gmch platforms because of the
-	 * self-refresh mode constraint explained above.
 	 */
-	intel_wait_for_vblank(dev, pipe);
+	if (IS_GEN2(dev))
+		intel_wait_for_vblank(dev, pipe);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
@@ -6337,6 +6242,7 @@
 
 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 {
+	struct intel_encoder *encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum intel_display_power_domain domain;
@@ -6348,14 +6254,27 @@
 	if (to_intel_plane_state(crtc->primary->state)->visible) {
 		WARN_ON(intel_crtc->unpin_work);
 
-		intel_pre_disable_primary(crtc);
+		intel_pre_disable_primary_noatomic(crtc);
 
 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
 		to_intel_plane_state(crtc->primary->state)->visible = false;
 	}
 
 	dev_priv->display.crtc_disable(crtc);
+
+	DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
+		      crtc->base.id);
+
+	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
+	crtc->state->active = false;
 	intel_crtc->active = false;
+	crtc->enabled = false;
+	crtc->state->connector_mask = 0;
+	crtc->state->encoder_mask = 0;
+
+	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+		encoder->base.crtc = NULL;
+
 	intel_fbc_disable(intel_crtc);
 	intel_update_watermarks(crtc);
 	intel_disable_shared_dpll(intel_crtc);
@@ -6398,7 +6317,7 @@
 
 /* Cross check the actual hw state with our own modeset state tracking (and it's
  * internal consistency). */
-static void intel_connector_check_state(struct intel_connector *connector)
+static void intel_connector_verify_state(struct intel_connector *connector)
 {
 	struct drm_crtc *crtc = connector->base.state->crtc;
 
@@ -6568,7 +6487,7 @@
 	 * Hence the bw of each lane in terms of the mode signal
 	 * is:
 	 */
-	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
 
 	fdi_dotclock = adjusted_mode->crtc_clock;
 
@@ -6580,8 +6499,7 @@
 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
 			       link_bw, &pipe_config->fdi_m_n);
 
-	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
-				       intel_crtc->pipe, pipe_config);
+	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
 		pipe_config->pipe_bpp -= 2*3;
 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6605,7 +6523,7 @@
 		return false;
 
 	/* HSW can handle pixel rate up to cdclk? */
-	if (IS_HASWELL(dev_priv->dev))
+	if (IS_HASWELL(dev_priv))
 		return true;
 
 	/*
@@ -7133,30 +7051,6 @@
 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
-			   int num_connectors)
-{
-	struct drm_device *dev = crtc_state->base.crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int refclk;
-
-	WARN_ON(!crtc_state->base.state);
-
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
-		refclk = 100000;
-	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-		refclk = dev_priv->vbt.lvds_ssc_freq;
-		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
-	} else if (!IS_GEN2(dev)) {
-		refclk = 96000;
-	} else {
-		refclk = 48000;
-	}
-
-	return refclk;
-}
-
 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
 {
 	return (1 << dpll->n) << 16 | dpll->m2;
@@ -7300,24 +7194,34 @@
 static void vlv_compute_dpll(struct intel_crtc *crtc,
 			     struct intel_crtc_state *pipe_config)
 {
-	u32 dpll, dpll_md;
+	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+	if (crtc->pipe != PIPE_A)
+		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-	/*
-	 * Enable DPIO clock input. We should never disable the reference
-	 * clock for pipe B, since VGA hotplug / manual detection depends
-	 * on it.
-	 */
-	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
-		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
-	/* We should never disable this, set it here for state tracking */
-	if (crtc->pipe == PIPE_B)
-		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-	dpll |= DPLL_VCO_ENABLE;
-	pipe_config->dpll_hw_state.dpll = dpll;
+	/* DPLL not used with DSI, but still need the rest set up */
+	if (!pipe_config->has_dsi_encoder)
+		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+			DPLL_EXT_BUFFER_ENABLE_VLV;
 
-	dpll_md = (pipe_config->pixel_multiplier - 1)
-		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
-	pipe_config->dpll_hw_state.dpll_md = dpll_md;
+	pipe_config->dpll_hw_state.dpll_md =
+		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
+{
+	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+	if (crtc->pipe != PIPE_A)
+		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+	/* DPLL not used with DSI, but still need the rest set up */
+	if (!pipe_config->has_dsi_encoder)
+		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+	pipe_config->dpll_hw_state.dpll_md =
+		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 }
 
 static void vlv_prepare_pll(struct intel_crtc *crtc,
@@ -7325,11 +7229,20 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
+	enum pipe pipe = crtc->pipe;
 	u32 mdiv;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
 	u32 coreclk, reg_val;
 
+	/* Enable Refclk */
+	I915_WRITE(DPLL(pipe),
+		   pipe_config->dpll_hw_state.dpll &
+		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+	/* No need to actually set up the DPLL with DSI */
+	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+		return;
+
 	mutex_lock(&dev_priv->sb_lock);
 
 	bestn = pipe_config->dpll.n;
@@ -7411,32 +7324,26 @@
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_compute_dpll(struct intel_crtc *crtc,
-			     struct intel_crtc_state *pipe_config)
-{
-	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
-		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-		DPLL_VCO_ENABLE;
-	if (crtc->pipe != PIPE_A)
-		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-	pipe_config->dpll_hw_state.dpll_md =
-		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
 static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
-	i915_reg_t dpll_reg = DPLL(crtc->pipe);
+	enum pipe pipe = crtc->pipe;
 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
 	u32 loopfilter, tribuf_calcntr;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
 	u32 dpio_val;
 	int vco;
 
+	/* Enable Refclk and SSC */
+	I915_WRITE(DPLL(pipe),
+		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+	/* No need to actually set up the DPLL with DSI */
+	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+		return;
+
 	bestn = pipe_config->dpll.n;
 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
 	bestm1 = pipe_config->dpll.m1;
@@ -7447,12 +7354,6 @@
 	dpio_val = 0;
 	loopfilter = 0;
 
-	/*
-	 * Enable Refclk and SSC
-	 */
-	I915_WRITE(dpll_reg,
-		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
 	mutex_lock(&dev_priv->sb_lock);
 
 	/* p1 and p2 divider */
@@ -7586,8 +7487,7 @@
 
 static void i9xx_compute_dpll(struct intel_crtc *crtc,
 			      struct intel_crtc_state *crtc_state,
-			      intel_clock_t *reduced_clock,
-			      int num_connectors)
+			      intel_clock_t *reduced_clock)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7646,7 +7546,7 @@
 	if (crtc_state->sdvo_tv_clock)
 		dpll |= PLL_REF_INPUT_TVCLKINBC;
 	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		 intel_panel_use_ssc(dev_priv))
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7663,8 +7563,7 @@
 
 static void i8xx_compute_dpll(struct intel_crtc *crtc,
 			      struct intel_crtc_state *crtc_state,
-			      intel_clock_t *reduced_clock,
-			      int num_connectors)
+			      intel_clock_t *reduced_clock)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7690,7 +7589,7 @@
 		dpll |= DPLL_DVO_2X_MODE;
 
 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+	    intel_panel_use_ssc(dev_priv))
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7759,6 +7658,14 @@
 	    (pipe == PIPE_B || pipe == PIPE_C))
 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
 
+}
+
+static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+
 	/* pipesrc controls the size that is scaled from, which should
 	 * always be the user's requested size.
 	 */
@@ -7800,6 +7707,14 @@
 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
 	}
+}
+
+static void intel_get_pipe_src_size(struct intel_crtc *crtc,
+				    struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
 
 	tmp = I915_READ(PIPESRC(crtc->pipe));
 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
@@ -7897,69 +7812,192 @@
 	POSTING_READ(PIPECONF(intel_crtc->pipe));
 }
 
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+				   struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const intel_limit_t *limit;
+	int refclk = 48000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+		}
+
+		limit = &intel_limits_i8xx_lvds;
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+		limit = &intel_limits_i8xx_dvo;
+	} else {
+		limit = &intel_limits_i8xx_dac;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				 refclk, NULL, &crtc_state->dpll)) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const intel_limit_t *limit;
+	int refclk = 96000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+		}
+
+		if (intel_is_dual_link_lvds(dev))
+			limit = &intel_limits_g4x_dual_channel_lvds;
+		else
+			limit = &intel_limits_g4x_single_channel_lvds;
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+		limit = &intel_limits_g4x_hdmi;
+	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+		limit = &intel_limits_g4x_sdvo;
+	} else {
+		/* The option is for other outputs */
+		limit = &intel_limits_i9xx_sdvo;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const intel_limit_t *limit;
+	int refclk = 96000;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+		}
+
+		limit = &intel_limits_pineview_lvds;
+	} else {
+		limit = &intel_limits_pineview_sdvo;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 				   struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int refclk, num_connectors = 0;
-	intel_clock_t clock;
-	bool ok;
 	const intel_limit_t *limit;
-	struct drm_atomic_state *state = crtc_state->base.state;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
+	int refclk = 96000;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	if (crtc_state->has_dsi_encoder)
-		return 0;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc == &crtc->base)
-			num_connectors++;
-	}
-
-	if (!crtc_state->clock_set) {
-		refclk = i9xx_get_refclk(crtc_state, num_connectors);
-
-		/*
-		 * Returns a set of divisors for the desired target clock with
-		 * the given refclk, or FALSE.  The returned values represent
-		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
-		 * 2) / p1 / p2.
-		 */
-		limit = intel_limit(crtc_state, refclk);
-		ok = dev_priv->display.find_dpll(limit, crtc_state,
-						 crtc_state->port_clock,
-						 refclk, NULL, &clock);
-		if (!ok) {
-			DRM_ERROR("Couldn't find PLL settings for mode!\n");
-			return -EINVAL;
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 		}
 
-		/* Compat-code for transition, will disappear. */
-		crtc_state->dpll.n = clock.n;
-		crtc_state->dpll.m1 = clock.m1;
-		crtc_state->dpll.m2 = clock.m2;
-		crtc_state->dpll.p1 = clock.p1;
-		crtc_state->dpll.p2 = clock.p2;
+		limit = &intel_limits_i9xx_lvds;
+	} else {
+		limit = &intel_limits_i9xx_sdvo;
 	}
 
-	if (IS_GEN2(dev)) {
-		i8xx_compute_dpll(crtc, crtc_state, NULL,
-				  num_connectors);
-	} else if (IS_CHERRYVIEW(dev)) {
-		chv_compute_dpll(crtc, crtc_state);
-	} else if (IS_VALLEYVIEW(dev)) {
-		vlv_compute_dpll(crtc, crtc_state);
-	} else {
-		i9xx_compute_dpll(crtc, crtc_state, NULL,
-				  num_connectors);
+	if (!crtc_state->clock_set &&
+	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				 refclk, NULL, &crtc_state->dpll)) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
 	}
 
+	i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+	return 0;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	int refclk = 100000;
+	const intel_limit_t *limit = &intel_limits_chv;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (!crtc_state->clock_set &&
+	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	chv_compute_dpll(crtc, crtc_state);
+
+	return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	int refclk = 100000;
+	const intel_limit_t *limit = &intel_limits_vlv;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (!crtc_state->clock_set &&
+	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	vlv_compute_dpll(crtc, crtc_state);
+
 	return 0;
 }
 
@@ -8000,8 +8038,8 @@
 	u32 mdiv;
 	int refclk = 100000;
 
-	/* In case of MIPI DPLL will not even be used */
-	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+	/* In case of DSI, DPLL will not be used */
+	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
 		return;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -8097,6 +8135,10 @@
 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
 	int refclk = 100000;
 
+	/* In case of DSI, DPLL will not be used */
+	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+		return;
+
 	mutex_lock(&dev_priv->sb_lock);
 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
@@ -8130,7 +8172,7 @@
 		return false;
 
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+	pipe_config->shared_dpll = NULL;
 
 	ret = false;
 
@@ -8162,11 +8204,16 @@
 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
 
 	intel_get_pipe_timings(crtc, pipe_config);
+	intel_get_pipe_src_size(crtc, pipe_config);
 
 	i9xx_get_pfit_config(crtc, pipe_config);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		tmp = I915_READ(DPLL_MD(crtc->pipe));
+		/* No way to read it out on pipes B and C */
+		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+			tmp = dev_priv->chv_dpll_md[crtc->pipe];
+		else
+			tmp = I915_READ(DPLL_MD(crtc->pipe));
 		pipe_config->pixel_multiplier =
 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
@@ -8635,42 +8682,6 @@
 		lpt_init_pch_refclk(dev);
 }
 
-static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
-{
-	struct drm_device *dev = crtc_state->base.crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_atomic_state *state = crtc_state->base.state;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	struct intel_encoder *encoder;
-	int num_connectors = 0, i;
-	bool is_lvds = false;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != crtc_state->base.crtc)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-
-		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
-		default:
-			break;
-		}
-		num_connectors++;
-	}
-
-	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
-			      dev_priv->vbt.lvds_ssc_freq);
-		return dev_priv->vbt.lvds_ssc_freq;
-	}
-
-	return 120000;
-}
-
 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -8713,82 +8724,14 @@
 	POSTING_READ(PIPECONF(pipe));
 }
 
-/*
- * Set up the pipe CSC unit.
- *
- * Currently only full range RGB to limited range RGB conversion
- * is supported, but eventually this should handle various
- * RGB<->YCbCr scenarios as well.
- */
-static void intel_set_pipe_csc(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	uint16_t coeff = 0x7800; /* 1.0 */
-
-	/*
-	 * TODO: Check what kind of values actually come out of the pipe
-	 * with these coeff/postoff values and adjust to get the best
-	 * accuracy. Perhaps we even need to take the bpc value into
-	 * consideration.
-	 */
-
-	if (intel_crtc->config->limited_color_range)
-		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
-
-	/*
-	 * GY/GU and RY/RU should be the other way around according
-	 * to BSpec, but reality doesn't agree. Just set them up in
-	 * a way that results in the correct picture.
-	 */
-	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
-	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
-
-	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
-	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
-
-	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
-	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
-
-	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
-	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
-	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
-
-	if (INTEL_INFO(dev)->gen > 6) {
-		uint16_t postoff = 0;
-
-		if (intel_crtc->config->limited_color_range)
-			postoff = (16 * (1 << 12) / 255) & 0x1fff;
-
-		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
-		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
-		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
-
-		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
-	} else {
-		uint32_t mode = CSC_MODE_YUV_TO_RGB;
-
-		if (intel_crtc->config->limited_color_range)
-			mode |= CSC_BLACK_SCREEN_OFFSET;
-
-		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
-	}
-}
-
 static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-	uint32_t val;
+	u32 val = 0;
 
-	val = 0;
-
-	if (IS_HASWELL(dev) && intel_crtc->config->dither)
+	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -8798,12 +8741,15 @@
 
 	I915_WRITE(PIPECONF(cpu_transcoder), val);
 	POSTING_READ(PIPECONF(cpu_transcoder));
+}
 
-	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
-	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+static void haswell_set_pipemisc(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
-		val = 0;
+	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
+		u32 val = 0;
 
 		switch (intel_crtc->config->pipe_bpp) {
 		case 18:
@@ -8826,39 +8772,10 @@
 		if (intel_crtc->config->dither)
 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 
-		I915_WRITE(PIPEMISC(pipe), val);
+		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
 	}
 }
 
-static bool ironlake_compute_clocks(struct drm_crtc *crtc,
-				    struct intel_crtc_state *crtc_state,
-				    intel_clock_t *clock,
-				    bool *has_reduced_clock,
-				    intel_clock_t *reduced_clock)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int refclk;
-	const intel_limit_t *limit;
-	bool ret;
-
-	refclk = ironlake_get_refclk(crtc_state);
-
-	/*
-	 * Returns a set of divisors for the desired target clock with the given
-	 * refclk, or FALSE.  The returned values represent the clock equation:
-	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-	 */
-	limit = intel_limit(crtc_state, refclk);
-	ret = dev_priv->display.find_dpll(limit, crtc_state,
-					  crtc_state->port_clock,
-					  refclk, NULL, clock);
-	if (!ret)
-		return false;
-
-	return true;
-}
-
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 {
 	/*
@@ -8875,10 +8792,9 @@
 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
 }
 
-static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
-				      struct intel_crtc_state *crtc_state,
-				      u32 *fp,
-				      intel_clock_t *reduced_clock, u32 *fp2)
+static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+				  struct intel_crtc_state *crtc_state,
+				  intel_clock_t *reduced_clock)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
@@ -8887,8 +8803,8 @@
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
 	struct intel_encoder *encoder;
-	uint32_t dpll;
-	int factor, num_connectors = 0, i;
+	u32 dpll, fp, fp2;
+	int factor, i;
 	bool is_lvds = false, is_sdvo = false;
 
 	for_each_connector_in_state(state, connector, connector_state, i) {
@@ -8908,8 +8824,6 @@
 		default:
 			break;
 		}
-
-		num_connectors++;
 	}
 
 	/* Enable autotuning of the PLL clock (if permissible) */
@@ -8922,11 +8836,19 @@
 	} else if (crtc_state->sdvo_tv_clock)
 		factor = 20;
 
-	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
-		*fp |= FP_CB_TUNE;
+	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
 
-	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
-		*fp2 |= FP_CB_TUNE;
+	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
+		fp |= FP_CB_TUNE;
+
+	if (reduced_clock) {
+		fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+		if (reduced_clock->m < factor * reduced_clock->n)
+			fp2 |= FP_CB_TUNE;
+	} else {
+		fp2 = fp;
+	}
 
 	dpll = 0;
 
@@ -8963,76 +8885,80 @@
 		break;
 	}
 
-	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+	if (is_lvds && intel_panel_use_ssc(dev_priv))
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
-	return dpll | DPLL_VCO_ENABLE;
+	dpll |= DPLL_VCO_ENABLE;
+
+	crtc_state->dpll_hw_state.dpll = dpll;
+	crtc_state->dpll_hw_state.fp0 = fp;
+	crtc_state->dpll_hw_state.fp1 = fp2;
 }
 
 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 				       struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
-	intel_clock_t clock, reduced_clock;
-	u32 dpll = 0, fp = 0, fp2 = 0;
-	bool ok, has_reduced_clock = false;
-	bool is_lvds = false;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	intel_clock_t reduced_clock;
+	bool has_reduced_clock = false;
 	struct intel_shared_dpll *pll;
+	const intel_limit_t *limit;
+	int refclk = 120000;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+	crtc->lowfreq_avail = false;
 
-	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
-	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+	if (!crtc_state->has_pch_encoder)
+		return 0;
 
-	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
-				     &has_reduced_clock, &reduced_clock);
-	if (!ok && !crtc_state->clock_set) {
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv)) {
+			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
+				      dev_priv->vbt.lvds_ssc_freq);
+			refclk = dev_priv->vbt.lvds_ssc_freq;
+		}
+
+		if (intel_is_dual_link_lvds(dev)) {
+			if (refclk == 100000)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100000)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else {
+		limit = &intel_limits_ironlake_dac;
+	}
+
+	if (!crtc_state->clock_set &&
+	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+				refclk, NULL, &crtc_state->dpll)) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
-	/* Compat-code for transition, will disappear. */
-	if (!crtc_state->clock_set) {
-		crtc_state->dpll.n = clock.n;
-		crtc_state->dpll.m1 = clock.m1;
-		crtc_state->dpll.m2 = clock.m2;
-		crtc_state->dpll.p1 = clock.p1;
-		crtc_state->dpll.p2 = clock.p2;
+
+	ironlake_compute_dpll(crtc, crtc_state,
+			      has_reduced_clock ? &reduced_clock : NULL);
+
+	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
+	if (pll == NULL) {
+		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+				 pipe_name(crtc->pipe));
+		return -EINVAL;
 	}
 
-	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
-	if (crtc_state->has_pch_encoder) {
-		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-		if (has_reduced_clock)
-			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
-
-		dpll = ironlake_compute_dpll(crtc, crtc_state,
-					     &fp, &reduced_clock,
-					     has_reduced_clock ? &fp2 : NULL);
-
-		crtc_state->dpll_hw_state.dpll = dpll;
-		crtc_state->dpll_hw_state.fp0 = fp;
-		if (has_reduced_clock)
-			crtc_state->dpll_hw_state.fp1 = fp2;
-		else
-			crtc_state->dpll_hw_state.fp1 = fp;
-
-		pll = intel_get_shared_dpll(crtc, crtc_state);
-		if (pll == NULL) {
-			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
-					 pipe_name(crtc->pipe));
-			return -EINVAL;
-		}
-	}
-
-	if (is_lvds && has_reduced_clock)
+	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    has_reduced_clock)
 		crtc->lowfreq_avail = true;
-	else
-		crtc->lowfreq_avail = false;
 
 	return 0;
 }
@@ -9334,7 +9260,7 @@
 		return false;
 
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+	pipe_config->shared_dpll = NULL;
 
 	ret = false;
 	tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9363,6 +9289,7 @@
 
 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
 		struct intel_shared_dpll *pll;
+		enum intel_dpll_id pll_id;
 
 		pipe_config->has_pch_encoder = true;
 
@@ -9372,21 +9299,22 @@
 
 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
-		if (HAS_PCH_IBX(dev_priv->dev)) {
-			pipe_config->shared_dpll =
-				(enum intel_dpll_id) crtc->pipe;
+		if (HAS_PCH_IBX(dev_priv)) {
+			pll_id = (enum intel_dpll_id) crtc->pipe;
 		} else {
 			tmp = I915_READ(PCH_DPLL_SEL);
 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
-				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
+				pll_id = DPLL_ID_PCH_PLL_B;
 			else
-				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
+				pll_id = DPLL_ID_PCH_PLL_A;
 		}
 
-		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+		pipe_config->shared_dpll =
+			intel_get_shared_dpll_by_id(dev_priv, pll_id);
+		pll = pipe_config->shared_dpll;
 
-		WARN_ON(!pll->get_hw_state(dev_priv, pll,
-					   &pipe_config->dpll_hw_state));
+		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
+						 &pipe_config->dpll_hw_state));
 
 		tmp = pipe_config->dpll_hw_state.dpll;
 		pipe_config->pixel_multiplier =
@@ -9399,6 +9327,7 @@
 	}
 
 	intel_get_pipe_timings(crtc, pipe_config);
+	intel_get_pipe_src_size(crtc, pipe_config);
 
 	ironlake_get_pfit_config(crtc, pipe_config);
 
@@ -9638,7 +9567,7 @@
 		to_intel_atomic_state(old_state);
 	unsigned int req_cdclk = old_intel_state->dev_cdclk;
 
-	broxton_set_cdclk(dev, req_cdclk);
+	broxton_set_cdclk(to_i915(dev), req_cdclk);
 }
 
 /* compute the max rate for new configuration */
@@ -9706,8 +9635,8 @@
 	val |= LCPLL_CD_SOURCE_FCLK;
 	I915_WRITE(LCPLL_CTL, val);
 
-	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
-			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
+	if (wait_for_us(I915_READ(LCPLL_CTL) &
+			LCPLL_CD_SOURCE_FCLK_DONE, 1))
 		DRM_ERROR("Switching to FCLK failed\n");
 
 	val = I915_READ(LCPLL_CTL);
@@ -9741,8 +9670,8 @@
 	val &= ~LCPLL_CD_SOURCE_FCLK;
 	I915_WRITE(LCPLL_CTL, val);
 
-	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
-				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+	if (wait_for_us((I915_READ(LCPLL_CTL) &
+			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
 		DRM_ERROR("Switching back to LCPLL failed\n");
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -9821,72 +9750,193 @@
 				enum port port,
 				struct intel_crtc_state *pipe_config)
 {
+	enum intel_dpll_id id;
+
 	switch (port) {
 	case PORT_A:
 		pipe_config->ddi_pll_sel = SKL_DPLL0;
-		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+		id = DPLL_ID_SKL_DPLL0;
 		break;
 	case PORT_B:
 		pipe_config->ddi_pll_sel = SKL_DPLL1;
-		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+		id = DPLL_ID_SKL_DPLL1;
 		break;
 	case PORT_C:
 		pipe_config->ddi_pll_sel = SKL_DPLL2;
-		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+		id = DPLL_ID_SKL_DPLL2;
 		break;
 	default:
 		DRM_ERROR("Incorrect port type\n");
+		return;
 	}
+
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
 				enum port port,
 				struct intel_crtc_state *pipe_config)
 {
-	u32 temp, dpll_ctl1;
+	enum intel_dpll_id id;
+	u32 temp;
 
 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
 	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
 
 	switch (pipe_config->ddi_pll_sel) {
 	case SKL_DPLL0:
-		/*
-		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
-		 * of the shared DPLL framework and thus needs to be read out
-		 * separately
-		 */
-		dpll_ctl1 = I915_READ(DPLL_CTRL1);
-		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+		id = DPLL_ID_SKL_DPLL0;
 		break;
 	case SKL_DPLL1:
-		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+		id = DPLL_ID_SKL_DPLL1;
 		break;
 	case SKL_DPLL2:
-		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+		id = DPLL_ID_SKL_DPLL2;
 		break;
 	case SKL_DPLL3:
-		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+		id = DPLL_ID_SKL_DPLL3;
 		break;
+	default:
+		MISSING_CASE(pipe_config->ddi_pll_sel);
+		return;
 	}
+
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 				enum port port,
 				struct intel_crtc_state *pipe_config)
 {
+	enum intel_dpll_id id;
+
 	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
 	switch (pipe_config->ddi_pll_sel) {
 	case PORT_CLK_SEL_WRPLL1:
-		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+		id = DPLL_ID_WRPLL1;
 		break;
 	case PORT_CLK_SEL_WRPLL2:
-		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+		id = DPLL_ID_WRPLL2;
 		break;
 	case PORT_CLK_SEL_SPLL:
-		pipe_config->shared_dpll = DPLL_ID_SPLL;
+		id = DPLL_ID_SPLL;
+		break;
+	case PORT_CLK_SEL_LCPLL_810:
+		id = DPLL_ID_LCPLL_810;
+		break;
+	case PORT_CLK_SEL_LCPLL_1350:
+		id = DPLL_ID_LCPLL_1350;
+		break;
+	case PORT_CLK_SEL_LCPLL_2700:
+		id = DPLL_ID_LCPLL_2700;
+		break;
+	default:
+		MISSING_CASE(pipe_config->ddi_pll_sel);
+		/* fall through */
+	case PORT_CLK_SEL_NONE:
+		return;
+	}
+
+	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+				     struct intel_crtc_state *pipe_config,
+				     unsigned long *power_domain_mask)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+	u32 tmp;
+
+	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+
+	/*
+	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
+	 * consistency and less surprising code; it's in the always-on power well).
+	 */
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+	if (tmp & TRANS_DDI_FUNC_ENABLE) {
+		enum pipe trans_edp_pipe;
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		default:
+			WARN(1, "unknown pipe linked to edp transcoder\n");
+		case TRANS_DDI_EDP_INPUT_A_ONOFF:
+		case TRANS_DDI_EDP_INPUT_A_ON:
+			trans_edp_pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			trans_edp_pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			trans_edp_pipe = PIPE_C;
+			break;
+		}
+
+		if (trans_edp_pipe == crtc->pipe)
+			pipe_config->cpu_transcoder = TRANSCODER_EDP;
+	}
+
+	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return false;
+	*power_domain_mask |= BIT(power_domain);
+
+	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+	return tmp & PIPECONF_ENABLE;
+}
+
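[Editor's note] hsw_get_transcoder_state() above and the DSI variant below follow the same readout pattern: take a power-well reference only if the well is already enabled, record the domain in the caller's mask so the reference can be dropped after readout, and only then touch the registers. A condensed sketch of that pattern, assuming a hypothetical helper name (readout_with_power); the i915 calls used are the ones from the functions above:

/* Illustrative sketch only -- not part of this patch. */
static bool readout_with_power(struct drm_i915_private *dev_priv,
			       enum intel_display_power_domain domain,
			       unsigned long *power_domain_mask,
			       i915_reg_t reg, u32 *val)
{
	/* Never power the well up just to read state back. */
	if (!intel_display_power_get_if_enabled(dev_priv, domain))
		return false;

	/* Remember the reference; the caller puts it once readout is done. */
	*power_domain_mask |= BIT(domain);

	*val = I915_READ(reg);
	return true;
}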
+static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
+					 struct intel_crtc_state *pipe_config,
+					 unsigned long *power_domain_mask)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain power_domain;
+	enum port port;
+	enum transcoder cpu_transcoder;
+	u32 tmp;
+
+	pipe_config->has_dsi_encoder = false;
+
+	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
+		if (port == PORT_A)
+			cpu_transcoder = TRANSCODER_DSI_A;
+		else
+			cpu_transcoder = TRANSCODER_DSI_C;
+
+		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+			continue;
+		*power_domain_mask |= BIT(power_domain);
+
+		/*
+		 * The PLL needs to be enabled with a valid divider
+		 * configuration, otherwise accessing DSI registers will hang
+		 * the machine. See BSpec North Display Engine
+		 * registers/MIPI[BXT]. We can break out here early, since we
+		 * need the same DSI PLL to be enabled for both DSI ports.
+		 */
+		if (!intel_dsi_pll_is_enabled(dev_priv))
+			break;
+
+		/* XXX: this works for video mode only */
+		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+		if (!(tmp & DPI_ENABLE))
+			continue;
+
+		tmp = I915_READ(MIPI_CTRL(port));
+		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
+			continue;
+
+		pipe_config->cpu_transcoder = cpu_transcoder;
+		pipe_config->has_dsi_encoder = true;
 		break;
 	}
+
+	return pipe_config->has_dsi_encoder;
 }
 
 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
@@ -9909,11 +9959,10 @@
 	else
 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
 
-	if (pipe_config->shared_dpll >= 0) {
-		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
-
-		WARN_ON(!pll->get_hw_state(dev_priv, pll,
-					   &pipe_config->dpll_hw_state));
+	pll = pipe_config->shared_dpll;
+	if (pll) {
+		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
+						 &pipe_config->dpll_hw_state));
 	}
 
 	/*
@@ -9940,53 +9989,37 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum intel_display_power_domain power_domain;
 	unsigned long power_domain_mask;
-	uint32_t tmp;
-	bool ret;
+	bool active;
 
 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
 	power_domain_mask = BIT(power_domain);
 
-	ret = false;
+	pipe_config->shared_dpll = NULL;
 
-	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
-	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
 
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
-	if (tmp & TRANS_DDI_FUNC_ENABLE) {
-		enum pipe trans_edp_pipe;
-		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
-		default:
-			WARN(1, "unknown pipe linked to edp transcoder\n");
-		case TRANS_DDI_EDP_INPUT_A_ONOFF:
-		case TRANS_DDI_EDP_INPUT_A_ON:
-			trans_edp_pipe = PIPE_A;
-			break;
-		case TRANS_DDI_EDP_INPUT_B_ONOFF:
-			trans_edp_pipe = PIPE_B;
-			break;
-		case TRANS_DDI_EDP_INPUT_C_ONOFF:
-			trans_edp_pipe = PIPE_C;
-			break;
-		}
-
-		if (trans_edp_pipe == crtc->pipe)
-			pipe_config->cpu_transcoder = TRANSCODER_EDP;
+	if (IS_BROXTON(dev_priv)) {
+		bxt_get_dsi_transcoder_state(crtc, pipe_config,
+					     &power_domain_mask);
+		WARN_ON(active && pipe_config->has_dsi_encoder);
+		if (pipe_config->has_dsi_encoder)
+			active = true;
 	}
 
-	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
-	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
-		goto out;
-	power_domain_mask |= BIT(power_domain);
-
-	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
-	if (!(tmp & PIPECONF_ENABLE))
+	if (!active)
 		goto out;
 
-	haswell_get_ddi_port_state(crtc, pipe_config);
+	if (!pipe_config->has_dsi_encoder) {
+		haswell_get_ddi_port_state(crtc, pipe_config);
+		intel_get_pipe_timings(crtc, pipe_config);
+	}
 
-	intel_get_pipe_timings(crtc, pipe_config);
+	intel_get_pipe_src_size(crtc, pipe_config);
+
+	pipe_config->gamma_mode =
+		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
 	if (INTEL_INFO(dev)->gen >= 9) {
 		skl_init_scalers(dev, crtc, pipe_config);
@@ -10010,20 +10043,19 @@
 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 			(I915_READ(IPS_CTL) & IPS_ENABLE);
 
-	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
 		pipe_config->pixel_multiplier =
 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
 	} else {
 		pipe_config->pixel_multiplier = 1;
 	}
 
-	ret = true;
-
 out:
 	for_each_power_domain(power_domain, power_domain_mask)
 		intel_display_power_put(dev_priv, power_domain);
 
-	return ret;
+	return active;
 }
 
 static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
@@ -10216,21 +10248,6 @@
 	return true;
 }
 
-static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				 u16 *blue, uint32_t start, uint32_t size)
-{
-	int end = (start + size > 256) ? 256 : start + size, i;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	for (i = start; i < end; i++) {
-		intel_crtc->lut_r[i] = red[i] >> 8;
-		intel_crtc->lut_g[i] = green[i] >> 8;
-		intel_crtc->lut_b[i] = blue[i] >> 8;
-	}
-
-	intel_crtc_load_lut(crtc);
-}
-
 /* VESA 640x480x72Hz mode to set on the pipe */
 static struct drm_display_mode load_detect_mode = {
 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -10718,19 +10735,18 @@
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	/* read out port_clock from the DPLL */
 	i9xx_crtc_clock_get(crtc, pipe_config);
 
 	/*
-	 * This value does not include pixel_multiplier.
-	 * We will check that port_clock and adjusted_mode.crtc_clock
-	 * agree once we know their relationship in the encoder's
-	 * get_config() function.
+	 * In case there is an active pipe without active ports,
+	 * we may need some idea for the dotclock anyway.
+	 * Calculate one based on the FDI configuration.
 	 */
 	pipe_config->base.adjusted_mode.crtc_clock =
-		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
 					 &pipe_config->fdi_m_n);
 }
 
@@ -10849,7 +10865,7 @@
 	struct drm_plane *primary = crtc->base.primary;
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, primary->state);
+	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 
 	if (work->flip_queued_req)
@@ -10923,9 +10939,10 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reset_counter;
 
-	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
-	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+	if (crtc->reset_counter != reset_counter)
 		return true;
 
 	/*
@@ -11003,7 +11020,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11019,13 +11036,13 @@
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11038,7 +11055,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11051,13 +11068,13 @@
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11070,7 +11087,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11084,10 +11101,10 @@
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+	intel_ring_emit(engine, fb->pitches[0]);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11096,7 +11113,7 @@
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_emit(engine, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11109,7 +11126,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11119,10 +11136,10 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+	intel_ring_emit(engine, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11132,7 +11149,7 @@
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	intel_ring_emit(engine, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
@@ -11145,7 +11162,7 @@
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -11166,7 +11183,7 @@
 	}
 
 	len = 4;
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11204,36 +11221,36 @@
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (ring->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					DERRMR_PIPEB_PRI_FLIP_DONE |
-					DERRMR_PIPEC_PRI_FLIP_DONE));
+	if (engine->id == RCS) {
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, DERRMR);
+		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+					  DERRMR_PIPEB_PRI_FLIP_DONE |
+					  DERRMR_PIPEC_PRI_FLIP_DONE));
 		if (IS_GEN8(dev))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
+			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
+			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
 					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+		intel_ring_emit_reg(engine, DERRMR);
+		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
 		if (IS_GEN8(dev)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_emit(engine, 0);
+			intel_ring_emit(engine, MI_NOOP);
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
+	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+	intel_ring_emit(engine, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc->unpin_work);
 	return 0;
 }
 
-static bool use_mmio_flip(struct intel_engine_cs *ring,
+static bool use_mmio_flip(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_object *obj)
 {
 	/*
@@ -11244,10 +11261,10 @@
 	 * So using MMIO flips there would disrupt this mechanism.
 	 */
 
-	if (ring == NULL)
+	if (engine == NULL)
 		return true;
 
-	if (INTEL_INFO(ring->dev)->gen < 5)
+	if (INTEL_INFO(engine->dev)->gen < 5)
 		return false;
 
 	if (i915.use_mmio_flip < 0)
@@ -11261,7 +11278,7 @@
 						       false))
 		return true;
 	else
-		return ring != i915_gem_request_get_ring(obj->last_write_req);
+		return engine != i915_gem_request_get_engine(obj->last_write_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11379,7 +11396,6 @@
 
 	if (mmio_flip->req) {
 		WARN_ON(__i915_wait_request(mmio_flip->req,
-					    mmio_flip->crtc->reset_counter,
 					    false, NULL,
 					    &mmio_flip->i915->rps.mmioflips));
 		i915_gem_request_unreference__unlocked(mmio_flip->req);
@@ -11507,7 +11523,7 @@
 	struct drm_plane *primary = crtc->primary;
 	enum pipe pipe = intel_crtc->pipe;
 	struct intel_unpin_work *work;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool mmio_flip;
 	struct drm_i915_gem_request *request = NULL;
 	int ret;
@@ -11587,28 +11603,33 @@
 	if (ret)
 		goto cleanup;
 
+	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+		ret = -EIO;
+		goto cleanup;
+	}
+
 	atomic_inc(&intel_crtc->unpin_work_count);
-	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		ring = &dev_priv->ring[BCS];
+		engine = &dev_priv->engine[BCS];
 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
 			/* vlv: DISPLAY_FLIP fails to change tiling */
-			ring = NULL;
+			engine = NULL;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-		ring = &dev_priv->ring[BCS];
+		engine = &dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		ring = i915_gem_request_get_ring(obj->last_write_req);
-		if (ring == NULL || ring->id != RCS)
-			ring = &dev_priv->ring[BCS];
+		engine = i915_gem_request_get_engine(obj->last_write_req);
+		if (engine == NULL || engine->id != RCS)
+			engine = &dev_priv->engine[BCS];
 	} else {
-		ring = &dev_priv->ring[RCS];
+		engine = &dev_priv->engine[RCS];
 	}
 
-	mmio_flip = use_mmio_flip(ring, obj);
+	mmio_flip = use_mmio_flip(engine, obj);
 
 	/* When using CS flips, we want to emit semaphores between rings.
 	 * However, when using mmio flips we will create a task to do the
@@ -11616,13 +11637,12 @@
 	 * into the display plane and skip any waits.
 	 */
 	if (!mmio_flip) {
-		ret = i915_gem_object_sync(obj, ring, &request);
+		ret = i915_gem_object_sync(obj, engine, &request);
 		if (ret)
 			goto cleanup_pending;
 	}
 
-	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
-					 crtc->primary->state);
+	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
 	if (ret)
 		goto cleanup_pending;
 
@@ -11639,7 +11659,7 @@
 					obj->last_write_req);
 	} else {
 		if (!request) {
-			request = i915_gem_request_alloc(ring, NULL);
+			request = i915_gem_request_alloc(engine, NULL);
 			if (IS_ERR(request)) {
 				ret = PTR_ERR(request);
 				goto cleanup_unpin;
@@ -11672,10 +11692,10 @@
 	return 0;
 
 cleanup_unpin:
-	intel_unpin_fb_obj(fb, crtc->primary->state);
+	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
 	if (!IS_ERR_OR_NULL(request))
-		i915_gem_request_cancel(request);
+		i915_add_request_no_flush(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
@@ -11725,7 +11745,7 @@
 
 		if (ret == 0 && event) {
 			spin_lock_irq(&dev->event_lock);
-			drm_send_vblank_event(dev, pipe, event);
+			drm_crtc_send_vblank_event(crtc, event);
 			spin_unlock_irq(&dev->event_lock);
 		}
 	}
@@ -11785,6 +11805,7 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_plane *plane = plane_state->plane;
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane_state *old_plane_state =
 		to_intel_plane_state(plane->state);
 	int idx = intel_crtc->base.base.id, ret;
@@ -11833,42 +11854,43 @@
 			 plane->base.id, was_visible, visible,
 			 turn_off, turn_on, mode_changed);
 
-	if (turn_on || turn_off) {
-		pipe_config->wm_changed = true;
+	if (turn_on) {
+		pipe_config->update_wm_pre = true;
+
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR)
+			pipe_config->disable_cxsr = true;
+	} else if (turn_off) {
+		pipe_config->update_wm_post = true;
 
 		/* must disable cxsr around plane enable/disable */
 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
 			pipe_config->disable_cxsr = true;
 	} else if (intel_wm_need_update(plane, plane_state)) {
-		pipe_config->wm_changed = true;
+		/* FIXME bollocks */
+		pipe_config->update_wm_pre = true;
+		pipe_config->update_wm_post = true;
 	}
 
+	/* Pre-gen9 platforms need two-step watermark updates */
+	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
+	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
+
 	if (visible || was_visible)
-		intel_crtc->atomic.fb_bits |=
-			to_intel_plane(plane)->frontbuffer_bit;
+		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
 
-	switch (plane->type) {
-	case DRM_PLANE_TYPE_PRIMARY:
-		intel_crtc->atomic.post_enable_primary = turn_on;
-		intel_crtc->atomic.update_fbc = true;
+	/*
+	 * WaCxSRDisabledForSpriteScaling:ivb
+	 *
+	 * cstate->update_wm_pre/post was already set above, so this flag will
+	 * take effect when we commit and program watermarks.
+	 */
+	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+	    needs_scaling(to_intel_plane_state(plane_state)) &&
+	    !needs_scaling(old_plane_state))
+		pipe_config->disable_lp_wm = true;
 
-		break;
-	case DRM_PLANE_TYPE_CURSOR:
-		break;
-	case DRM_PLANE_TYPE_OVERLAY:
-		/*
-		 * WaCxSRDisabledForSpriteScaling:ivb
-		 *
-		 * cstate->update_wm was already set above, so this flag will
-		 * take effect when we commit and program watermarks.
-		 */
-		if (IS_IVYBRIDGE(dev) &&
-		    needs_scaling(to_intel_plane_state(plane_state)) &&
-		    !needs_scaling(old_plane_state))
-			pipe_config->disable_lp_wm = true;
-
-		break;
-	}
 	return 0;
 }
 
@@ -11940,24 +11962,54 @@
 	}
 
 	if (mode_changed && !crtc_state->active)
-		pipe_config->wm_changed = true;
+		pipe_config->update_wm_post = true;
 
 	if (mode_changed && crtc_state->enable &&
 	    dev_priv->display.crtc_compute_clock &&
-	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+	    !WARN_ON(pipe_config->shared_dpll)) {
 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
 							   pipe_config);
 		if (ret)
 			return ret;
 	}
 
-	ret = 0;
-	if (dev_priv->display.compute_pipe_wm) {
-		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
+	if (crtc_state->color_mgmt_changed) {
+		ret = intel_color_check(crtc, crtc_state);
 		if (ret)
 			return ret;
 	}
 
+	ret = 0;
+	if (dev_priv->display.compute_pipe_wm) {
+		ret = dev_priv->display.compute_pipe_wm(pipe_config);
+		if (ret) {
+			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
+			return ret;
+		}
+	}
+
+	if (dev_priv->display.compute_intermediate_wm &&
+	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
+		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+			return 0;
+
+		/*
+		 * Calculate 'intermediate' watermarks that satisfy both the
+		 * old state and the new state.  We can program these
+		 * immediately.
+		 */
+		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+								intel_crtc,
+								pipe_config);
+		if (ret) {
+			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+			return ret;
+		}
+	} else if (dev_priv->display.compute_intermediate_wm) {
+		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
+			pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+	}
+
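[Editor's note] The intermediate/optimal split added here only makes sense together with the commit-side ordering further down (optimize_watermarks() runs after the vblank wait). A compressed view of the flow, using only the hook names that appear in this patch; where the intermediate values get programmed during commit is inferred and not spelled out here:

/*
 * Illustrative flow only -- not part of this patch.
 *
 * atomic check:
 *   compute_pipe_wm(pipe_config)           -> pipe_config->wm (optimal target)
 *   compute_intermediate_wm(dev, crtc, pipe_config)
 *                                           -> wm.intermediate, valid for both
 *                                              the old and the new state
 * atomic commit:
 *   program wm.intermediate (safe while the transition is in flight)
 *   update planes, wait for vblank
 *   optimize_watermarks(crtc_state)         -> program the optimal watermarks
 */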
 	if (INTEL_INFO(dev)->gen >= 9) {
 		if (mode_changed)
 			ret = skl_update_scaler_crtc(pipe_config);
@@ -11972,7 +12024,6 @@
 
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
-	.load_lut = intel_crtc_load_lut,
 	.atomic_begin = intel_begin_crtc_commit,
 	.atomic_flush = intel_finish_crtc_commit,
 	.atomic_check = intel_crtc_atomic_check,
@@ -11983,11 +12034,16 @@
 	struct intel_connector *connector;
 
 	for_each_intel_connector(dev, connector) {
+		if (connector->base.state->crtc)
+			drm_connector_unreference(&connector->base);
+
 		if (connector->base.encoder) {
 			connector->base.state->best_encoder =
 				connector->base.encoder;
 			connector->base.state->crtc =
 				connector->base.encoder->crtc;
+
+			drm_connector_reference(&connector->base);
 		} else {
 			connector->base.state->best_encoder = NULL;
 			connector->base.state->crtc = NULL;
@@ -12089,7 +12145,7 @@
 	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
 		      context, pipe_config, pipe_name(crtc->pipe));
 
-	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
 	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
 		      pipe_config->pipe_bpp, pipe_config->dither);
 	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
@@ -12165,7 +12221,7 @@
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.wrpll,
 			      pipe_config->dpll_hw_state.spll);
@@ -12268,7 +12324,7 @@
 	struct drm_crtc_state tmp_state;
 	struct intel_crtc_scaler_state scaler_state;
 	struct intel_dpll_hw_state dpll_hw_state;
-	enum intel_dpll_id shared_dpll;
+	struct intel_shared_dpll *shared_dpll;
 	uint32_t ddi_pll_sel;
 	bool force_thru;
 
@@ -12538,6 +12594,15 @@
 		ret = false; \
 	}
 
+#define PIPE_CONF_CHECK_P(name)	\
+	if (current_config->name != pipe_config->name) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected %p, found %p)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		ret = false; \
+	}
+
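[Editor's note] PIPE_CONF_CHECK_P mirrors PIPE_CONF_CHECK_I/X but compares and prints pointers, which is what shared_dpll now is. Expanded by hand for its one user further down, the check is simply:

/* Roughly what PIPE_CONF_CHECK_P(shared_dpll) expands to (illustrative). */
if (current_config->shared_dpll != pipe_config->shared_dpll) {
	INTEL_ERR_OR_DBG_KMS("mismatch in shared_dpll "
			     "(expected %p, found %p)\n",
			     current_config->shared_dpll,
			     pipe_config->shared_dpll);
	ret = false;
}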
 #define PIPE_CONF_CHECK_M_N(name) \
 	if (!intel_compare_link_m_n(&current_config->name, \
 				    &pipe_config->name,\
@@ -12558,6 +12623,11 @@
 		ret = false; \
 	}
 
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low RR.
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
 	if (!intel_compare_link_m_n(&current_config->name, \
 				    &pipe_config->name, adjust) && \
@@ -12585,22 +12655,6 @@
 		ret = false; \
 	}
 
-/* This is required for BDW+ where there is only one set of registers for
- * switching between high and low RR.
- * This macro can be used whenever a comparison has to be made between one
- * hw state and multiple sw state variables.
- */
-#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
-	if ((current_config->name != pipe_config->name) && \
-		(current_config->alt_name != pipe_config->name)) { \
-			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
-				  "(expected %i or %i, found %i)\n", \
-				  current_config->name, \
-				  current_config->alt_name, \
-				  pipe_config->name); \
-			ret = false; \
-	}
-
 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
@@ -12681,7 +12735,7 @@
 	PIPE_CONF_CHECK_X(gmch_pfit.control);
 	/* pfit ratios are autocomputed by the hw on gen4+ */
 	if (INTEL_INFO(dev)->gen < 4)
-		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
 
 	if (!adjust) {
@@ -12705,7 +12759,7 @@
 
 	PIPE_CONF_CHECK_X(ddi_pll_sel);
 
-	PIPE_CONF_CHECK_I(shared_dpll);
+	PIPE_CONF_CHECK_P(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
@@ -12716,6 +12770,9 @@
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
 
+	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+	PIPE_CONF_CHECK_X(dsi_pll.div);
+
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
@@ -12724,7 +12781,7 @@
 
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
-#undef PIPE_CONF_CHECK_I_ALT
+#undef PIPE_CONF_CHECK_P
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
@@ -12733,48 +12790,61 @@
 	return ret;
 }
 
-static void check_wm_state(struct drm_device *dev)
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+					   const struct intel_crtc_state *pipe_config)
 {
+	if (pipe_config->has_pch_encoder) {
+		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+							    &pipe_config->fdi_m_n);
+		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+
+		/*
+		 * FDI already provided one idea for the dotclock.
+		 * Yell if the encoder disagrees.
+		 */
+		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+		     fdi_dotclock, dotclock);
+	}
+}
+
+static void verify_wm_state(struct drm_crtc *crtc,
+			    struct drm_crtc_state *new_state)
+{
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
-	struct intel_crtc *intel_crtc;
+	struct skl_ddb_entry *hw_entry, *sw_entry;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const enum pipe pipe = intel_crtc->pipe;
 	int plane;
 
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
 		return;
 
 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
-	for_each_intel_crtc(dev, intel_crtc) {
-		struct skl_ddb_entry *hw_entry, *sw_entry;
-		const enum pipe pipe = intel_crtc->pipe;
-
-		if (!intel_crtc->active)
-			continue;
-
-		/* planes */
-		for_each_plane(dev_priv, pipe, plane) {
-			hw_entry = &hw_ddb.plane[pipe][plane];
-			sw_entry = &sw_ddb->plane[pipe][plane];
-
-			if (skl_ddb_entry_equal(hw_entry, sw_entry))
-				continue;
-
-			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
-				  "(expected (%u,%u), found (%u,%u))\n",
-				  pipe_name(pipe), plane + 1,
-				  sw_entry->start, sw_entry->end,
-				  hw_entry->start, hw_entry->end);
-		}
-
-		/* cursor */
-		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+	/* planes */
+	for_each_plane(dev_priv, pipe, plane) {
+		hw_entry = &hw_ddb.plane[pipe][plane];
+		sw_entry = &sw_ddb->plane[pipe][plane];
 
 		if (skl_ddb_entry_equal(hw_entry, sw_entry))
 			continue;
 
+		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+			  "(expected (%u,%u), found (%u,%u))\n",
+			  pipe_name(pipe), plane + 1,
+			  sw_entry->start, sw_entry->end,
+			  hw_entry->start, hw_entry->end);
+	}
+
+	/* cursor */
+	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
 		DRM_ERROR("mismatch in DDB state pipe %c cursor "
 			  "(expected (%u,%u), found (%u,%u))\n",
 			  pipe_name(pipe),
@@ -12784,20 +12854,18 @@
 }
 
 static void
-check_connector_state(struct drm_device *dev,
-		      struct drm_atomic_state *old_state)
+verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
 {
-	struct drm_connector_state *old_conn_state;
 	struct drm_connector *connector;
-	int i;
 
-	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+	drm_for_each_connector(connector, dev) {
 		struct drm_encoder *encoder = connector->encoder;
 		struct drm_connector_state *state = connector->state;
 
-		/* This also checks the encoder/connector hw state with the
-		 * ->get_hw_state callbacks. */
-		intel_connector_check_state(to_intel_connector(connector));
+		if (state->crtc != crtc)
+			continue;
+
+		intel_connector_verify_state(to_intel_connector(connector));
 
 		I915_STATE_WARN(state->best_encoder != encoder,
 		     "connector's atomic encoder doesn't match legacy encoder\n");
@@ -12805,7 +12873,7 @@
 }
 
 static void
-check_encoder_state(struct drm_device *dev)
+verify_encoder_state(struct drm_device *dev)
 {
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
@@ -12845,149 +12913,186 @@
 }
 
 static void
-check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
+verify_crtc_state(struct drm_crtc *crtc,
+		  struct drm_crtc_state *old_crtc_state,
+		  struct drm_crtc_state *new_crtc_state)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
-	struct drm_crtc_state *old_crtc_state;
-	struct drm_crtc *crtc;
-	int i;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config, *sw_config;
+	struct drm_atomic_state *old_state;
+	bool active;
 
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		struct intel_crtc_state *pipe_config, *sw_config;
-		bool active;
+	old_state = old_crtc_state->state;
+	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
+	pipe_config = to_intel_crtc_state(old_crtc_state);
+	memset(pipe_config, 0, sizeof(*pipe_config));
+	pipe_config->base.crtc = crtc;
+	pipe_config->base.state = old_state;
 
-		if (!needs_modeset(crtc->state) &&
-		    !to_intel_crtc_state(crtc->state)->update_pipe)
-			continue;
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
-		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
-		pipe_config = to_intel_crtc_state(old_crtc_state);
-		memset(pipe_config, 0, sizeof(*pipe_config));
-		pipe_config->base.crtc = crtc;
-		pipe_config->base.state = old_state;
+	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
 
-		DRM_DEBUG_KMS("[CRTC:%d]\n",
-			      crtc->base.id);
+	/* hw state is inconsistent with the pipe quirk */
+	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+		active = new_crtc_state->active;
 
-		active = dev_priv->display.get_pipe_config(intel_crtc,
-							   pipe_config);
+	I915_STATE_WARN(new_crtc_state->active != active,
+	     "crtc active state doesn't match with hw state "
+	     "(expected %i, found %i)\n", new_crtc_state->active, active);
 
-		/* hw state is inconsistent with the pipe quirk */
-		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
-			active = crtc->state->active;
+	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+	     "transitional active state does not match atomic hw state "
+	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
 
-		I915_STATE_WARN(crtc->state->active != active,
-		     "crtc active state doesn't match with hw state "
-		     "(expected %i, found %i)\n", crtc->state->active, active);
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		enum pipe pipe;
 
-		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
-		     "transitional active state does not match atomic hw state "
-		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+		active = encoder->get_hw_state(encoder, &pipe);
+		I915_STATE_WARN(active != new_crtc_state->active,
+			"[ENCODER:%i] active %i with crtc active %i\n",
+			encoder->base.base.id, active, new_crtc_state->active);
 
-		for_each_encoder_on_crtc(dev, crtc, encoder) {
-			enum pipe pipe;
+		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+				"Encoder connected to wrong pipe %c\n",
+				pipe_name(pipe));
 
-			active = encoder->get_hw_state(encoder, &pipe);
-			I915_STATE_WARN(active != crtc->state->active,
-				"[ENCODER:%i] active %i with crtc active %i\n",
-				encoder->base.base.id, active, crtc->state->active);
+		if (active)
+			encoder->get_config(encoder, pipe_config);
+	}
 
-			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
-					"Encoder connected to wrong pipe %c\n",
-					pipe_name(pipe));
+	if (!new_crtc_state->active)
+		return;
 
-			if (active)
-				encoder->get_config(encoder, pipe_config);
-		}
+	intel_pipe_config_sanity_check(dev_priv, pipe_config);
 
-		if (!crtc->state->active)
-			continue;
-
-		sw_config = to_intel_crtc_state(crtc->state);
-		if (!intel_pipe_config_compare(dev, sw_config,
-					       pipe_config, false)) {
-			I915_STATE_WARN(1, "pipe state doesn't match!\n");
-			intel_dump_pipe_config(intel_crtc, pipe_config,
-					       "[hw state]");
-			intel_dump_pipe_config(intel_crtc, sw_config,
-					       "[sw state]");
-		}
+	sw_config = to_intel_crtc_state(crtc->state);
+	if (!intel_pipe_config_compare(dev, sw_config,
+				       pipe_config, false)) {
+		I915_STATE_WARN(1, "pipe state doesn't match!\n");
+		intel_dump_pipe_config(intel_crtc, pipe_config,
+				       "[hw state]");
+		intel_dump_pipe_config(intel_crtc, sw_config,
+				       "[sw state]");
 	}
 }
 
 static void
-check_shared_dpll_state(struct drm_device *dev)
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll,
+			 struct drm_crtc *crtc,
+			 struct drm_crtc_state *new_state)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
 	struct intel_dpll_hw_state dpll_hw_state;
-	int i;
+	unsigned crtc_mask;
+	bool active;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-		int enabled_crtcs = 0, active_crtcs = 0;
-		bool active;
+	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
-		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+	DRM_DEBUG_KMS("%s\n", pll->name);
 
-		DRM_DEBUG_KMS("%s\n", pll->name);
+	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
 
-		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
-
-		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
-		     "more active pll users than references: %i vs %i\n",
-		     pll->active, hweight32(pll->config.crtc_mask));
-		I915_STATE_WARN(pll->active && !pll->on,
+	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
+		I915_STATE_WARN(!pll->on && pll->active_mask,
 		     "pll in active use but not on in sw tracking\n");
-		I915_STATE_WARN(pll->on && !pll->active,
-		     "pll in on but not on in use in sw tracking\n");
+		I915_STATE_WARN(pll->on && !pll->active_mask,
+		     "pll is on but not used by any active crtc\n");
 		I915_STATE_WARN(pll->on != active,
 		     "pll on state mismatch (expected %i, found %i)\n",
 		     pll->on, active);
+	}
 
-		for_each_intel_crtc(dev, crtc) {
-			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
-				enabled_crtcs++;
-			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
-				active_crtcs++;
-		}
-		I915_STATE_WARN(pll->active != active_crtcs,
-		     "pll active crtcs mismatch (expected %i, found %i)\n",
-		     pll->active, active_crtcs);
-		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
-		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
-		     hweight32(pll->config.crtc_mask), enabled_crtcs);
+	if (!crtc) {
+		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
+				"more active pll users than references: %x vs %x\n",
+				pll->active_mask, pll->config.crtc_mask);
 
-		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
-				       sizeof(dpll_hw_state)),
-		     "pll hw state mismatch\n");
+		return;
+	}
+
+	crtc_mask = 1 << drm_crtc_index(crtc);
+
+	if (new_state->active)
+		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
+				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+	else
+		I915_STATE_WARN(pll->active_mask & crtc_mask,
+				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+
+	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
+			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
+			crtc_mask, pll->config.crtc_mask);
+
+	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
+					  &dpll_hw_state,
+					  sizeof(dpll_hw_state)),
+			"pll hw state mismatch\n");
+}
+
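[Editor's note] The verification above leans on two bitmasks kept per shared PLL: config.crtc_mask (CRTCs that reference the PLL in software state) and active_mask (CRTCs currently running on it). As a sketch with a hypothetical helper name (pll_used_by_crtc), testing a CRTC against those masks matches the expressions used in verify_single_dpll_state():

/* Illustrative sketch only -- not part of this patch. */
static bool pll_used_by_crtc(const struct intel_shared_dpll *pll,
			     struct drm_crtc *crtc, bool active_only)
{
	unsigned int crtc_mask = 1 << drm_crtc_index(crtc);

	/* active_mask: currently driving the CRTC; crtc_mask: referenced. */
	return active_only ? (pll->active_mask & crtc_mask) :
			     (pll->config.crtc_mask & crtc_mask);
}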
+static void
+verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
+			 struct drm_crtc_state *old_crtc_state,
+			 struct drm_crtc_state *new_crtc_state)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
+	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+
+	if (new_state->shared_dpll)
+		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+
+	if (old_state->shared_dpll &&
+	    old_state->shared_dpll != new_state->shared_dpll) {
+		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+		struct intel_shared_dpll *pll = old_state->shared_dpll;
+
+		I915_STATE_WARN(pll->active_mask & crtc_mask,
+				"pll active mismatch (didn't expect pipe %c in active mask)\n",
+				pipe_name(drm_crtc_index(crtc)));
+		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
+				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
+				pipe_name(drm_crtc_index(crtc)));
 	}
 }
 
 static void
-intel_modeset_check_state(struct drm_device *dev,
-			  struct drm_atomic_state *old_state)
+intel_modeset_verify_crtc(struct drm_crtc *crtc,
+			 struct drm_crtc_state *old_state,
+			 struct drm_crtc_state *new_state)
 {
-	check_wm_state(dev);
-	check_connector_state(dev, old_state);
-	check_encoder_state(dev);
-	check_crtc_state(dev, old_state);
-	check_shared_dpll_state(dev);
+	if (!needs_modeset(new_state) &&
+	    !to_intel_crtc_state(new_state)->update_pipe)
+		return;
+
+	verify_wm_state(crtc, new_state);
+	verify_connector_state(crtc->dev, crtc);
+	verify_crtc_state(crtc, old_state, new_state);
+	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
 }
 
-void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
-				     int dotclock)
+static void
+verify_disabled_dpll_state(struct drm_device *dev)
 {
-	/*
-	 * FDI already provided one idea for the dotclock.
-	 * Yell if the encoder disagrees.
-	 */
-	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
-	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
-	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_shared_dpll; i++)
+		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+}
+
+static void
+intel_modeset_verify_disabled(struct drm_device *dev)
+{
+	verify_encoder_state(dev);
+	verify_connector_state(dev, NULL);
+	verify_disabled_dpll_state(dev);
 }
 
 static void update_scanline_offset(struct intel_crtc *crtc)
@@ -13042,20 +13147,21 @@
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
+		struct intel_shared_dpll *old_dpll =
+			to_intel_crtc_state(crtc->state)->shared_dpll;
 
 		if (!needs_modeset(crtc_state))
 			continue;
 
-		to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
+		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
 
-		if (old_dpll == DPLL_ID_PRIVATE)
+		if (!old_dpll)
 			continue;
 
 		if (!shared_dpll)
 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
 
-		shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
 	}
 }
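[Editor's note] The open-coded "shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe)" is replaced by intel_shared_dpll_config_put() above. A plausible sketch of such a helper, assuming the staged state returned by intel_atomic_get_shared_dpll_state() is an array of struct intel_shared_dpll_config indexed by PLL id (the real helper may also warn on inconsistent state):

/* Illustrative sketch only -- not part of this patch. */
static void intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
					 struct intel_shared_dpll *pll,
					 struct intel_crtc *crtc)
{
	/* Drop this CRTC from the PLL's staged reference mask. */
	config[pll->id].crtc_mask &= ~(1 << crtc->pipe);
}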
 
@@ -13267,9 +13373,6 @@
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc_state);
 
-		memset(&to_intel_crtc(crtc)->atomic, 0,
-		       sizeof(struct intel_crtc_atomic_commit));
-
 		/* Catch I915_MODE_FLAG_INHERITED */
 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
 			crtc_state->mode_changed = true;
@@ -13335,7 +13438,7 @@
 
 static int intel_atomic_prepare_commit(struct drm_device *dev,
 				       struct drm_atomic_state *state,
-				       bool async)
+				       bool nonblock)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_plane_state *plane_state;
@@ -13344,8 +13447,8 @@
 	struct drm_crtc *crtc;
 	int i, ret;
 
-	if (async) {
-		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+	if (nonblock) {
+		DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
 		return -EINVAL;
 	}
 
@@ -13366,12 +13469,9 @@
 		return ret;
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
-		u32 reset_counter;
+	mutex_unlock(&dev->struct_mutex);
 
-		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-		mutex_unlock(&dev->struct_mutex);
-
+	if (!ret && !nonblock) {
 		for_each_plane_in_state(state, plane, plane_state, i) {
 			struct intel_plane_state *intel_plane_state =
 				to_intel_plane_state(plane_state);
@@ -13380,25 +13480,18 @@
 				continue;
 
 			ret = __i915_wait_request(intel_plane_state->wait_req,
-						  reset_counter, true,
-						  NULL, NULL);
-
-			/* Swallow -EIO errors to allow updates during hw lockup. */
-			if (ret == -EIO)
-				ret = 0;
-
-			if (ret)
+						  true, NULL, NULL);
+			if (ret) {
+				/* Any hang should be swallowed by the wait */
+				WARN_ON(ret == -EIO);
+				mutex_lock(&dev->struct_mutex);
+				drm_atomic_helper_cleanup_planes(dev, state);
+				mutex_unlock(&dev->struct_mutex);
 				break;
+			}
 		}
-
-		if (!ret)
-			return 0;
-
-		mutex_lock(&dev->struct_mutex);
-		drm_atomic_helper_cleanup_planes(dev, state);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -13440,7 +13533,7 @@
 					drm_crtc_vblank_count(crtc),
 				msecs_to_jiffies(50));
 
-		WARN_ON(!lret);
+		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
 
 		drm_crtc_vblank_put(crtc);
 	}
@@ -13453,12 +13546,12 @@
 		return true;
 
 	/* wm changes, need vblank before final wm's */
-	if (crtc_state->wm_changed)
+	if (crtc_state->update_wm_post)
 		return true;
 
 	/*
 	 * cxsr is re-enabled after vblank.
-	 * This is already handled by crtc_state->wm_changed,
+	 * This is already handled by crtc_state->update_wm_post,
 	 * but added for clarity.
 	 */
 	if (crtc_state->disable_cxsr)
@@ -13471,39 +13564,41 @@
  * intel_atomic_commit - commit validated state object
  * @dev: DRM device
  * @state: the top-level driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a top-level state object that has been validated
  * with drm_atomic_helper_check().
  *
  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
  * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
+ * nonblocking commit.
  *
  * RETURNS
  * Zero for success or -errno.
  */
 static int intel_atomic_commit(struct drm_device *dev,
 			       struct drm_atomic_state *state,
-			       bool async)
+			       bool nonblock)
 {
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *old_crtc_state;
 	struct drm_crtc *crtc;
+	struct intel_crtc_state *intel_cstate;
 	int ret = 0, i;
 	bool hw_check = intel_state->modeset;
 	unsigned long put_domains[I915_MAX_PIPES] = {};
 	unsigned crtc_vblank_mask = 0;
 
-	ret = intel_atomic_prepare_commit(dev, state, async);
+	ret = intel_atomic_prepare_commit(dev, state, nonblock);
 	if (ret) {
 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
 		return ret;
 	}
 
 	drm_atomic_helper_swap_state(dev, state);
-	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
+	dev_priv->wm.config = intel_state->wm_config;
+	intel_shared_dpll_commit(state);
 
 	if (intel_state->modeset) {
 		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13514,7 +13609,7 @@
 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 	}
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 		if (needs_modeset(crtc->state) ||
@@ -13529,10 +13624,10 @@
 		if (!needs_modeset(crtc->state))
 			continue;
 
-		intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
 
-		if (crtc_state->active) {
-			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+		if (old_crtc_state->active) {
+			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
 			dev_priv->display.crtc_disable(crtc);
 			intel_crtc->active = false;
 			intel_fbc_disable(intel_crtc);
@@ -13555,17 +13650,17 @@
 	intel_modeset_update_crtc_state(state);
 
 	if (intel_state->modeset) {
-		intel_shared_dpll_commit(state);
-
 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
 
 		if (dev_priv->display.modeset_commit_cdclk &&
 		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
 			dev_priv->display.modeset_commit_cdclk(state);
+
+		intel_modeset_verify_disabled(dev);
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		bool modeset = needs_modeset(crtc->state);
 		struct intel_crtc_state *pipe_config =
@@ -13578,14 +13673,15 @@
 		}
 
 		if (!modeset)
-			intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
 
-		if (crtc->state->active && intel_crtc->atomic.update_fbc)
+		if (crtc->state->active &&
+		    drm_atomic_get_existing_plane_state(state, crtc->primary))
 			intel_fbc_enable(intel_crtc);
 
 		if (crtc->state->active &&
 		    (crtc->state->planes_changed || update_pipe))
-			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 
 		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
 			crtc_vblank_mask |= 1 << i;
@@ -13596,11 +13692,27 @@
 	if (!state->legacy_cursor_update)
 		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		intel_post_plane_update(to_intel_crtc(crtc));
+	/*
+	 * Now that the vblank has passed, we can go ahead and program the
+	 * optimal watermarks on platforms that need two-step watermark
+	 * programming.
+	 *
+	 * TODO: Move this (and other cleanup) to an async worker eventually.
+	 */
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		intel_cstate = to_intel_crtc_state(crtc->state);
+
+		if (dev_priv->display.optimize_watermarks)
+			dev_priv->display.optimize_watermarks(intel_cstate);
+	}
+
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
 
 		if (put_domains[i])
 			modeset_put_power_domains(dev_priv, put_domains[i]);
+
+		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
 	if (intel_state->modeset)
@@ -13610,9 +13722,6 @@
 	drm_atomic_helper_cleanup_planes(dev, state);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (hw_check)
-		intel_modeset_check_state(dev, state);
-
 	drm_atomic_state_free(state);
 
 	/* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13672,116 +13781,15 @@
 #undef for_each_intel_crtc_masked
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.gamma_set = intel_crtc_gamma_set,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.set_config = drm_atomic_helper_set_config,
+	.set_property = drm_atomic_helper_crtc_set_property,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 	.atomic_duplicate_state = intel_crtc_duplicate_state,
 	.atomic_destroy_state = intel_crtc_destroy_state,
 };
 
-static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
-				      struct intel_shared_dpll *pll,
-				      struct intel_dpll_hw_state *hw_state)
-{
-	uint32_t val;
-
-	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
-		return false;
-
-	val = I915_READ(PCH_DPLL(pll->id));
-	hw_state->dpll = val;
-	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
-	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
-	return val & DPLL_VCO_ENABLE;
-}
-
-static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
-				  struct intel_shared_dpll *pll)
-{
-	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
-	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
-}
-
-static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
-				struct intel_shared_dpll *pll)
-{
-	/* PCH refclock must be enabled first */
-	ibx_assert_pch_refclk_enabled(dev_priv);
-
-	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
-
-	/* Wait for the clocks to stabilize. */
-	POSTING_READ(PCH_DPLL(pll->id));
-	udelay(150);
-
-	/* The pixel multiplier can only be updated once the
-	 * DPLL is enabled and the clocks are stable.
-	 *
-	 * So write it again.
-	 */
-	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
-	POSTING_READ(PCH_DPLL(pll->id));
-	udelay(200);
-}
-
-static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
-				 struct intel_shared_dpll *pll)
-{
-	struct drm_device *dev = dev_priv->dev;
-	struct intel_crtc *crtc;
-
-	/* Make sure no transcoder isn't still depending on us. */
-	for_each_intel_crtc(dev, crtc) {
-		if (intel_crtc_to_shared_dpll(crtc) == pll)
-			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
-	}
-
-	I915_WRITE(PCH_DPLL(pll->id), 0);
-	POSTING_READ(PCH_DPLL(pll->id));
-	udelay(200);
-}
-
-static char *ibx_pch_dpll_names[] = {
-	"PCH DPLL A",
-	"PCH DPLL B",
-};
-
-static void ibx_pch_dpll_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	dev_priv->num_shared_dpll = 2;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		dev_priv->shared_dplls[i].id = i;
-		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
-		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
-		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
-		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
-		dev_priv->shared_dplls[i].get_hw_state =
-			ibx_pch_dpll_get_hw_state;
-	}
-}
-
-static void intel_shared_dpll_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (HAS_DDI(dev))
-		intel_ddi_pll_init(dev);
-	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		ibx_pch_dpll_init(dev);
-	else
-		dev_priv->num_shared_dpll = 0;
-
-	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-}
-
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
@@ -13827,10 +13835,11 @@
 		 */
 		if (needs_modeset(crtc_state))
 			ret = i915_gem_object_wait_rendering(old_obj, true);
-
-		/* Swallow -EIO errors to allow updates during hw lockup. */
-		if (ret && ret != -EIO)
+		if (ret) {
+			/* GPU hangs should have been swallowed by the wait */
+			WARN_ON(ret == -EIO);
 			return ret;
+		}
 	}
 
 	/* For framebuffer backed by dmabuf, wait for fence */
@@ -13855,7 +13864,7 @@
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
+		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
 	}
 
 	if (ret == 0) {
@@ -13899,7 +13908,7 @@
 
 	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
 	    !INTEL_INFO(dev)->cursor_needs_physical))
-		intel_unpin_fb_obj(old_state->fb, old_state);
+		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
 
 	/* prepare_fb aborted? */
 	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
@@ -13907,7 +13916,6 @@
 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
-
 }
 
 int
@@ -13982,6 +13990,11 @@
 	if (modeset)
 		return;
 
+	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
+		intel_color_set_csc(crtc->state);
+		intel_color_load_luts(crtc->state);
+	}
+
 	if (to_intel_crtc_state(crtc->state)->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_state);
 	else if (INTEL_INFO(dev)->gen >= 9)
@@ -14025,20 +14038,19 @@
 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
 						    int pipe)
 {
-	struct intel_plane *primary;
-	struct intel_plane_state *state;
+	struct intel_plane *primary = NULL;
+	struct intel_plane_state *state = NULL;
 	const uint32_t *intel_primary_formats;
 	unsigned int num_formats;
+	int ret;
 
 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-	if (primary == NULL)
-		return NULL;
+	if (!primary)
+		goto fail;
 
 	state = intel_create_plane_state(&primary->base);
-	if (!state) {
-		kfree(primary);
-		return NULL;
-	}
+	if (!state)
+		goto fail;
 	primary->base.state = &state->base;
 
 	primary->can_scale = false;
@@ -14080,10 +14092,12 @@
 		primary->disable_plane = i9xx_disable_primary_plane;
 	}
 
-	drm_universal_plane_init(dev, &primary->base, 0,
-				 &intel_plane_funcs,
-				 intel_primary_formats, num_formats,
-				 DRM_PLANE_TYPE_PRIMARY, NULL);
+	ret = drm_universal_plane_init(dev, &primary->base, 0,
+				       &intel_plane_funcs,
+				       intel_primary_formats, num_formats,
+				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret)
+		goto fail;
 
 	if (INTEL_INFO(dev)->gen >= 4)
 		intel_create_rotation_property(dev, primary);
@@ -14091,6 +14105,12 @@
 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
 
 	return &primary->base;
+
+fail:
+	kfree(state);
+	kfree(primary);
+
+	return NULL;
 }
 
 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
@@ -14207,18 +14227,17 @@
 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
 						   int pipe)
 {
-	struct intel_plane *cursor;
-	struct intel_plane_state *state;
+	struct intel_plane *cursor = NULL;
+	struct intel_plane_state *state = NULL;
+	int ret;
 
 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
-	if (cursor == NULL)
-		return NULL;
+	if (!cursor)
+		goto fail;
 
 	state = intel_create_plane_state(&cursor->base);
-	if (!state) {
-		kfree(cursor);
-		return NULL;
-	}
+	if (!state)
+		goto fail;
 	cursor->base.state = &state->base;
 
 	cursor->can_scale = false;
@@ -14230,11 +14249,13 @@
 	cursor->update_plane = intel_update_cursor_plane;
 	cursor->disable_plane = intel_disable_cursor_plane;
 
-	drm_universal_plane_init(dev, &cursor->base, 0,
-				 &intel_plane_funcs,
-				 intel_cursor_formats,
-				 ARRAY_SIZE(intel_cursor_formats),
-				 DRM_PLANE_TYPE_CURSOR, NULL);
+	ret = drm_universal_plane_init(dev, &cursor->base, 0,
+				       &intel_plane_funcs,
+				       intel_cursor_formats,
+				       ARRAY_SIZE(intel_cursor_formats),
+				       DRM_PLANE_TYPE_CURSOR, NULL);
+	if (ret)
+		goto fail;
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (!dev->mode_config.rotation_property)
@@ -14254,6 +14275,12 @@
 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
 
 	return &cursor->base;
+
+fail:
+	kfree(state);
+	kfree(cursor);
+
+	return NULL;
 }
 
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
@@ -14279,7 +14306,7 @@
 	struct intel_crtc_state *crtc_state = NULL;
 	struct drm_plane *primary = NULL;
 	struct drm_plane *cursor = NULL;
-	int i, ret;
+	int ret;
 
 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
 	if (intel_crtc == NULL)
@@ -14315,13 +14342,6 @@
 	if (ret)
 		goto fail;
 
-	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
-	for (i = 0; i < 256; i++) {
-		intel_crtc->lut_r[i] = i;
-		intel_crtc->lut_g[i] = i;
-		intel_crtc->lut_b[i] = i;
-	}
-
 	/*
 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
 	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
@@ -14346,6 +14366,8 @@
 
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
+	intel_color_init(&intel_crtc->base);
+
 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
 	return;
 
@@ -14470,6 +14492,8 @@
 		intel_ddi_init(dev, PORT_A);
 		intel_ddi_init(dev, PORT_B);
 		intel_ddi_init(dev, PORT_C);
+
+		intel_dsi_init(dev);
 	} else if (HAS_DDI(dev)) {
 		int found;
 
@@ -14839,6 +14863,8 @@
 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 	intel_fb->obj = obj;
 
+	intel_fill_fb_info(dev_priv, &intel_fb->base);
+
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
 		DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -14859,8 +14885,7 @@
 	struct drm_i915_gem_object *obj;
 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
-	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
-						mode_cmd.handles[0]));
+	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
 	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
@@ -14886,23 +14911,13 @@
 	.atomic_state_clear = intel_atomic_state_clear,
 };
 
-/* Set up chip specific display functions */
-static void intel_init_display(struct drm_device *dev)
+/**
+ * intel_init_display_hooks - initialize the display modesetting hooks
+ * @dev_priv: device private
+ */
+void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
-		dev_priv->display.find_dpll = g4x_find_best_dpll;
-	else if (IS_CHERRYVIEW(dev))
-		dev_priv->display.find_dpll = chv_find_best_dpll;
-	else if (IS_VALLEYVIEW(dev))
-		dev_priv->display.find_dpll = vlv_find_best_dpll;
-	else if (IS_PINEVIEW(dev))
-		dev_priv->display.find_dpll = pnv_find_best_dpll;
-	else
-		dev_priv->display.find_dpll = i9xx_find_best_dpll;
-
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_INFO(dev_priv)->gen >= 9) {
 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			skylake_get_initial_plane_config;
@@ -14910,7 +14925,7 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-	} else if (HAS_DDI(dev)) {
+	} else if (HAS_DDI(dev_priv)) {
 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			ironlake_get_initial_plane_config;
@@ -14918,7 +14933,7 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-	} else if (HAS_PCH_SPLIT(dev)) {
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			ironlake_get_initial_plane_config;
@@ -14926,106 +14941,134 @@
 			ironlake_crtc_compute_clock;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			i9xx_get_initial_plane_config;
-		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-	} else {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			i9xx_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+		dev_priv->display.crtc_enable = valleyview_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+	} else if (IS_G4X(dev_priv)) {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			i9xx_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+	} else if (IS_PINEVIEW(dev_priv)) {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			i9xx_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+	} else if (!IS_GEN2(dev_priv)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
 			i9xx_get_initial_plane_config;
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+	} else {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.get_initial_plane_config =
+			i9xx_get_initial_plane_config;
+		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
 	}
 
 	/* Returns the core display clock speed */
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			skylake_get_display_clock_speed;
-	else if (IS_BROXTON(dev))
+	else if (IS_BROXTON(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			broxton_get_display_clock_speed;
-	else if (IS_BROADWELL(dev))
+	else if (IS_BROADWELL(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			broadwell_get_display_clock_speed;
-	else if (IS_HASWELL(dev))
+	else if (IS_HASWELL(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			haswell_get_display_clock_speed;
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			valleyview_get_display_clock_speed;
-	else if (IS_GEN5(dev))
+	else if (IS_GEN5(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			ilk_get_display_clock_speed;
-	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
-		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
+		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i945_get_display_clock_speed;
-	else if (IS_GM45(dev))
+	else if (IS_GM45(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			gm45_get_display_clock_speed;
-	else if (IS_CRESTLINE(dev))
+	else if (IS_CRESTLINE(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i965gm_get_display_clock_speed;
-	else if (IS_PINEVIEW(dev))
+	else if (IS_PINEVIEW(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			pnv_get_display_clock_speed;
-	else if (IS_G33(dev) || IS_G4X(dev))
+	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			g33_get_display_clock_speed;
-	else if (IS_I915G(dev))
+	else if (IS_I915G(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i915_get_display_clock_speed;
-	else if (IS_I945GM(dev) || IS_845G(dev))
+	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i9xx_misc_get_display_clock_speed;
-	else if (IS_I915GM(dev))
+	else if (IS_I915GM(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i915gm_get_display_clock_speed;
-	else if (IS_I865G(dev))
+	else if (IS_I865G(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i865_get_display_clock_speed;
-	else if (IS_I85X(dev))
+	else if (IS_I85X(dev_priv))
 		dev_priv->display.get_display_clock_speed =
 			i85x_get_display_clock_speed;
 	else { /* 830 */
-		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
+		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
 		dev_priv->display.get_display_clock_speed =
 			i830_get_display_clock_speed;
 	}
 
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-	} else if (IS_GEN6(dev)) {
+	} else if (IS_GEN6(dev_priv)) {
 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-	} else if (IS_IVYBRIDGE(dev)) {
+	} else if (IS_IVYBRIDGE(dev_priv)) {
 		/* FIXME: detect B0+ stepping and use auto training */
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-		if (IS_BROADWELL(dev)) {
+		if (IS_BROADWELL(dev_priv)) {
 			dev_priv->display.modeset_commit_cdclk =
 				broadwell_modeset_commit_cdclk;
 			dev_priv->display.modeset_calc_cdclk =
 				broadwell_modeset_calc_cdclk;
 		}
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->display.modeset_commit_cdclk =
 			valleyview_modeset_commit_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
 			valleyview_modeset_calc_cdclk;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		dev_priv->display.modeset_commit_cdclk =
 			broxton_modeset_commit_cdclk;
 		dev_priv->display.modeset_calc_cdclk =
 			broxton_modeset_calc_cdclk;
 	}
 
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_INFO(dev_priv)->gen) {
 	case 2:
 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
 		break;
@@ -15052,8 +15095,6 @@
 		/* Default just returns -ENODEV to indicate unsupported */
 		dev_priv->display.queue_flip = intel_default_queue_flip;
 	}
-
-	mutex_init(&dev_priv->pps_mutex);
 }
 
 /*
@@ -15276,7 +15317,7 @@
 	int i;
 
 	/* Only supported on platforms that use atomic watermark design */
-	if (!dev_priv->display.program_watermarks)
+	if (!dev_priv->display.optimize_watermarks)
 		return;
 
 	/*
@@ -15297,6 +15338,13 @@
 	if (WARN_ON(IS_ERR(state)))
 		goto fail;
 
+	/*
+	 * Hardware readout is the only time we don't want to calculate
+	 * intermediate watermarks (since we don't trust the current
+	 * watermarks).
+	 */
+	to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
 	ret = intel_atomic_check(dev, state);
 	if (ret) {
 		/*
@@ -15319,7 +15367,8 @@
 	for_each_crtc_in_state(state, crtc, cstate, i) {
 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
 
-		dev_priv->display.program_watermarks(cs);
+		cs->wm.need_postvbl_update = true;
+		dev_priv->display.optimize_watermarks(cs);
 	}
 
 	drm_atomic_state_free(state);
@@ -15330,7 +15379,8 @@
 
 void intel_modeset_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int sprite, ret;
 	enum pipe pipe;
 	struct intel_crtc *crtc;
@@ -15372,9 +15422,6 @@
 		}
 	}
 
-	intel_init_display(dev);
-	intel_init_audio(dev);
-
 	if (IS_GEN2(dev)) {
 		dev->mode_config.max_width = 2048;
 		dev->mode_config.max_height = 2048;
@@ -15397,7 +15444,7 @@
 		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
 	}
 
-	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+	dev->mode_config.fb_base = ggtt->mappable_base;
 
 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 		      INTEL_INFO(dev)->num_pipes,
@@ -15414,6 +15461,7 @@
 	}
 
 	intel_update_czclk(dev_priv);
+	intel_update_rawclk(dev_priv);
 	intel_update_cdclk(dev);
 
 	intel_shared_dpll_init(dev);
@@ -15526,10 +15574,15 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
+	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
-	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+	if (!transcoder_is_dsi(cpu_transcoder)) {
+		i915_reg_t reg = PIPECONF(cpu_transcoder);
+
+		I915_WRITE(reg,
+			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+	}
 
 	/* restore vblank interrupts to correct state */
 	drm_crtc_vblank_reset(&crtc->base);
@@ -15577,38 +15630,9 @@
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
-	if (!intel_crtc_has_encoders(crtc))
+	if (crtc->active && !intel_crtc_has_encoders(crtc))
 		intel_crtc_disable_noatomic(&crtc->base);
 
-	if (crtc->active != crtc->base.state->active) {
-		struct intel_encoder *encoder;
-
-		/* This can happen either due to bugs in the get_hw_state
-		 * functions or because of calls to intel_crtc_disable_noatomic,
-		 * or because the pipe is force-enabled due to the
-		 * pipe A quirk. */
-		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
-			      crtc->base.base.id,
-			      crtc->base.state->enable ? "enabled" : "disabled",
-			      crtc->active ? "enabled" : "disabled");
-
-		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
-		crtc->base.state->active = crtc->active;
-		crtc->base.enabled = crtc->active;
-		crtc->base.state->connector_mask = 0;
-		crtc->base.state->encoder_mask = 0;
-
-		/* Because we only establish the connector -> encoder ->
-		 * crtc links if something is active, this means the
-		 * crtc is now deactivated. Break the links. connector
-		 * -> encoder links are only establish when things are
-		 *  actually up, hence no need to break them. */
-		WARN_ON(crtc->active);
-
-		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-			encoder->base.crtc = NULL;
-	}
-
 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
 		/*
 		 * We start out with underrun reporting disabled to avoid races.
@@ -15738,7 +15762,7 @@
 		struct intel_crtc_state *crtc_state = crtc->config;
 		int pixclk = 0;
 
-		__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
 		memset(crtc_state, 0, sizeof(*crtc_state));
 		crtc_state->base.crtc = &crtc->base;
 
@@ -15777,22 +15801,17 @@
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
-		pll->on = pll->get_hw_state(dev_priv, pll,
-					    &pll->config.hw_state);
-		pll->active = 0;
+		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
+						  &pll->config.hw_state);
 		pll->config.crtc_mask = 0;
 		for_each_intel_crtc(dev, crtc) {
-			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
-				pll->active++;
+			if (crtc->active && crtc->config->shared_dpll == pll)
 				pll->config.crtc_mask |= 1 << crtc->pipe;
-			}
 		}
+		pll->active_mask = pll->config.crtc_mask;
 
 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
 			      pll->name, pll->config.crtc_mask, pll->on);
-
-		if (pll->config.crtc_mask)
-			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 	}
 
 	for_each_intel_encoder(dev, encoder) {
@@ -15874,6 +15893,8 @@
 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 			update_scanline_offset(crtc);
 		}
+
+		intel_pipe_config_sanity_check(dev_priv, crtc->config);
 	}
 }
 
@@ -15908,12 +15929,12 @@
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
-		if (!pll->on || pll->active)
+		if (!pll->on || pll->active_mask)
 			continue;
 
 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
 
-		pll->disable(dev_priv, pll);
+		pll->funcs.disable(dev_priv, pll);
 		pll->on = false;
 	}
 
@@ -15972,6 +15993,9 @@
 
 		state->acquire_ctx = &ctx;
 
+		/* ignore any reset values/BIOS leftovers in the WM registers */
+		to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
 			/*
 			 * Force recalculation even if we restore
@@ -16022,9 +16046,8 @@
 			continue;
 
 		mutex_lock(&dev->struct_mutex);
-		ret = intel_pin_and_fence_fb_obj(c->primary,
-						 c->primary->fb,
-						 c->primary->state);
+		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+						 c->primary->state->rotation);
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
@@ -16233,8 +16256,9 @@
 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}
 
+	/* Note: this does not include DSI transcoders. */
 	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
-	if (HAS_DDI(dev_priv->dev))
+	if (HAS_DDI(dev_priv))
 		error->num_transcoders++; /* Account for eDP. */
 
 	for (i = 0; i < error->num_transcoders; i++) {
@@ -16305,7 +16329,7 @@
 	}
 
 	for (i = 0; i < error->num_transcoders; i++) {
-		err_printf(m, "CPU transcoder: %c\n",
+		err_printf(m, "CPU transcoder: %s\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
 		err_printf(m, "  Power: %s\n",
 			   onoff(error->transcoder[i].power_domain_on));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 412a34c..f192f58 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -129,6 +129,7 @@
 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 static void vlv_steal_power_sequencer(struct drm_device *dev,
 				      enum pipe pipe);
+static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
 static unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
@@ -671,60 +672,55 @@
 	return status;
 }
 
-static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+	if (index)
+		return 0;
 
 	/*
 	 * The clock divider is based off the hrawclk, and would like to run at
-	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
+	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
 	 */
-	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
+	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 }
 
 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 
 	if (index)
 		return 0;
 
-	if (intel_dig_port->port == PORT_A) {
+	/*
+	 * The clock divider is based off the cdclk or PCH rawclk, and would
+	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
+	 * divide by 2000 and use that
+	 */
+	if (intel_dig_port->port == PORT_A)
 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
-
-	} else {
-		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
-	}
+	else
+		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 }
 
 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 
-	if (intel_dig_port->port == PORT_A) {
-		if (index)
-			return 0;
-		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
-	} else if (HAS_PCH_LPT_H(dev_priv)) {
+	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
 		case 0: return 63;
 		case 1: return 72;
 		default: return 0;
 		}
-	} else  {
-		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
 	}
-}
 
-static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
-	return index ? 0 : 100;
+	return ilk_get_aux_clock_divider(intel_dp, index);
 }
 
 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -737,10 +733,10 @@
 	return index ? 0 : 1;
 }
 
-static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
-				      bool has_aux_irq,
-				      int send_bytes,
-				      uint32_t aux_clock_divider)
+static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+				     bool has_aux_irq,
+				     int send_bytes,
+				     uint32_t aux_clock_divider)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1229,71 +1225,6 @@
 	intel_connector_unregister(intel_connector);
 }
 
-static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
-{
-	u32 ctrl1;
-
-	memset(&pipe_config->dpll_hw_state, 0,
-	       sizeof(pipe_config->dpll_hw_state));
-
-	pipe_config->ddi_pll_sel = SKL_DPLL0;
-	pipe_config->dpll_hw_state.cfgcr1 = 0;
-	pipe_config->dpll_hw_state.cfgcr2 = 0;
-
-	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-	switch (pipe_config->port_clock / 2) {
-	case 81000:
-		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
-					      SKL_DPLL0);
-		break;
-	case 135000:
-		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
-					      SKL_DPLL0);
-		break;
-	case 270000:
-		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
-					      SKL_DPLL0);
-		break;
-	case 162000:
-		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
-					      SKL_DPLL0);
-		break;
-	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
-	results in CDCLK change. Need to handle the change of CDCLK by
-	disabling pipes and re-enabling them */
-	case 108000:
-		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
-					      SKL_DPLL0);
-		break;
-	case 216000:
-		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
-					      SKL_DPLL0);
-		break;
-
-	}
-	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
-}
-
-void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
-{
-	memset(&pipe_config->dpll_hw_state, 0,
-	       sizeof(pipe_config->dpll_hw_state));
-
-	switch (pipe_config->port_clock / 2) {
-	case 81000:
-		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
-		break;
-	case 135000:
-		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
-		break;
-	case 270000:
-		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
-		break;
-	}
-}
-
 static int
 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 {
@@ -1570,10 +1501,10 @@
 
 		/* Get bpp from vbt only for panels that dont have bpp in edid */
 		if (intel_connector->base.display_info.bpc == 0 &&
-			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
+			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-				      dev_priv->vbt.edp_bpp);
-			bpp = dev_priv->vbt.edp_bpp;
+				      dev_priv->vbt.edp.bpp);
+			bpp = dev_priv->vbt.edp.bpp;
 		}
 
 		/*
@@ -1651,13 +1582,7 @@
 				&pipe_config->dp_m2_n2);
 	}
 
-	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
-		skl_edp_set_pll_config(pipe_config);
-	else if (IS_BROXTON(dev))
-		/* handled in ddi */;
-	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_dp_set_ddi_pll_sel(pipe_config);
-	else
+	if (!HAS_DDI(dev))
 		intel_dp_set_clock(encoder, pipe_config);
 
 	return true;
@@ -1779,11 +1704,11 @@
 			I915_READ(pp_stat_reg),
 			I915_READ(pp_ctrl_reg));
 
-	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
+		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
 				I915_READ(pp_stat_reg),
 				I915_READ(pp_ctrl_reg));
-	}
 
 	DRM_DEBUG_KMS("Wait complete\n");
 }
@@ -2290,6 +2215,15 @@
 	POSTING_READ(DP_A);
 	udelay(500);
 
+	/*
+	 * [DevILK] Workaround required when enabling the DP PLL
+	 * while a pipe is enabled going to FDI:
+	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
+	 * 2. Program DP PLL enable
+	 */
+	if (IS_GEN5(dev_priv))
+		intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+
 	intel_dp->DP |= DP_PLL_ENABLE;
 
 	I915_WRITE(DP_A, intel_dp->DP);
@@ -2409,7 +2343,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	int dotclock;
 
 	tmp = I915_READ(intel_dp->output_reg);
 
@@ -2459,16 +2392,12 @@
 			pipe_config->port_clock = 270000;
 	}
 
-	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
-					    &pipe_config->dp_m_n);
+	pipe_config->base.adjusted_mode.crtc_clock =
+		intel_dotclock_calculate(pipe_config->port_clock,
+					 &pipe_config->dp_m_n);
 
-	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
-		ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
-	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
-
-	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
-	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
 		/*
 		 * This is a big fat ugly hack.
 		 *
@@ -2483,8 +2412,8 @@
 		 * load.
 		 */
 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
-			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
-		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
 	}
 }
 
@@ -2710,7 +2639,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-	enum port port = dp_to_dig_port(intel_dp)->port;
 	enum pipe pipe = crtc->pipe;
 
 	if (WARN_ON(dp_reg & DP_PORT_EN))
@@ -2721,35 +2649,12 @@
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
 		vlv_init_panel_power_sequencer(intel_dp);
 
-	/*
-	 * We get an occasional spurious underrun between the port
-	 * enable and vdd enable, when enabling port A eDP.
-	 *
-	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
-	 */
-	if (port == PORT_A)
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
 	intel_dp_enable_port(intel_dp);
 
-	if (port == PORT_A && IS_GEN5(dev_priv)) {
-		/*
-		 * Underrun reporting for the other pipe was disabled in
-		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
-		 * enabled, so it's now safe to re-enable underrun reporting.
-		 */
-		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
-	}
-
 	edp_panel_vdd_on(intel_dp);
 	edp_panel_on(intel_dp);
 	edp_panel_vdd_off(intel_dp, true);
 
-	if (port == PORT_A)
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
 	pps_unlock(intel_dp);
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -2791,26 +2696,11 @@
 
 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
-	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
 
 	intel_dp_prepare(encoder);
 
-	if (port == PORT_A && IS_GEN5(dev_priv)) {
-		/*
-		 * We get FIFO underruns on the other pipe when
-		 * enabling the CPU eDP PLL, and when enabling CPU
-		 * eDP port. We could potentially avoid the PLL
-		 * underrun with a vblank wait just prior to enabling
-		 * the PLL, but that doesn't appear to help the port
-		 * enable case. Just sweep it all under the rug.
-		 */
-		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
-		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
-	}
-
 	/* Only ilk+ has port A */
 	if (port == PORT_A)
 		ironlake_edp_pll_on(intel_dp);
@@ -3184,47 +3074,14 @@
 }
 
 /*
- * Native read with retry for link status and receiver capability reads for
- * cases where the sink may still be asleep.
- *
- * Sinks are *supposed* to come up within 1ms from an off state, but we're also
- * supposed to retry 3 times per the spec.
- */
-static ssize_t
-intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
-			void *buffer, size_t size)
-{
-	ssize_t ret;
-	int i;
-
-	/*
-	 * Sometime we just get the same incorrect byte repeated
-	 * over the entire buffer. Doing just one throw away read
-	 * initially seems to "solve" it.
-	 */
-	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
-
-	for (i = 0; i < 3; i++) {
-		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
-		if (ret == size)
-			return ret;
-		msleep(1);
-	}
-
-	return ret;
-}
-
-/*
  * Fetch AUX CH registers 0x202 - 0x207 which contain
  * link status information
  */
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-	return intel_dp_dpcd_read_wake(&intel_dp->aux,
-				       DP_LANE0_1_STATUS,
-				       link_status,
-				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
+				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
 /* These are source-specific values. */
@@ -3238,7 +3095,7 @@
 	if (IS_BROXTON(dev))
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 	else if (INTEL_INFO(dev)->gen >= 9) {
-		if (dev_priv->edp_low_vswing && port == PORT_A)
+		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
@@ -3859,8 +3716,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint8_t rev;
 
-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
-				    sizeof(intel_dp->dpcd)) < 0)
+	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
+			     sizeof(intel_dp->dpcd)) < 0)
 		return false; /* aux transfer failed */
 
 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
@@ -3868,12 +3725,33 @@
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
 		return false; /* DPCD not present */
 
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+			     &intel_dp->sink_count, 1) < 0)
+		return false;
+
+	/*
+	 * Sink count can change between short pulse HPD interrupts,
+	 * hence a member variable in intel_dp will track any changes
+	 * between short pulse interrupts.
+	 */
+	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+
+	/*
+	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+	 * a dongle is present but no display. Unless we need to know
+	 * whether a dongle is present or not, we don't need to update
+	 * downstream port information, so an early return here saves
+	 * time by skipping operations which are not required.
+	 */
+	if (!is_edp(intel_dp) && !intel_dp->sink_count)
+		return false;
+
 	/* Check if the panel supports PSR */
 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
 	if (is_edp(intel_dp)) {
-		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
-					intel_dp->psr_dpcd,
-					sizeof(intel_dp->psr_dpcd));
+		drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+				 intel_dp->psr_dpcd,
+				 sizeof(intel_dp->psr_dpcd));
 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
 			dev_priv->psr.sink_support = true;
 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
@@ -3884,9 +3762,9 @@
 			uint8_t frame_sync_cap;
 
 			dev_priv->psr.sink_support = true;
-			intel_dp_dpcd_read_wake(&intel_dp->aux,
-					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-					&frame_sync_cap, 1);
+			drm_dp_dpcd_read(&intel_dp->aux,
+					 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+					 &frame_sync_cap, 1);
 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
 			/* PSR2 needs frame sync as well */
 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
@@ -3902,15 +3780,13 @@
 	/* Intermediate frequency support */
 	if (is_edp(intel_dp) &&
 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+	    (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 	    (rev >= 0x03)) { /* eDp v1.4 or higher */
 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
 		int i;
 
-		intel_dp_dpcd_read_wake(&intel_dp->aux,
-				DP_SUPPORTED_LINK_RATES,
-				sink_rates,
-				sizeof(sink_rates));
+		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+				sink_rates, sizeof(sink_rates));
 
 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
 			int val = le16_to_cpu(sink_rates[i]);
@@ -3933,9 +3809,9 @@
 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
 		return true; /* no per-port downstream info */
 
-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
-				    intel_dp->downstream_ports,
-				    DP_MAX_DOWNSTREAM_PORTS) < 0)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+			     intel_dp->downstream_ports,
+			     DP_MAX_DOWNSTREAM_PORTS) < 0)
 		return false; /* downstream port status fetch failed */
 
 	return true;
@@ -3949,11 +3825,11 @@
 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
 
-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
 }
@@ -3963,13 +3839,16 @@
 {
 	u8 buf[1];
 
+	if (!i915.enable_dp_mst)
+		return false;
+
 	if (!intel_dp->can_mst)
 		return false;
 
 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
 		return false;
 
-	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
 		if (buf[0] & DP_MST_CAP) {
 			DRM_DEBUG_KMS("Sink is MST capable\n");
 			intel_dp->is_mst = true;
@@ -4106,7 +3985,7 @@
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
-	return intel_dp_dpcd_read_wake(&intel_dp->aux,
+	return drm_dp_dpcd_read(&intel_dp->aux,
 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
 				       sink_irq_vector, 1) == 1;
 }
@@ -4116,7 +3995,7 @@
 {
 	int ret;
 
-	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+	ret = drm_dp_dpcd_read(&intel_dp->aux,
 					     DP_SINK_COUNT_ESI,
 					     sink_irq_vector, 14);
 	if (ret != 14)
@@ -4292,6 +4171,36 @@
 	return -EINVAL;
 }
 
+static void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	if (!intel_dp_get_link_status(intel_dp, link_status)) {
+		DRM_ERROR("Failed to get link status\n");
+		return;
+	}
+
+	if (!intel_encoder->base.crtc)
+		return;
+
+	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+		return;
+
+	/* if link training is requested we should perform it always */
+	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
+		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+			      intel_encoder->base.name);
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_stop_link_train(intel_dp);
+	}
+}
+
 /*
  * According to DP spec
  * 5.1.2:
@@ -4299,16 +4208,19 @@
  *  2. Configure link according to Receiver Capabilities
  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
  *  4. Check link status on receipt of hot-plug interrupt
+ *
+ * intel_dp_short_pulse - handles short pulse interrupts
+ * when full detection is not required.
+ * Returns %true if the short pulse is handled and full detection
+ * is NOT required, and %false otherwise.
  */
-static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+static bool
+intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 	u8 sink_irq_vector;
-	u8 link_status[DP_LINK_STATUS_SIZE];
-
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	u8 old_sink_count = intel_dp->sink_count;
+	bool ret;
 
 	/*
 	 * Clearing compliance test variables to allow capturing
@@ -4318,20 +4230,17 @@
 	intel_dp->compliance_test_type = 0;
 	intel_dp->compliance_test_data = 0;
 
-	if (!intel_encoder->base.crtc)
-		return;
+	/*
+	 * Now read the DPCD to see if it's actually running.
+	 * If the current value of sink count doesn't match the
+	 * value that was stored earlier, or if the DPCD read failed,
+	 * we need to do full detection.
+	 */
+	ret = intel_dp_get_dpcd(intel_dp);
 
-	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
-		return;
-
-	/* Try to read receiver status if the link appears to be up */
-	if (!intel_dp_get_link_status(intel_dp, link_status)) {
-		return;
-	}
-
-	/* Now read the DPCD to see if it's actually running */
-	if (!intel_dp_get_dpcd(intel_dp)) {
-		return;
+	if ((old_sink_count != intel_dp->sink_count) || !ret) {
+		/* No need to proceed if we are going to do full detect */
+		return false;
 	}
 
 	/* Try to read the source of the interrupt */
@@ -4348,14 +4257,11 @@
 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
 	}
 
-	/* if link training is requested we should perform it always */
-	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
-		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
-		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
-			      intel_encoder->base.name);
-		intel_dp_start_link_train(intel_dp);
-		intel_dp_stop_link_train(intel_dp);
-	}
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	intel_dp_check_link_status(intel_dp);
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	return true;
 }
 
 /* XXX this is probably wrong for multiple downstream ports */
@@ -4368,6 +4274,9 @@
 	if (!intel_dp_get_dpcd(intel_dp))
 		return connector_status_disconnected;
 
+	if (is_edp(intel_dp))
+		return connector_status_connected;
+
 	/* if there's no downstream port, we're done */
 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
 		return connector_status_connected;
@@ -4375,14 +4284,9 @@
 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
-		uint8_t reg;
 
-		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
-					    &reg, 1) < 0)
-			return connector_status_unknown;
-
-		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
-					      : connector_status_disconnected;
+		return intel_dp->sink_count ?
+		connector_status_connected : connector_status_disconnected;
 	}
 
 	/* If no HPD, poke DDC gently */
@@ -4591,6 +4495,7 @@
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	struct edid *edid;
 
+	intel_dp_unset_edid(intel_dp);
 	edid = intel_dp_get_edid(intel_dp);
 	intel_connector->detect_edid = edid;
 
@@ -4611,9 +4516,10 @@
 	intel_dp->has_audio = false;
 }
 
-static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+static void
+intel_dp_long_pulse(struct intel_connector *intel_connector)
 {
+	struct drm_connector *connector = &intel_connector->base;
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -4623,17 +4529,6 @@
 	bool ret;
 	u8 sink_irq_vector;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
-	intel_dp_unset_edid(intel_dp);
-
-	if (intel_dp->is_mst) {
-		/* MST devices are disconnected from a monitor POV */
-		if (intel_encoder->type != INTEL_OUTPUT_EDP)
-			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-		return connector_status_disconnected;
-	}
-
 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(to_i915(dev), power_domain);
 
@@ -4651,19 +4546,42 @@
 		intel_dp->compliance_test_type = 0;
 		intel_dp->compliance_test_data = 0;
 
+		if (intel_dp->is_mst) {
+			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+				      intel_dp->is_mst,
+				      intel_dp->mst_mgr.mst_state);
+			intel_dp->is_mst = false;
+			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+							intel_dp->is_mst);
+		}
+
 		goto out;
 	}
 
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+
 	intel_dp_probe_oui(intel_dp);
 
 	ret = intel_dp_probe_mst(intel_dp);
 	if (ret) {
-		/* if we are in MST mode then this connector
-		   won't appear connected or have anything with EDID on it */
-		if (intel_encoder->type != INTEL_OUTPUT_EDP)
-			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		/*
+		 * If we are in MST mode then this connector
+		 * won't appear connected or have anything
+		 * with EDID on it
+		 */
 		status = connector_status_disconnected;
 		goto out;
+	} else if (connector->status == connector_status_connected) {
+		/*
+		 * If the display was connected already and is still connected,
+		 * check the link status; there have been known issues of
+		 * link loss triggering a long pulse.
+		 */
+		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+		intel_dp_check_link_status(intel_dp);
+		drm_modeset_unlock(&dev->mode_config.connection_mutex);
+		goto out;
 	}
 
 	/*
@@ -4676,9 +4594,8 @@
 
 	intel_dp_set_edid(intel_dp);
 
-	if (intel_encoder->type != INTEL_OUTPUT_EDP)
-		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 	status = connector_status_connected;
+	intel_dp->detect_done = true;
 
 	/* Try to read the source of the interrupt */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
@@ -4695,8 +4612,43 @@
 	}
 
 out:
+	if ((status != connector_status_connected) &&
+	    (intel_dp->is_mst == false))
+		intel_dp_unset_edid(intel_dp);
+
 	intel_display_power_put(to_i915(dev), power_domain);
-	return status;
+	return;
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, connector->name);
+
+	if (intel_dp->is_mst) {
+		/* MST devices are disconnected from a monitor POV */
+		intel_dp_unset_edid(intel_dp);
+		if (intel_encoder->type != INTEL_OUTPUT_EDP)
+			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		return connector_status_disconnected;
+	}
+
+	/* If full detect is not performed yet, do a full detect */
+	if (!intel_dp->detect_done)
+		intel_dp_long_pulse(intel_dp->attached_connector);
+
+	intel_dp->detect_done = false;
+
+	if (intel_connector->detect_edid)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
 }
 
 static void
@@ -4835,6 +4787,11 @@
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return -EINVAL;
 		}
+		if (HAS_GMCH_DISPLAY(dev_priv) &&
+		    val == DRM_MODE_SCALE_CENTER) {
+			DRM_DEBUG_KMS("centering not supported\n");
+			return -EINVAL;
+		}
 
 		if (intel_connector->panel.fitting_mode == val) {
 			/* the eDP scaling property is not changed */
@@ -5023,44 +4980,37 @@
 		/* indicate that we need to restart link training */
 		intel_dp->train_set_valid = false;
 
-		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
-			goto mst_fail;
+		intel_dp_long_pulse(intel_dp->attached_connector);
+		if (intel_dp->is_mst)
+			ret = IRQ_HANDLED;
+		goto put_power;
 
-		if (!intel_dp_get_dpcd(intel_dp)) {
-			goto mst_fail;
-		}
-
-		intel_dp_probe_oui(intel_dp);
-
-		if (!intel_dp_probe_mst(intel_dp)) {
-			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-			intel_dp_check_link_status(intel_dp);
-			drm_modeset_unlock(&dev->mode_config.connection_mutex);
-			goto mst_fail;
-		}
 	} else {
 		if (intel_dp->is_mst) {
-			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
-				goto mst_fail;
+			if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+				/*
+				 * If we were in MST mode, and the device is
+				 * no longer there, get out of MST mode
+				 */
+				DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+					      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+				intel_dp->is_mst = false;
+				drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+								intel_dp->is_mst);
+				goto put_power;
+			}
 		}
 
 		if (!intel_dp->is_mst) {
-			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-			intel_dp_check_link_status(intel_dp);
-			drm_modeset_unlock(&dev->mode_config.connection_mutex);
+			if (!intel_dp_short_pulse(intel_dp)) {
+				intel_dp_long_pulse(intel_dp->attached_connector);
+				goto put_power;
+			}
 		}
 	}
 
 	ret = IRQ_HANDLED;
 
-	goto put_power;
-mst_fail:
-	/* if we were in MST mode, and device is not there get out of MST mode */
-	if (intel_dp->is_mst) {
-		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
-		intel_dp->is_mst = false;
-		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
-	}
 put_power:
 	intel_display_power_put(dev_priv, power_domain);
 
@@ -5071,14 +5021,6 @@
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	union child_device_config *p_child;
-	int i;
-	static const short port_mapping[] = {
-		[PORT_B] = DVO_PORT_DPB,
-		[PORT_C] = DVO_PORT_DPC,
-		[PORT_D] = DVO_PORT_DPD,
-		[PORT_E] = DVO_PORT_DPE,
-	};
 
 	/*
 	 * eDP not supported on g4x. so bail out early just
@@ -5090,18 +5032,7 @@
 	if (port == PORT_A)
 		return true;
 
-	if (!dev_priv->vbt.child_dev_num)
-		return false;
-
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		p_child = dev_priv->vbt.child_dev + i;
-
-		if (p_child->common.dvo_port == port_mapping[port] &&
-		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
-		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
-			return true;
-	}
-	return false;
+	return intel_bios_is_port_edp(dev_priv, port);
 }
 
 void
@@ -5208,7 +5139,7 @@
 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
-	vbt = dev_priv->vbt.edp_pps;
+	vbt = dev_priv->vbt.edp.pps;
 
 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
 	 * our hw here, which are all in 100usec. */
@@ -5259,7 +5190,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
-	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+	int div = dev_priv->rawclk_freq / 1000;
 	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -5852,19 +5783,17 @@
 	/* intel_dp vfuncs */
 	if (INTEL_INFO(dev)->gen >= 9)
 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
 	else if (HAS_PCH_SPLIT(dev))
 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
 	else
-		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
 
 	if (INTEL_INFO(dev)->gen >= 9)
 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
 	else
-		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
 
 	if (HAS_DDI(dev))
 		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 2c99972..7a34090 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,7 +33,6 @@
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 					struct intel_crtc_state *pipe_config)
 {
-	struct drm_device *dev = encoder->base.dev;
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -90,9 +89,6 @@
 
 	pipe_config->dp_m_n.tu = slots;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_dp_set_ddi_pll_sel(pipe_config);
-
 	return true;
 
 }
@@ -106,7 +102,7 @@
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
 
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 	if (ret) {
@@ -127,10 +123,11 @@
 	/* and this can also fail */
 	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
 
 	intel_dp->active_mst_links--;
-	intel_mst->port = NULL;
+
+	intel_mst->connector = NULL;
 	if (intel_dp->active_mst_links == 0) {
 		intel_dig_port->base.post_disable(&intel_dig_port->base);
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
@@ -170,7 +167,8 @@
 	found->encoder = encoder;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-	intel_mst->port = found->port;
+
+	intel_mst->connector = found;
 
 	if (intel_dp->active_mst_links == 0) {
 		intel_prepare_ddi_buffer(&intel_dig_port->base);
@@ -188,7 +186,7 @@
 	}
 
 	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-				       intel_mst->port,
+				       intel_mst->connector->port,
 				       intel_crtc->config->pbn, &slots);
 	if (ret == false) {
 		DRM_ERROR("failed to allocate vcpi\n");
@@ -229,7 +227,7 @@
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	*pipe = intel_mst->pipe;
-	if (intel_mst->port)
+	if (intel_mst->connector)
 		return true;
 	return false;
 }
@@ -290,10 +288,11 @@
 	struct edid *edid;
 	int ret;
 
-	edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
-	if (!edid)
-		return 0;
+	if (!intel_dp) {
+		return intel_connector_update_modes(connector, NULL);
+	}
 
+	edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
 	ret = intel_connector_update_modes(connector, edid);
 	kfree(edid);
 
@@ -306,6 +305,8 @@
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_dp *intel_dp = intel_connector->mst_port;
 
+	if (!intel_dp)
+		return connector_status_disconnected;
 	return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
 }
 
@@ -371,6 +372,8 @@
 	struct intel_dp *intel_dp = intel_connector->mst_port;
 	struct intel_crtc *crtc = to_intel_crtc(state->crtc);
 
+	if (!intel_dp)
+		return NULL;
 	return &intel_dp->mst_encoders[crtc->pipe]->base.base;
 }
 
@@ -378,6 +381,8 @@
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_dp *intel_dp = intel_connector->mst_port;
+	if (!intel_dp)
+		return NULL;
 	return &intel_dp->mst_encoders[0]->base.base;
 }
 
@@ -488,23 +493,11 @@
 
 	/* need to nuke the connector */
 	drm_modeset_lock_all(dev);
-	if (connector->state->crtc) {
-		struct drm_mode_set set;
-		int ret;
-
-		memset(&set, 0, sizeof(set));
-		set.crtc = connector->state->crtc,
-
-		ret = drm_atomic_helper_set_config(&set);
-
-		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
-	}
-
 	intel_connector_remove_from_fbdev(intel_connector);
-	drm_connector_cleanup(connector);
+	intel_connector->mst_port = NULL;
 	drm_modeset_unlock_all(dev);
 
-	kfree(intel_connector);
+	drm_connector_unreference(&intel_connector->base);
 	DRM_DEBUG_KMS("\n");
 }
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
new file mode 100644
index 0000000..3ac7059
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -0,0 +1,1783 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+			    enum intel_dpll_id id)
+{
+	return &dev_priv->shared_dplls[id];
+}
+
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll)
+{
+	if (WARN_ON(pll < dev_priv->shared_dplls ||
+		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+		return -1;
+
+	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+}
+
+void
+intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
+			     struct intel_shared_dpll *pll,
+			     struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
+
+	config[id].crtc_mask |= 1 << crtc->pipe;
+}
+
+void
+intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
+			     struct intel_shared_dpll *pll,
+			     struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
+
+	config[id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll,
+			bool state)
+{
+	bool cur_state;
+	struct intel_dpll_hw_state hw_state;
+
+	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+		return;
+
+	cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
+	I915_STATE_WARN(cur_state != state,
+	     "%s assertion failure (expected %s, current %s)\n",
+			pll->name, onoff(state), onoff(cur_state));
+}
+
+void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+
+	if (WARN_ON(pll == NULL))
+		return;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	WARN_ON(!pll->config.crtc_mask);
+	if (!pll->active_mask) {
+		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+		WARN_ON(pll->on);
+		assert_shared_dpll_disabled(dev_priv, pll);
+
+		pll->funcs.mode_set(dev_priv, pll);
+	}
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+/**
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc: CRTC whose shared DPLL should be enabled
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+void intel_enable_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+	unsigned old_mask;
+
+	if (WARN_ON(pll == NULL))
+		return;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	old_mask = pll->active_mask;
+
+	if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
+	    WARN_ON(pll->active_mask & crtc_mask))
+		goto out;
+
+	pll->active_mask |= crtc_mask;
+
+	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
+		      pll->name, pll->active_mask, pll->on,
+		      crtc->base.base.id);
+
+	if (old_mask) {
+		WARN_ON(!pll->on);
+		assert_shared_dpll_enabled(dev_priv, pll);
+		goto out;
+	}
+	WARN_ON(pll->on);
+
+	DRM_DEBUG_KMS("enabling %s\n", pll->name);
+	pll->funcs.enable(dev_priv, pll);
+	pll->on = true;
+
+out:
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+
+	/* PCH only available on ILK+ */
+	if (INTEL_INFO(dev)->gen < 5)
+		return;
+
+	if (pll == NULL)
+		return;
+
+	mutex_lock(&dev_priv->dpll_lock);
+	if (WARN_ON(!(pll->active_mask & crtc_mask)))
+		goto out;
+
+	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
+		      pll->name, pll->active_mask, pll->on,
+		      crtc->base.base.id);
+
+	assert_shared_dpll_enabled(dev_priv, pll);
+	WARN_ON(!pll->on);
+
+	pll->active_mask &= ~crtc_mask;
+	if (pll->active_mask)
+		goto out;
+
+	DRM_DEBUG_KMS("disabling %s\n", pll->name);
+	pll->funcs.disable(dev_priv, pll);
+	pll->on = false;
+
+out:
+	mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static struct intel_shared_dpll *
+intel_find_shared_dpll(struct intel_crtc *crtc,
+		       struct intel_crtc_state *crtc_state,
+		       enum intel_dpll_id range_min,
+		       enum intel_dpll_id range_max)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_shared_dpll *pll;
+	struct intel_shared_dpll_config *shared_dpll;
+	enum intel_dpll_id i;
+
+	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+	for (i = range_min; i <= range_max; i++) {
+		pll = &dev_priv->shared_dplls[i];
+
+		/* Only want to check enabled timings first */
+		if (shared_dpll[i].crtc_mask == 0)
+			continue;
+
+		if (memcmp(&crtc_state->dpll_hw_state,
+			   &shared_dpll[i].hw_state,
+			   sizeof(crtc_state->dpll_hw_state)) == 0) {
+			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
+				      crtc->base.base.id, pll->name,
+				      shared_dpll[i].crtc_mask,
+				      pll->active_mask);
+			return pll;
+		}
+	}
+
+	/* Ok no matching timings, maybe there's a free one? */
+	for (i = range_min; i <= range_max; i++) {
+		pll = &dev_priv->shared_dplls[i];
+		if (shared_dpll[i].crtc_mask == 0) {
+			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
+				      crtc->base.base.id, pll->name);
+			return pll;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+intel_reference_shared_dpll(struct intel_shared_dpll *pll,
+			    struct intel_crtc_state *crtc_state)
+{
+	struct intel_shared_dpll_config *shared_dpll;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	enum intel_dpll_id i = pll->id;
+
+	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+	if (shared_dpll[i].crtc_mask == 0)
+		shared_dpll[i].hw_state =
+			crtc_state->dpll_hw_state;
+
+	crtc_state->shared_dpll = pll;
+	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+			 pipe_name(crtc->pipe));
+
+	intel_shared_dpll_config_get(shared_dpll, pll, crtc);
+}
+
+void intel_shared_dpll_commit(struct drm_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct intel_shared_dpll_config *shared_dpll;
+	struct intel_shared_dpll *pll;
+	enum intel_dpll_id i;
+
+	if (!to_intel_atomic_state(state)->dpll_set)
+		return;
+
+	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		pll = &dev_priv->shared_dplls[i];
+		pll->config = shared_dpll[i];
+	}
+}
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(PCH_DPLL(pll->id));
+	hw_state->dpll = val;
+	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
+	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+	bool enabled;
+
+	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+
+	val = I915_READ(PCH_DREF_CONTROL);
+	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+			    DREF_SUPERSPREAD_SOURCE_MASK));
+	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	/* PCH refclock must be enabled first */
+	ibx_assert_pch_refclk_enabled(dev_priv);
+
+	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(150);
+
+	/* The pixel multiplier can only be updated once the
+	 * DPLL is enabled and the clocks are stable.
+	 *
+	 * So write it again.
+	 */
+	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+
+	/* Make sure no transcoder is still depending on us. */
+	for_each_intel_crtc(dev, crtc) {
+		if (crtc->config->shared_dpll == pll)
+			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
+	}
+
+	I915_WRITE(PCH_DPLL(pll->id), 0);
+	POSTING_READ(PCH_DPLL(pll->id));
+	udelay(200);
+}
+
+static struct intel_shared_dpll *
+ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll;
+	enum intel_dpll_id i;
+
+	if (HAS_PCH_IBX(dev_priv)) {
+		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+		i = (enum intel_dpll_id) crtc->pipe;
+		pll = &dev_priv->shared_dplls[i];
+
+		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+			      crtc->base.base.id, pll->name);
+	} else {
+		pll = intel_find_shared_dpll(crtc, crtc_state,
+					     DPLL_ID_PCH_PLL_A,
+					     DPLL_ID_PCH_PLL_B);
+	}
+
+	/* reference the pll */
+	intel_reference_shared_dpll(pll, crtc_state);
+
+	return pll;
+}
+
+static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+	.mode_set = ibx_pch_dpll_mode_set,
+	.enable = ibx_pch_dpll_enable,
+	.disable = ibx_pch_dpll_disable,
+	.get_hw_state = ibx_pch_dpll_get_hw_state,
+};
+
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+	POSTING_READ(WRPLL_CTL(pll->id));
+	udelay(20);
+}
+
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+	POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(WRPLL_CTL(pll->id));
+	hw_state->wrpll = val;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return val & WRPLL_PLL_ENABLE;
+}
+
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+	switch (pll->id) {
+	case DPLL_ID_WRPLL1:
+		return PORT_CLK_SEL_WRPLL1;
+	case DPLL_ID_WRPLL2:
+		return PORT_CLK_SEL_WRPLL2;
+	case DPLL_ID_SPLL:
+		return PORT_CLK_SEL_SPLL;
+	case DPLL_ID_LCPLL_810:
+		return PORT_CLK_SEL_LCPLL_810;
+	case DPLL_ID_LCPLL_1350:
+		return PORT_CLK_SEL_LCPLL_1350;
+	case DPLL_ID_LCPLL_2700:
+		return PORT_CLK_SEL_LCPLL_2700;
+	default:
+		return PORT_CLK_SEL_NONE;
+	}
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+struct hsw_wrpll_rnp {
+	unsigned p, n2, r2;
+};
+
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
+{
+	unsigned budget;
+
+	switch (clock) {
+	case 25175000:
+	case 25200000:
+	case 27000000:
+	case 27027000:
+	case 37762500:
+	case 37800000:
+	case 40500000:
+	case 40541000:
+	case 54000000:
+	case 54054000:
+	case 59341000:
+	case 59400000:
+	case 72000000:
+	case 74176000:
+	case 74250000:
+	case 81000000:
+	case 81081000:
+	case 89012000:
+	case 89100000:
+	case 108000000:
+	case 108108000:
+	case 111264000:
+	case 111375000:
+	case 148352000:
+	case 148500000:
+	case 162000000:
+	case 162162000:
+	case 222525000:
+	case 222750000:
+	case 296703000:
+	case 297000000:
+		budget = 0;
+		break;
+	case 233500000:
+	case 245250000:
+	case 247750000:
+	case 253250000:
+	case 298000000:
+		budget = 1500;
+		break;
+	case 169128000:
+	case 169500000:
+	case 179500000:
+	case 202000000:
+		budget = 2000;
+		break;
+	case 256250000:
+	case 262500000:
+	case 270000000:
+	case 272500000:
+	case 273750000:
+	case 280750000:
+	case 281250000:
+	case 286000000:
+	case 291750000:
+		budget = 4000;
+		break;
+	case 267250000:
+	case 268500000:
+		budget = 5000;
+		break;
+	default:
+		budget = 1000;
+		break;
+	}
+
+	return budget;
+}
+
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+				 unsigned r2, unsigned n2, unsigned p,
+				 struct hsw_wrpll_rnp *best)
+{
+	uint64_t a, b, c, d, diff, diff_best;
+
+	/* No best (r,n,p) yet */
+	if (best->p == 0) {
+		best->p = p;
+		best->n2 = n2;
+		best->r2 = r2;
+		return;
+	}
+
+	/*
+	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+	 * freq2k.
+	 *
+	 * delta = 1e6 *
+	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+	 *	   freq2k;
+	 *
+	 * and we would like delta <= budget.
+	 *
+	 * If the discrepancy is above the PPM-based budget, always prefer to
+	 * improve upon the previous solution.  However, if you're within the
+	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+	 */
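+	/*
+	 * In other words: with diff = |freq2k * p * r2 - LC_FREQ_2K * n2|,
+	 * the budget check "delta <= budget" becomes, after multiplying both
+	 * sides by freq2k * p * r2, the comparison
+	 * 1e6 * diff <= budget * freq2k * p * r2, i.e. c <= a below
+	 * (and d <= b for the current best), avoiding any division.
+	 */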
+	a = freq2k * budget * p * r2;
+	b = freq2k * budget * best->p * best->r2;
+	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
+	diff_best = abs_diff(freq2k * best->p * best->r2,
+			     LC_FREQ_2K * best->n2);
+	c = 1000000 * diff;
+	d = 1000000 * diff_best;
+
+	if (a < c && b < d) {
+		/* If both are above the budget, pick the closer */
+		if (best->p * best->r2 * diff < p * r2 * diff_best) {
+			best->p = p;
+			best->n2 = n2;
+			best->r2 = r2;
+		}
+	} else if (a >= c && b < d) {
+		/* A is within the budget but B is not, so switch to A. */
+		best->p = p;
+		best->n2 = n2;
+		best->r2 = r2;
+	} else if (a >= c && b >= d) {
+		/* Both are below the limit, so pick the higher n2/(r2*r2) */
+		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+			best->p = p;
+			best->n2 = n2;
+			best->r2 = r2;
+		}
+	}
+	/* Otherwise a < c && b >= d, do nothing */
+}
+
+static void
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+	uint64_t freq2k;
+	unsigned p, n2, r2;
+	struct hsw_wrpll_rnp best = { 0, 0, 0 };
+	unsigned budget;
+
+	freq2k = clock / 100;
+
+	budget = hsw_wrpll_get_budget_for_freq(clock);
+
+	/* Special case handling for 540 MHz pixel clock: bypass WR PLL entirely
+	 * and directly pass the LC PLL to it. */
+	if (freq2k == 5400000) {
+		*n2_out = 2;
+		*p_out = 1;
+		*r2_out = 2;
+		return;
+	}
+
+	/*
+	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+	 * the WR PLL.
+	 *
+	 * We want R so that REF_MIN <= Ref <= REF_MAX.
+	 * Injecting R2 = 2 * R gives:
+	 *   REF_MAX * r2 > LC_FREQ * 2 and
+	 *   REF_MIN * r2 < LC_FREQ * 2
+	 *
+	 * Which means the desired boundaries for r2 are:
+	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+	 *
+	 */
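+	/*
+	 * With LC_FREQ = 2700, REF_MAX = 400 and REF_MIN = 48, the loop below
+	 * scans r2 = 14..112 (i.e. R = 7..56).
+	 */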
+	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+	     r2 <= LC_FREQ * 2 / REF_MIN;
+	     r2++) {
+
+		/*
+		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+		 *
+		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+		 *   VCO_MAX * r2 > n2 * LC_FREQ and
+		 *   VCO_MIN * r2 < n2 * LC_FREQ
+		 *
+		 * Which means the desired boundaries for n2 are:
+		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+		 */
+		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+		     n2 <= VCO_MAX * r2 / LC_FREQ;
+		     n2++) {
+
+			for (p = P_MIN; p <= P_MAX; p += P_INC)
+				hsw_wrpll_update_rnp(freq2k, budget,
+						     r2, n2, p, &best);
+		}
+	}
+
+	*n2_out = best.n2;
+	*p_out = best.p;
+	*r2_out = best.r2;
+}
+
+static struct intel_shared_dpll *
+hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll;
+	int clock = crtc_state->port_clock;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (encoder->type == INTEL_OUTPUT_HDMI) {
+		uint32_t val;
+		unsigned p, n2, r2;
+
+		hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+		      WRPLL_DIVIDER_POST(p);
+
+		crtc_state->dpll_hw_state.wrpll = val;
+
+		pll = intel_find_shared_dpll(crtc, crtc_state,
+					     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+	} else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		   encoder->type == INTEL_OUTPUT_DP_MST ||
+		   encoder->type == INTEL_OUTPUT_EDP) {
+		enum intel_dpll_id pll_id;
+
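+		/*
+		 * The LCPLL frequencies (810/1350/2700 MHz) are half the
+		 * 1.62/2.7/5.4 GHz DP link rates, hence the clock / 2 here.
+		 */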
+		switch (clock / 2) {
+		case 81000:
+			pll_id = DPLL_ID_LCPLL_810;
+			break;
+		case 135000:
+			pll_id = DPLL_ID_LCPLL_1350;
+			break;
+		case 270000:
+			pll_id = DPLL_ID_LCPLL_2700;
+			break;
+		default:
+			DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+			return NULL;
+		}
+
+		pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+	} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
+		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+			return NULL;
+
+		crtc_state->dpll_hw_state.spll =
+			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+		pll = intel_find_shared_dpll(crtc, crtc_state,
+					     DPLL_ID_SPLL, DPLL_ID_SPLL);
+	} else {
+		return NULL;
+	}
+
+	if (!pll)
+		return NULL;
+
+	crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
+
+	intel_reference_shared_dpll(pll, crtc_state);
+
+	return pll;
+}
+
+
+static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+	.enable = hsw_ddi_wrpll_enable,
+	.disable = hsw_ddi_wrpll_disable,
+	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+	.enable = hsw_ddi_spll_enable,
+	.disable = hsw_ddi_spll_disable,
+	.get_hw_state = hsw_ddi_spll_get_hw_state,
+};
+
+static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+}
+
+static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+}
+
+static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
+{
+	return true;
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+	.enable = hsw_ddi_lcpll_enable,
+	.disable = hsw_ddi_lcpll_disable,
+	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+};
+
+struct skl_dpll_regs {
+	i915_reg_t ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[4] = {
+	{
+		/* DPLL 0 */
+		.ctl = LCPLL1_CTL,
+		/* DPLL 0 doesn't support HDMI mode */
+	},
+	{
+		/* DPLL 1 */
+		.ctl = LCPLL2_CTL,
+		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
+		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
+	},
+	{
+		/* DPLL 2 */
+		.ctl = WRPLL_CTL(0),
+		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
+		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
+	},
+	{
+		/* DPLL 3 */
+		.ctl = WRPLL_CTL(1),
+		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
+		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
+	},
+};
+
+static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
+				    struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(DPLL_CTRL1);
+
+	val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
+		 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
+	val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
+
+	I915_WRITE(DPLL_CTRL1, val);
+	POSTING_READ(DPLL_CTRL1);
+}
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+			       struct intel_shared_dpll *pll)
+{
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+	skl_ddi_pll_write_ctrl1(dev_priv, pll);
+
+	I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+	I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+	POSTING_READ(regs[pll->id].cfgcr1);
+	POSTING_READ(regs[pll->id].cfgcr2);
+
+	/* the enable bit is always bit 31 */
+	I915_WRITE(regs[pll->id].ctl,
+		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+	if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+		DRM_ERROR("DPLL %d not locked\n", pll->id);
+}
+
+static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	skl_ddi_pll_write_ctrl1(dev_priv, pll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+	/* the enable bit is always bit 31 */
+	I915_WRITE(regs[pll->id].ctl,
+		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+	POSTING_READ(regs[pll->id].ctl);
+}
+
+static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				     struct intel_shared_dpll *pll,
+				     struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	bool ret;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	ret = false;
+
+	val = I915_READ(regs[pll->id].ctl);
+	if (!(val & LCPLL_PLL_ENABLE))
+		goto out;
+
+	val = I915_READ(DPLL_CTRL1);
+	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+
+	/* avoid reading back stale values if HDMI mode is not enabled */
+	if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+	}
+	ret = true;
+
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return ret;
+}
+
+static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	bool ret;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	ret = false;
+
+	/* DPLL0 is always enabled since it drives CDCLK */
+	val = I915_READ(regs[pll->id].ctl);
+	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+		goto out;
+
+	val = I915_READ(DPLL_CTRL1);
+	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+
+	ret = true;
+
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return ret;
+}
+
+struct skl_wrpll_context {
+	uint64_t min_deviation;		/* current minimal deviation */
+	uint64_t central_freq;		/* chosen central freq */
+	uint64_t dco_freq;		/* chosen dco freq */
+	unsigned int p;			/* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION	100
+#define SKL_DCO_MAX_NDEVIATION	600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+				  uint64_t central_freq,
+				  uint64_t dco_freq,
+				  unsigned int divider)
+{
+	uint64_t deviation;
+
+	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+			      central_freq);
+
+	/* positive deviation */
+	if (dco_freq >= central_freq) {
+		if (deviation < SKL_DCO_MAX_PDEVIATION &&
+		    deviation < ctx->min_deviation) {
+			ctx->min_deviation = deviation;
+			ctx->central_freq = central_freq;
+			ctx->dco_freq = dco_freq;
+			ctx->p = divider;
+		}
+	/* negative deviation */
+	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+		   deviation < ctx->min_deviation) {
+		ctx->min_deviation = deviation;
+		ctx->central_freq = central_freq;
+		ctx->dco_freq = dco_freq;
+		ctx->p = divider;
+	}
+}
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+				      unsigned int *p0 /* out */,
+				      unsigned int *p1 /* out */,
+				      unsigned int *p2 /* out */)
+{
+	/* even dividers */
+	if (p % 2 == 0) {
+		unsigned int half = p / 2;
+
+		if (half == 1 || half == 2 || half == 3 || half == 5) {
+			*p0 = 2;
+			*p1 = 1;
+			*p2 = half;
+		} else if (half % 2 == 0) {
+			*p0 = 2;
+			*p1 = half / 2;
+			*p2 = 2;
+		} else if (half % 3 == 0) {
+			*p0 = 3;
+			*p1 = half / 3;
+			*p2 = 2;
+		} else if (half % 7 == 0) {
+			*p0 = 7;
+			*p1 = half / 7;
+			*p2 = 2;
+		}
+	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = p / 3;
+	} else if (p == 5 || p == 7) {
+		*p0 = p;
+		*p1 = 1;
+		*p2 = 1;
+	} else if (p == 15) {
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = 5;
+	} else if (p == 21) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 3;
+	} else if (p == 35) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 5;
+	}
+}
+
+struct skl_wrpll_params {
+	uint32_t        dco_fraction;
+	uint32_t        dco_integer;
+	uint32_t        qdiv_ratio;
+	uint32_t        qdiv_mode;
+	uint32_t        kdiv;
+	uint32_t        pdiv;
+	uint32_t        central_freq;
+};
+
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+				      uint64_t afe_clock,
+				      uint64_t central_freq,
+				      uint32_t p0, uint32_t p1, uint32_t p2)
+{
+	uint64_t dco_freq;
+
+	switch (central_freq) {
+	case 9600000000ULL:
+		params->central_freq = 0;
+		break;
+	case 9000000000ULL:
+		params->central_freq = 1;
+		break;
+	case 8400000000ULL:
+		params->central_freq = 3;
+	}
+
+	switch (p0) {
+	case 1:
+		params->pdiv = 0;
+		break;
+	case 2:
+		params->pdiv = 1;
+		break;
+	case 3:
+		params->pdiv = 2;
+		break;
+	case 7:
+		params->pdiv = 4;
+		break;
+	default:
+		WARN(1, "Incorrect PDiv\n");
+	}
+
+	switch (p2) {
+	case 5:
+		params->kdiv = 0;
+		break;
+	case 2:
+		params->kdiv = 1;
+		break;
+	case 3:
+		params->kdiv = 2;
+		break;
+	case 1:
+		params->kdiv = 3;
+		break;
+	default:
+		WARN(1, "Incorrect KDiv\n");
+	}
+
+	params->qdiv_ratio = p1;
+	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+	dco_freq = p0 * p1 * p2 * afe_clock;
+
+	/*
+	 * Intermediate values are in Hz.
+	 * Divide by MHz to match bspec
+	 */
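+	/*
+	 * Illustrative example: a 148.5 MHz pixel clock gives afe_clock =
+	 * 742.5 MHz; with p0 * p1 * p2 = 12, dco_freq = 8,910,000,000 Hz, so
+	 * dco_integer = 8910 MHz / 24 = 371 and dco_fraction =
+	 * (371,250,000 - 371,000,000) * 0x8000 / 1,000,000 = 8192.
+	 */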
+	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+	params->dco_fraction =
+		div_u64((div_u64(dco_freq, 24) -
+			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
+
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+			struct skl_wrpll_params *wrpll_params)
+{
+	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+	uint64_t dco_central_freq[3] = {8400000000ULL,
+					9000000000ULL,
+					9600000000ULL};
+	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+					     24, 28, 30, 32, 36, 40, 42, 44,
+					     48, 52, 54, 56, 60, 64, 66, 68,
+					     70, 72, 76, 78, 80, 84, 88, 90,
+					     92, 96, 98 };
+	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+	static const struct {
+		const int *list;
+		int n_dividers;
+	} dividers[] = {
+		{ even_dividers, ARRAY_SIZE(even_dividers) },
+		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
+	};
+	struct skl_wrpll_context ctx;
+	unsigned int dco, d, i;
+	unsigned int p0, p1, p2;
+
+	skl_wrpll_context_init(&ctx);
+
+	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+			for (i = 0; i < dividers[d].n_dividers; i++) {
+				unsigned int p = dividers[d].list[i];
+				uint64_t dco_freq = p * afe_clock;
+
+				skl_wrpll_try_divider(&ctx,
+						      dco_central_freq[dco],
+						      dco_freq,
+						      p);
+				/*
+				 * Skip the remaining dividers once we've
+				 * found the definitive divider; we can't
+				 * improve on a 0 deviation.
+				 */
+				if (ctx.min_deviation == 0)
+					goto skip_remaining_dividers;
+			}
+		}
+
+skip_remaining_dividers:
+		/*
+		 * If a solution is found with an even divider, prefer
+		 * this one.
+		 */
+		if (d == 0 && ctx.p)
+			break;
+	}
+
+	if (!ctx.p) {
+		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+		return false;
+	}
+
+	/*
+	 * gcc incorrectly warns that these may be used uninitialized.
+	 * To be fair, it's hard to guess.
+	 */
+	p0 = p1 = p2 = 0;
+	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+				  p0, p1, p2);
+
+	return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct intel_shared_dpll *pll;
+	uint32_t ctrl1, cfgcr1, cfgcr2;
+	int clock = crtc_state->port_clock;
+
+	/*
+	 * See comment in intel_dpll_hw_state to understand why we always use 0
+	 * as the DPLL id in this function.
+	 */
+
+	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+	if (encoder->type == INTEL_OUTPUT_HDMI) {
+		struct skl_wrpll_params wrpll_params = { 0, };
+
+		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+			return NULL;
+
+		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+			 wrpll_params.dco_integer;
+
+		cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+			 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+			 wrpll_params.central_freq;
+	} else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		   encoder->type == INTEL_OUTPUT_DP_MST ||
+		   encoder->type == INTEL_OUTPUT_EDP) {
+		switch (crtc_state->port_clock / 2) {
+		case 81000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+			break;
+		case 135000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+			break;
+		case 270000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+			break;
+		/* eDP 1.4 rates */
+		case 162000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+			break;
+		/*
+		 * TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640,
+		 * which results in a CDCLK change. Need to handle the change
+		 * of CDCLK by disabling pipes and re-enabling them.
+		 */
+		case 108000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+			break;
+		case 216000:
+			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+			break;
+		}
+
+		cfgcr1 = cfgcr2 = 0;
+	} else {
+		return NULL;
+	}
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+	if (encoder->type == INTEL_OUTPUT_EDP)
+		pll = intel_find_shared_dpll(crtc, crtc_state,
+					     DPLL_ID_SKL_DPLL0,
+					     DPLL_ID_SKL_DPLL0);
+	else
+		pll = intel_find_shared_dpll(crtc, crtc_state,
+					     DPLL_ID_SKL_DPLL1,
+					     DPLL_ID_SKL_DPLL3);
+	if (!pll)
+		return NULL;
+
+	crtc_state->ddi_pll_sel = pll->id;
+
+	intel_reference_shared_dpll(pll, crtc_state);
+
+	return pll;
+}
+
+static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+	.enable = skl_ddi_pll_enable,
+	.disable = skl_ddi_pll_disable,
+	.get_hw_state = skl_ddi_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+	.enable = skl_ddi_dpll0_enable,
+	.disable = skl_ddi_dpll0_disable,
+	.get_hw_state = skl_ddi_dpll0_get_hw_state,
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
+{
+	uint32_t temp;
+	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+
+	/* Non-SSC reference */
+	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp |= PORT_PLL_REF_SEL;
+	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+	/* Disable 10 bit clock */
+	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+	/* Write P1 & P2 */
+	temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+	temp |= pll->config.hw_state.ebb0;
+	I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+
+	/* Write M2 integer */
+	temp = I915_READ(BXT_PORT_PLL(port, 0));
+	temp &= ~PORT_PLL_M2_MASK;
+	temp |= pll->config.hw_state.pll0;
+	I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+
+	/* Write N */
+	temp = I915_READ(BXT_PORT_PLL(port, 1));
+	temp &= ~PORT_PLL_N_MASK;
+	temp |= pll->config.hw_state.pll1;
+	I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+
+	/* Write M2 fraction */
+	temp = I915_READ(BXT_PORT_PLL(port, 2));
+	temp &= ~PORT_PLL_M2_FRAC_MASK;
+	temp |= pll->config.hw_state.pll2;
+	I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+
+	/* Write M2 fraction enable */
+	temp = I915_READ(BXT_PORT_PLL(port, 3));
+	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+	temp |= pll->config.hw_state.pll3;
+	I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+
+	/* Write coeff */
+	temp = I915_READ(BXT_PORT_PLL(port, 6));
+	temp &= ~PORT_PLL_PROP_COEFF_MASK;
+	temp &= ~PORT_PLL_INT_COEFF_MASK;
+	temp &= ~PORT_PLL_GAIN_CTL_MASK;
+	temp |= pll->config.hw_state.pll6;
+	I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+
+	/* Write calibration val */
+	temp = I915_READ(BXT_PORT_PLL(port, 8));
+	temp &= ~PORT_PLL_TARGET_CNT_MASK;
+	temp |= pll->config.hw_state.pll8;
+	I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+
+	temp = I915_READ(BXT_PORT_PLL(port, 9));
+	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+	temp |= pll->config.hw_state.pll9;
+	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+
+	temp = I915_READ(BXT_PORT_PLL(port, 10));
+	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+	temp &= ~PORT_PLL_DCO_AMP_MASK;
+	temp |= pll->config.hw_state.pll10;
+	I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+
+	/* Recalibrate with new settings */
+	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	temp |= PORT_PLL_RECALIBRATE;
+	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	temp |= pll->config.hw_state.ebb4;
+	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+	/* Enable PLL */
+	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp |= PORT_PLL_ENABLE;
+	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+	if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+			PORT_PLL_LOCK), 200))
+		DRM_ERROR("PLL %d not locked\n", port);
+
+	/*
+	 * While we write to the group register to program all lanes at once,
+	 * we can read back only individual lane registers, so we pick lanes
+	 * 0/1 for that.
+	 */
+	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+	temp &= ~LANE_STAGGER_MASK;
+	temp &= ~LANESTAGGER_STRAP_OVRD;
+	temp |= pll->config.hw_state.pcsdw12;
+	I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+					struct intel_shared_dpll *pll)
+{
+	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	uint32_t temp;
+
+	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp &= ~PORT_PLL_ENABLE;
+	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+					struct intel_shared_dpll *pll,
+					struct intel_dpll_hw_state *hw_state)
+{
+	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	uint32_t val;
+	bool ret;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	ret = false;
+
+	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	if (!(val & PORT_PLL_ENABLE))
+		goto out;
+
+	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
+	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+	hw_state->pll0 &= PORT_PLL_M2_MASK;
+
+	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+	hw_state->pll1 &= PORT_PLL_N_MASK;
+
+	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
+	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
+	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+			  PORT_PLL_INT_COEFF_MASK |
+			  PORT_PLL_GAIN_CTL_MASK;
+
+	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
+	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+			   PORT_PLL_DCO_AMP_MASK;
+
+	/*
+	 * While we write to the group register to program all lanes at once,
+	 * we can read back only individual lane registers. We configure all
+	 * lanes the same way, so here we just read out lanes 0/1 and note if
+	 * lanes 2/3 differ.
+	 */
+	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+	if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+				 hw_state->pcsdw12,
+				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
+
+	ret = true;
+
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+	return ret;
+}
+
+/* bxt clock parameters */
+struct bxt_clk_div {
+	int clock;
+	uint32_t p1;
+	uint32_t p2;
+	uint32_t m2_int;
+	uint32_t m2_frac;
+	bool m2_frac_en;
+	uint32_t n;
+};
+
+/* pre-calculated values for DP link rates */
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+	{162000, 4, 2, 32, 1677722, 1, 1},
+	{270000, 4, 1, 27,       0, 0, 1},
+	{540000, 2, 1, 27,       0, 0, 1},
+	{216000, 3, 2, 32, 1677722, 1, 1},
+	{243000, 4, 1, 24, 1258291, 1, 1},
+	{324000, 4, 1, 32, 1677722, 1, 1},
+	{432000, 3, 1, 32, 1677722, 1, 1}
+};
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_shared_dpll *pll;
+	enum intel_dpll_id i;
+	struct intel_digital_port *intel_dig_port;
+	struct bxt_clk_div clk_div = {0};
+	int vco = 0;
+	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+	uint32_t lanestagger;
+	int clock = crtc_state->port_clock;
+
+	if (encoder->type == INTEL_OUTPUT_HDMI) {
+		intel_clock_t best_clock;
+
+		/* Calculate HDMI div */
+		/*
+		 * FIXME: tie the following calculation into
+		 * i9xx_crtc_compute_clock
+		 */
+		if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+			DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+					 clock, pipe_name(crtc->pipe));
+			return NULL;
+		}
+
+		clk_div.p1 = best_clock.p1;
+		clk_div.p2 = best_clock.p2;
+		WARN_ON(best_clock.m1 != 2);
+		clk_div.n = best_clock.n;
+		clk_div.m2_int = best_clock.m2 >> 22;
+		clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
+		clk_div.m2_frac_en = clk_div.m2_frac != 0;
+
+		vco = best_clock.vco;
+	} else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		   encoder->type == INTEL_OUTPUT_EDP) {
+		int i;
+
+		clk_div = bxt_dp_clk_val[0];
+		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+			if (bxt_dp_clk_val[i].clock == clock) {
+				clk_div = bxt_dp_clk_val[i];
+				break;
+			}
+		}
+		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
+	}
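+	/*
+	 * For example, the 270000 kHz link rate uses table entry
+	 * {270000, 4, 1, 27, 0, 0, 1}, giving vco = 270000 * 10 / 2 * 4 * 1 =
+	 * 5,400,000 kHz, i.e. the vco == 5400000 case below.
+	 */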
+
+	if (vco >= 6200000 && vco <= 6700000) {
+		prop_coef = 4;
+		int_coef = 9;
+		gain_ctl = 3;
+		targ_cnt = 8;
+	} else if ((vco > 5400000 && vco < 6200000) ||
+			(vco >= 4800000 && vco < 5400000)) {
+		prop_coef = 5;
+		int_coef = 11;
+		gain_ctl = 3;
+		targ_cnt = 9;
+	} else if (vco == 5400000) {
+		prop_coef = 3;
+		int_coef = 8;
+		gain_ctl = 1;
+		targ_cnt = 9;
+	} else {
+		DRM_ERROR("Invalid VCO\n");
+		return NULL;
+	}
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (clock > 270000)
+		lanestagger = 0x18;
+	else if (clock > 135000)
+		lanestagger = 0x0d;
+	else if (clock > 67000)
+		lanestagger = 0x07;
+	else if (clock > 33000)
+		lanestagger = 0x04;
+	else
+		lanestagger = 0x02;
+
+	crtc_state->dpll_hw_state.ebb0 =
+		PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
+	crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
+	crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
+	crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+
+	if (clk_div.m2_frac_en)
+		crtc_state->dpll_hw_state.pll3 =
+			PORT_PLL_M2_FRAC_ENABLE;
+
+	crtc_state->dpll_hw_state.pll6 =
+		prop_coef | PORT_PLL_INT_COEFF(int_coef);
+	crtc_state->dpll_hw_state.pll6 |=
+		PORT_PLL_GAIN_CTL(gain_ctl);
+
+	crtc_state->dpll_hw_state.pll8 = targ_cnt;
+
+	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+
+	crtc_state->dpll_hw_state.pll10 =
+		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+		| PORT_PLL_DCO_AMP_OVR_EN_H;
+
+	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+	crtc_state->dpll_hw_state.pcsdw12 =
+		LANESTAGGER_STRAP_OVRD | lanestagger;
+
+	intel_dig_port = enc_to_dig_port(&encoder->base);
+
+	/* 1:1 mapping between ports and PLLs */
+	i = (enum intel_dpll_id) intel_dig_port->port;
+	pll = intel_get_shared_dpll_by_id(dev_priv, i);
+
+	DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+		crtc->base.base.id, pll->name);
+
+	intel_reference_shared_dpll(pll, crtc_state);
+
+	/* shared DPLL id 0 is DPLL A */
+	crtc_state->ddi_pll_sel = pll->id;
+
+	return pll;
+}
+
+static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+	.enable = bxt_ddi_pll_enable,
+	.disable = bxt_ddi_pll_disable,
+	.get_hw_state = bxt_ddi_pll_get_hw_state,
+};
+
+static void intel_ddi_pll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val = I915_READ(LCPLL_CTL);
+
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+		int cdclk_freq;
+
+		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+		dev_priv->skl_boot_cdclk = cdclk_freq;
+		if (skl_sanitize_cdclk(dev_priv))
+			DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
+		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+			DRM_ERROR("LCPLL1 is disabled\n");
+	} else if (!IS_BROXTON(dev_priv)) {
+		/*
+		 * The LCPLL register should be turned on by the BIOS. For now
+		 * let's just check its state and print errors in case
+		 * something is wrong.  Don't even try to turn it on.
+		 */
+
+		if (val & LCPLL_CD_SOURCE_FCLK)
+			DRM_ERROR("CDCLK source is not LCPLL\n");
+
+		if (val & LCPLL_PLL_DISABLE)
+			DRM_ERROR("LCPLL is disabled\n");
+	}
+}
+
+struct dpll_info {
+	const char *name;
+	const int id;
+	const struct intel_shared_dpll_funcs *funcs;
+	uint32_t flags;
+};
+
+struct intel_dpll_mgr {
+	const struct dpll_info *dpll_info;
+
+	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
+					      struct intel_crtc_state *crtc_state,
+					      struct intel_encoder *encoder);
+};
+
+static const struct dpll_info pch_plls[] = {
+	{ "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
+	{ "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
+	{ NULL, -1, NULL, 0 },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+	.dpll_info = pch_plls,
+	.get_dpll = ibx_get_dpll,
+};
+
+static const struct dpll_info hsw_plls[] = {
+	{ "WRPLL 1",    DPLL_ID_WRPLL1,     &hsw_ddi_wrpll_funcs, 0 },
+	{ "WRPLL 2",    DPLL_ID_WRPLL2,     &hsw_ddi_wrpll_funcs, 0 },
+	{ "SPLL",       DPLL_ID_SPLL,       &hsw_ddi_spll_funcs,  0 },
+	{ "LCPLL 810",  DPLL_ID_LCPLL_810,  &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+	{ "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+	{ "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+	{ NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+	.dpll_info = hsw_plls,
+	.get_dpll = hsw_get_dpll,
+};
+
+static const struct dpll_info skl_plls[] = {
+	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
+	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs,   0 },
+	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs,   0 },
+	{ "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs,   0 },
+	{ NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+	.dpll_info = skl_plls,
+	.get_dpll = skl_get_dpll,
+};
+
+static const struct dpll_info bxt_plls[] = {
+	{ "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
+	{ "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
+	{ "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
+	{ NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr bxt_pll_mgr = {
+	.dpll_info = bxt_plls,
+	.get_dpll = bxt_get_dpll,
+};
+
+void intel_shared_dpll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct intel_dpll_mgr *dpll_mgr = NULL;
+	const struct dpll_info *dpll_info;
+	int i;
+
+	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+		dpll_mgr = &skl_pll_mgr;
+	else if (IS_BROXTON(dev))
+		dpll_mgr = &bxt_pll_mgr;
+	else if (HAS_DDI(dev))
+		dpll_mgr = &hsw_pll_mgr;
+	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		dpll_mgr = &pch_pll_mgr;
+
+	if (!dpll_mgr) {
+		dev_priv->num_shared_dpll = 0;
+		return;
+	}
+
+	dpll_info = dpll_mgr->dpll_info;
+
+	for (i = 0; dpll_info[i].id >= 0; i++) {
+		WARN_ON(i != dpll_info[i].id);
+
+		dev_priv->shared_dplls[i].id = dpll_info[i].id;
+		dev_priv->shared_dplls[i].name = dpll_info[i].name;
+		dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
+		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
+	}
+
+	dev_priv->dpll_mgr = dpll_mgr;
+	dev_priv->num_shared_dpll = i;
+	mutex_init(&dev_priv->dpll_lock);
+
+	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+
+	/* FIXME: Move this to a more suitable place */
+	if (HAS_DDI(dev))
+		intel_ddi_pll_init(dev);
+}
+
+struct intel_shared_dpll *
+intel_get_shared_dpll(struct intel_crtc *crtc,
+		      struct intel_crtc_state *crtc_state,
+		      struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+	if (WARN_ON(!dpll_mgr))
+		return NULL;
+
+	return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
new file mode 100644
index 0000000..89c5ada
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright © 2012-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DPLL_MGR_H_
+#define _INTEL_DPLL_MGR_H_
+
+/* FIXME: Move this to a more appropriate place. */
+#define abs_diff(a, b) ({			\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	(void) (&__a == &__b);			\
+	__a > __b ? (__a - __b) : (__b - __a); })
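+/*
+ * For example, abs_diff(3, 7) evaluates to 4. The (void) (&__a == &__b)
+ * line is the usual min()/max() trick: comparing the two addresses makes
+ * the compiler warn if the arguments have different types.
+ */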
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+
+struct intel_shared_dpll;
+struct intel_dpll_mgr;
+
+enum intel_dpll_id {
+	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+	/* real shared dpll ids must be >= 0 */
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	/* hsw/bdw */
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
+	DPLL_ID_SPLL = 2,
+	DPLL_ID_LCPLL_810 = 3,
+	DPLL_ID_LCPLL_1350 = 4,
+	DPLL_ID_LCPLL_2700 = 5,
+
+	/* skl */
+	DPLL_ID_SKL_DPLL0 = 0,
+	DPLL_ID_SKL_DPLL1 = 1,
+	DPLL_ID_SKL_DPLL2 = 2,
+	DPLL_ID_SKL_DPLL3 = 3,
+};
+#define I915_NUM_PLLS 6
+
+/*
+ * Inform the state checker that the DPLL is kept enabled even if not
+ * in use by any CRTC.
+ */
+#define INTEL_DPLL_ALWAYS_ON	(1 << 0)
+
+struct intel_dpll_hw_state {
+	/* i9xx, pch plls */
+	uint32_t dpll;
+	uint32_t dpll_md;
+	uint32_t fp0;
+	uint32_t fp1;
+
+	/* hsw, bdw */
+	uint32_t wrpll;
+	uint32_t spll;
+
+	/* skl */
+	/*
+	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
+	 * lower part of ctrl1 and they get shifted into position when writing
+	 * the register.  This allows us to easily compare the state to share
+	 * the DPLL.
+	 */
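+	/*
+	 * For instance, DPLL 2's 6-bit value lives in DPLL_CTRL1 bits 17:12
+	 * (id * 6); see skl_ddi_pll_write_ctrl1() and
+	 * skl_ddi_pll_get_hw_state().
+	 */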
+	uint32_t ctrl1;
+	/* HDMI only, 0 when used for DP */
+	uint32_t cfgcr1, cfgcr2;
+
+	/* bxt */
+	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+		 pcsdw12;
+};
+
+struct intel_shared_dpll_config {
+	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+	struct intel_dpll_hw_state hw_state;
+};
+
+struct intel_shared_dpll_funcs {
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
+	void (*mode_set)(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll);
+	void (*enable)(struct drm_i915_private *dev_priv,
+		       struct intel_shared_dpll *pll);
+	void (*disable)(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll);
+	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+			     struct intel_shared_dpll *pll,
+			     struct intel_dpll_hw_state *hw_state);
+};
+
+struct intel_shared_dpll {
+	struct intel_shared_dpll_config config;
+
+	unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	const char *name;
+	/* should match the index in the dev_priv->shared_dplls array */
+	enum intel_dpll_id id;
+
+	struct intel_shared_dpll_funcs funcs;
+
+	uint32_t flags;
+};
+
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
+/* shared dpll functions */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+			    enum intel_dpll_id id);
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll);
+void
+intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
+			     struct intel_shared_dpll *pll,
+			     struct intel_crtc *crtc);
+void
+intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
+			     struct intel_shared_dpll *pll,
+			     struct intel_crtc *crtc);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll,
+			bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+						struct intel_crtc_state *state,
+						struct intel_encoder *encoder);
+void intel_prepare_shared_dpll(struct intel_crtc *crtc);
+void intel_enable_shared_dpll(struct intel_crtc *crtc);
+void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_shared_dpll_commit(struct drm_atomic_state *state);
+void intel_shared_dpll_init(struct drm_device *dev);
+
+#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9d0770c..a28b4aa 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -33,6 +33,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_rect.h>
 #include <drm/drm_atomic.h>
@@ -44,9 +45,13 @@
  * contexts. Note that it's important that we check the condition again after
  * having timed out, since the timeout could be due to preemption or similar and
  * we've never had a chance to check the condition before the timeout.
+ *
+ * TODO: When modesetting has fully transitioned to atomic, the below
+ * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
+ * added.
  */
-#define _wait_for(COND, MS, W) ({ \
-	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
+#define _wait_for(COND, US, W) ({ \
+	unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;	\
 	int ret__ = 0;							\
 	while (!(COND)) {						\
 		if (time_after(jiffies, timeout__)) {			\
@@ -55,7 +60,7 @@
 			break;						\
 		}							\
 		if ((W) && drm_can_sleep()) {				\
-			usleep_range((W)*1000, (W)*2000);		\
+			usleep_range((W), (W)*2);			\
 		} else {						\
 			cpu_relax();					\
 		}							\
@@ -63,10 +68,40 @@
 	ret__;								\
 })
 
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-#define wait_for_atomic_us(COND, US) _wait_for((COND), \
-					       DIV_ROUND_UP((US), 1000), 0)
+#define wait_for(COND, MS)	  	_wait_for((COND), (MS) * 1000, 1000)
+#define wait_for_us(COND, US)	  	_wait_for((COND), (US), 1)
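+/*
+ * For example, wait_for(cond, 5) polls cond for up to 5 ms, sleeping roughly
+ * 1-2 ms between checks (when sleeping is allowed), while wait_for_us(cond, 5)
+ * polls for up to 5 us with 1-2 us sleeps.
+ */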
+
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US) ({ \
+	unsigned long end__; \
+	int ret__ = 0; \
+	_WAIT_FOR_ATOMIC_CHECK; \
+	BUILD_BUG_ON((US) > 50000); \
+	end__ = (local_clock() >> 10) + (US) + 1; \
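+	/* local_clock() returns ns; >> 10 approximates a conversion to us. */ \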
+	while (!(COND)) { \
+		if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
+			/* Unlike the regular wait_for(), this atomic variant \
+			 * cannot be preempted (and we'll just ignore the issue\
+			 * of irq interruptions) and so we know that no time \
+			 * has passed since the last check of COND and can \
+			 * immediately report the timeout. \
+			 */ \
+			ret__ = -ETIMEDOUT; \
+			break; \
+		} \
+		cpu_relax(); \
+	} \
+	ret__; \
+})
+
+#define wait_for_atomic(COND, MS)	_wait_for_atomic((COND), (MS) * 1000)
+#define wait_for_atomic_us(COND, US)	_wait_for_atomic((COND), (US))
 
 #define KHz(x) (1000 * (x))
 #define MHz(x) KHz(1000 * (x))
@@ -118,6 +153,7 @@
 struct intel_framebuffer {
 	struct drm_framebuffer base;
 	struct drm_i915_gem_object *obj;
+	struct intel_rotation_info rot_info;
 };
 
 struct intel_fbdev {
@@ -260,6 +296,12 @@
 
 	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
 	struct intel_wm_config wm_config;
+
+	/*
+	 * Current watermarks can't be trusted during hardware readout, so
+	 * don't bother calculating intermediate watermarks.
+	 */
+	bool skip_intermediate_wm;
 };
 
 struct intel_plane_state {
@@ -349,6 +391,7 @@
 
 struct intel_pipe_wm {
 	struct intel_wm_level wm[5];
+	struct intel_wm_level raw_wm[5];
 	uint32_t linetime;
 	bool fbc_wm_enabled;
 	bool pipe_enabled;
@@ -376,9 +419,10 @@
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS	(1<<0) /* unreliable sync mode.flags */
 	unsigned long quirks;
 
+	unsigned fb_bits; /* framebuffers to flip */
 	bool update_pipe; /* can a fast modeset be performed? */
 	bool disable_cxsr;
-	bool wm_changed; /* watermarks are updated */
+	bool update_wm_pre, update_wm_post; /* watermarks are updated */
 	bool fb_changed; /* fb on any of the planes is changed */
 
 	/* Pipe source size (ie. panel fitter input size)
@@ -394,7 +438,8 @@
 	bool has_infoframe;
 
 	/* CPU Transcoder for the pipe. Currently this can only differ from the
-	 * pipe on Haswell (where we have a special eDP transcoder). */
+	 * pipe on Haswell and later (where we have a special eDP transcoder)
+	 * and Broxton (where we have special DSI transcoders). */
 	enum transcoder cpu_transcoder;
 
 	/*
@@ -441,8 +486,8 @@
 	 * haswell. */
 	struct dpll dpll;
 
-	/* Selected dpll when shared or DPLL_ID_PRIVATE. */
-	enum intel_dpll_id shared_dpll;
+	/* Selected dpll when shared or NULL. */
+	struct intel_shared_dpll *shared_dpll;
 
 	/*
 	 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
@@ -453,6 +498,11 @@
 	/* Actual register state of the dpll, for shared dpll cross-checking. */
 	struct intel_dpll_hw_state dpll_hw_state;
 
+	/* DSI PLL registers */
+	struct {
+		u32 ctrl, div;
+	} dsi_pll;
+
 	int pipe_bpp;
 	struct intel_link_m_n dp_m_n;
 
@@ -510,14 +560,33 @@
 
 	struct {
 		/*
-		 * optimal watermarks, programmed post-vblank when this state
-		 * is committed
+		 * Optimal watermarks, programmed post-vblank when this state
+		 * is committed.
 		 */
 		union {
 			struct intel_pipe_wm ilk;
 			struct skl_pipe_wm skl;
 		} optimal;
+
+		/*
+		 * Intermediate watermarks; these can be programmed immediately
+		 * since they satisfy both the current configuration we're
+		 * switching away from and the new configuration we're switching
+		 * to.
+		 */
+		struct intel_pipe_wm intermediate;
+
+		/*
+		 * Platforms with two-step watermark programming will need to
+		 * update watermark programming post-vblank to switch from the
+		 * safe intermediate watermarks to the optimal final
+		 * watermarks.
+		 */
+		bool need_postvbl_update;
 	} wm;
+
+	/* Gamma mode programmed on the pipe */
+	uint32_t gamma_mode;
 };
 
 struct vlv_wm_state {
@@ -537,23 +606,6 @@
 	unsigned int rotation;
 };
 
-/*
- * Tracking of operations that need to be performed at the beginning/end of an
- * atomic commit, outside the atomic section where interrupts are disabled.
- * These are generally operations that grab mutexes or might otherwise sleep
- * and thus can't be run with interrupts disabled.
- */
-struct intel_crtc_atomic_commit {
-	/* Sleepable operations to perform before commit */
-
-	/* Sleepable operations to perform after commit */
-	unsigned fb_bits;
-	bool post_enable_primary;
-
-	/* Sleepable operations to perform before and after commit */
-	bool update_fbc;
-};
-
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
@@ -600,6 +652,7 @@
 			struct intel_pipe_wm ilk;
 			struct skl_pipe_wm skl;
 		} active;
+
 		/* allow CxSR on this pipe */
 		bool cxsr_allowed;
 	} wm;
@@ -613,8 +666,6 @@
 		int scanline_start;
 	} debug;
 
-	struct intel_crtc_atomic_commit atomic;
-
 	/* scalers available on this crtc */
 	int num_scalers;
 
@@ -703,6 +754,10 @@
 struct intel_hdmi {
 	i915_reg_t hdmi_reg;
 	int ddc_bus;
+	struct {
+		enum drm_dp_dual_mode_type type;
+		int max_tmds_clock;
+	} dp_dual_mode;
 	bool limited_color_range;
 	bool color_range_auto;
 	bool has_hdmi_sink;
@@ -751,7 +806,9 @@
 	uint32_t DP;
 	int link_rate;
 	uint8_t lane_count;
+	uint8_t sink_count;
 	bool has_audio;
+	bool detect_done;
 	enum hdmi_force_audio force_audio;
 	bool limited_color_range;
 	bool color_range_auto;
@@ -831,7 +888,7 @@
 	struct intel_encoder base;
 	enum pipe pipe;
 	struct intel_digital_port *primary;
-	void *port; /* store this opaque as its illegal to dereference it */
+	struct intel_connector *connector;
 };
 
 static inline enum dpio_channel
@@ -1007,7 +1064,6 @@
 void intel_ddi_init(struct drm_device *dev, enum port port);
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_pll_init(struct drm_device *dev);
 void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
 void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 				       enum transcoder cpu_transcoder);
@@ -1049,17 +1105,19 @@
 			      uint64_t fb_modifier, uint32_t pixel_format);
 
 /* intel_audio.c */
-void intel_init_audio(struct drm_device *dev);
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
 void intel_audio_codec_enable(struct intel_encoder *encoder);
 void intel_audio_codec_disable(struct intel_encoder *encoder);
 void i915_audio_component_init(struct drm_i915_private *dev_priv);
 void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 
 /* intel_display.c */
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+		      const char *name, u32 reg, int ref_freq);
 extern const struct drm_plane_funcs intel_plane_funcs;
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-int intel_hrawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -1104,9 +1162,8 @@
 void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old,
 				    struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
-			       struct drm_framebuffer *fb,
-			       const struct drm_plane_state *plane_state);
+int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+			       unsigned int rotation);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1142,19 +1199,13 @@
 void intel_create_rotation_property(struct drm_device *dev,
 					struct intel_plane *plane);
 
-/* shared dpll functions */
-struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
-			struct intel_shared_dpll *pll,
-			bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
-						struct intel_crtc_state *state);
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+				    enum pipe pipe);
 
 int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 		     const struct dpll *dpll);
 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
 
 /* modesetting asserts */
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1163,6 +1214,9 @@
 		enum pipe pipe, bool state);
 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 		       enum pipe pipe, bool state);
 #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
@@ -1170,21 +1224,24 @@
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
-			      int *x, int *y,
-			      uint64_t fb_modifier,
-			      unsigned int cpp,
-			      unsigned int pitch);
+u32 intel_compute_tile_offset(int *x, int *y,
+			      const struct drm_framebuffer *fb, int plane,
+			      unsigned int pitch,
+			      unsigned int rotation);
 void intel_prepare_reset(struct drm_device *dev);
 void intel_finish_reset(struct drm_device *dev);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_device *dev);
-void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_ddi_phy_init(struct drm_device *dev);
-void broxton_ddi_phy_uninit(struct drm_device *dev);
+void broxton_init_cdclk(struct drm_i915_private *dev_priv);
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 void skl_init_cdclk(struct drm_i915_private *dev_priv);
 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
@@ -1194,9 +1251,6 @@
 		      struct intel_crtc_state *pipe_config);
 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-void
-ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
-				int dotclock);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			intel_clock_t *best_clock);
 int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
@@ -1224,8 +1278,10 @@
 
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
-bool intel_csr_load_program(struct drm_i915_private *);
+void intel_csr_load_program(struct drm_i915_private *);
 void intel_csr_ucode_fini(struct drm_i915_private *);
+void intel_csr_ucode_suspend(struct drm_i915_private *);
+void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
@@ -1266,7 +1322,6 @@
 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
 					 struct intel_digital_port *port);
-void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1351,6 +1406,7 @@
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 			       struct intel_crtc_state *pipe_config);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
 /* intel_lvds.c */
@@ -1423,8 +1479,8 @@
 void intel_power_domains_fini(struct drm_i915_private *);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
 void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -1541,6 +1597,7 @@
 int ilk_wm_max_level(const struct drm_device *dev);
 void intel_update_watermarks(struct drm_crtc *crtc);
 void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
@@ -1565,6 +1622,7 @@
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
 
 /* intel_sdvo.c */
@@ -1606,6 +1664,18 @@
 
 	return to_intel_crtc_state(crtc_state);
 }
+
+static inline struct intel_plane_state *
+intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+				      struct intel_plane *plane)
+{
+	struct drm_plane_state *plane_state;
+
+	plane_state = drm_atomic_get_existing_plane_state(state, &plane->base);
+
+	return to_intel_plane_state(plane_state);
+}
+
 int intel_atomic_setup_scalers(struct drm_device *dev,
 	struct intel_crtc *intel_crtc,
 	struct intel_crtc_state *crtc_state);
@@ -1617,4 +1687,10 @@
 			       struct drm_plane_state *state);
 extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
 
+/* intel_color.c */
+void intel_color_init(struct drm_crtc *crtc);
+int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
+void intel_color_set_csc(struct drm_crtc_state *crtc_state);
+void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 01b8e9f..366ad6c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -46,6 +46,40 @@
 	},
 };
 
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+		       u16 burst_mode_ratio)
+{
+	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+					 8 * 100), lane_count);
+}
+
+/* return pixels equivalent to txbyteclkhs */
+static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
+			u16 burst_mode_ratio)
+{
+	return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
+						(bpp * burst_mode_ratio));
+}
+
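The two helpers above convert between pixels and txbyteclkhs counts; because both directions use DIV_ROUND_UP, the round trip is not always lossless. A small standalone sketch with example numbers (the kernel's DIV_ROUND_UP is reimplemented here so the snippet builds on its own):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bpp = 18, lanes = 3, ratio = 100;	/* burst_mode_ratio in 1/100 units */
	unsigned int pixels = 101;

	/* pixels -> txbyteclkhs, as in txbyteclkhs() above */
	unsigned int clk_hs = DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * ratio, 8 * 100), lanes);

	/* txbyteclkhs -> pixels, as in pixels_from_txbyteclkhs() above */
	unsigned int back = DIV_ROUND_UP(clk_hs * lanes * 8 * 100, bpp * ratio);

	/* prints 101 -> 76 -> 102: the round-up in each direction costs a pixel */
	printf("%u -> %u -> %u\n", pixels, clk_hs, back);
	return 0;
}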
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+{
+	/* It just so happens the VBT matches register contents. */
+	switch (fmt) {
+	case VID_MODE_FORMAT_RGB888:
+		return MIPI_DSI_FMT_RGB888;
+	case VID_MODE_FORMAT_RGB666:
+		return MIPI_DSI_FMT_RGB666;
+	case VID_MODE_FORMAT_RGB666_PACKED:
+		return MIPI_DSI_FMT_RGB666_PACKED;
+	case VID_MODE_FORMAT_RGB565:
+		return MIPI_DSI_FMT_RGB565;
+	default:
+		MISSING_CASE(fmt);
+		return MIPI_DSI_FMT_RGB666;
+	}
+}
+
 static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 {
 	struct drm_encoder *encoder = &intel_dsi->base.base;
@@ -268,22 +302,47 @@
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config)
 {
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
 						   base);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
-	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	int ret;
 
 	DRM_DEBUG_KMS("\n");
 
 	pipe_config->has_dsi_encoder = true;
 
-	if (fixed_mode)
+	if (fixed_mode) {
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+		if (HAS_GMCH_DISPLAY(dev_priv))
+			intel_gmch_panel_fitting(crtc, pipe_config,
+						 intel_connector->panel.fitting_mode);
+		else
+			intel_pch_panel_fitting(crtc, pipe_config,
+						intel_connector->panel.fitting_mode);
+	}
+
 	/* DSI uses short packets for sync events, so clear mode flags for DSI */
 	adjusted_mode->flags = 0;
 
+	if (IS_BROXTON(dev_priv)) {
+		/* Dual link goes to DSI transcoder A. */
+		if (intel_dsi->ports == BIT(PORT_C))
+			pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
+		else
+			pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
+	}
+
+	ret = intel_compute_dsi_pll(encoder, pipe_config);
+	if (ret)
+		return false;
+
+	pipe_config->clock_set = true;
+
 	return true;
 }
 
@@ -403,7 +462,7 @@
 		temp &= ~LANE_CONFIGURATION_MASK;
 		temp &= ~DUAL_LINK_MODE_MASK;
 
-		if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+		if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
 			temp |= (intel_dsi->dual_link - 1)
 						<< DUAL_LINK_MODE_SHIFT;
 			temp |= intel_crtc->pipe ?
@@ -471,14 +530,19 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	enum port port;
 	u32 tmp;
 
 	DRM_DEBUG_KMS("\n");
 
-	intel_enable_dsi_pll(encoder);
+	/*
+	 * The BIOS may leave the PLL in a wonky state where it doesn't
+	 * lock. It needs to be fully powered down to fix it.
+	 */
+	intel_disable_dsi_pll(encoder);
+	intel_enable_dsi_pll(encoder, crtc->config);
+
 	intel_dsi_prepare(encoder);
 
 	/* Panel Enable over CRC PMIC */
@@ -488,19 +552,7 @@
 	msleep(intel_dsi->panel_on_delay);
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		/*
-		 * Disable DPOunit clock gating, can stall pipe
-		 * and we need DPLL REFA always enabled
-		 */
-		tmp = I915_READ(DPLL(pipe));
-		tmp |= DPLL_REF_CLK_ENABLE_VLV;
-		I915_WRITE(DPLL(pipe), tmp);
-
-		/* update the hw state for DPLL */
-		intel_crtc->config->dpll_hw_state.dpll =
-				DPLL_INTEGRATED_REF_CLK_VLV |
-					DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-
+		/* Disable DPOunit clock gating, can stall pipe */
 		tmp = I915_READ(DSPCLK_GATE_D);
 		tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
 		I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -652,11 +704,16 @@
 	drm_panel_unprepare(intel_dsi->panel);
 
 	msleep(intel_dsi->panel_off_delay);
-	msleep(intel_dsi->panel_pwr_cycle_delay);
 
 	/* Panel Disable over CRC PMIC */
 	if (intel_dsi->gpio_panel)
 		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+
+	/*
+	 * FIXME As we do with eDP, just make a note of the time here
+	 * and perform the wait before the next panel power on.
+	 */
+	msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -667,7 +724,7 @@
 	struct drm_device *dev = encoder->base.dev;
 	enum intel_display_power_domain power_domain;
 	enum port port;
-	bool ret;
+	bool active = false;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -675,55 +732,234 @@
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
 
-	ret = false;
+	/*
+	 * On Broxton the PLL needs to be enabled with a valid divider
+	 * configuration, otherwise accessing DSI registers will hang the
+	 * machine. See BSpec North Display Engine registers/MIPI[BXT].
+	 */
+	if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+		goto out_put_power;
 
 	/* XXX: this only works for one DSI output */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
-		u32 dpi_enabled, func;
+		bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
 
-		func = I915_READ(MIPI_DSI_FUNC_PRG(port));
-		dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
-
-		/* Due to some hardware limitations on BYT, MIPI Port C DPI
-		 * Enable bit does not get set. To check whether DSI Port C
-		 * was enabled in BIOS, check the Pipe B enable bit
+		/*
+		 * Due to some hardware limitations on VLV/CHV, the DPI enable
+		 * bit in port C control register does not get set. As a
+		 * workaround, check pipe B conf instead.
 		 */
-		if (IS_VALLEYVIEW(dev) && port == PORT_C)
-			dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
-							PIPECONF_ENABLE;
+		if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+			enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
 
-		if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
-			if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
-				*pipe = port == PORT_A ? PIPE_A : PIPE_B;
-				ret = true;
-
-				goto out;
-			}
+		/* Try command mode if video mode not enabled */
+		if (!enabled) {
+			u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+			enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
 		}
+
+		if (!enabled)
+			continue;
+
+		if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+			continue;
+
+		if (IS_BROXTON(dev_priv)) {
+			u32 tmp = I915_READ(MIPI_CTRL(port));
+			tmp &= BXT_PIPE_SELECT_MASK;
+			tmp >>= BXT_PIPE_SELECT_SHIFT;
+
+			if (WARN_ON(tmp > PIPE_C))
+				continue;
+
+			*pipe = tmp;
+		} else {
+			*pipe = port == PORT_A ? PIPE_A : PIPE_B;
+		}
+
+		active = true;
+		break;
 	}
-out:
+
+out_put_power:
 	intel_display_power_put(dev_priv, power_domain);
 
-	return ret;
+	return active;
+}
+
+static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode =
+					&pipe_config->base.adjusted_mode;
+	struct drm_display_mode *adjusted_mode_sw;
+	struct intel_crtc *intel_crtc;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	unsigned int lane_count = intel_dsi->lane_count;
+	unsigned int bpp, fmt;
+	enum port port;
+	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+	u16 hfp_sw, hsync_sw, hbp_sw;
+	u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
+				crtc_hblank_start_sw, crtc_hblank_end_sw;
+
+	intel_crtc = to_intel_crtc(encoder->base.crtc);
+	adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
+
+	/*
+	 * At least one port is active, since encoder->get_config() is only
+	 * called if encoder->get_hw_state() returned true.
+	 */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+			break;
+	}
+
+	fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+	pipe_config->pipe_bpp =
+			mipi_dsi_pixel_format_to_bpp(
+				pixel_format_from_register_bits(fmt));
+	bpp = pipe_config->pipe_bpp;
+
+	/* In terms of pixels */
+	adjusted_mode->crtc_hdisplay =
+				I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+	adjusted_mode->crtc_vdisplay =
+				I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+	adjusted_mode->crtc_vtotal =
+				I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+
+	hactive = adjusted_mode->crtc_hdisplay;
+	hfp = I915_READ(MIPI_HFP_COUNT(port));
+
+	/*
+	 * Only meaningful for video mode with sync pulses (non-burst);
+	 * can be zero for non-burst sync events and burst modes.
+	 */
+	hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
+	hbp = I915_READ(MIPI_HBP_COUNT(port));
+
+	/* horizontal values are in terms of the high-speed byte clock */
+	hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+	hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+	hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+
+	if (intel_dsi->dual_link) {
+		hfp *= 2;
+		hsync *= 2;
+		hbp *= 2;
+	}
+
+	/* vertical values are in terms of lines */
+	vfp = I915_READ(MIPI_VFP_COUNT(port));
+	vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
+	vbp = I915_READ(MIPI_VBP_COUNT(port));
+
+	adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
+	adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
+	adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
+	adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+	adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+	adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
+	adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
+	adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+	adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+
+	/*
+	 * On BXT, some of the horizontal timings are programmed in
+	 * txbyteclkhs rather than pixels, so converting the register values
+	 * back to pixels introduces DIV_ROUND_UP rounding errors.
+	 * To compensate, take the software adjusted_mode, compute the value
+	 * that would have been programmed to the port, and convert it back
+	 * to pixels. That gives the expected readout, including the rounding
+	 * errors. If it matches the value actually read back from the port,
+	 * replace the hardware-state timing with the software-state timing
+	 * so that the round-trip errors cancel out.
+	 */
+	/* Calculating the value programmed to the Port register */
+	hfp_sw = adjusted_mode_sw->crtc_hsync_start -
+					adjusted_mode_sw->crtc_hdisplay;
+	hsync_sw = adjusted_mode_sw->crtc_hsync_end -
+					adjusted_mode_sw->crtc_hsync_start;
+	hbp_sw = adjusted_mode_sw->crtc_htotal -
+					adjusted_mode_sw->crtc_hsync_end;
+
+	if (intel_dsi->dual_link) {
+		hfp_sw /= 2;
+		hsync_sw /= 2;
+		hbp_sw /= 2;
+	}
+
+	hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+	hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
+			    intel_dsi->burst_mode_ratio);
+	hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+
+	/* Convert the port register values back to adjusted mode parameters */
+	hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+	hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+	hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
+						intel_dsi->burst_mode_ratio);
+
+	if (intel_dsi->dual_link) {
+		hfp_sw *= 2;
+		hsync_sw *= 2;
+		hbp_sw *= 2;
+	}
+
+	crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
+							hsync_sw + hbp_sw;
+	crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
+	crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
+	crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
+	crtc_hblank_end_sw = crtc_htotal_sw;
+
+	if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
+		adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
+
+	if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
+		adjusted_mode->crtc_hsync_start =
+					adjusted_mode_sw->crtc_hsync_start;
+
+	if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
+		adjusted_mode->crtc_hsync_end =
+					adjusted_mode_sw->crtc_hsync_end;
+
+	if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
+		adjusted_mode->crtc_hblank_start =
+					adjusted_mode_sw->crtc_hblank_start;
+
+	if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
+		adjusted_mode->crtc_hblank_end =
+					adjusted_mode_sw->crtc_hblank_end;
 }
 
 static void intel_dsi_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
 	u32 pclk;
 	DRM_DEBUG_KMS("\n");
 
 	pipe_config->has_dsi_encoder = true;
 
-	/*
-	 * DPLL_MD is not used in case of DSI, reading will get some default value
-	 * set dpll_md = 0
-	 */
-	pipe_config->dpll_hw_state.dpll_md = 0;
+	if (IS_BROXTON(dev))
+		bxt_dsi_get_pipe_config(encoder, pipe_config);
 
-	pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
+	pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+				  pipe_config);
 	if (!pclk)
 		return;
 
@@ -736,7 +972,7 @@
 		     struct drm_display_mode *mode)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
-	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
 	DRM_DEBUG_KMS("\n");
@@ -772,14 +1008,6 @@
 	}
 }
 
-/* return pixels in terms of txbyteclkhs */
-static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
-		       u16 burst_mode_ratio)
-{
-	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
-					 8 * 100), lane_count);
-}
-
 static void set_dsi_timings(struct drm_encoder *encoder,
 			    const struct drm_display_mode *adjusted_mode)
 {
@@ -787,7 +1015,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
-	unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
+	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	unsigned int lane_count = intel_dsi->lane_count;
 
 	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -849,6 +1077,23 @@
 	}
 }
 
+static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
+{
+	switch (fmt) {
+	case MIPI_DSI_FMT_RGB888:
+		return VID_MODE_FORMAT_RGB888;
+	case MIPI_DSI_FMT_RGB666:
+		return VID_MODE_FORMAT_RGB666;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		return VID_MODE_FORMAT_RGB666_PACKED;
+	case MIPI_DSI_FMT_RGB565:
+		return VID_MODE_FORMAT_RGB565;
+	default:
+		MISSING_CASE(fmt);
+		return VID_MODE_FORMAT_RGB666;
+	}
+}
+
 static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
@@ -858,7 +1103,7 @@
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
 	enum port port;
-	unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
+	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	u32 val, tmp;
 	u16 mode_hdisplay;
 
@@ -917,9 +1162,7 @@
 		val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
 	} else {
 		val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
-
-		/* XXX: cross-check bpp vs. pixel format? */
-		val |= intel_dsi->pixel_format;
+		val |= pixel_format_to_reg(intel_dsi->pixel_format);
 	}
 
 	tmp = 0;
@@ -1059,6 +1302,48 @@
 	return 1;
 }
 
+static int intel_dsi_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_crtc *crtc;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		if (val == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("scaling mode NONE not supported\n");
+			return -EINVAL;
+		}
+		if (HAS_GMCH_DISPLAY(dev) &&
+		    val == DRM_MODE_SCALE_CENTER) {
+			DRM_DEBUG_KMS("centering not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == val)
+			return 0;
+
+		intel_connector->panel.fitting_mode = val;
+	}
+
+	crtc = intel_attached_encoder(connector)->base.crtc;
+	if (crtc && crtc->state->enable) {
+		/*
+		 * If the CRTC is enabled, the display will be changed
+		 * according to the new panel fitting mode.
+		 */
+		intel_crtc_restore_mode(crtc);
+	}
+
+	return 0;
+}
+
 static void intel_dsi_connector_destroy(struct drm_connector *connector)
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1101,11 +1386,25 @@
 	.detect = intel_dsi_detect,
 	.destroy = intel_dsi_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_dsi_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
 
+static void intel_dsi_add_properties(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+
+	if (connector->panel.fixed_mode) {
+		drm_mode_create_scaling_mode_property(dev);
+		drm_object_attach_property(&connector->base.base,
+					   dev->mode_config.scaling_mode_property,
+					   DRM_MODE_SCALE_ASPECT);
+		connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	}
+}
+
 void intel_dsi_init(struct drm_device *dev)
 {
 	struct intel_dsi *intel_dsi;
@@ -1121,11 +1420,13 @@
 	DRM_DEBUG_KMS("\n");
 
 	/* There is no detection method for MIPI so rely on VBT */
-	if (!dev_priv->vbt.has_mipi)
+	if (!intel_bios_is_dsi_present(dev_priv, &port))
 		return;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+	} else if (IS_BROXTON(dev)) {
+		dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
 	} else {
 		DRM_ERROR("Unsupported Mipi device to reg base");
 		return;
@@ -1161,17 +1462,21 @@
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 	intel_connector->unregister = intel_connector_unregister;
 
-	/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
-	if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
-		intel_encoder->crtc_mask = (1 << PIPE_A);
-		intel_dsi->ports = (1 << PORT_A);
-	} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
-		intel_encoder->crtc_mask = (1 << PIPE_B);
-		intel_dsi->ports = (1 << PORT_C);
-	}
+	/*
+	 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
+	 * port C. BXT isn't limited like this.
+	 */
+	if (IS_BROXTON(dev_priv))
+		intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+	else if (port == PORT_A)
+		intel_encoder->crtc_mask = BIT(PIPE_A);
+	else
+		intel_encoder->crtc_mask = BIT(PIPE_B);
 
 	if (dev_priv->vbt.dsi.config->dual_link)
-		intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
+	else
+		intel_dsi->ports = BIT(port);
 
 	/* Create a DSI host (and a device) for each port. */
 	for_each_dsi_port(port, intel_dsi->ports) {
@@ -1223,8 +1528,6 @@
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
-	drm_connector_register(connector);
-
 	drm_panel_attach(intel_dsi->panel, connector);
 
 	mutex_lock(&dev->mode_config.mutex);
@@ -1243,6 +1546,11 @@
 	}
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+
+	intel_dsi_add_properties(intel_connector);
+
+	drm_connector_register(connector);
+
 	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	return;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 92f3922..61a6957 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,8 +34,6 @@
 #define DSI_DUAL_LINK_FRONT_BACK	1
 #define DSI_DUAL_LINK_PIXEL_ALT		2
 
-int dsi_pixel_format_bpp(int pixel_format);
-
 struct intel_dsi_host;
 
 struct intel_dsi {
@@ -64,8 +62,12 @@
 	/* number of DSI lanes */
 	unsigned int lane_count;
 
-	/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
-	u32 pixel_format;
+	/*
+	 * video mode pixel format
+	 *
+	 * XXX: consolidate on .format in struct mipi_dsi_device.
+	 */
+	enum mipi_dsi_pixel_format pixel_format;
 
 	/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
 	u32 video_mode_format;
@@ -117,21 +119,25 @@
 	return container_of(h, struct intel_dsi_host, base);
 }
 
-#define for_each_dsi_port(__port, __ports_mask) \
-	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
-		for_each_if ((__ports_mask) & (1 << (__port)))
+#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
 
 static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 {
 	return container_of(encoder, struct intel_dsi, base.base);
 }
 
-extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
-extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
-							enum port port);
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+			  struct intel_crtc_state *config);
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+			  const struct intel_crtc_state *config);
+void intel_disable_dsi_pll(struct intel_encoder *encoder);
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+		       struct intel_crtc_state *config);
+void intel_dsi_reset_clocks(struct intel_encoder *encoder,
+			    enum port port);
 
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
 
 #endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 7f145b4..e498f1c 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -58,50 +58,41 @@
 
 #define NS_KHZ_RATIO 1000000
 
-#define GPI0_NC_0_HV_DDI0_HPD           0x4130
-#define GPIO_NC_0_HV_DDI0_PAD           0x4138
-#define GPIO_NC_1_HV_DDI0_DDC_SDA       0x4120
-#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD   0x4128
-#define GPIO_NC_2_HV_DDI0_DDC_SCL       0x4110
-#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD   0x4118
-#define GPIO_NC_3_PANEL0_VDDEN          0x4140
-#define GPIO_NC_3_PANEL0_VDDEN_PAD      0x4148
-#define GPIO_NC_4_PANEL0_BLKEN          0x4150
-#define GPIO_NC_4_PANEL0_BLKEN_PAD      0x4158
-#define GPIO_NC_5_PANEL0_BLKCTL         0x4160
-#define GPIO_NC_5_PANEL0_BLKCTL_PAD     0x4168
-#define GPIO_NC_6_PCONF0                0x4180
-#define GPIO_NC_6_PAD                   0x4188
-#define GPIO_NC_7_PCONF0                0x4190
-#define GPIO_NC_7_PAD                   0x4198
-#define GPIO_NC_8_PCONF0                0x4170
-#define GPIO_NC_8_PAD                   0x4178
-#define GPIO_NC_9_PCONF0                0x4100
-#define GPIO_NC_9_PAD                   0x4108
-#define GPIO_NC_10_PCONF0               0x40E0
-#define GPIO_NC_10_PAD                  0x40E8
-#define GPIO_NC_11_PCONF0               0x40F0
-#define GPIO_NC_11_PAD                  0x40F8
+/* base offsets for gpio pads */
+#define VLV_GPIO_NC_0_HV_DDI0_HPD	0x4130
+#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA	0x4120
+#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL	0x4110
+#define VLV_GPIO_NC_3_PANEL0_VDDEN	0x4140
+#define VLV_GPIO_NC_4_PANEL0_BKLTEN	0x4150
+#define VLV_GPIO_NC_5_PANEL0_BKLTCTL	0x4160
+#define VLV_GPIO_NC_6_HV_DDI1_HPD	0x4180
+#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA	0x4190
+#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL	0x4170
+#define VLV_GPIO_NC_9_PANEL1_VDDEN	0x4100
+#define VLV_GPIO_NC_10_PANEL1_BKLTEN	0x40E0
+#define VLV_GPIO_NC_11_PANEL1_BKLTCTL	0x40F0
 
-struct gpio_table {
-	u16 function_reg;
-	u16 pad_reg;
-	u8 init;
+#define VLV_GPIO_PCONF0(base_offset)	(base_offset)
+#define VLV_GPIO_PAD_VAL(base_offset)	((base_offset) + 8)
+
+struct gpio_map {
+	u16 base_offset;
+	bool init;
 };
 
-static struct gpio_table gtable[] = {
-	{ GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
-	{ GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
-	{ GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
-	{ GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
-	{ GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
-	{ GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
-	{ GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
-	{ GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
-	{ GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
-	{ GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
-	{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
-	{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
+static struct gpio_map vlv_gpio_table[] = {
+	{ VLV_GPIO_NC_0_HV_DDI0_HPD },
+	{ VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
+	{ VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
+	{ VLV_GPIO_NC_3_PANEL0_VDDEN },
+	{ VLV_GPIO_NC_4_PANEL0_BKLTEN },
+	{ VLV_GPIO_NC_5_PANEL0_BKLTCTL },
+	{ VLV_GPIO_NC_6_HV_DDI1_HPD },
+	{ VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
+	{ VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
+	{ VLV_GPIO_NC_9_PANEL1_VDDEN },
+	{ VLV_GPIO_NC_10_PANEL1_BKLTEN },
+	{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };
 
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
@@ -196,56 +187,76 @@
 	return data;
 }
 
+static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+			  u8 gpio_source, u8 gpio_index, bool value)
+{
+	struct gpio_map *map;
+	u16 pconf0, padval;
+	u32 tmp;
+	u8 port;
+
+	if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
+		DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+		return;
+	}
+
+	map = &vlv_gpio_table[gpio_index];
+
+	if (dev_priv->vbt.dsi.seq_version >= 3) {
+		DRM_DEBUG_KMS("GPIO element v3 not supported\n");
+		return;
+	} else {
+		if (gpio_source == 0) {
+			port = IOSF_PORT_GPIO_NC;
+		} else if (gpio_source == 1) {
+			port = IOSF_PORT_GPIO_SC;
+		} else {
+			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+			return;
+		}
+	}
+
+	pconf0 = VLV_GPIO_PCONF0(map->base_offset);
+	padval = VLV_GPIO_PAD_VAL(map->base_offset);
+
+	mutex_lock(&dev_priv->sb_lock);
+	if (!map->init) {
+		/* FIXME: remove constant below */
+		vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
+		map->init = true;
+	}
+
+	tmp = 0x4 | value;
+	vlv_iosf_sb_write(dev_priv, port, padval, tmp);
+	mutex_unlock(&dev_priv->sb_lock);
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
-	u8 gpio, action;
-	u16 function, pad;
-	u32 val;
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 gpio_source, gpio_index;
+	bool value;
 
 	if (dev_priv->vbt.dsi.seq_version >= 3)
 		data++;
 
-	gpio = *data++;
+	gpio_index = *data++;
+
+	/* gpio source in sequence v2 only */
+	if (dev_priv->vbt.dsi.seq_version == 2)
+		gpio_source = (*data >> 1) & 3;
+	else
+		gpio_source = 0;
 
 	/* pull up/down */
-	action = *data++ & 1;
+	value = *data++ & 1;
 
-	if (gpio >= ARRAY_SIZE(gtable)) {
-		DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
-		goto out;
-	}
-
-	if (!IS_VALLEYVIEW(dev_priv)) {
+	if (IS_VALLEYVIEW(dev_priv))
+		vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+	else
 		DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
-		goto out;
-	}
 
-	if (dev_priv->vbt.dsi.seq_version >= 3) {
-		DRM_DEBUG_KMS("GPIO element v3 not supported\n");
-		goto out;
-	}
-
-	function = gtable[gpio].function_reg;
-	pad = gtable[gpio].pad_reg;
-
-	mutex_lock(&dev_priv->sb_lock);
-	if (!gtable[gpio].init) {
-		/* program the function */
-		/* FIXME: remove constant below */
-		vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
-				  0x2000CC00);
-		gtable[gpio].init = 1;
-	}
-
-	val = 0x4 | action;
-
-	/* pull up/down */
-	vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
-	mutex_unlock(&dev_priv->sb_lock);
-
-out:
 	return data;
 }
 
@@ -420,7 +431,7 @@
 	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
 	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
 	struct vbt_panel *vbt_panel;
-	u32 bits_per_pixel = 24;
+	u32 bpp;
 	u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
 	u32 ui_num, ui_den;
 	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@@ -436,12 +447,13 @@
 	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
 	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
 	intel_dsi->lane_count = mipi_config->lane_cnt + 1;
-	intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+	intel_dsi->pixel_format =
+			pixel_format_from_register_bits(
+				mipi_config->videomode_color_format << 7);
+	bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
 	intel_dsi->dual_link = mipi_config->dual_link;
 	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
-
-	bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
-
 	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
 	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
 	intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@@ -475,8 +487,7 @@
 	 */
 	if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
 		if (mipi_config->target_burst_mode_freq) {
-			computed_ddr =
-				(pclk * bits_per_pixel) / intel_dsi->lane_count;
+			computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
 
 			if (mipi_config->target_burst_mode_freq <
 								computed_ddr) {
@@ -499,7 +510,7 @@
 	intel_dsi->burst_mode_ratio = burst_mode_ratio;
 	intel_dsi->pclk = pclk;
 
-	bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
+	bitrate = (pclk * bpp) / intel_dsi->lane_count;
 
 	switch (intel_dsi->escape_clk_div) {
 	case 0:
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 70883c5..1765e6e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,33 +30,7 @@
 #include "i915_drv.h"
 #include "intel_dsi.h"
 
-int dsi_pixel_format_bpp(int pixel_format)
-{
-	int bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
-
-	return bpp;
-}
-
-struct dsi_mnp {
-	u32 dsi_pll_ctrl;
-	u32 dsi_pll_div;
-};
-
-static const u32 lfsr_converts[] = {
+static const u16 lfsr_converts[] = {
 	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
 	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
 	106, 53, 282, 397, 454, 227, 113, 56, 284, 142,		/* 81 - 90 */
@@ -64,10 +38,11 @@
 };
 
 /* Get DSI clock from pixel clock */
-static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
+static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
+			     int lane_count)
 {
 	u32 dsi_clk_khz;
-	u32 bpp = dsi_pixel_format_bpp(pixel_format);
+	u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt);
 
 	/* DSI data rate = pixel clock * bits per pixel / lane count
 	   pixel clock is converted from KHz to Hz */
@@ -77,7 +52,8 @@
 }
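The relation in the comment above (DSI data rate = pixel clock * bits per pixel / lane count) is easy to sanity-check with round numbers; the values below are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int pclk_khz = 64000;	/* pixel clock */
	unsigned int bpp = 24;		/* MIPI_DSI_FMT_RGB888 */
	unsigned int lane_count = 4;

	/* per-lane DSI data rate, same relation as dsi_clk_from_pclk() */
	unsigned int dsi_clk_khz = pclk_khz * bpp / lane_count;

	printf("%u kHz\n", dsi_clk_khz);	/* 384000 */
	return 0;
}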
 
 static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
-			struct dsi_mnp *dsi_mnp, int target_dsi_clk)
+			struct intel_crtc_state *config,
+			int target_dsi_clk)
 {
 	unsigned int calc_m = 0, calc_p = 0;
 	unsigned int m_min, m_max, p_min = 2, p_max = 6;
@@ -123,8 +99,8 @@
 	/* register has log2(N1), this works fine for powers of two */
 	n = ffs(n) - 1;
 	m_seed = lfsr_converts[calc_m - 62];
-	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
-	dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
+	config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+	config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
 		m_seed << DSI_PLL_M1_DIV_SHIFT;
 
 	return 0;
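As the existing comment notes, the N1 field holds log2(N1), and for powers of two that is exactly what ffs(n) - 1 yields. A quick standalone check, using the compiler builtin as a stand-in for the kernel's ffs():

#include <stdio.h>

int main(void)
{
	/* for powers of two, ffs(n) - 1 == log2(n) */
	for (unsigned int n = 1; n <= 16; n <<= 1)
		printf("n=%2u  ffs(n)-1=%d\n", n, __builtin_ffs(n) - 1);
	return 0;
}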
@@ -134,54 +110,55 @@
  * XXX: The muxing and gating is hard coded for now. Need to add support for
  * sharing PLLs with two DSI outputs.
  */
-static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
+			       struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	int ret;
-	struct dsi_mnp dsi_mnp;
 	u32 dsi_clk;
 
 	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
 				    intel_dsi->lane_count);
 
-	ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
+	ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
 	if (ret) {
 		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
-		return;
+		return ret;
 	}
 
 	if (intel_dsi->ports & (1 << PORT_A))
-		dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+		config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
 
 	if (intel_dsi->ports & (1 << PORT_C))
-		dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+		config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+
+	config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
 
 	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
-		      dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+		      config->dsi_pll.div, config->dsi_pll.ctrl);
 
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+	return 0;
 }
 
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *config)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-	u32 tmp;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	DRM_DEBUG_KMS("\n");
 
 	mutex_lock(&dev_priv->sb_lock);
 
-	vlv_configure_dsi_pll(encoder);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+		      config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
 
 	/* wait at least 0.5 us after ungating before enabling VCO */
 	usleep_range(1, 10);
 
-	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
-	tmp |= DSI_PLL_VCO_EN;
-	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
 
 	if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
 						DSI_PLL_LOCK, 20)) {
@@ -197,7 +174,7 @@
 
 static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 tmp;
 
 	DRM_DEBUG_KMS("\n");
@@ -212,9 +189,39 @@
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
+static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+	bool enabled;
+	u32 val;
+	u32 mask;
+
+	mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
+	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	enabled = (val & mask) == mask;
+
+	if (!enabled)
+		return false;
+
+	/*
+	 * Both dividers must be programmed with valid values even if only one
+	 * of them is used, see BSpec/Broxton Clocks. Check this here for
+	 * paranoia, since BIOS is known to misconfigure PLLs in this way at
+	 * times, and since accessing DSI registers with invalid dividers
+	 * causes a system hang.
+	 */
+	val = I915_READ(BXT_DSI_PLL_CTL);
+	if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
+		DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
+				 val);
+		enabled = false;
+	}
+
+	return enabled;
+}
+
 static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val;
 
 	DRM_DEBUG_KMS("\n");
@@ -232,23 +239,24 @@
 		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
 }
 
-static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
 {
-	int bpp = dsi_pixel_format_bpp(pixel_format);
+	int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
 
 	WARN(bpp != pipe_bpp,
 	     "bpp match assertion failure (expected %d, current %d)\n",
 	     bpp, pipe_bpp);
 }
 
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+			    struct intel_crtc_state *config)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	u32 dsi_clock, pclk;
 	u32 pll_ctl, pll_div;
 	u32 m = 0, p = 0, n;
-	int refclk = 25000;
+	int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
 	int i;
 
 	DRM_DEBUG_KMS("\n");
@@ -258,6 +266,9 @@
 	pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
 	mutex_unlock(&dev_priv->sb_lock);
 
+	config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
+	config->dsi_pll.div = pll_div;
+
 	/* mask out other bits and extract the P1 divisor */
 	pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
 	pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
@@ -303,7 +314,8 @@
 	return pclk;
 }
 
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+			    struct intel_crtc_state *config)
 {
 	u32 pclk;
 	u32 dsi_clk;
@@ -317,15 +329,9 @@
 		return 0;
 	}
 
-	dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
-				BXT_DSI_PLL_RATIO_MASK;
+	config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
 
-	/* Invalid DSI ratio ? */
-	if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
-			dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
-		DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
-		return 0;
-	}
+	dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
 
 	dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
 
@@ -338,12 +344,13 @@
 	return pclk;
 }
 
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+		       struct intel_crtc_state *config)
 {
 	if (IS_BROXTON(encoder->base.dev))
-		return bxt_dsi_get_pclk(encoder, pipe_bpp);
+		return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
 	else
-		return vlv_dsi_get_pclk(encoder, pipe_bpp);
+		return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
 }
 
 static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -360,51 +367,72 @@
 }
 
 /* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
+static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+				   const struct intel_crtc_state *config)
 {
-	u32 tmp;
-	u32 divider;
-	u32 dsi_rate;
-	u32 pll_ratio;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+	u32 dsi_rate = 0;
+	u32 pll_ratio = 0;
+	u32 rx_div;
+	u32 tx_div;
+	u32 rx_div_upper;
+	u32 rx_div_lower;
+	u32 mipi_8by3_divider;
 
 	/* Clear old configurations */
 	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
 	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
-	tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
 
 	/* Get the current DSI rate(actual) */
-	pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
-				BXT_DSI_PLL_RATIO_MASK;
+	pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
 	dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
 
-	/* Max possible output of clock is 39.5 MHz, program value -1 */
-	divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
-	tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
+	/*
+	 * tx escape clock should be <= 20MHz; the programmed value is the
+	 * divider minus 1, as per bspec
+	 */
+	tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
+	/*
+	 * rx escape clock should be <= 150MHz; the programmed value is the
+	 * divider minus 1, as per bspec
+	 */
+	rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
 
 	/*
-	 * Tx escape clock must be as close to 20MHz possible, but should
-	 * not exceed it. Hence select divide by 2
+	 * The rx divider value is spread across two different bit fields in
+	 * the register, so split the computed value into its lower and upper
+	 * parts accordingly.
 	 */
-	tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
+	rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
+	rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
 
-	tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
+	/* As per bspec, program the 8/3X clock divider to the value below */
+	if (dev_priv->vbt.dsi.config->is_cmd_mode)
+		mipi_8by3_divider = 0x2;
+	else
+		mipi_8by3_divider = 0x3;
+
+	tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
+	tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
+	tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
+	tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
 
 	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 }
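The divider arithmetic above can be checked with concrete numbers. The 19.2 MHz reference clock, the example PLL ratio, and the 2-bit field split used below are assumptions made for illustration, not values taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ref_khz = 19200;		/* assumed Broxton reference clock */
	unsigned int pll_ratio = 43;		/* example DSI PLL ratio */
	unsigned int dsi_rate = ref_khz * pll_ratio / 2;	/* 412800 kHz */

	/* tx escape clock <= 20 MHz, programmed value is divider - 1 */
	unsigned int tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;	/* 20 */
	/* rx escape clock <= 150 MHz, programmed value is divider - 1 */
	unsigned int rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;	/* 2 */

	/* rx divider split across two register fields: low 2 bits / high 2 bits */
	unsigned int rx_div_lower = rx_div & 0x3;
	unsigned int rx_div_upper = (rx_div & 0xc) >> 2;

	printf("tx_div=%u rx_div=%u (lower=%u upper=%u)\n",
	       tx_div, rx_div, rx_div_lower, rx_div_upper);
	return 0;
}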
 
-static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
+static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
+			       struct intel_crtc_state *config)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	u8 dsi_ratio;
 	u32 dsi_clk;
-	u32 val;
 
 	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
-			intel_dsi->lane_count);
+				    intel_dsi->lane_count);
 
 	/*
 	 * From clock diagram, to get PLL ratio divider, divide double of DSI
@@ -413,9 +441,9 @@
 	 */
 	dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
 	if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
-			dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+	    dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
 		DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
-		return false;
+		return -ECHRNG;
 	}
 
 	/*
@@ -423,27 +451,19 @@
 	 * Spec says both have to be programmed, even if one is not getting
 	 * used. Configure MIPI_CLOCK_CTL dividers in modeset
 	 */
-	val = I915_READ(BXT_DSI_PLL_CTL);
-	val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
-	val &= ~BXT_DSI_FREQ_SEL_MASK;
-	val &= ~BXT_DSI_PLL_RATIO_MASK;
-	val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
+	config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
 
 	/* As per recommendation from hardware team,
 	 * Prog PVD ratio =1 if dsi ratio <= 50
 	 */
-	if (dsi_ratio <= 50) {
-		val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
-		val |= BXT_DSI_PLL_PVD_RATIO_1;
-	}
+	if (dsi_ratio <= 50)
+		config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
 
-	I915_WRITE(BXT_DSI_PLL_CTL, val);
-	POSTING_READ(BXT_DSI_PLL_CTL);
-
-	return true;
+	return 0;
 }
 
-static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
+static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -452,23 +472,13 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	val = I915_READ(BXT_DSI_PLL_ENABLE);
-
-	if (val & BXT_DSI_PLL_DO_ENABLE) {
-		WARN(1, "DSI PLL already enabled. Disabling it.\n");
-		val &= ~BXT_DSI_PLL_DO_ENABLE;
-		I915_WRITE(BXT_DSI_PLL_ENABLE, val);
-	}
-
 	/* Configure PLL vales */
-	if (!bxt_configure_dsi_pll(encoder)) {
-		DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
-		return;
-	}
+	I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+	POSTING_READ(BXT_DSI_PLL_CTL);
 
 	/* Program TX, RX, Dphy clocks */
 	for_each_dsi_port(port, intel_dsi->ports)
-		bxt_dsi_program_clocks(encoder->base.dev, port);
+		bxt_dsi_program_clocks(encoder->base.dev, port, config);
 
 	/* Enable DSI PLL */
 	val = I915_READ(BXT_DSI_PLL_ENABLE);
@@ -484,14 +494,38 @@
 	DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
-void intel_enable_dsi_pll(struct intel_encoder *encoder)
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+	if (IS_BROXTON(dev_priv))
+		return bxt_dsi_pll_is_enabled(dev_priv);
+
+	MISSING_CASE(INTEL_DEVID(dev_priv));
+
+	return false;
+}
+
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+			  struct intel_crtc_state *config)
 {
 	struct drm_device *dev = encoder->base.dev;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		vlv_enable_dsi_pll(encoder);
+		return vlv_compute_dsi_pll(encoder, config);
 	else if (IS_BROXTON(dev))
-		bxt_enable_dsi_pll(encoder);
+		return bxt_compute_dsi_pll(encoder, config);
+
+	return -ENODEV;
+}
+
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+			  const struct intel_crtc_state *config)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+		vlv_enable_dsi_pll(encoder, config);
+	else if (IS_BROXTON(dev))
+		bxt_enable_dsi_pll(encoder, config);
 }
 
 void intel_disable_dsi_pll(struct intel_encoder *encoder)
@@ -513,9 +547,9 @@
 	/* Clear old configurations */
 	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
 	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
-	tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
 	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 }
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 0f0492f..d5a7cfe 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -506,6 +506,7 @@
 				      int size,
 				      int fb_cpp)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int compression_threshold = 1;
 	int ret;
 	u64 end;
@@ -516,9 +517,9 @@
 	 * underruns, even if that range is not reserved by the BIOS. */
 	if (IS_BROADWELL(dev_priv) ||
 	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+		end = ggtt->stolen_size - 8 * 1024 * 1024;
 	else
-		end = dev_priv->gtt.stolen_usable_size;
+		end = ggtt->stolen_usable_size;
 
 	/* HACK: This code depends on what we will do in *_enable_fbc. If that
 	 * code changes, this code needs to change as well.
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 97a91e6..ab8d09a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -122,6 +122,7 @@
 	struct drm_framebuffer *fb;
 	struct drm_device *dev = helper->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_i915_gem_object *obj = NULL;
 	int size, ret;
@@ -146,7 +147,7 @@
 	/* If the FB is too big, just don't use it since fbdev is not very
 	 * important and we should probably use that space with FBC or other
 	 * features. */
-	if (size * 2 < dev_priv->gtt.stolen_usable_size)
+	if (size * 2 < ggtt->stolen_usable_size)
 		obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
 		obj = i915_gem_alloc_object(dev, size);
@@ -181,7 +182,8 @@
 		container_of(helper, struct intel_fbdev, helper);
 	struct intel_framebuffer *intel_fb = ifbdev->fb;
 	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_i915_gem_object *obj;
@@ -220,7 +222,7 @@
 	 * This also validates that any existing fb inherited from the
 	 * BIOS is suitable for own access.
 	 */
-	ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
+	ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
 	if (ret)
 		goto out_unlock;
 
@@ -244,13 +246,13 @@
 
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+	info->apertures->ranges[0].size = ggtt->mappable_end;
 
 	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
 	info->fix.smem_len = size;
 
 	info->screen_base =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+		ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   size);
 	if (!info->screen_base) {
 		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -366,12 +368,12 @@
 	uint64_t conn_configured = 0, mask;
 	int pass = 0;
 
-	save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
+	save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
 			       GFP_KERNEL);
 	if (!save_enabled)
 		return false;
 
-	memcpy(save_enabled, enabled, dev->mode_config.num_connector);
+	memcpy(save_enabled, enabled, fb_helper->connector_count);
 	mask = (1 << fb_helper->connector_count) - 1;
 retry:
 	for (i = 0; i < fb_helper->connector_count; i++) {
@@ -379,6 +381,7 @@
 		struct drm_connector *connector;
 		struct drm_encoder *encoder;
 		struct drm_fb_helper_crtc *new_crtc;
+		struct intel_crtc *intel_crtc;
 
 		fb_conn = fb_helper->connector_info[i];
 		connector = fb_conn->connector;
@@ -420,6 +423,13 @@
 
 		num_connectors_enabled++;
 
+		intel_crtc = to_intel_crtc(connector->state->crtc);
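+		/* seed the crtc gamma LUT arrays with a linear (identity) ramp */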
+		for (j = 0; j < 256; j++) {
+			intel_crtc->lut_r[j] = j;
+			intel_crtc->lut_g[j] = j;
+			intel_crtc->lut_b[j] = j;
+		}
+
 		new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
 
 		/*
@@ -510,7 +520,7 @@
 	if (fallback) {
 bail:
 		DRM_DEBUG_KMS("Not using firmware configuration\n");
-		memcpy(enabled, save_enabled, dev->mode_config.num_connector);
+		memcpy(enabled, save_enabled, fb_helper->connector_count);
 		kfree(save_enabled);
 		return false;
 	}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index bda5266..9be839a 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -212,7 +212,7 @@
 	I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
 	POSTING_READ(SERR_INT);
 
-	DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
+	DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
 		  transcoder_name(pch_transcoder));
 }
 
@@ -235,7 +235,7 @@
 
 		if (old && I915_READ(SERR_INT) &
 		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
-			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
 				  transcoder_name(pch_transcoder));
 		}
 	}
@@ -333,7 +333,7 @@
 	old = !intel_crtc->pch_fifo_underrun_disabled;
 	intel_crtc->pch_fifo_underrun_disabled = !enable;
 
-	if (HAS_PCH_IBX(dev_priv->dev))
+	if (HAS_PCH_IBX(dev_priv))
 		ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
 						enable);
 	else
@@ -363,7 +363,7 @@
 		return;
 
 	/* GMCH can't disable fifo underruns, filter them. */
-	if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+	if (HAS_GMCH_DISPLAY(dev_priv) &&
 	    to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
 		return;
 
@@ -386,7 +386,7 @@
 {
 	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
 						  false))
-		DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+		DRM_ERROR("PCH transcoder %s FIFO underrun\n",
 			  transcoder_name(pch_transcoder));
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 73002e9..9d79c4c 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -27,8 +27,34 @@
 #include "intel_guc_fwif.h"
 #include "i915_guc_reg.h"
 
+struct drm_i915_gem_request;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process decriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent  pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ *
+ * Finally, we also keep a few statistics here, including the number of
+ * submissions to each engine, and a record of the last submission failure
+ * (if any).
+ */
 struct i915_guc_client {
 	struct drm_i915_gem_object *client_obj;
+	void *client_base;		/* first page (only) of above	*/
 	struct intel_context *owner;
 	struct intel_guc *guc;
 	uint32_t priority;
@@ -43,13 +69,14 @@
 	uint32_t wq_offset;
 	uint32_t wq_size;
 	uint32_t wq_tail;
-	uint32_t wq_head;
+	uint32_t unused;		/* Was 'wq_head'		*/
 
 	/* GuC submission statistics & status */
 	uint64_t submissions[GUC_MAX_ENGINES_NUM];
 	uint32_t q_fail;
 	uint32_t b_fail;
 	int retcode;
+	int spare;			/* pad to 32 DWords		*/
 };
 
 enum intel_guc_fw_status {
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 82a3c03..876e5da 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,7 +59,7 @@
  *
  */
 
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
 MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
 /* User-friendly representation of an enum */
@@ -81,14 +81,14 @@
 
 static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
-	int i, irqs;
+	struct intel_engine_cs *engine;
+	int irqs;
 
 	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MODE_GEN7(ring), irqs);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
 	/* route all GT interrupts to the host */
 	I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -98,14 +98,14 @@
 
 static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 {
-	struct intel_engine_cs *ring;
-	int i, irqs;
+	struct intel_engine_cs *engine;
+	int irqs;
 
 	/* tell all command streamers to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
 	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MODE_GEN7(ring), irqs);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
 	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
 	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -353,6 +353,24 @@
 	return ret;
 }
 
+static int i915_reset_guc(struct drm_i915_private *dev_priv)
+{
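+	/* reset the GuC and check that its MIA core reports being in reset */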
+	int ret;
+	u32 guc_status;
+
+	ret = intel_guc_reset(dev_priv);
+	if (ret) {
+		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+		return ret;
+	}
+
+	guc_status = I915_READ(GUC_STATUS);
+	WARN(!(guc_status & GS_MIA_IN_RESET),
+	     "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
+
+	return ret;
+}
+
 /**
  * intel_guc_ucode_load() - load GuC uCode into the device
  * @dev:	drm device
@@ -369,7 +387,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	int err = 0;
+	int retries, err = 0;
 
 	if (!i915.enable_guc_submission)
 		return 0;
@@ -417,9 +435,33 @@
 	if (err)
 		goto fail;
 
-	err = guc_ucode_xfer(dev_priv);
-	if (err)
-		goto fail;
+	/*
+	 * WaEnableuKernelHeaderValidFix:skl,bxt
+	 * For BXT this applies only up to stepping B0, but the WA below is
+	 * required for later steppings as well, so it is extended to them too.
+	 */
+	/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
+	for (retries = 3; ; ) {
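+		/* up to three attempts at loading the firmware in total */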
+		/*
+		 * Always reset the GuC just before (re)loading, so
+		 * that the state and timing are fairly predictable
+		 */
+		err = i915_reset_guc(dev_priv);
+		if (err) {
+			DRM_ERROR("GuC reset failed, err %d\n", err);
+			goto fail;
+		}
+
+		err = guc_ucode_xfer(dev_priv);
+		if (!err)
+			break;
+
+		if (--retries == 0)
+			goto fail;
+
+		DRM_INFO("GuC fw load failed, err %d; will reset and "
+			"retry %d more time(s)\n", err, retries);
+	}
 
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
 
@@ -440,6 +482,7 @@
 	return 0;
 
 fail:
+	DRM_ERROR("GuC firmware load failed, err %d\n", err);
 	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
 		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
@@ -595,8 +638,8 @@
 		fw_path = NULL;
 	} else if (IS_SKYLAKE(dev)) {
 		fw_path = I915_SKL_GUC_UCODE;
-		guc_fw->guc_fw_major_wanted = 4;
-		guc_fw->guc_fw_minor_wanted = 3;
+		guc_fw->guc_fw_major_wanted = 6;
+		guc_fw->guc_fw_minor_wanted = 1;
 	} else {
 		i915.enable_guc_submission = false;
 		fw_path = "";	/* unknown device */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1ab6f68..2c3bd9c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -638,7 +638,7 @@
 		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
-	else if (HAS_PCH_SPLIT(dev_priv->dev))
+	else if (HAS_PCH_SPLIT(dev_priv))
 		reg = TVIDEO_DIP_GCP(crtc->pipe);
 	else
 		return false;
@@ -836,6 +836,22 @@
 	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+	struct i2c_adapter *adapter =
+		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+
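+	/* only type 2 DP dual mode adaptors support TMDS output control */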
+	if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+		return;
+
+	DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
+		      enable ? "Enabling" : "Disabling");
+
+	drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
+					 adapter, enable);
+}
+
 static void intel_hdmi_prepare(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
@@ -845,6 +861,8 @@
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	u32 hdmi_val;
 
+	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+
 	hdmi_val = SDVO_ENCODING_HDMI;
 	if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
 		hdmi_val |= HDMI_COLOR_RANGE_16_235;
@@ -952,10 +970,9 @@
 	if (pipe_config->pixel_multiplier)
 		dotclock /= pipe_config->pixel_multiplier;
 
-	if (HAS_PCH_SPLIT(dev_priv->dev))
-		ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+	pipe_config->lane_count = 4;
 }
 
 static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
@@ -1143,6 +1160,8 @@
 	}
 
 	intel_hdmi->set_infoframes(&encoder->base, false, NULL);
+
+	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
 static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -1168,27 +1187,42 @@
 	intel_disable_hdmi(encoder);
 }
 
-static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
-
-	if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
+	if (IS_G4X(dev_priv))
 		return 165000;
-	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
+	else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
 		return 300000;
 	else
 		return 225000;
 }
 
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
+				 bool respect_downstream_limits)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
+
+	if (respect_downstream_limits) {
+		if (hdmi->dp_dual_mode.max_tmds_clock)
+			max_tmds_clock = min(max_tmds_clock,
+					     hdmi->dp_dual_mode.max_tmds_clock);
+		if (!hdmi->has_hdmi_sink)
+			max_tmds_clock = min(max_tmds_clock, 165000);
+	}
+
+	return max_tmds_clock;
+}
+
 static enum drm_mode_status
 hdmi_port_clock_valid(struct intel_hdmi *hdmi,
-		      int clock, bool respect_dvi_limit)
+		      int clock, bool respect_downstream_limits)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
 	if (clock < 25000)
 		return MODE_CLOCK_LOW;
-	if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+	if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
 		return MODE_CLOCK_HIGH;
 
 	/* BXT DPLL can't generate 223-240 MHz */
@@ -1312,7 +1346,7 @@
 	 * within limits.
 	 */
 	if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
-	    hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+	    hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
 	    hdmi_12bpc_possible(pipe_config)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
@@ -1340,6 +1374,8 @@
 	/* Set user selected PAR to incoming mode's member */
 	adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
 
+	pipe_config->lane_count = 4;
+
 	return true;
 }
 
@@ -1352,10 +1388,57 @@
 	intel_hdmi->has_audio = false;
 	intel_hdmi->rgb_quant_range_selectable = false;
 
+	intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
+	intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
+
 	kfree(to_intel_connector(connector)->detect_edid);
 	to_intel_connector(connector)->detect_edid = NULL;
 }
 
+static void
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	enum port port = hdmi_to_dig_port(hdmi)->port;
+	struct i2c_adapter *adapter =
+		intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
+
+	/*
+	 * Type 1 DVI adaptors are not required to implement any
+	 * registers, so we can't always detect their presence.
+	 * Ideally we should be able to check the state of the
+	 * CONFIG1 pin, but no such luck on our hardware.
+	 *
+	 * The only method left to us is to check the VBT to see
+	 * if the port is a dual mode capable DP port. But let's
+	 * only do that when we successfully read the EDID, to avoid
+	 * confusing log messages about DP dual mode adaptors when
+	 * there's nothing connected to the port.
+	 */
+	if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
+		if (has_edid &&
+		    intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+			DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+			type = DRM_DP_DUAL_MODE_TYPE1_DVI;
+		} else {
+			type = DRM_DP_DUAL_MODE_NONE;
+		}
+	}
+
+	if (type == DRM_DP_DUAL_MODE_NONE)
+		return;
+
+	hdmi->dp_dual_mode.type = type;
+	hdmi->dp_dual_mode.max_tmds_clock =
+		drm_dp_dual_mode_max_tmds_clock(type, adapter);
+
+	DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+		      drm_dp_get_dual_mode_type_name(type),
+		      hdmi->dp_dual_mode.max_tmds_clock);
+}
+
 static bool
 intel_hdmi_set_edid(struct drm_connector *connector, bool force)
 {
@@ -1371,6 +1454,8 @@
 				    intel_gmbus_get_adapter(dev_priv,
 				    intel_hdmi->ddc_bus));
 
+		intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+
 		intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 52fbe53..81de230 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,7 +124,7 @@
 	u32 val;
 
 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-	if (!IS_PINEVIEW(dev_priv->dev))
+	if (!IS_PINEVIEW(dev_priv))
 		return;
 
 	val = I915_READ(DSPCLK_GATE_D);
@@ -264,7 +264,7 @@
 	u32 gmbus2 = 0;
 	DEFINE_WAIT(wait);
 
-	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+	if (!HAS_GMBUS_IRQ(dev_priv))
 		gmbus4_irq_en = 0;
 
 	/* Important: The hw handles only the first bit, so set only one! Since
@@ -300,7 +300,7 @@
 
 #define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
 
-	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+	if (!HAS_GMBUS_IRQ(dev_priv))
 		return wait_for(C, 10);
 
 	/* Important: The hw handles only the first bit, so set only one! */
@@ -571,15 +571,14 @@
 	goto out;
 
 timeout:
-	DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
-		 bus->adapter.name, bus->reg0 & 0xff);
+	DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+		      bus->adapter.name, bus->reg0 & 0xff);
 	I915_WRITE(GMBUS0, 0);
 
 	/*
 	 * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
 	 * instead. Use EAGAIN to have i2c core retry.
 	 */
-	bus->force_bit = 1;
 	ret = -EAGAIN;
 
 out:
@@ -597,10 +596,15 @@
 	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 	mutex_lock(&dev_priv->gmbus_mutex);
 
-	if (bus->force_bit)
+	if (bus->force_bit) {
 		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
-	else
+		if (ret < 0)
+			bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
+	} else {
 		ret = do_gmbus_xfer(adapter, msgs, num);
+		if (ret == -EAGAIN)
+			bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
+	}
 
 	mutex_unlock(&dev_priv->gmbus_mutex);
 	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@@ -718,11 +722,16 @@
 void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 {
 	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	mutex_lock(&dev_priv->gmbus_mutex);
 
 	bus->force_bit += force_bit ? 1 : -1;
 	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
 		      force_bit ? "en" : "dis", adapter->name,
 		      bus->force_bit);
+
+	mutex_unlock(&dev_priv->gmbus_mutex);
 }
 
 void intel_teardown_gmbus(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5c6080f..42eac37 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -131,6 +131,7 @@
  * preemption, but just sampling the new tail pointer).
  *
  */
+#include <linux/interrupt.h>
 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
@@ -228,9 +229,6 @@
 
 static int intel_lr_context_pin(struct intel_context *ctx,
 				struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *default_ctx_obj);
-
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -266,20 +264,23 @@
 }
 
 static void
-logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 
-	ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	if (IS_GEN8(dev) || IS_GEN9(dev))
+		engine->idle_lite_restore_wa = ~0;
+
+	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
 					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-					(ring->id == VCS || ring->id == VCS2);
+					(engine->id == VCS || engine->id == VCS2);
 
-	ring->ctx_desc_template = GEN8_CTX_VALID;
-	ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+	engine->ctx_desc_template = GEN8_CTX_VALID;
+	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
 				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
 	if (IS_GEN8(dev))
-		ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
-	ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
 	/* TODO: WaDisableLiteRestore when we start using semaphore
 	 * signalling between Command Streamers */
@@ -287,8 +288,8 @@
 
 	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
 	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-	if (ring->disable_lite_restore_wa)
-		ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+	if (engine->disable_lite_restore_wa)
+		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
 }
 
 /**
@@ -311,24 +312,24 @@
  */
 static void
 intel_lr_context_descriptor_update(struct intel_context *ctx,
-				   struct intel_engine_cs *ring)
+				   struct intel_engine_cs *engine)
 {
 	uint64_t lrca, desc;
 
-	lrca = ctx->engine[ring->id].lrc_vma->node.start +
+	lrca = ctx->engine[engine->id].lrc_vma->node.start +
 	       LRC_PPHWSP_PN * PAGE_SIZE;
 
-	desc = ring->ctx_desc_template;			   /* bits  0-11 */
+	desc = engine->ctx_desc_template;			   /* bits  0-11 */
 	desc |= lrca;					   /* bits 12-31 */
 	desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
 
-	ctx->engine[ring->id].lrc_desc = desc;
+	ctx->engine[engine->id].lrc_desc = desc;
 }
 
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-				     struct intel_engine_cs *ring)
+				     struct intel_engine_cs *engine)
 {
-	return ctx->engine[ring->id].lrc_desc;
+	return ctx->engine[engine->id].lrc_desc;
 }
 
 /**
@@ -348,98 +349,103 @@
  * Return: 20-bits globally unique context ID.
  */
 u32 intel_execlists_ctx_id(struct intel_context *ctx,
-			   struct intel_engine_cs *ring)
+			   struct intel_engine_cs *engine)
 {
-	return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
+	return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
 }
 
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 				 struct drm_i915_gem_request *rq1)
 {
 
-	struct intel_engine_cs *ring = rq0->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = rq0->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint64_t desc[2];
 
 	if (rq1) {
-		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
+		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
 		rq1->elsp_submitted++;
 	} else {
 		desc[1] = 0;
 	}
 
-	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
+	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
 	rq0->elsp_submitted++;
 
 	/* You must always write both descriptors in the order below. */
-	spin_lock(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
-	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
 
-	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
 	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
 
 	/* ELSP is a wo register, use another nearby reg for posting */
-	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
-	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
-	spin_unlock(&dev_priv->uncore.lock);
+	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
 }
 
-static int execlists_update_context(struct drm_i915_gem_request *rq)
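+/* write the PPGTT page-directory pointers into the context image */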
+static void
+execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 {
-	struct intel_engine_cs *ring = rq->ring;
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+}
+
+static void execlists_update_context(struct drm_i915_gem_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
+	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 
-	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
-		/* True 32b PPGTT with dynamic page allocation: update PDP
-		 * registers and point the unallocated PDPs to scratch page.
-		 * PML4 is allocated during ppgtt init, so this is not needed
-		 * in 48-bit mode.
-		 */
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-	}
-
-	return 0;
+	/* True 32b PPGTT with dynamic page allocation: update PDP
+	 * registers and point the unallocated PDPs to scratch page.
+	 * PML4 is allocated during ppgtt init, so this is not needed
+	 * in 48-bit mode.
+	 */
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+		execlists_update_context_pdps(ppgtt, reg_state);
 }
 
 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
 				      struct drm_i915_gem_request *rq1)
 {
+	struct drm_i915_private *dev_priv = rq0->i915;
+	unsigned int fw_domains = rq0->engine->fw_domains;
+
 	execlists_update_context(rq0);
 
 	if (rq1)
 		execlists_update_context(rq1);
 
+	spin_lock_irq(&dev_priv->uncore.lock);
+	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
 	execlists_elsp_write(rq0, rq1);
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+	spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
-	struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
+	struct drm_i915_gem_request *cursor, *tmp;
 
-	assert_spin_locked(&ring->execlist_lock);
+	assert_spin_locked(&engine->execlist_lock);
 
 	/*
 	 * If irqs are not active generate a warning as batches that finish
 	 * without the irqs may get lost and a GPU Hang may occur.
 	 */
-	WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
-
-	if (list_empty(&ring->execlist_queue))
-		return;
+	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
 
 	/* Try to read in pairs */
-	list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
 				 execlist_link) {
 		if (!req0) {
 			req0 = cursor;
@@ -448,172 +454,179 @@
 			 * will update tail past first request's workload */
 			cursor->elsp_submitted = req0->elsp_submitted;
 			list_move_tail(&req0->execlist_link,
-				       &ring->execlist_retired_req_list);
+				       &engine->execlist_retired_req_list);
 			req0 = cursor;
 		} else {
 			req1 = cursor;
+			WARN_ON(req1->elsp_submitted);
 			break;
 		}
 	}
 
-	if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+	if (unlikely(!req0))
+		return;
+
+	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
 		/*
-		 * WaIdleLiteRestore: make sure we never cause a lite
-		 * restore with HEAD==TAIL
+		 * WaIdleLiteRestore: make sure we never cause a lite restore
+		 * with HEAD==TAIL.
+		 *
+		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
+		 * resubmit the request. See gen8_emit_request() for where we
+		 * prepare the padding after the end of the request.
 		 */
-		if (req0->elsp_submitted) {
-			/*
-			 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
-			 * as we resubmit the request. See gen8_emit_request()
-			 * for where we prepare the padding after the end of the
-			 * request.
-			 */
-			struct intel_ringbuffer *ringbuf;
+		struct intel_ringbuffer *ringbuf;
 
-			ringbuf = req0->ctx->engine[ring->id].ringbuf;
-			req0->tail += 8;
-			req0->tail &= ringbuf->size - 1;
-		}
+		ringbuf = req0->ctx->engine[engine->id].ringbuf;
+		req0->tail += 8;
+		req0->tail &= ringbuf->size - 1;
 	}
 
-	WARN_ON(req1 && req1->elsp_submitted);
-
 	execlists_submit_requests(req0, req1);
 }
 
-static bool execlists_check_remove_request(struct intel_engine_cs *ring,
-					   u32 request_id)
+static unsigned int
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
 {
 	struct drm_i915_gem_request *head_req;
 
-	assert_spin_locked(&ring->execlist_lock);
+	assert_spin_locked(&engine->execlist_lock);
 
-	head_req = list_first_entry_or_null(&ring->execlist_queue,
+	head_req = list_first_entry_or_null(&engine->execlist_queue,
 					    struct drm_i915_gem_request,
 					    execlist_link);
 
-	if (head_req != NULL) {
-		if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
-			WARN(head_req->elsp_submitted == 0,
-			     "Never submitted head request\n");
+	if (!head_req)
+		return 0;
 
-			if (--head_req->elsp_submitted <= 0) {
-				list_move_tail(&head_req->execlist_link,
-					       &ring->execlist_retired_req_list);
-				return true;
-			}
-		}
-	}
+	if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
+		return 0;
 
-	return false;
+	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
+
+	if (--head_req->elsp_submitted > 0)
+		return 0;
+
+	list_move_tail(&head_req->execlist_link,
+		       &engine->execlist_retired_req_list);
+
+	return 1;
 }
 
-static void get_context_status(struct intel_engine_cs *ring,
-			       u8 read_pointer,
-			       u32 *status, u32 *context_id)
+static u32
+get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
+		   u32 *context_id)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	u32 status;
 
-	if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
-		return;
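+	/* wrap the index into the GEN8_CSB_ENTRIES-deep circular CSB */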
+	read_pointer %= GEN8_CSB_ENTRIES;
 
-	*status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
-	*context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
+
+	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+		return 0;
+
+	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
+							      read_pointer));
+
+	return status;
 }
 
 /**
  * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @ring: Engine Command Streamer to handle.
+ * @engine: Engine Command Streamer to handle.
  *
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+static void intel_lrc_irq_handler(unsigned long data)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 status_pointer;
-	u8 read_pointer;
-	u8 write_pointer;
-	u32 status = 0;
-	u32 status_id;
-	u32 submit_contexts = 0;
+	unsigned int read_pointer, write_pointer;
+	u32 csb[GEN8_CSB_ENTRIES][2];
+	unsigned int csb_read = 0, i;
+	unsigned int submit_contexts = 0;
 
-	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-	read_pointer = ring->next_context_status_buffer;
+	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
+
+	read_pointer = engine->next_context_status_buffer;
 	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 	if (read_pointer > write_pointer)
 		write_pointer += GEN8_CSB_ENTRIES;
 
-	spin_lock(&ring->execlist_lock);
-
 	while (read_pointer < write_pointer) {
+		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
+			break;
+		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
+						      &csb[csb_read][1]);
+		csb_read++;
+	}
 
-		get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
-				   &status, &status_id);
+	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
-		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-			continue;
+	/* Update the read pointer to the old write pointer. Manual ringbuffer
+	 * management ftw </sarcasm> */
+	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
+		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+				    engine->next_context_status_buffer << 8));
 
-		if (status & GEN8_CTX_STATUS_PREEMPTED) {
-			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
-				if (execlists_check_remove_request(ring, status_id))
+	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+
+	spin_lock(&engine->execlist_lock);
+
+	for (i = 0; i < csb_read; i++) {
+		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
+			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
+				if (execlists_check_remove_request(engine, csb[i][1]))
 					WARN(1, "Lite Restored request removed from queue\n");
 			} else
 				WARN(1, "Preemption without Lite Restore\n");
 		}
 
-		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
-		    (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
-			if (execlists_check_remove_request(ring, status_id))
-				submit_contexts++;
-		}
+		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
+		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
+			submit_contexts +=
+				execlists_check_remove_request(engine, csb[i][1]);
 	}
 
-	if (ring->disable_lite_restore_wa) {
-		/* Prevent a ctx to preempt itself */
-		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
-		    (submit_contexts != 0))
-			execlists_context_unqueue(ring);
-	} else if (submit_contexts != 0) {
-		execlists_context_unqueue(ring);
+	if (submit_contexts) {
+		if (!engine->disable_lite_restore_wa ||
+		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
+			execlists_context_unqueue(engine);
 	}
 
-	spin_unlock(&ring->execlist_lock);
+	spin_unlock(&engine->execlist_lock);
 
 	if (unlikely(submit_contexts > 2))
 		DRM_ERROR("More than two context complete events?\n");
-
-	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
-	/* Update the read pointer to the old write pointer. Manual ringbuffer
-	 * management ftw </sarcasm> */
-	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-				 ring->next_context_status_buffer << 8));
 }
 
-static int execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_context_queue(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = request->ring;
+	struct intel_engine_cs *engine = request->engine;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
 	if (request->ctx != request->i915->kernel_context)
-		intel_lr_context_pin(request->ctx, ring);
+		intel_lr_context_pin(request->ctx, engine);
 
 	i915_gem_request_reference(request);
 
-	spin_lock_irq(&ring->execlist_lock);
+	spin_lock_bh(&engine->execlist_lock);
 
-	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
 		if (++num_elements > 2)
 			break;
 
 	if (num_elements > 2) {
 		struct drm_i915_gem_request *tail_req;
 
-		tail_req = list_last_entry(&ring->execlist_queue,
+		tail_req = list_last_entry(&engine->execlist_queue,
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
@@ -621,41 +634,39 @@
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_move_tail(&tail_req->execlist_link,
-				       &ring->execlist_retired_req_list);
+				       &engine->execlist_retired_req_list);
 		}
 	}
 
-	list_add_tail(&request->execlist_link, &ring->execlist_queue);
+	list_add_tail(&request->execlist_link, &engine->execlist_queue);
 	if (num_elements == 0)
-		execlists_context_unqueue(ring);
+		execlists_context_unqueue(engine);
 
-	spin_unlock_irq(&ring->execlist_lock);
-
-	return 0;
+	spin_unlock_bh(&engine->execlist_lock);
 }
 
 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -665,7 +676,7 @@
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -689,7 +700,7 @@
 {
 	int ret = 0;
 
-	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -705,53 +716,11 @@
 	}
 
 	if (request->ctx != request->i915->kernel_context)
-		ret = intel_lr_context_pin(request->ctx, request->ring);
+		ret = intel_lr_context_pin(request->ctx, request->engine);
 
 	return ret;
 }
 
-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
-				       int bytes)
-{
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_gem_request *target;
-	unsigned space;
-	int ret;
-
-	if (intel_ring_space(ringbuf) >= bytes)
-		return 0;
-
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	list_for_each_entry(target, &ring->request_list, list) {
-		/*
-		 * The request queue is per-engine, so can contain requests
-		 * from multiple ringbuffers. Here, we must ignore any that
-		 * aren't from the ringbuffer we're considering.
-		 */
-		if (target->ringbuf != ringbuf)
-			continue;
-
-		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
-		if (space >= bytes)
-			break;
-	}
-
-	if (WARN_ON(&target->list == &ring->request_list))
-		return -ENOSPC;
-
-	ret = i915_wait_request(target);
-	if (ret)
-		return ret;
-
-	ringbuf->space = space;
-	return 0;
-}
-
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
  * @request: Request to advance the logical ringbuffer of.
@@ -766,7 +735,7 @@
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct drm_i915_private *dev_priv = request->i915;
-	struct intel_engine_cs *engine = request->ring;
+	struct intel_engine_cs *engine = request->engine;
 
 	intel_logical_ring_advance(ringbuf);
 	request->tail = ringbuf->tail;
@@ -781,7 +750,7 @@
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance(ringbuf);
 
-	if (intel_ring_stopped(engine))
+	if (intel_engine_stopped(engine))
 		return 0;
 
 	if (engine->last_context != request->ctx) {
@@ -803,101 +772,6 @@
 	return 0;
 }
 
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-	uint32_t __iomem *virt;
-	int rem = ringbuf->size - ringbuf->tail;
-
-	virt = ringbuf->virtual_start + ringbuf->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ringbuf->tail = 0;
-	intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
-	int remain_actual = ringbuf->size - ringbuf->tail;
-	int ret, total_bytes, wait_bytes = 0;
-	bool need_wrap = false;
-
-	if (ringbuf->reserved_in_use)
-		total_bytes = bytes;
-	else
-		total_bytes = bytes + ringbuf->reserved_size;
-
-	if (unlikely(bytes > remain_usable)) {
-		/*
-		 * Not enough space for the basic request. So need to flush
-		 * out the remainder and then wait for base + reserved.
-		 */
-		wait_bytes = remain_actual + total_bytes;
-		need_wrap = true;
-	} else {
-		if (unlikely(total_bytes > remain_usable)) {
-			/*
-			 * The base request will fit but the reserved space
-			 * falls off the end. So don't need an immediate wrap
-			 * and only need to effectively wait for the reserved
-			 * size space from the start of ringbuffer.
-			 */
-			wait_bytes = remain_actual + ringbuf->reserved_size;
-		} else if (total_bytes > ringbuf->space) {
-			/* No wrapping required, just waiting. */
-			wait_bytes = total_bytes;
-		}
-	}
-
-	if (wait_bytes) {
-		ret = logical_ring_wait_for_space(req, wait_bytes);
-		if (unlikely(ret))
-			return ret;
-
-		if (need_wrap)
-			__wrap_ring_buffer(ringbuf);
-	}
-
-	return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @req: The request to start some new work for
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
-	struct drm_i915_private *dev_priv;
-	int ret;
-
-	WARN_ON(req == NULL);
-	dev_priv = req->ring->dev->dev_private;
-
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-				   dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
-	if (ret)
-		return ret;
-
-	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
-	return 0;
-}
-
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
 {
 	/*
@@ -910,7 +784,7 @@
 	 */
 	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 
-	return intel_logical_ring_begin(request, 0);
+	return intel_ring_begin(request, 0);
 }
 
 /**
@@ -935,9 +809,9 @@
 			       struct list_head *vmas)
 {
 	struct drm_device       *dev = params->dev;
-	struct intel_engine_cs  *ring = params->ring;
+	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -949,7 +823,7 @@
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -978,9 +852,9 @@
 	if (ret)
 		return ret;
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(params->request, 4);
+		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
@@ -996,116 +870,116 @@
 	exec_start = params->batch_obj_vm_offset +
 		     args->batch_start_offset;
 
-	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
-	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
 
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req, *tmp;
 	struct list_head retired_list;
 
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (list_empty(&ring->execlist_retired_req_list))
+	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+	if (list_empty(&engine->execlist_retired_req_list))
 		return;
 
 	INIT_LIST_HEAD(&retired_list);
-	spin_lock_irq(&ring->execlist_lock);
-	list_replace_init(&ring->execlist_retired_req_list, &retired_list);
-	spin_unlock_irq(&ring->execlist_lock);
+	spin_lock_bh(&engine->execlist_lock);
+	list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+	spin_unlock_bh(&engine->execlist_lock);
 
 	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
 		struct intel_context *ctx = req->ctx;
 		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
+				ctx->engine[engine->id].state;
 
 		if (ctx_obj && (ctx != req->i915->kernel_context))
-			intel_lr_context_unpin(ctx, ring);
+			intel_lr_context_unpin(ctx, engine);
 
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
 }
 
-void intel_logical_ring_stop(struct intel_engine_cs *ring)
+void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	int ret;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+	ret = intel_engine_idle(engine);
+	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  ring->name, ret);
+			  engine->name, ret);
 
 	/* TODO: Is this correct with Execlists enabled? */
-	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+	if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+		DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
 		return;
 	}
-	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 static int intel_lr_context_do_pin(struct intel_context *ctx,
-				   struct intel_engine_cs *ring)
+				   struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
-	struct page *lrc_state_page;
-	uint32_t *lrc_reg_state;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+	void *vaddr;
+	u32 *lrc_reg_state;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
 
 	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
 			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
 		return ret;
 
-	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	if (WARN_ON(!lrc_state_page)) {
-		ret = -ENODEV;
+	vaddr = i915_gem_object_pin_map(ctx_obj);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
 		goto unpin_ctx_obj;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
-	if (ret)
-		goto unpin_ctx_obj;
+	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
-	intel_lr_context_descriptor_update(ctx, ring);
-	lrc_reg_state = kmap(lrc_state_page);
+	ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+	if (ret)
+		goto unpin_map;
+
+	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+	intel_lr_context_descriptor_update(ctx, engine);
 	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
-	ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
+	ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
 	ctx_obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
@@ -1114,6 +988,8 @@
 
 	return ret;
 
+unpin_map:
+	i915_gem_object_unpin_map(ctx_obj);
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 
@@ -1146,7 +1022,7 @@
 
 	WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
 	if (--ctx->engine[engine->id].pin_count == 0) {
-		kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+		i915_gem_object_unpin_map(ctx_obj);
 		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
 		i915_gem_object_ggtt_unpin(ctx_obj);
 		ctx->engine[engine->id].lrc_vma = NULL;
@@ -1160,21 +1036,21 @@
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+	ret = intel_ring_begin(req, w->count * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1187,7 +1063,7 @@
 
 	intel_logical_ring_advance(ringbuf);
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1223,7 +1099,7 @@
  * This WA is also required for Gen9 so extracting as a function avoids
  * code duplication.
  */
-static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 						uint32_t *const batch,
 						uint32_t index)
 {
@@ -1235,13 +1111,13 @@
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1259,7 +1135,7 @@
 	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
 				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
 	wa_ctx_emit(batch, index, 0);
 
 	return index;
@@ -1312,7 +1188,7 @@
  * Return: non-zero if we exceed the PAGE_SIZE limit.
  */
 
-static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 				    struct i915_wa_ctx_bb *wa_ctx,
 				    uint32_t *const batch,
 				    uint32_t *offset)
@@ -1324,8 +1200,8 @@
 	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-	if (IS_BROADWELL(ring->dev)) {
-		int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+	if (IS_BROADWELL(engine->dev)) {
+		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
 		if (rc < 0)
 			return rc;
 		index = rc;
@@ -1333,7 +1209,7 @@
 
 	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
 	/* Actual scratch location is at 128 bytes offset */
-	scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
 
 	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
 	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1375,7 +1251,7 @@
  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
  *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
  */
-static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
 			       struct i915_wa_ctx_bb *wa_ctx,
 			       uint32_t *const batch,
 			       uint32_t *offset)
@@ -1390,13 +1266,13 @@
 	return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 				    struct i915_wa_ctx_bb *wa_ctx,
 				    uint32_t *const batch,
 				    uint32_t *offset)
 {
 	int ret;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1405,7 +1281,7 @@
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
-	ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+	ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
 	if (ret < 0)
 		return ret;
 	index = ret;
@@ -1417,12 +1293,12 @@
 	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 }
 
-static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 			       struct i915_wa_ctx_bb *wa_ctx,
 			       uint32_t *const batch,
 			       uint32_t *offset)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1435,6 +1311,25 @@
 		wa_ctx_emit(batch, index, MI_NOOP);
 	}
 
+	/* WaClearTdlStateAckDirtyBits:bxt */
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
+
+		wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
+		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
+		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
+		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+		wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
+		/* dummy write to CS, mask bits are 0 to ensure the register is not modified */
+		wa_ctx_emit(batch, index, 0x0);
+		wa_ctx_emit(batch, index, MI_NOOP);
+	}
+
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
 	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
 	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
@@ -1445,60 +1340,61 @@
 	return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
 	int ret;
 
-	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
-	if (!ring->wa_ctx.obj) {
+	engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
+						   PAGE_ALIGN(size));
+	if (!engine->wa_ctx.obj) {
 		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
 		return -ENOMEM;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
 	if (ret) {
 		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
 				 ret);
-		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		drm_gem_object_unreference(&engine->wa_ctx.obj->base);
 		return ret;
 	}
 
 	return 0;
 }
 
-static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
 {
-	if (ring->wa_ctx.obj) {
-		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
-		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
-		ring->wa_ctx.obj = NULL;
+	if (engine->wa_ctx.obj) {
+		i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
+		drm_gem_object_unreference(&engine->wa_ctx.obj->base);
+		engine->wa_ctx.obj = NULL;
 	}
 }
 
-static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 {
 	int ret;
 	uint32_t *batch;
 	uint32_t offset;
 	struct page *page;
-	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
 
-	WARN_ON(ring->id != RCS);
+	WARN_ON(engine->id != RCS);
 
 	/* update this when WA for higher Gen are added */
-	if (INTEL_INFO(ring->dev)->gen > 9) {
+	if (INTEL_INFO(engine->dev)->gen > 9) {
 		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-			  INTEL_INFO(ring->dev)->gen);
+			  INTEL_INFO(engine->dev)->gen);
 		return 0;
 	}
 
 	/* some WA perform writes to scratch page, ensure it is valid */
-	if (ring->scratch.obj == NULL) {
-		DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+	if (engine->scratch.obj == NULL) {
+		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
 		return -EINVAL;
 	}
 
-	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
 		return ret;
@@ -1508,29 +1404,29 @@
 	batch = kmap_atomic(page);
 	offset = 0;
 
-	if (INTEL_INFO(ring->dev)->gen == 8) {
-		ret = gen8_init_indirectctx_bb(ring,
+	if (INTEL_INFO(engine->dev)->gen == 8) {
+		ret = gen8_init_indirectctx_bb(engine,
 					       &wa_ctx->indirect_ctx,
 					       batch,
 					       &offset);
 		if (ret)
 			goto out;
 
-		ret = gen8_init_perctx_bb(ring,
+		ret = gen8_init_perctx_bb(engine,
 					  &wa_ctx->per_ctx,
 					  batch,
 					  &offset);
 		if (ret)
 			goto out;
-	} else if (INTEL_INFO(ring->dev)->gen == 9) {
-		ret = gen9_init_indirectctx_bb(ring,
+	} else if (INTEL_INFO(engine->dev)->gen == 9) {
+		ret = gen9_init_indirectctx_bb(engine,
 					       &wa_ctx->indirect_ctx,
 					       batch,
 					       &offset);
 		if (ret)
 			goto out;
 
-		ret = gen9_init_perctx_bb(ring,
+		ret = gen9_init_perctx_bb(engine,
 					  &wa_ctx->per_ctx,
 					  batch,
 					  &offset);
@@ -1541,27 +1437,36 @@
 out:
 	kunmap_atomic(batch);
 	if (ret)
-		lrc_destroy_wa_ctx_obj(ring);
+		lrc_destroy_wa_ctx_obj(engine);
 
 	return ret;
 }
 
-static int gen8_init_common_ring(struct intel_engine_cs *ring)
+static void lrc_init_hws(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+		   (u32)engine->status_page.gfx_addr);
+	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
+static int gen8_init_common_ring(struct intel_engine_cs *engine)
+{
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u8 next_context_status_buffer_hw;
+	unsigned int next_context_status_buffer_hw;
 
-	lrc_setup_hardware_status_page(ring,
-				dev_priv->kernel_context->engine[ring->id].state);
+	lrc_init_hws(engine);
 
-	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+	I915_WRITE_IMR(engine,
+		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
-	I915_WRITE(RING_MODE_GEN7(ring),
+	I915_WRITE(RING_MODE_GEN7(engine),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-	POSTING_READ(RING_MODE_GEN7(ring));
+	POSTING_READ(RING_MODE_GEN7(engine));
 
 	/*
 	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1576,7 +1481,7 @@
 	 * BXT  |         ?                |         ?            |
 	 */
 	next_context_status_buffer_hw =
-		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
+		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
 
 	/*
 	 * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1586,21 +1491,21 @@
 	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
 		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
-	ring->next_context_status_buffer = next_context_status_buffer_hw;
-	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+	engine->next_context_status_buffer = next_context_status_buffer_hw;
+	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+	intel_engine_init_hangcheck(engine);
 
-	return 0;
+	return intel_mocs_init_engine(engine);
 }
 
-static int gen8_init_render_ring(struct intel_engine_cs *ring)
+static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = gen8_init_common_ring(ring);
+	ret = gen8_init_common_ring(engine);
 	if (ret)
 		return ret;
 
@@ -1614,29 +1519,29 @@
 
 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	return init_workarounds_ring(ring);
+	return init_workarounds_ring(engine);
 }
 
-static int gen9_init_render_ring(struct intel_engine_cs *ring)
+static int gen9_init_render_ring(struct intel_engine_cs *engine)
 {
 	int ret;
 
-	ret = gen8_init_common_ring(ring);
+	ret = gen8_init_common_ring(engine);
 	if (ret)
 		return ret;
 
-	return init_workarounds_ring(ring);
+	return init_workarounds_ring(engine);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
-	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1644,9 +1549,11 @@
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf,
+					    GEN8_RING_PDP_UDW(engine, i));
 		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit_reg(ringbuf,
+					    GEN8_RING_PDP_LDW(engine, i));
 		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
 	}
 
@@ -1670,7 +1577,7 @@
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
 	if (req->ctx->ppgtt &&
-	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
 		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
 		    !intel_vgpu_active(req->i915->dev)) {
 			ret = intel_logical_ring_emit_pdps(req);
@@ -1678,10 +1585,10 @@
 				return ret;
 		}
 
-		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_logical_ring_begin(req, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1698,9 +1605,9 @@
 	return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1708,25 +1615,26 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	if (engine->irq_refcount++ == 0) {
+		I915_WRITE_IMR(engine,
+			       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+		POSTING_READ(RING_IMR(engine->mmio_base));
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	if (--engine->irq_refcount == 0) {
+		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+		POSTING_READ(RING_IMR(engine->mmio_base));
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1736,13 +1644,13 @@
 			   u32 unused)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = ringbuf->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 4);
+	ret = intel_ring_begin(request, 4);
 	if (ret)
 		return ret;
 
@@ -1757,7 +1665,7 @@
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (ring == &dev_priv->ring[VCS])
+		if (engine == &dev_priv->engine[VCS])
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -1777,8 +1685,8 @@
 				  u32 flush_domains)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = ringbuf->engine;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false;
 	u32 flags = 0;
 	int ret;
@@ -1806,11 +1714,11 @@
 		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 		 * pipe control.
 		 */
-		if (IS_GEN9(ring->dev))
+		if (IS_GEN9(engine->dev))
 			vf_flush_wa = true;
 	}
 
-	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+	ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
 	if (ret)
 		return ret;
 
@@ -1834,19 +1742,18 @@
 	return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine)
 {
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
-static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 {
-
 	/*
 	 * On BXT A steppings there is a HW coherency issue whereby the
 	 * MI_STORE_DATA_IMM storing the completed request's seqno
@@ -1857,19 +1764,15 @@
 	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
 	 * this clflush in practice becomes an invalidate operation.
 	 */
-
-	if (!lazy_coherency)
-		intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
-
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 
 	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
-	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+	intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
 /*
@@ -1889,7 +1792,7 @@
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
@@ -1899,7 +1802,7 @@
 	intel_logical_ring_emit(ringbuf,
 				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
 	intel_logical_ring_emit(ringbuf,
-				hws_seqno_address(request->ring) |
+				hws_seqno_address(request->engine) |
 				MI_FLUSH_DW_USE_GTT);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1913,7 +1816,7 @@
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
@@ -1929,7 +1832,7 @@
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
 				 PIPE_CONTROL_QW_WRITE));
-	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	/* We're thrashing one dword of HWS. */
@@ -1944,19 +1847,19 @@
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->ring, &so);
+	ret = i915_gem_render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
 				       I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	ret = req->ring->emit_bb_start(req,
+	ret = req->engine->emit_bb_start(req,
 				       (so.ggtt_offset + so.aux_batch_offset),
 				       I915_DISPATCH_SECURE);
 	if (ret)
@@ -1994,146 +1897,197 @@
  * @ring: Engine Command Streamer.
  *
  */
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = ring->dev->dev_private;
+	/*
+	 * Tasklet cannot be active at this point due to intel_mark_active/idle
+	 * so this is just for documentation.
+	 */
+	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
+		tasklet_kill(&engine->irq_tasklet);
 
-	if (ring->buffer) {
-		intel_logical_ring_stop(ring);
-		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+	dev_priv = engine->dev->dev_private;
+
+	if (engine->buffer) {
+		intel_logical_ring_stop(engine);
+		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 	}
 
-	if (ring->cleanup)
-		ring->cleanup(ring);
+	if (engine->cleanup)
+		engine->cleanup(engine);
 
-	i915_cmd_parser_fini_ring(ring);
-	i915_gem_batch_pool_fini(&ring->batch_pool);
+	i915_cmd_parser_fini_ring(engine);
+	i915_gem_batch_pool_fini(&engine->batch_pool);
 
-	if (ring->status_page.obj) {
-		kunmap(sg_page(ring->status_page.obj->pages->sgl));
-		ring->status_page.obj = NULL;
+	if (engine->status_page.obj) {
+		i915_gem_object_unpin_map(engine->status_page.obj);
+		engine->status_page.obj = NULL;
 	}
 
-	ring->disable_lite_restore_wa = false;
-	ring->ctx_desc_template = 0;
+	engine->idle_lite_restore_wa = 0;
+	engine->disable_lite_restore_wa = false;
+	engine->ctx_desc_template = 0;
 
-	lrc_destroy_wa_ctx_obj(ring);
-	ring->dev = NULL;
+	lrc_destroy_wa_ctx_obj(engine);
+	engine->dev = NULL;
 }
 
 static void
 logical_ring_default_vfuncs(struct drm_device *dev,
-			    struct intel_engine_cs *ring)
+			    struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
-	ring->init_hw = gen8_init_common_ring;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	engine->init_hw = gen8_init_common_ring;
+	engine->emit_request = gen8_emit_request;
+	engine->emit_flush = gen8_emit_flush;
+	engine->irq_get = gen8_logical_ring_get_irq;
+	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->emit_bb_start = gen8_emit_bb_start;
+	engine->get_seqno = gen8_get_seqno;
+	engine->set_seqno = gen8_set_seqno;
 	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
-	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
+		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
+		engine->set_seqno = bxt_a_set_seqno;
 	}
 }
 
 static inline void
-logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
 {
-	ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
-	ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
 static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+lrc_setup_hws(struct intel_engine_cs *engine,
+	      struct drm_i915_gem_object *dctx_obj)
 {
-	struct intel_context *dctx = to_i915(dev)->kernel_context;
+	void *hws;
+
+	/* The HWSP is part of the default context object in LRC mode. */
+	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+				       LRC_PPHWSP_PN * PAGE_SIZE;
+	hws = i915_gem_object_pin_map(dctx_obj);
+	if (IS_ERR(hws))
+		return PTR_ERR(hws);
+	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+	engine->status_page.obj = dctx_obj;
+
+	return 0;
+}
+
+static int
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_context *dctx = dev_priv->kernel_context;
+	enum forcewake_domains fw_domains;
 	int ret;
 
 	/* Intentionally left blank. */
-	ring->buffer = NULL;
+	engine->buffer = NULL;
 
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-	i915_gem_batch_pool_init(dev, &ring->batch_pool);
-	init_waitqueue_head(&ring->irq_queue);
+	engine->dev = dev;
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
+	i915_gem_batch_pool_init(dev, &engine->batch_pool);
+	init_waitqueue_head(&engine->irq_queue);
 
-	INIT_LIST_HEAD(&ring->buffers);
-	INIT_LIST_HEAD(&ring->execlist_queue);
-	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
-	spin_lock_init(&ring->execlist_lock);
+	INIT_LIST_HEAD(&engine->buffers);
+	INIT_LIST_HEAD(&engine->execlist_queue);
+	INIT_LIST_HEAD(&engine->execlist_retired_req_list);
+	spin_lock_init(&engine->execlist_lock);
 
-	logical_ring_init_platform_invariants(ring);
+	tasklet_init(&engine->irq_tasklet,
+		     intel_lrc_irq_handler, (unsigned long)engine);
 
-	ret = i915_cmd_parser_init_ring(ring);
+	logical_ring_init_platform_invariants(engine);
+
+	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+						    RING_ELSP(engine),
+						    FW_REG_WRITE);
+
+	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+						     RING_CONTEXT_STATUS_PTR(engine),
+						     FW_REG_READ | FW_REG_WRITE);
+
+	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+						     RING_CONTEXT_STATUS_BUF_BASE(engine),
+						     FW_REG_READ);
+
+	engine->fw_domains = fw_domains;
+
+	ret = i915_cmd_parser_init_ring(engine);
 	if (ret)
 		goto error;
 
-	ret = intel_lr_context_deferred_alloc(dctx, ring);
+	ret = intel_lr_context_deferred_alloc(dctx, engine);
 	if (ret)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(dctx, ring);
+	ret = intel_lr_context_do_pin(dctx, engine);
 	if (ret) {
 		DRM_ERROR(
 			"Failed to pin and map ringbuffer %s: %d\n",
-			ring->name, ret);
+			engine->name, ret);
+		goto error;
+	}
+
+	/* And setup the hardware status page. */
+	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+	if (ret) {
+		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
 		goto error;
 	}
 
 	return 0;
 
 error:
-	intel_logical_ring_cleanup(ring);
+	intel_logical_ring_cleanup(engine);
 	return ret;
 }
 
 static int logical_render_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->exec_id = I915_EXEC_RENDER;
-	ring->guc_id = GUC_RENDER_ENGINE;
-	ring->mmio_base = RENDER_RING_BASE;
+	engine->name = "render ring";
+	engine->id = RCS;
+	engine->exec_id = I915_EXEC_RENDER;
+	engine->guc_id = GUC_RENDER_ENGINE;
+	engine->mmio_base = RENDER_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
+	logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
 	if (HAS_L3_DPF(dev))
-		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_vfuncs(dev, engine);
 
 	/* Override some for render ring. */
 	if (INTEL_INFO(dev)->gen >= 9)
-		ring->init_hw = gen9_init_render_ring;
+		engine->init_hw = gen9_init_render_ring;
 	else
-		ring->init_hw = gen8_init_render_ring;
-	ring->init_context = gen8_init_rcs_context;
-	ring->cleanup = intel_fini_pipe_control;
-	ring->emit_flush = gen8_emit_flush_render;
-	ring->emit_request = gen8_emit_request_render;
+		engine->init_hw = gen8_init_render_ring;
+	engine->init_context = gen8_init_rcs_context;
+	engine->cleanup = intel_fini_pipe_control;
+	engine->emit_flush = gen8_emit_flush_render;
+	engine->emit_request = gen8_emit_request_render;
 
-	ring->dev = dev;
+	engine->dev = dev;
 
-	ret = intel_init_pipe_control(ring);
+	ret = intel_init_pipe_control(engine);
 	if (ret)
 		return ret;
 
-	ret = intel_init_workaround_bb(ring);
+	ret = intel_init_workaround_bb(engine);
 	if (ret) {
 		/*
 		 * We continue even if we fail to initialize WA batch
@@ -2144,9 +2098,9 @@
 			  ret);
 	}
 
-	ret = logical_ring_init(dev, ring);
+	ret = logical_ring_init(dev, engine);
 	if (ret) {
-		lrc_destroy_wa_ctx_obj(ring);
+		lrc_destroy_wa_ctx_obj(engine);
 	}
 
 	return ret;
@@ -2155,69 +2109,69 @@
 static int logical_bsd_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
-	ring->name = "bsd ring";
-	ring->id = VCS;
-	ring->exec_id = I915_EXEC_BSD;
-	ring->guc_id = GUC_VIDEO_ENGINE;
-	ring->mmio_base = GEN6_BSD_RING_BASE;
+	engine->name = "bsd ring";
+	engine->id = VCS;
+	engine->exec_id = I915_EXEC_BSD;
+	engine->guc_id = GUC_VIDEO_ENGINE;
+	engine->mmio_base = GEN6_BSD_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 static int logical_bsd2_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
-	ring->name = "bsd2 ring";
-	ring->id = VCS2;
-	ring->exec_id = I915_EXEC_BSD;
-	ring->guc_id = GUC_VIDEO_ENGINE2;
-	ring->mmio_base = GEN8_BSD2_RING_BASE;
+	engine->name = "bsd2 ring";
+	engine->id = VCS2;
+	engine->exec_id = I915_EXEC_BSD;
+	engine->guc_id = GUC_VIDEO_ENGINE2;
+	engine->mmio_base = GEN8_BSD2_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 static int logical_blt_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
-	ring->name = "blitter ring";
-	ring->id = BCS;
-	ring->exec_id = I915_EXEC_BLT;
-	ring->guc_id = GUC_BLITTER_ENGINE;
-	ring->mmio_base = BLT_RING_BASE;
+	engine->name = "blitter ring";
+	engine->id = BCS;
+	engine->exec_id = I915_EXEC_BLT;
+	engine->guc_id = GUC_BLITTER_ENGINE;
+	engine->mmio_base = BLT_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 static int logical_vebox_ring_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
-	ring->name = "video enhancement ring";
-	ring->id = VECS;
-	ring->exec_id = I915_EXEC_VEBOX;
-	ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
-	ring->mmio_base = VEBOX_RING_BASE;
+	engine->name = "video enhancement ring";
+	engine->id = VECS;
+	engine->exec_id = I915_EXEC_VEBOX;
+	engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
+	engine->mmio_base = VEBOX_RING_BASE;
 
-	logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
-	logical_ring_default_vfuncs(dev, ring);
+	logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, engine);
 
-	return logical_ring_init(dev, ring);
+	return logical_ring_init(dev, engine);
 }
 
 /**
@@ -2225,7 +2179,7 @@
  * @dev: DRM device.
  *
  * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+ * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
  * those engines that are present in the hardware.
  *
  * Return: non-zero if the initialization failed.
@@ -2266,13 +2220,13 @@
 	return 0;
 
 cleanup_vebox_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+	intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+	intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+	intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+	intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
 
 	return ret;
 }
@@ -2320,13 +2274,13 @@
 	return rpcs;
 }
 
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
 	u32 indirect_ctx_offset;
 
-	switch (INTEL_INFO(ring->dev)->gen) {
+	switch (INTEL_INFO(engine->dev)->gen) {
 	default:
-		MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+		MISSING_CASE(INTEL_INFO(engine->dev)->gen);
 		/* fall through */
 	case 9:
 		indirect_ctx_offset =
@@ -2342,14 +2296,16 @@
 }
 
 static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
-		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+populate_lr_context(struct intel_context *ctx,
+		    struct drm_i915_gem_object *ctx_obj,
+		    struct intel_engine_cs *engine,
+		    struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-	struct page *page;
-	uint32_t *reg_state;
+	void *vaddr;
+	u32 *reg_state;
 	int ret;
 
 	if (!ppgtt)
@@ -2361,18 +2317,17 @@
 		return ret;
 	}
 
-	ret = i915_gem_object_get_pages(ctx_obj);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not get object pages\n");
+	vaddr = i915_gem_object_pin_map(ctx_obj);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-
-	i915_gem_object_pin_pages(ctx_obj);
+	ctx_obj->dirty = true;
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
+	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
 	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
 	 * commands followed by (reg, value) pairs. The values we are setting here are
@@ -2380,33 +2335,47 @@
 	 * recreate this batchbuffer with new values (including all the missing
 	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
 	reg_state[CTX_LRI_HEADER_0] =
-		MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
-	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
+		       RING_CONTEXT_CONTROL(engine),
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 					  (HAS_RESOURCE_STREAMER(dev) ?
 					    CTX_CTRL_RS_CTX_ENABLE : 0)));
-	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
+		       0);
 	/* Ring buffer start address is not known until the buffer is pinned.
 	 * It is written to the context image in execlists_update_context()
 	 */
-	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
+		       RING_START(engine->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
+		       RING_CTL(engine->mmio_base),
 		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
-	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
+		       RING_BBADDR_UDW(engine->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
+		       RING_BBADDR(engine->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
+		       RING_BBSTATE(engine->mmio_base),
 		       RING_BB_PPGTT);
-	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
-	if (ring->id == RCS) {
-		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
-		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
-		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
-		if (ring->wa_ctx.obj) {
-			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
+		       RING_SBBADDR_UDW(engine->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
+		       RING_SBBADDR(engine->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
+		       RING_SBBSTATE(engine->mmio_base), 0);
+	if (engine->id == RCS) {
+		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
+			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
+		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
+			       RING_INDIRECT_CTX(engine->mmio_base), 0);
+		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
+			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
+		if (engine->wa_ctx.obj) {
+			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
 			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
 
 			reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2414,7 +2383,7 @@
 				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
 
 			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
-				intel_lr_indirect_ctx_offset(ring) << 6;
+				intel_lr_indirect_ctx_offset(engine) << 6;
 
 			reg_state[CTX_BB_PER_CTX_PTR+1] =
 				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2422,16 +2391,25 @@
 		}
 	}
 	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
-	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
+		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
 	/* PDP values well be assigned later if needed */
-	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
-	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
+		       0);
+	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
+		       0);
 
 	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
 		/* 64b PPGTT (48bit canonical)
@@ -2445,20 +2423,16 @@
 		 * With dynamic page allocation, PDPs may not be allocated at
 		 * this point. Point the unallocated PDPs to the scratch page
 		 */
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+		execlists_update_context_pdps(ppgtt, reg_state);
 	}
 
-	if (ring->id == RCS) {
+	if (engine->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
 			       make_rpcs(dev));
 	}
 
-	kunmap_atomic(reg_state);
-	i915_gem_object_unpin_pages(ctx_obj);
+	i915_gem_object_unpin_map(ctx_obj);
 
 	return 0;
 }
@@ -2475,7 +2449,7 @@
 {
 	int i;
 
-	for (i = I915_NUM_RINGS; --i >= 0; ) {
+	for (i = I915_NUM_ENGINES; --i >= 0; ) {
 		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
@@ -2485,6 +2459,7 @@
 		if (ctx == ctx->i915->kernel_context) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
+			i915_gem_object_unpin_map(ctx_obj);
 		}
 
 		WARN_ON(ctx->engine[i].pin_count);
@@ -2507,15 +2482,15 @@
  * in LRC mode, but does not include the "shared data page" used with
  * GuC submission. The caller should account for this if using the GuC.
  */
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
+	WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
 
-	switch (ring->id) {
+	switch (engine->id) {
 	case RCS:
-		if (INTEL_INFO(ring->dev)->gen >= 9)
+		if (INTEL_INFO(engine->dev)->gen >= 9)
 			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
 		else
 			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2531,24 +2506,6 @@
 	return ret;
 }
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *default_ctx_obj)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct page *page;
-
-	/* The HWSP is part of the default context object in LRC mode. */
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
-			+ LRC_PPHWSP_PN * PAGE_SIZE;
-	page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
-	ring->status_page.page_addr = kmap(page);
-	ring->status_page.obj = default_ctx_obj;
-
-	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			(u32)ring->status_page.gfx_addr);
-	POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-}
-
 /**
  * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
@@ -2564,18 +2521,18 @@
  */
 
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-				    struct intel_engine_cs *ring)
+				    struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-	WARN_ON(ctx->engine[ring->id].state);
+	WARN_ON(ctx->engine[engine->id].state);
 
-	context_size = round_up(intel_lr_context_size(ring), 4096);
+	context_size = round_up(intel_lr_context_size(engine), 4096);
 
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2586,39 +2543,38 @@
 		return -ENOMEM;
 	}
 
-	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+	ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
 		goto error_deref_obj;
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
 		goto error_ringbuf;
 	}
 
-	ctx->engine[ring->id].ringbuf = ringbuf;
-	ctx->engine[ring->id].state = ctx_obj;
+	ctx->engine[engine->id].ringbuf = ringbuf;
+	ctx->engine[engine->id].state = ctx_obj;
 
-	if (ctx != ctx->i915->kernel_context && ring->init_context) {
+	if (ctx != ctx->i915->kernel_context && engine->init_context) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_request_alloc(ring, ctx);
+		req = i915_gem_request_alloc(engine, ctx);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
 			DRM_ERROR("ring create req: %d\n", ret);
 			goto error_ringbuf;
 		}
 
-		ret = ring->init_context(req);
+		ret = engine->init_context(req);
+		i915_add_request_no_flush(req);
 		if (ret) {
 			DRM_ERROR("ring init context: %d\n",
 				ret);
-			i915_gem_request_cancel(req);
 			goto error_ringbuf;
 		}
-		i915_add_request_no_flush(req);
 	}
 	return 0;
 
@@ -2626,40 +2582,38 @@
 	intel_ringbuffer_free(ringbuf);
 error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
-	ctx->engine[ring->id].ringbuf = NULL;
-	ctx->engine[ring->id].state = NULL;
+	ctx->engine[engine->id].ringbuf = NULL;
+	ctx->engine[engine->id].state = NULL;
 	return ret;
 }
 
-void intel_lr_context_reset(struct drm_device *dev,
-			struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+			    struct intel_context *ctx)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
+				ctx->engine[engine->id].state;
 		struct intel_ringbuffer *ringbuf =
-				ctx->engine[ring->id].ringbuf;
+				ctx->engine[engine->id].ringbuf;
+		void *vaddr;
 		uint32_t *reg_state;
-		struct page *page;
 
 		if (!ctx_obj)
 			continue;
 
-		if (i915_gem_object_get_pages(ctx_obj)) {
-			WARN(1, "Failed get_pages for context obj\n");
+		vaddr = i915_gem_object_pin_map(ctx_obj);
+		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
-		}
-		page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-		reg_state = kmap_atomic(page);
+
+		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+		ctx_obj->dirty = true;
 
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		kunmap_atomic(reg_state);
+		i915_gem_object_unpin_map(ctx_obj);
 
 		ringbuf->head = 0;
 		ringbuf->tail = 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e6cda3e..60a7385 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
 #ifndef _INTEL_LRC_H_
 #define _INTEL_LRC_H_
 
+#include "intel_ringbuffer.h"
+
 #define GEN8_LR_CONTEXT_ALIGN 4096
 
 /* Execlists regs */
@@ -34,6 +36,7 @@
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
+#define RING_CONTEXT_STATUS_BUF_BASE(ring)	_MMIO((ring)->mmio_base + 0x370)
 #define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
 #define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
 #define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
@@ -57,10 +60,9 @@
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
-void intel_logical_ring_stop(struct intel_engine_cs *ring);
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+void intel_logical_ring_stop(struct intel_engine_cs *engine);
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 /**
@@ -98,18 +100,21 @@
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-				    struct intel_engine_cs *ring);
+				    struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct intel_context *ctx,
 			    struct intel_engine_cs *engine);
-void intel_lr_context_reset(struct drm_device *dev,
-			struct intel_context *ctx);
+
+struct drm_i915_private;
+
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+			    struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-				     struct intel_engine_cs *ring);
+				     struct intel_engine_cs *engine);
 
 u32 intel_execlists_ctx_id(struct intel_context *ctx,
-			   struct intel_engine_cs *ring);
+			   struct intel_engine_cs *engine);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -118,7 +123,6 @@
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas);
 
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 10dc351..bc53c0d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,7 +109,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	u32 tmp, flags = 0;
-	int dotclock;
 
 	tmp = I915_READ(lvds_encoder->reg);
 	if (tmp & LVDS_HSYNC_POLARITY)
@@ -134,12 +133,7 @@
 		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
 	}
 
-	dotclock = pipe_config->port_clock;
-
-	if (HAS_PCH_SPLIT(dev_priv->dev))
-		ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
-	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@@ -155,7 +149,7 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		assert_fdi_rx_pll_disabled(dev_priv, pipe);
 		assert_shared_dpll_disabled(dev_priv,
-					    intel_crtc_to_shared_dpll(crtc));
+					    crtc->config->shared_dpll);
 	} else {
 		assert_pll_disabled(dev_priv, pipe);
 	}
@@ -782,57 +776,6 @@
 	{ }	/* terminating entry */
 };
 
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the LVDS is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the LVDS is present.
- */
-static bool lvds_is_present_in_vbt(struct drm_device *dev,
-				   u8 *i2c_pin)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	if (!dev_priv->vbt.child_dev_num)
-		return true;
-
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		union child_device_config *uchild = dev_priv->vbt.child_dev + i;
-		struct old_child_dev_config *child = &uchild->old;
-
-		/* If the device type is not LFP, continue.
-		 * We have to check both the new identifiers as well as the
-		 * old for compatibility with some BIOSes.
-		 */
-		if (child->device_type != DEVICE_TYPE_INT_LFP &&
-		    child->device_type != DEVICE_TYPE_LFP)
-			continue;
-
-		if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
-			*i2c_pin = child->i2c_pin;
-
-		/* However, we cannot trust the BIOS writers to populate
-		 * the VBT correctly.  Since LVDS requires additional
-		 * information from AIM blocks, a non-zero addin offset is
-		 * a good indicator that the LVDS is actually present.
-		 */
-		if (child->addin_offset)
-			return true;
-
-		/* But even then some BIOS writers perform some black magic
-		 * and instantiate the device without reference to any
-		 * additional data.  Trust that if the VBT was written into
-		 * the OpRegion then they have validated the LVDS's existence.
-		 */
-		if (dev_priv->opregion.vbt)
-			return true;
-	}
-
-	return false;
-}
-
 static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
 {
 	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
@@ -982,14 +925,14 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((lvds & LVDS_DETECTED) == 0)
 			return;
-		if (dev_priv->vbt.edp_support) {
+		if (dev_priv->vbt.edp.support) {
 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
 			return;
 		}
 	}
 
 	pin = GMBUS_PIN_PANEL;
-	if (!lvds_is_present_in_vbt(dev, &pin)) {
+	if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
 		if ((lvds & LVDS_PORT_EN) == 0) {
 			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
 			return;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index fed7bea..6ba4bf7 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -128,9 +128,9 @@
 
 /**
  * get_mocs_settings()
- * @dev:        DRM device.
+ * @dev_priv:	i915 device.
  * @table:      Output table that will be made to point at appropriate
- *              MOCS values for the device.
+ *	      MOCS values for the device.
  *
  * This function will return the values of the MOCS table that needs to
  * be programmed for the platform. It will return the values that need
@@ -138,28 +138,28 @@
  *
  * Return: true if there are applicable MOCS settings for the device.
  */
-static bool get_mocs_settings(struct drm_device *dev,
+static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 			      struct drm_i915_mocs_table *table)
 {
 	bool result = false;
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		table->size  = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		table->size  = ARRAY_SIZE(broxton_mocs_table);
 		table->table = broxton_mocs_table;
 		result = true;
 	} else {
-		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+		WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
 			  "Platform that should have a MOCS table does not.\n");
 	}
 
 	return result;
 }
 
-static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
 {
 	switch (ring) {
 	case RCS:
@@ -179,10 +179,49 @@
 }
 
 /**
+ * intel_mocs_init_engine() - emit the mocs control table
+ * @engine:	The engine for whom to emit the registers.
+ *
+ * This function programs the MOCS control values for the given engine
+ * directly via MMIO writes, one register per table entry.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_mocs_table table;
+	unsigned int index;
+
+	if (!get_mocs_settings(dev_priv, &table))
+		return 0;
+
+	if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	for (index = 0; index < table.size; index++)
+		I915_WRITE(mocs_register(engine->id, index),
+			   table.table[index].control_value);
+
+	/*
+	 * Ok, now set the unused entries to uncached. These entries
+	 * are officially undefined and no contract for the contents
+	 * and settings is given for these entries.
+	 *
+	 * Entry 0 in the table is uncached - so we are just writing
+	 * that value to all the unused entries.
+	 */
+	for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
+		I915_WRITE(mocs_register(engine->id, index),
+			   table.table[0].control_value);
+
+	return 0;
+}
+
+/**
  * emit_mocs_control_table() - emit the mocs control table
  * @req:	Request to set up the MOCS table for.
  * @table:	The values to program into the control regs.
- * @ring:	The engine for whom to emit the registers.
  *
  * This function simply emits a MI_LOAD_REGISTER_IMM command for the
  * given table starting at the given address.
@@ -190,27 +229,26 @@
  * Return: 0 on success, otherwise the error status.
  */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
-				   const struct drm_i915_mocs_table *table,
-				   enum intel_ring_id ring)
+				   const struct drm_i915_mocs_table *table)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret) {
-		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (ret)
 		return ret;
-	}
 
 	intel_logical_ring_emit(ringbuf,
 				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
+		intel_logical_ring_emit_reg(ringbuf,
+					    mocs_register(engine, index));
 		intel_logical_ring_emit(ringbuf,
 					table->table[index].control_value);
 	}
@@ -224,8 +262,10 @@
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
-		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+		intel_logical_ring_emit_reg(ringbuf,
+					    mocs_register(engine, index));
+		intel_logical_ring_emit(ringbuf,
+					table->table[0].control_value);
 	}
 
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -234,6 +274,14 @@
 	return 0;
 }
 
+static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
+			       u16 low,
+			       u16 high)
+{
+	return table->table[low].l3cc_value |
+	       table->table[high].l3cc_value << 16;
+}
+
 /**
  * emit_mocs_l3cc_table() - emit the mocs control table
  * @req:	Request to set up the MOCS table for.
@@ -249,39 +297,31 @@
 				const struct drm_i915_mocs_table *table)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	unsigned int count;
 	unsigned int i;
-	u32 value;
-	u32 filler = (table->table[0].l3cc_value & 0xffff) |
-			((table->table[0].l3cc_value & 0xffff) << 16);
 	int ret;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret) {
-		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (ret)
 		return ret;
-	}
 
 	intel_logical_ring_emit(ringbuf,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
-	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
-		value = (table->table[count].l3cc_value & 0xffff) |
-			((table->table[count + 1].l3cc_value & 0xffff) << 16);
-
+	for (i = 0; i < table->size/2; i++) {
 		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, value);
+		intel_logical_ring_emit(ringbuf,
+					l3cc_combine(table, 2*i, 2*i+1));
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		value = (table->table[count].l3cc_value & 0xffff) |
-			((table->table[0].l3cc_value & 0xffff) << 16);
-	} else
-		value = filler;
+		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		i++;
+	}
 
 	/*
 	 * Now set the rest of the table to uncached - use entry 0 as
@@ -290,9 +330,7 @@
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
 		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, value);
-
-		value = filler;
+		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
 	}
 
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -302,6 +340,47 @@
 }
 
 /**
+ * intel_mocs_init_l3cc_table() - program the mocs control table
+ * @dev:      The device to be programmed.
+ *
+ * This function simply programs the mocs registers for the given table
+ * starting at the given address. This register set is programmed in pairs.
+ *
+ * These registers may get programmed more than once; it is simpler to
+ * re-program all 32 registers than to track when each was last
+ * programmed. We always reprogram with the same values, and only at
+ * context start.
+ *
+ * Return: Nothing.
+ */
+void intel_mocs_init_l3cc_table(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_mocs_table table;
+	unsigned int i;
+
+	if (!get_mocs_settings(dev_priv, &table))
+		return;
+
+	for (i = 0; i < table.size/2; i++)
+		I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
+
+	/* Odd table size - 1 left over */
+	if (table.size & 0x01) {
+		I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
+		i++;
+	}
+
+	/*
+	 * Now set the rest of the table to uncached - use entry 0,
+	 * which is uncached. Leave the last pair as initialised, as
+	 * they are reserved by the hardware.
+	 */
+	for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
+		I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
+}
+
+/**
  * intel_rcs_context_init_mocs() - program the MOCS register.
  * @req:	Request to set up the MOCS tables for.
  *
@@ -322,17 +401,11 @@
 	struct drm_i915_mocs_table t;
 	int ret;
 
-	if (get_mocs_settings(req->ring->dev, &t)) {
-		struct drm_i915_private *dev_priv = req->i915;
-		struct intel_engine_cs *ring;
-		enum intel_ring_id ring_id;
-
-		/* Program the control registers */
-		for_each_ring(ring, dev_priv, ring_id) {
-			ret = emit_mocs_control_table(req, &t, ring_id);
-			if (ret)
-				return ret;
-		}
+	if (get_mocs_settings(req->i915, &t)) {
+		/* Program the RCS control registers */
+		ret = emit_mocs_control_table(req, &t);
+		if (ret)
+			return ret;
 
 		/* Now program the l3cc registers */
 		ret = emit_mocs_l3cc_table(req, &t);
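
The new l3cc_combine() helper above makes the register layout explicit: each GEN9_LNCFCMOCS register holds two 16-bit l3cc values, the even-indexed entry in bits 15:0 and the odd-indexed one in bits 31:16, which is why both the emit path and intel_mocs_init_l3cc_table() walk the table in pairs and pair an odd trailing entry with entry 0. A standalone sketch of just that packing (struct and names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one MOCS table entry. */
struct mocs_entry {
	uint16_t l3cc_value;
};

/* Same packing as l3cc_combine(): 'low' in bits 15:0, 'high' in bits 31:16. */
static uint32_t l3cc_pack(const struct mocs_entry *table,
			  unsigned int low, unsigned int high)
{
	return (uint32_t)table[low].l3cc_value |
	       (uint32_t)table[high].l3cc_value << 16;
}

int main(void)
{
	/* Three-entry table, entry 0 being the uncached setting. */
	const struct mocs_entry table[] = { { 0x0010 }, { 0x0030 }, { 0x003b } };

	/* Entries 0 and 1 share the first register... */
	printf("LNCFCMOCS(0) = 0x%08x\n", l3cc_pack(table, 0, 1)); /* 0x00300010 */
	/* ...and the odd trailing entry 2 is paired with entry 0. */
	printf("LNCFCMOCS(1) = 0x%08x\n", l3cc_pack(table, 2, 0)); /* 0x0010003b */
	return 0;
}
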
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index 76e45b1..4640299 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -53,5 +53,7 @@
 #include "i915_drv.h"
 
 int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+void intel_mocs_init_l3cc_table(struct drm_device *dev);
+int intel_mocs_init_engine(struct intel_engine_cs *ring);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index c15718b..99e2603 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -34,12 +34,6 @@
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-#define PCI_ASLE		0xe4
-#define PCI_ASLS		0xfc
-#define PCI_SWSCI		0xe8
-#define PCI_SWSCI_SCISEL	(1 << 15)
-#define PCI_SWSCI_GSSCIE	(1 << 0)
-
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
 #define   ACPI_CLID 0x01ac /* current lid state indicator */
@@ -246,13 +240,12 @@
 
 #define MAX_DSLP	1500
 
-#ifdef CONFIG_ACPI
 static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_swsci *swsci = dev_priv->opregion.swsci;
 	u32 main_function, sub_function, scic;
-	u16 pci_swsci;
+	u16 swsci_val;
 	u32 dslp;
 
 	if (!swsci)
@@ -300,16 +293,16 @@
 	swsci->scic = scic;
 
 	/* Ensure SCI event is selected and event trigger is cleared. */
-	pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
-	if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
-		pci_swsci |= PCI_SWSCI_SCISEL;
-		pci_swsci &= ~PCI_SWSCI_GSSCIE;
-		pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+	pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+	if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
+		swsci_val |= SWSCI_SCISEL;
+		swsci_val &= ~SWSCI_GSSCIE;
+		pci_write_config_word(dev->pdev, SWSCI, swsci_val);
 	}
 
 	/* Use event trigger to tell bios to check the mail. */
-	pci_swsci |= PCI_SWSCI_GSSCIE;
-	pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+	swsci_val |= SWSCI_GSSCIE;
+	pci_write_config_word(dev->pdev, SWSCI, swsci_val);
 
 	/* Poll for the result. */
 #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -905,9 +898,6 @@
 			 opregion->swsci_gbda_sub_functions,
 			 opregion->swsci_sbcb_sub_functions);
 }
-#else /* CONFIG_ACPI */
-static inline void swsci_setup(struct drm_device *dev) {}
-#endif  /* CONFIG_ACPI */
 
 static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
 {
@@ -943,16 +933,14 @@
 	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
 	BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
-	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+	pci_read_config_dword(dev->pdev, ASLS, &asls);
 	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
 		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
 		return -ENOTSUPP;
 	}
 
-#ifdef CONFIG_ACPI
 	INIT_WORK(&opregion->asle_work, asle_work);
-#endif
 
 	base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
 	if (!base)
@@ -1024,3 +1012,31 @@
 	memunmap(base);
 	return err;
 }
+
+int
+intel_opregion_get_panel_type(struct drm_device *dev)
+{
+	u32 panel_details;
+	int ret;
+
+	ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	ret = (panel_details >> 8) & 0xff;
+	if (ret > 0x10) {
+		DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+		return -EINVAL;
+	}
+
+	/* fall back to VBT panel type? */
+	if (ret == 0x0) {
+		DRM_DEBUG_KMS("No panel type in OpRegion\n");
+		return -ENODEV;
+	}
+
+	return ret - 1;
+}
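
The added intel_opregion_get_panel_type() reads the panel type out of bits 15:8 of the SWSCI GBDA panel-details reply, rejects values above 0x10, treats 0 as "not provided", and otherwise returns a 0-based index. The decode is simple enough to show in isolation (the error values below are placeholders; the driver returns -EINVAL and -ENODEV respectively):

#include <stdint.h>
#include <stdio.h>

/*
 * Decode the panel type from a GBDA "panel details" word, mirroring the
 * checks in intel_opregion_get_panel_type(): bits 15:8 carry a 1-based
 * panel type, 0 means the field was not filled in, > 0x10 is invalid.
 */
static int decode_panel_type(uint32_t panel_details)
{
	int type = (panel_details >> 8) & 0xff;

	if (type > 0x10)
		return -1;		/* invalid (driver: -EINVAL) */
	if (type == 0)
		return -2;		/* not provided (driver: -ENODEV) */

	return type - 1;		/* 0-based index used by the caller */
}

int main(void)
{
	printf("%d\n", decode_panel_type(0x0200));	/* panel type 2 -> 1 */
	printf("%d\n", decode_panel_type(0x0000));	/* not provided -> -2 */
	printf("%d\n", decode_panel_type(0x2000));	/* 0x20 -> invalid -> -1 */
	return 0;
}
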
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9168413..bd38e49 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -190,13 +190,14 @@
 static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
-	struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+		regs = io_mapping_map_wc(ggtt->mappable,
 					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
 	return regs;
@@ -233,30 +234,30 @@
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	int ret;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	req = i915_gem_request_alloc(ring, NULL);
+	req = i915_gem_request_alloc(engine, NULL);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 4);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}
 
 	overlay->active = true;
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -267,7 +268,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
@@ -283,19 +284,19 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	req = i915_gem_request_alloc(ring, NULL);
+	req = i915_gem_request_alloc(engine, NULL);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 2);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}
 
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(engine, flip_addr);
+	intel_ring_advance(engine);
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -336,7 +337,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
@@ -349,33 +350,34 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	req = i915_gem_request_alloc(ring, NULL);
+	req = i915_gem_request_alloc(engine, NULL);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}
 
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(engine, flip_addr);
+	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 	} else {
-		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(ring, flip_addr);
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(engine, flip_addr);
+		intel_ring_emit(engine,
+				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -408,7 +410,7 @@
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -423,19 +425,20 @@
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_request_alloc(ring, NULL);
+		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
 		ret = intel_ring_begin(req, 2);
 		if (ret) {
-			i915_gem_request_cancel(req);
+			i915_add_request_no_flush(req);
 			return ret;
 		}
 
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine,
+				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_advance(engine);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
@@ -1124,7 +1127,7 @@
 	}
 	crtc = to_intel_crtc(drmmode_crtc);
 
-	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+	new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
 						   put_image_rec->bo_handle));
 	if (&new_bo->base == NULL) {
 		ret = -ENOENT;
@@ -1479,7 +1482,8 @@
 static struct overlay_registers __iomem *
 intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 {
-	struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1488,7 +1492,7 @@
 		regs = (struct overlay_registers __iomem *)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+		regs = io_mapping_map_atomic_wc(ggtt->mappable,
 						i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
 	return regs;
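
A side note on the overlay request paths above: the dword count passed to intel_ring_begin() (4, 2 and 6 in the three sequences) reserves ring space for exactly the intel_ring_emit() calls that follow, with MI_NOOP rounding the sequence out where needed, before intel_ring_advance() publishes the new tail. A toy model of that reserve-then-emit contract, outside the driver (all names below are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_RING_DWORDS 64

/* A toy ring buffer where space must be reserved before dwords are emitted. */
struct toy_ring {
	uint32_t buf[TOY_RING_DWORDS];
	unsigned int tail;		/* next write position, in dwords */
	unsigned int reserved;		/* dwords still available to emit */
};

static void ring_begin(struct toy_ring *ring, unsigned int dwords)
{
	ring->reserved = dwords;	/* a real driver also waits for space */
}

static void ring_emit(struct toy_ring *ring, uint32_t dword)
{
	assert(ring->reserved > 0);	/* emitting more than reserved is a bug */
	ring->buf[ring->tail] = dword;
	ring->tail = (ring->tail + 1) % TOY_RING_DWORDS;
	ring->reserved--;
}

static void ring_advance(struct toy_ring *ring)
{
	/* A real driver would write the hardware tail register here. */
	printf("tail now at dword %u\n", ring->tail);
}

int main(void)
{
	struct toy_ring ring = { .tail = 0, .reserved = 0 };

	ring_begin(&ring, 2);
	ring_emit(&ring, 0x1);		/* e.g. a flip command dword */
	ring_emit(&ring, 0x2);		/* e.g. the flip address */
	ring_advance(&ring);
	return 0;
}
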
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21ee647..8357d57 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -504,7 +504,7 @@
 	if (panel->backlight.combination_mode) {
 		u8 lbpc;
 
-		pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
+		pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
 		val *= lbpc;
 	}
 
@@ -592,7 +592,7 @@
 
 		lbpc = level * 0xfe / panel->backlight.max + 1;
 		level /= lbpc;
-		pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
+		pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
 	}
 
 	if (IS_GEN4(dev_priv)) {
@@ -1240,7 +1240,7 @@
  */
 static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	return KHz(19200) / pwm_freq_hz;
+	return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
 }
 
 /*
@@ -1251,16 +1251,14 @@
 static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	u32 mul, clock;
+	u32 mul;
 
 	if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
 		mul = 128;
 	else
 		mul = 16;
 
-	clock = MHz(24);
-
-	return clock / (pwm_freq_hz * mul);
+	return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
 }
 
 /*
@@ -1283,7 +1281,7 @@
 	else
 		clock = MHz(24); /* LPT:LP */
 
-	return clock / (pwm_freq_hz * mul);
+	return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
 }
 
 /*
@@ -1292,10 +1290,9 @@
  */
 static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	struct drm_device *dev = connector->base.dev;
-	int clock = MHz(intel_pch_rawclk(dev));
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	return clock / (pwm_freq_hz * 128);
+	return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
 }
 
 /*
@@ -1308,16 +1305,15 @@
  */
 static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int clock;
 
-	if (IS_PINEVIEW(dev))
-		clock = MHz(intel_hrawclk(dev));
+	if (IS_PINEVIEW(dev_priv))
+		clock = KHz(dev_priv->rawclk_freq);
 	else
-		clock = 1000 * dev_priv->cdclk_freq;
+		clock = KHz(dev_priv->cdclk_freq);
 
-	return clock / (pwm_freq_hz * 32);
+	return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
 }
 
 /*
@@ -1332,11 +1328,11 @@
 	int clock;
 
 	if (IS_G4X(dev_priv))
-		clock = MHz(intel_hrawclk(dev));
+		clock = KHz(dev_priv->rawclk_freq);
 	else
-		clock = 1000 * dev_priv->cdclk_freq;
+		clock = KHz(dev_priv->cdclk_freq);
 
-	return clock / (pwm_freq_hz * 128);
+	return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
 }
 
 /*
@@ -1346,19 +1342,21 @@
  */
 static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int clock;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	int mul, clock;
 
 	if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
-		if (IS_CHERRYVIEW(dev))
-			return KHz(19200) / (pwm_freq_hz * 16);
+		if (IS_CHERRYVIEW(dev_priv))
+			clock = KHz(19200);
 		else
-			return MHz(25) / (pwm_freq_hz * 16);
+			clock = MHz(25);
+		mul = 16;
 	} else {
-		clock = intel_hrawclk(dev);
-		return MHz(clock) / (pwm_freq_hz * 128);
+		clock = KHz(dev_priv->rawclk_freq);
+		mul = 128;
 	}
+
+	return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
 }
 
 static u32 get_backlight_max_vbt(struct intel_connector *connector)
@@ -1640,6 +1638,12 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to
+	 * the atomic PWM API.
+	 */
+	pwm_apply_args(panel->backlight.pwm);
+
 	retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
 			    CRC_PMIC_PWM_PERIOD_NS);
 	if (retval < 0) {
@@ -1745,7 +1749,7 @@
 		panel->backlight.get = pch_get_backlight;
 		panel->backlight.hz_to_pwm = pch_hz_to_pwm;
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		if (dev_priv->vbt.has_mipi) {
+		if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
 			panel->backlight.setup = pwm_setup_backlight;
 			panel->backlight.enable = pwm_enable_backlight;
 			panel->backlight.disable = pwm_disable_backlight;
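
The backlight changes above consistently replace truncating integer division with DIV_ROUND_CLOSEST() when converting a requested PWM frequency into a counter value, so the programmed period lands on the nearest representable value rather than always rounding down. The numerical difference is easy to demonstrate (a minimal sketch using a simplified positive-operand version of the kernel macro; the 24 MHz clock and 128x granularity match the SPT path above, the 200 Hz request is just an example):

#include <stdio.h>

/* Simplified DIV_ROUND_CLOSEST() for positive integer operands only. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int clock = 24000000;	/* 24 MHz PWM source clock */
	unsigned int mul = 128;		/* SPT granularity multiplier */
	unsigned int freq = 200;	/* requested PWM frequency in Hz */

	/* 24000000 / 25600 = 937.5: truncation and rounding disagree. */
	printf("truncated: %u\n", clock / (freq * mul));		  /* 937 */
	printf("rounded:   %u\n", DIV_ROUND_CLOSEST(clock, freq * mul)); /* 938 */
	return 0;
}
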
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3425d8e..a7ef45d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -487,20 +487,6 @@
 	.guard_size = 2,
 	.cacheline_size = G4X_FIFO_LINE_SIZE,
 };
-static const struct intel_watermark_params valleyview_wm_info = {
-	.fifo_size = VALLEYVIEW_FIFO_SIZE,
-	.max_wm = VALLEYVIEW_MAX_WM,
-	.default_wm = VALLEYVIEW_MAX_WM,
-	.guard_size = 2,
-	.cacheline_size = G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params valleyview_cursor_wm_info = {
-	.fifo_size = I965_CURSOR_FIFO,
-	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
-	.default_wm = I965_CURSOR_DFT_WM,
-	.guard_size = 2,
-	.cacheline_size = G4X_FIFO_LINE_SIZE,
-};
 static const struct intel_watermark_params i965_cursor_wm_info = {
 	.fifo_size = I965_CURSOR_FIFO,
 	.max_wm = I965_CURSOR_MAX_WM,
@@ -2010,11 +1996,18 @@
 		cur_latency *= 5;
 	}
 
-	result->pri_val = ilk_compute_pri_wm(cstate, pristate,
-					     pri_latency, level);
-	result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
-	result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
-	result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+	if (pristate) {
+		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+						     pri_latency, level);
+		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+	}
+
+	if (sprstate)
+		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+
+	if (curstate)
+		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+
 	result->enable = true;
 }
 
@@ -2278,100 +2271,171 @@
 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
-/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
-			       struct drm_atomic_state *state)
+static bool ilk_validate_pipe_wm(struct drm_device *dev,
+				 struct intel_pipe_wm *pipe_wm)
 {
-	struct intel_pipe_wm *pipe_wm;
-	struct drm_device *dev = intel_crtc->base.dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc_state *cstate = NULL;
-	struct intel_plane *intel_plane;
-	struct drm_plane_state *ps;
-	struct intel_plane_state *pristate = NULL;
-	struct intel_plane_state *sprstate = NULL;
-	struct intel_plane_state *curstate = NULL;
-	int level, max_level = ilk_wm_max_level(dev);
 	/* LP0 watermark maximums depend on this pipe alone */
-	struct intel_wm_config config = {
+	const struct intel_wm_config config = {
 		.num_pipes_active = 1,
+		.sprites_enabled = pipe_wm->sprites_enabled,
+		.sprites_scaled = pipe_wm->sprites_scaled,
 	};
 	struct ilk_wm_maximums max;
 
-	cstate = intel_atomic_get_crtc_state(state, intel_crtc);
-	if (IS_ERR(cstate))
-		return PTR_ERR(cstate);
-
-	pipe_wm = &cstate->wm.optimal.ilk;
-	memset(pipe_wm, 0, sizeof(*pipe_wm));
-
-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-		ps = drm_atomic_get_plane_state(state,
-						&intel_plane->base);
-		if (IS_ERR(ps))
-			return PTR_ERR(ps);
-
-		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
-			pristate = to_intel_plane_state(ps);
-		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
-			sprstate = to_intel_plane_state(ps);
-		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
-			curstate = to_intel_plane_state(ps);
-	}
-
-	config.sprites_enabled = sprstate->visible;
-	config.sprites_scaled = sprstate->visible &&
-		(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
-		drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
-
-	pipe_wm->pipe_enabled = cstate->base.active;
-	pipe_wm->sprites_enabled = config.sprites_enabled;
-	pipe_wm->sprites_scaled = config.sprites_scaled;
-
-	/* ILK/SNB: LP2+ watermarks only w/o sprites */
-	if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
-		max_level = 1;
-
-	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
-	if (config.sprites_scaled)
-		max_level = 0;
-
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
-			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
-
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
-
 	/* LP0 watermarks always use 1/2 DDB partitioning */
 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
 	/* At least LP0 must be valid */
-	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+		DRM_DEBUG_KMS("LP0 watermark invalid\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* Compute new watermarks for the pipe */
+static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+{
+	struct drm_atomic_state *state = cstate->base.state;
+	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_pipe_wm *pipe_wm;
+	struct drm_device *dev = state->dev;
+	const struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *pristate = NULL;
+	struct intel_plane_state *sprstate = NULL;
+	struct intel_plane_state *curstate = NULL;
+	int level, max_level = ilk_wm_max_level(dev), usable_level;
+	struct ilk_wm_maximums max;
+
+	pipe_wm = &cstate->wm.optimal.ilk;
+
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		struct intel_plane_state *ps;
+
+		ps = intel_atomic_get_existing_plane_state(state,
+							   intel_plane);
+		if (!ps)
+			continue;
+
+		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+			pristate = ps;
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+			sprstate = ps;
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+			curstate = ps;
+	}
+
+	pipe_wm->pipe_enabled = cstate->base.active;
+	if (sprstate) {
+		pipe_wm->sprites_enabled = sprstate->visible;
+		pipe_wm->sprites_scaled = sprstate->visible &&
+			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
+			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+	}
+
+	usable_level = max_level;
+
+	/* ILK/SNB: LP2+ watermarks only w/o sprites */
+	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+		usable_level = 1;
+
+	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+	if (pipe_wm->sprites_scaled)
+		usable_level = 0;
+
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+
+	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
+
+	if (!ilk_validate_pipe_wm(dev, pipe_wm))
 		return -EINVAL;
 
 	ilk_compute_wm_reg_maximums(dev, 1, &max);
 
 	for (level = 1; level <= max_level; level++) {
-		struct intel_wm_level wm = {};
+		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
 
 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
-				     pristate, sprstate, curstate, &wm);
+				     pristate, sprstate, curstate, wm);
 
 		/*
 		 * Disable any watermark level that exceeds the
 		 * register maximums since such watermarks are
 		 * always invalid.
 		 */
-		if (!ilk_validate_wm_level(level, &max, &wm))
-			break;
+		if (level > usable_level)
+			continue;
 
-		pipe_wm->wm[level] = wm;
+		if (ilk_validate_wm_level(level, &max, wm))
+			pipe_wm->wm[level] = *wm;
+		else
+			usable_level = level;
 	}
 
 	return 0;
 }
 
 /*
+ * Build a set of 'intermediate' watermark values that satisfy both the old
+ * state and the new state.  These can be programmed to the hardware
+ * immediately.
+ */
+static int ilk_compute_intermediate_wm(struct drm_device *dev,
+				       struct intel_crtc *intel_crtc,
+				       struct intel_crtc_state *newstate)
+{
+	struct intel_pipe_wm *a = &newstate->wm.intermediate;
+	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
+	int level, max_level = ilk_wm_max_level(dev);
+
+	/*
+	 * Start with the final, target watermarks, then combine with the
+	 * currently active watermarks to get values that are safe both before
+	 * and after the vblank.
+	 */
+	*a = newstate->wm.optimal.ilk;
+	a->pipe_enabled |= b->pipe_enabled;
+	a->sprites_enabled |= b->sprites_enabled;
+	a->sprites_scaled |= b->sprites_scaled;
+
+	for (level = 0; level <= max_level; level++) {
+		struct intel_wm_level *a_wm = &a->wm[level];
+		const struct intel_wm_level *b_wm = &b->wm[level];
+
+		a_wm->enable &= b_wm->enable;
+		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
+		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
+		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
+		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
+	}
+
+	/*
+	 * We need to make sure that these merged watermark values are
+	 * actually a valid configuration themselves.  If they're not,
+	 * there's no safe way to transition from the old state to
+	 * the new state, so we need to fail the atomic transaction.
+	 */
+	if (!ilk_validate_pipe_wm(dev, a))
+		return -EINVAL;
+
+	/*
+	 * If our intermediate WM are identical to the final WM, then we can
+	 * omit the post-vblank programming; only update if it's different.
+	 */
+	if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+		newstate->wm.need_postvbl_update = false;
+
+	return 0;
+}
+
+/*
  * Merge the watermarks from all active pipes for a specific level.
  */
 static void ilk_merge_wm_level(struct drm_device *dev,
@@ -2383,9 +2447,7 @@
 	ret_wm->enable = true;
 
 	for_each_intel_crtc(dev, intel_crtc) {
-		const struct intel_crtc_state *cstate =
-			to_intel_crtc_state(intel_crtc->base.state);
-		const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
 		const struct intel_wm_level *wm = &active->wm[level];
 
 		if (!active->pipe_enabled)
@@ -2421,7 +2483,7 @@
 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
 	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
 	    config->num_pipes_active > 1)
-		return;
+		last_enabled_level = 0;
 
 	/* ILK: FBC WM must be disabled always */
 	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
@@ -2533,15 +2595,14 @@
 
 	/* LP0 register values */
 	for_each_intel_crtc(dev, intel_crtc) {
-		const struct intel_crtc_state *cstate =
-			to_intel_crtc_state(intel_crtc->base.state);
 		enum pipe pipe = intel_crtc->pipe;
-		const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
+		const struct intel_wm_level *r =
+			&intel_crtc->wm.active.ilk.wm[0];
 
 		if (WARN_ON(!r->enable))
 			continue;
 
-		results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
+		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
 
 		results->wm_pipe[pipe] =
 			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
@@ -2748,7 +2809,7 @@
 	dev_priv->wm.hw = *results;
 }
 
-static bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -3657,11 +3718,9 @@
 	}
 }
 
-static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = dev_priv->dev;
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
 	struct intel_wm_config config = {};
@@ -3692,28 +3751,28 @@
 	ilk_write_wm_values(dev_priv, &results);
 }
 
-static void ilk_update_wm(struct drm_crtc *crtc)
+static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 
-	WARN_ON(cstate->base.active != intel_crtc->active);
+	mutex_lock(&dev_priv->wm.wm_mutex);
+	intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+	ilk_program_watermarks(dev_priv);
+	mutex_unlock(&dev_priv->wm.wm_mutex);
+}
 
-	/*
-	 * IVB workaround: must disable low power watermarks for at least
-	 * one frame before enabling scaling.  LP watermarks can be re-enabled
-	 * when scaling is disabled.
-	 *
-	 * WaCxSRDisabledForSpriteScaling:ivb
-	 */
-	if (cstate->disable_lp_wm) {
-		ilk_disable_lp_wm(crtc->dev);
-		intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
+static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+{
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+
+	mutex_lock(&dev_priv->wm.wm_mutex);
+	if (cstate->wm.need_postvbl_update) {
+		intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+		ilk_program_watermarks(dev_priv);
 	}
-
-	intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
-
-	ilk_program_watermarks(cstate);
+	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
 static void skl_pipe_wm_active_state(uint32_t val,
@@ -3845,6 +3904,8 @@
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
+	memset(active, 0, sizeof(*active));
+
 	active->pipe_enabled = intel_crtc->active;
 
 	if (active->pipe_enabled) {
@@ -4243,7 +4304,7 @@
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	if (IS_GEN9(dev_priv->dev)) {
+	if (IS_GEN9(dev_priv)) {
 		limits = (dev_priv->rps.max_freq_softlimit) << 23;
 		if (val <= dev_priv->rps.min_freq_softlimit)
 			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@@ -4528,7 +4589,7 @@
 		gen6_set_rps(dev, val);
 }
 
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -4536,12 +4597,20 @@
 	I915_WRITE(GEN9_PG_ENABLE, 0);
 }
 
+static void gen9_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
 static void gen6_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+	I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
 static void cherryview_disable_rps(struct drm_device *dev)
@@ -4585,7 +4654,8 @@
 
 static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool enable_rc6 = true;
 	unsigned long rc6_ctx_base;
 
@@ -4599,9 +4669,9 @@
 	 * for this check.
 	 */
 	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
-	if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
-	      (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
-					dev_priv->gtt.stolen_reserved_size))) {
+	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
+	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
+					ggtt->stolen_reserved_size))) {
 		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
 		enable_rc6 = false;
 	}
@@ -4744,6 +4814,16 @@
 
 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
 	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+		/*
+		 * BIOS could leave HW Turbo enabled, so explicitly clear out
+		 * the control register to avoid an inconsistency with the
+		 * debugfs interface, which would otherwise report Turbo as
+		 * enabled - not what the user expects with WaGsvDisableTurbo
+		 * applied. Apart from this there is no problem even if Turbo
+		 * is left enabled in the control register, as the Up/Down
+		 * interrupts would remain masked.
+		 */
+		gen9_disable_rps(dev);
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 		return;
 	}
@@ -4762,7 +4842,7 @@
 	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
 	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
 	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -4770,9 +4850,8 @@
 static void gen9_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t rc6_mask = 0;
-	int unused;
 
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
@@ -4793,8 +4872,8 @@
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	if (HAS_GUC_UCODE(dev))
 		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
@@ -4840,9 +4919,8 @@
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint32_t rc6_mask = 0;
-	int unused;
 
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
@@ -4861,8 +4939,8 @@
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	if (IS_BROADWELL(dev))
 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@@ -4922,11 +5000,11 @@
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
-	int i, ret;
+	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
@@ -4939,7 +5017,8 @@
 	I915_WRITE(GEN6_RC_STATE, 0);
 
 	/* Clear the DBG now so we don't confuse earlier errors */
-	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+	gtfifodbg = I915_READ(GTFIFODBG);
+	if (gtfifodbg) {
 		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
@@ -4958,8 +5037,8 @@
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -5244,9 +5323,9 @@
 
 static void cherryview_setup_pctx(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned long pctx_paddr, paddr;
-	struct i915_gtt *gtt = &dev_priv->gtt;
 	u32 pcbr;
 	int pctx_size = 32*1024;
 
@@ -5254,7 +5333,7 @@
 	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
 		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
 		paddr = (dev_priv->mm.stolen_base +
-			 (gtt->stolen_size - pctx_size));
+			 (ggtt->stolen_size - pctx_size));
 
 		pctx_paddr = (paddr & (~4095));
 		I915_WRITE(VLV_PCBR, pctx_paddr);
@@ -5322,6 +5401,17 @@
 	dev_priv->vlv_pctx = NULL;
 }
 
+static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
+{
+	dev_priv->rps.gpll_ref_freq =
+		vlv_get_cck_clock(dev_priv, "GPLL ref",
+				  CCK_GPLL_CLOCK_CONTROL,
+				  dev_priv->czclk_freq);
+
+	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
+			 dev_priv->rps.gpll_ref_freq);
+}
+
 static void valleyview_init_gt_powersave(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5329,6 +5419,8 @@
 
 	valleyview_setup_pctx(dev);
 
+	vlv_init_gpll_ref_freq(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -5386,6 +5478,8 @@
 
 	cherryview_setup_pctx(dev);
 
+	vlv_init_gpll_ref_freq(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -5450,13 +5544,13 @@
 static void cherryview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
-	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	gtfifodbg = I915_READ(GTFIFODBG);
+	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
+					     GT_FIFO_FREE_ENTRIES_CHV);
 	if (gtfifodbg) {
 		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
 				 gtfifodbg);
@@ -5477,8 +5571,8 @@
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 
 	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
@@ -5537,10 +5631,10 @@
 			 dev_priv->rps.cur_freq);
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-			 dev_priv->rps.efficient_freq);
+			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+			 dev_priv->rps.idle_freq);
 
-	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5548,15 +5642,15 @@
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 gtfifodbg, val, rc6_mode = 0;
-	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	valleyview_check_pctx(dev_priv);
 
-	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+	gtfifodbg = I915_READ(GTFIFODBG);
+	if (gtfifodbg) {
 		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
 				 gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
@@ -5588,8 +5682,8 @@
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
-		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	for_each_engine(engine, dev_priv)
+		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
@@ -5627,10 +5721,10 @@
 			 dev_priv->rps.cur_freq);
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-			 dev_priv->rps.efficient_freq);
+			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+			 dev_priv->rps.idle_freq);
 
-	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5965,17 +6059,16 @@
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool ret = false;
-	int i;
 
 	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	for_each_ring(ring, dev_priv, i)
-		ret |= !list_empty(&ring->request_list);
+	for_each_engine(engine, dev_priv)
+		ret |= !list_empty(&engine->request_list);
 
 out_unlock:
 	spin_unlock_irq(&mchdev_lock);
@@ -6195,9 +6288,10 @@
 		intel_suspend_gt_powersave(dev);
 
 		mutex_lock(&dev_priv->rps.hw_lock);
-		if (INTEL_INFO(dev)->gen >= 9)
+		if (INTEL_INFO(dev)->gen >= 9) {
+			gen9_disable_rc6(dev);
 			gen9_disable_rps(dev);
-		else if (IS_CHERRYVIEW(dev))
+		} else if (IS_CHERRYVIEW(dev))
 			cherryview_disable_rps(dev);
 		else if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
@@ -6818,23 +6912,10 @@
 	gen6_check_mch_setup(dev);
 }
 
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
-	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * Disable trickle feed and enable pnd deadline calculation
-	 */
-	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-	I915_WRITE(CBR1_VLV, 0);
-}
-
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	vlv_init_display_clock_gating(dev_priv);
-
 	/* WaDisableEarlyCull:vlv */
 	I915_WRITE(_3D_CHICKEN3,
 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -6917,8 +6998,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	vlv_init_display_clock_gating(dev_priv);
-
 	/* WaVSRefCountFullforceMissDisable:chv */
 	/* WaDSRefCountFullforceMissDisable:chv */
 	I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7058,8 +7137,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->display.init_clock_gating)
-		dev_priv->display.init_clock_gating(dev);
+	dev_priv->display.init_clock_gating(dev);
 }
 
 void intel_suspend_hw(struct drm_device *dev)
@@ -7068,6 +7146,60 @@
 		lpt_suspend_hw(dev);
 }
 
+static void nop_init_clock_gating(struct drm_device *dev)
+{
+	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+}
+
+/**
+ * intel_init_clock_gating_hooks - setup the clock gating hooks
+ * @dev_priv: device private
+ *
+ * Setup the hooks that configure which clocks of a given platform can be
+ * gated and also apply various GT and display specific workarounds for these
+ * platforms. Note that some GT specific workarounds are applied separately
+ * when GPU contexts or batchbuffers start their execution.
+ */
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
+{
+	if (IS_SKYLAKE(dev_priv))
+		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+	else if (IS_KABYLAKE(dev_priv))
+		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+	else if (IS_BROXTON(dev_priv))
+		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+	else if (IS_BROADWELL(dev_priv))
+		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
+	else if (IS_CHERRYVIEW(dev_priv))
+		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
+	else if (IS_HASWELL(dev_priv))
+		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+	else if (IS_IVYBRIDGE(dev_priv))
+		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+	else if (IS_VALLEYVIEW(dev_priv))
+		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
+	else if (IS_GEN6(dev_priv))
+		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+	else if (IS_GEN5(dev_priv))
+		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+	else if (IS_G4X(dev_priv))
+		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+	else if (IS_CRESTLINE(dev_priv))
+		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+	else if (IS_BROADWATER(dev_priv))
+		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+	else if (IS_GEN3(dev_priv))
+		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
+		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+	else if (IS_GEN2(dev_priv))
+		dev_priv->display.init_clock_gating = i830_init_clock_gating;
+	else {
+		MISSING_CASE(INTEL_DEVID(dev_priv));
+		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+	}
+}
+
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
@@ -7084,10 +7216,6 @@
 	/* For FIFO watermark updates */
 	if (INTEL_INFO(dev)->gen >= 9) {
 		skl_setup_wm_latency(dev);
-
-		if (IS_BROXTON(dev))
-			dev_priv->display.init_clock_gating =
-				bxt_init_clock_gating;
 		dev_priv->display.update_wm = skl_update_wm;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		ilk_setup_wm_latency(dev);
@@ -7096,36 +7224,23 @@
 		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
 		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
-			dev_priv->display.update_wm = ilk_update_wm;
 			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
-			dev_priv->display.program_watermarks = ilk_program_watermarks;
+			dev_priv->display.compute_intermediate_wm =
+				ilk_compute_intermediate_wm;
+			dev_priv->display.initial_watermarks =
+				ilk_initial_watermarks;
+			dev_priv->display.optimize_watermarks =
+				ilk_optimize_watermarks;
 		} else {
 			DRM_DEBUG_KMS("Failed to read display plane latency. "
 				      "Disable CxSR\n");
 		}
-
-		if (IS_GEN5(dev))
-			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
-		else if (IS_GEN6(dev))
-			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-		else if (IS_IVYBRIDGE(dev))
-			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-		else if (IS_HASWELL(dev))
-			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-		else if (INTEL_INFO(dev)->gen == 8)
-			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
 	} else if (IS_CHERRYVIEW(dev)) {
 		vlv_setup_wm_latency(dev);
-
 		dev_priv->display.update_wm = vlv_update_wm;
-		dev_priv->display.init_clock_gating =
-			cherryview_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
 		vlv_setup_wm_latency(dev);
-
 		dev_priv->display.update_wm = vlv_update_wm;
-		dev_priv->display.init_clock_gating =
-			valleyview_init_clock_gating;
 	} else if (IS_PINEVIEW(dev)) {
 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
 					    dev_priv->is_ddr3,
@@ -7141,20 +7256,13 @@
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.update_wm = g4x_update_wm;
-		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
 	} else if (IS_GEN4(dev)) {
 		dev_priv->display.update_wm = i965_update_wm;
-		if (IS_CRESTLINE(dev))
-			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
-		else if (IS_BROADWATER(dev))
-			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
 	} else if (IS_GEN3(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
 	} else if (IS_GEN2(dev)) {
 		if (INTEL_INFO(dev)->num_pipes == 1) {
 			dev_priv->display.update_wm = i845_update_wm;
@@ -7163,11 +7271,6 @@
 			dev_priv->display.update_wm = i9xx_update_wm;
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 		}
-
-		if (IS_I85X(dev) || IS_I865G(dev))
-			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-		else
-			dev_priv->display.init_clock_gating = i830_init_clock_gating;
 	} else {
 		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
 	}
@@ -7221,78 +7324,43 @@
 	return 0;
 }
 
-static int vlv_gpu_freq_div(unsigned int czclk_freq)
-{
-	switch (czclk_freq) {
-	case 200:
-		return 10;
-	case 267:
-		return 12;
-	case 320:
-	case 333:
-		return 16;
-	case 400:
-		return 20;
-	default:
-		return -1;
-	}
-}
-
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-	div = vlv_gpu_freq_div(czclk_freq);
-	if (div < 0)
-		return div;
-
-	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
+	/*
+	 * N = val - 0xb7
+	 * Slow = Fast = GPLL ref * N
+	 */
+	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
 }
 
 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-	mul = vlv_gpu_freq_div(czclk_freq);
-	if (mul < 0)
-		return mul;
-
-	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
+	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
 }
 
 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-	div = vlv_gpu_freq_div(czclk_freq);
-	if (div < 0)
-		return div;
-	div /= 2;
-
-	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
+	/*
+	 * N = val / 2
+	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+	 */
+	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
 }
 
 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-	mul = vlv_gpu_freq_div(czclk_freq);
-	if (mul < 0)
-		return mul;
-	mul /= 2;
-
 	/* CHV needs even values */
-	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
+	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
 }
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-	if (IS_GEN9(dev_priv->dev))
+	if (IS_GEN9(dev_priv))
 		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
 					 GEN9_FREQ_SCALER);
-	else if (IS_CHERRYVIEW(dev_priv->dev))
+	else if (IS_CHERRYVIEW(dev_priv))
 		return chv_gpu_freq(dev_priv, val);
-	else if (IS_VALLEYVIEW(dev_priv->dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		return byt_gpu_freq(dev_priv, val);
 	else
 		return val * GT_FREQUENCY_MULTIPLIER;
@@ -7300,12 +7368,12 @@
 
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-	if (IS_GEN9(dev_priv->dev))
+	if (IS_GEN9(dev_priv))
 		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
 					 GT_FREQUENCY_MULTIPLIER);
-	else if (IS_CHERRYVIEW(dev_priv->dev))
+	else if (IS_CHERRYVIEW(dev_priv))
 		return chv_freq_opcode(dev_priv, val);
-	else if (IS_VALLEYVIEW(dev_priv->dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		return byt_freq_opcode(dev_priv, val);
 	else
 		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
@@ -7322,7 +7390,7 @@
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req, true))
-		gen6_rps_boost(to_i915(req->ring->dev), NULL,
+		gen6_rps_boost(to_i915(req->engine->dev), NULL,
 			       req->emitted_jiffies);
 
 	i915_gem_request_unreference__unlocked(req);
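
The new ilk_compute_intermediate_wm() above builds per-level values that are safe both before and after the vblank by keeping, for each field, the larger of the currently active and the target watermark, and by only leaving a level enabled if both states enable it; the merged result is then validated like any other configuration. A stripped-down sketch of that per-level merge rule (the struct layout is illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-level watermark values. */
struct wm_level {
	bool enable;
	unsigned int pri_val, spr_val, cur_val, fbc_val;
};

static unsigned int max_uint(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/*
 * Merge the currently active level with the target level: keep the larger
 * (safer) value of every field, and report the level as enabled only if it
 * is enabled in both the old and the new state.
 */
static struct wm_level intermediate_level(const struct wm_level *active,
					  const struct wm_level *target)
{
	struct wm_level out = {
		.enable = active->enable && target->enable,
		.pri_val = max_uint(active->pri_val, target->pri_val),
		.spr_val = max_uint(active->spr_val, target->spr_val),
		.cur_val = max_uint(active->cur_val, target->cur_val),
		.fbc_val = max_uint(active->fbc_val, target->fbc_val),
	};

	return out;
}

int main(void)
{
	struct wm_level active = { true, 40, 20, 8, 12 };
	struct wm_level target = { true, 32, 24, 10, 6 };
	struct wm_level mid = intermediate_level(&active, &target);

	/* Prints: pri 40 spr 24 cur 10 fbc 12 */
	printf("pri %u spr %u cur %u fbc %u\n",
	       mid.pri_val, mid.spr_val, mid.cur_val, mid.fbc_val);
	return 0;
}
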
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 0b42ada..a788d1e 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -280,7 +280,10 @@
 	 * with the 5 or 6 idle patterns.
 	 */
 	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-	uint32_t val = 0x0;
+	uint32_t val = EDP_PSR_ENABLE;
+
+	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 
 	if (IS_HASWELL(dev))
 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -288,14 +291,50 @@
 	if (dev_priv->psr.link_standby)
 		val |= EDP_PSR_LINK_STANDBY;
 
-	I915_WRITE(EDP_PSR_CTL, val |
-		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
-		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
-		   EDP_PSR_ENABLE);
+	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
+		val |= EDP_PSR_TP1_TIME_2500us;
+	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
+		val |= EDP_PSR_TP1_TIME_500us;
+	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+		val |= EDP_PSR_TP1_TIME_100us;
+	else
+		val |= EDP_PSR_TP1_TIME_0us;
 
-	if (dev_priv->psr.psr2_support)
-		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
-				EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
+	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+		val |= EDP_PSR_TP2_TP3_TIME_2500us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+		val |= EDP_PSR_TP2_TP3_TIME_500us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+		val |= EDP_PSR_TP2_TP3_TIME_100us;
+	else
+		val |= EDP_PSR_TP2_TP3_TIME_0us;
+
+	if (intel_dp_source_supports_hbr2(intel_dp) &&
+	    drm_dp_tps3_supported(intel_dp->dpcd))
+		val |= EDP_PSR_TP1_TP3_SEL;
+	else
+		val |= EDP_PSR_TP1_TP2_SEL;
+
+	I915_WRITE(EDP_PSR_CTL, val);
+
+	if (!dev_priv->psr.psr2_support)
+		return;
+
+	/* FIXME: selective update is probably totally broken because it doesn't
+	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
+	 * good enough. */
+	val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+
+	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+		val |= EDP_PSR2_TP2_TIME_2500;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+		val |= EDP_PSR2_TP2_TIME_500;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+		val |= EDP_PSR2_TP2_TIME_100;
+	else
+		val |= EDP_PSR2_TP2_TIME_50;
+
+	I915_WRITE(EDP_PSR2_CTL, val);
 }
 
 static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
@@ -507,7 +546,8 @@
 
 		/* Wait till PSR is idle */
 		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
-			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+			       EDP_PSR_STATUS_STATE_MASK) == 0,
+			       2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
 		dev_priv->psr.active = false;
@@ -562,7 +602,7 @@
 	 * PSR might take some time to get fully disabled
 	 * and be ready for re-enable.
 	 */
-	if (HAS_DDI(dev_priv->dev)) {
+	if (HAS_DDI(dev_priv)) {
 		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
 			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -780,8 +820,7 @@
 
 	/* Per platform default */
 	if (i915.enable_psr == -1) {
-		if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
-		    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			i915.enable_psr = 1;
 		else
 			i915.enable_psr = 0;
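
The PSR setup above now derives the TP1 and TP2/TP3 training-pattern wakeup times from the VBT rather than hard-coding them, using the same threshold mapping for both fields (and a similar one for the PSR2 TP2 time). A small sketch of that mapping in isolation (the enum values stand in for the EDP_PSR_TP*_TIME_* register bits and are purely illustrative):

#include <stdio.h>

/* Illustrative stand-ins for the EDP_PSR_TP*_TIME_* register field values. */
enum tp_time {
	TP_TIME_0US,
	TP_TIME_100US,
	TP_TIME_500US,
	TP_TIME_2500US,
};

/* Same thresholds as used for both the TP1 and TP2/TP3 fields above. */
static enum tp_time tp_time_from_vbt(int vbt_wakeup_time)
{
	if (vbt_wakeup_time > 5)
		return TP_TIME_2500US;
	if (vbt_wakeup_time > 1)
		return TP_TIME_500US;
	if (vbt_wakeup_time > 0)
		return TP_TIME_100US;
	return TP_TIME_0US;
}

int main(void)
{
	static const char *names[] = { "0us", "100us", "500us", "2500us" };
	int t;

	for (t = 0; t <= 6; t++)
		printf("vbt value %d -> %s\n", t, names[tp_time_from_vbt(t)]);
	return 0;
}
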
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9121646..04402bb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,25 +53,19 @@
 					    ringbuf->tail, ringbuf->size);
 }
 
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-	intel_ring_update_space(ringbuf);
-	return ringbuf->space;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
-bool intel_ring_stopped(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
-}
-
-static void __intel_ring_advance(struct intel_engine_cs *ring)
-{
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
-	if (intel_ring_stopped(ring))
+	if (intel_engine_stopped(engine))
 		return;
-	ring->write_tail(ring, ringbuf->tail);
+	engine->write_tail(engine, ringbuf->tail);
 }
 
 static int
@@ -79,7 +73,7 @@
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 cmd;
 	int ret;
 
@@ -94,9 +88,9 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -106,8 +100,8 @@
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_device *dev = engine->dev;
 	u32 cmd;
 	int ret;
 
@@ -153,9 +147,9 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -200,34 +194,34 @@
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = req->engine;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
 			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(engine, 0); /* low dword */
+	intel_ring_emit(engine, 0); /* high dword */
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -236,9 +230,9 @@
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -276,11 +270,11 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(engine, flags);
+	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -288,19 +282,19 @@
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
 			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -309,9 +303,9 @@
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -360,11 +354,11 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(engine, flags);
+	intel_ring_emit(engine, scratch_addr);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -373,20 +367,20 @@
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(engine, flags);
+	intel_ring_emit(engine, scratch_addr);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_emit(engine, 0);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -396,7 +390,7 @@
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -429,51 +423,51 @@
 	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *ring,
+static void ring_write_tail(struct intel_engine_cs *engine,
 			    u32 value)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	I915_WRITE_TAIL(ring, value);
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	I915_WRITE_TAIL(engine, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u64 acthd;
 
-	if (INTEL_INFO(ring->dev)->gen >= 8)
-		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
-					 RING_ACTHD_UDW(ring->mmio_base));
-	else if (INTEL_INFO(ring->dev)->gen >= 4)
-		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+	if (INTEL_INFO(engine->dev)->gen >= 8)
+		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+					 RING_ACTHD_UDW(engine->mmio_base));
+	else if (INTEL_INFO(engine->dev)->gen >= 4)
+		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
 	else
 		acthd = I915_READ(ACTHD);
 
 	return acthd;
 }
 
-static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	u32 addr;
 
 	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(ring->dev)->gen >= 4)
+	if (INTEL_INFO(engine->dev)->gen >= 4)
 		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 	I915_WRITE(HWS_PGA, addr);
 }
 
-static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_device *dev = engine->dev;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
 	 */
 	if (IS_GEN7(dev)) {
-		switch (ring->id) {
+		switch (engine->id) {
 		case RCS:
 			mmio = RENDER_HWS_PGA_GEN7;
 			break;
@@ -492,14 +486,14 @@
 			mmio = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
-	} else if (IS_GEN6(ring->dev)) {
-		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+	} else if (IS_GEN6(engine->dev)) {
+		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
 		/* XXX: gen8 returns to sanity */
-		mmio = RING_HWS_PGA(ring->mmio_base);
+		mmio = RING_HWS_PGA(engine->mmio_base);
 	}
 
-	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
 	POSTING_READ(mmio);
 
 	/*
@@ -510,10 +504,10 @@
 	 * invalidating the TLB?
 	 */
 	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-		i915_reg_t reg = RING_INSTPM(ring->mmio_base);
+		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
 		/* ring should be idle before issuing a sync flush*/
-		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 		I915_WRITE(reg,
 			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -521,117 +515,125 @@
 		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
 			     1000))
 			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-				  ring->name);
+				  engine->name);
 	}
 }
 
-static bool stop_ring(struct intel_engine_cs *ring)
+static bool stop_ring(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 
-	if (!IS_GEN2(ring->dev)) {
-		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+	if (!IS_GEN2(engine->dev)) {
+		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+		if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s : timed out trying to stop ring\n",
+				  engine->name);
 			/* Sometimes we observe that the idle flag is not
 			 * set even though the ring is empty. So double
 			 * check before giving up.
 			 */
-			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
 				return false;
 		}
 	}
 
-	I915_WRITE_CTL(ring, 0);
-	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(ring, 0);
+	I915_WRITE_CTL(engine, 0);
+	I915_WRITE_HEAD(engine, 0);
+	engine->write_tail(engine, 0);
 
-	if (!IS_GEN2(ring->dev)) {
-		(void)I915_READ_CTL(ring);
-		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+	if (!IS_GEN2(engine->dev)) {
+		(void)I915_READ_CTL(engine);
+		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 	}
 
-	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
-static int init_ring_common(struct intel_engine_cs *ring)
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+static int init_ring_common(struct intel_engine_cs *engine)
+{
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret = 0;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	if (!stop_ring(ring)) {
+	if (!stop_ring(engine)) {
 		/* G45 ring initialization often fails to reset head to zero */
 		DRM_DEBUG_KMS("%s head not reset to zero "
 			      "ctl %08x head %08x tail %08x start %08x\n",
-			      ring->name,
-			      I915_READ_CTL(ring),
-			      I915_READ_HEAD(ring),
-			      I915_READ_TAIL(ring),
-			      I915_READ_START(ring));
+			      engine->name,
+			      I915_READ_CTL(engine),
+			      I915_READ_HEAD(engine),
+			      I915_READ_TAIL(engine),
+			      I915_READ_START(engine));
 
-		if (!stop_ring(ring)) {
+		if (!stop_ring(engine)) {
 			DRM_ERROR("failed to set %s head to zero "
 				  "ctl %08x head %08x tail %08x start %08x\n",
-				  ring->name,
-				  I915_READ_CTL(ring),
-				  I915_READ_HEAD(ring),
-				  I915_READ_TAIL(ring),
-				  I915_READ_START(ring));
+				  engine->name,
+				  I915_READ_CTL(engine),
+				  I915_READ_HEAD(engine),
+				  I915_READ_TAIL(engine),
+				  I915_READ_START(engine));
 			ret = -EIO;
 			goto out;
 		}
 	}
 
 	if (I915_NEED_GFX_HWS(dev))
-		intel_ring_setup_status_page(ring);
+		intel_ring_setup_status_page(engine);
 	else
-		ring_setup_phys_status_page(ring);
+		ring_setup_phys_status_page(engine);
 
 	/* Enforce ordering by reading HEAD register back */
-	I915_READ_HEAD(ring);
+	I915_READ_HEAD(engine);
 
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+	I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
 
 	/* WaClearRingBufHeadRegAtInit:ctg,elk */
-	if (I915_READ_HEAD(ring))
+	if (I915_READ_HEAD(engine))
 		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
-			  ring->name, I915_READ_HEAD(ring));
-	I915_WRITE_HEAD(ring, 0);
-	(void)I915_READ_HEAD(ring);
+			  engine->name, I915_READ_HEAD(engine));
+	I915_WRITE_HEAD(engine, 0);
+	(void)I915_READ_HEAD(engine);
 
-	I915_WRITE_CTL(ring,
+	I915_WRITE_CTL(engine,
 			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
-		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
+		     I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
+		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
-			  ring->name,
-			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
-			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
-			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+			  engine->name,
+			  I915_READ_CTL(engine),
+			  I915_READ_CTL(engine) & RING_VALID,
+			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+			  I915_READ_START(engine),
+			  (unsigned long)i915_gem_obj_ggtt_offset(obj));
 		ret = -EIO;
 		goto out;
 	}
 
 	ringbuf->last_retired_head = -1;
-	ringbuf->head = I915_READ_HEAD(ring);
-	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ringbuf->head = I915_READ_HEAD(engine);
+	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
 	intel_ring_update_space(ringbuf);
 
-	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+	intel_engine_init_hangcheck(engine);
 
 out:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -640,59 +642,60 @@
 }
 
 void
-intel_fini_pipe_control(struct intel_engine_cs *ring)
+intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 
-	if (ring->scratch.obj == NULL)
+	if (engine->scratch.obj == NULL)
 		return;
 
 	if (INTEL_INFO(dev)->gen >= 5) {
-		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_ggtt_unpin(ring->scratch.obj);
+		kunmap(sg_page(engine->scratch.obj->pages->sgl));
+		i915_gem_object_ggtt_unpin(engine->scratch.obj);
 	}
 
-	drm_gem_object_unreference(&ring->scratch.obj->base);
-	ring->scratch.obj = NULL;
+	drm_gem_object_unreference(&engine->scratch.obj->base);
+	engine->scratch.obj = NULL;
 }
 
 int
-intel_init_pipe_control(struct intel_engine_cs *ring)
+intel_init_pipe_control(struct intel_engine_cs *engine)
 {
 	int ret;
 
-	WARN_ON(ring->scratch.obj);
+	WARN_ON(engine->scratch.obj);
 
-	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
-	if (ring->scratch.obj == NULL) {
+	engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
+	if (engine->scratch.obj == NULL) {
 		DRM_ERROR("Failed to allocate seqno page\n");
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+	ret = i915_gem_object_set_cache_level(engine->scratch.obj,
+					      I915_CACHE_LLC);
 	if (ret)
 		goto err_unref;
 
-	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+	ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
 	if (ret)
 		goto err_unref;
 
-	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
-	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
-	if (ring->scratch.cpu_page == NULL) {
+	engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
+	engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
+	if (engine->scratch.cpu_page == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
 	}
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 ring->name, ring->scratch.gtt_offset);
+			 engine->name, engine->scratch.gtt_offset);
 	return 0;
 
 err_unpin:
-	i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	i915_gem_object_ggtt_unpin(engine->scratch.obj);
 err_unref:
-	drm_gem_object_unreference(&ring->scratch.obj->base);
+	drm_gem_object_unreference(&engine->scratch.obj->base);
 err:
 	return ret;
 }
@@ -700,15 +703,15 @@
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -717,16 +720,16 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		intel_ring_emit_reg(engine, w->reg[i].addr);
+		intel_ring_emit(engine, w->reg[i].value);
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, MI_NOOP);
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
-	ring->gpu_caches_dirty = true;
+	engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
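
The workaround emission above programs the whole table with a single MI_LOAD_REGISTER_IMM: one header dword sized by w->count, then an (offset, value) dword pair per entry, followed by an MI_NOOP (the header plus the pairs is an odd number of dwords, so the NOOP makes the total even). As a rough illustration, a two-entry table would produce a dword stream like the following; the register offsets and values are placeholders, and the MI_* macros and u32 type come from the driver's headers, so this is not a self-compiling snippet:

/*
 * Illustrative dword stream for a two-entry workaround table. The
 * offsets/values are placeholders, not real workaround registers.
 */
static const u32 example_wa_batch[] = {
	MI_LOAD_REGISTER_IMM(2),	/* header: two reg/value pairs follow */
	0x0000e4f0, 0xffff0800,		/* reg[0].addr, reg[0].value (placeholders) */
	0x0000e184, 0x00000010,		/* reg[1].addr, reg[1].value (placeholders) */
	MI_NOOP,			/* pad the emission to an even dword count */
};
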
@@ -789,25 +792,26 @@
 
 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
-static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+				 i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct i915_workarounds *wa = &dev_priv->workarounds;
-	const uint32_t index = wa->hw_whitelist_count[ring->id];
+	const uint32_t index = wa->hw_whitelist_count[engine->id];
 
 	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
 		return -EINVAL;
 
-	WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+	WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
 		 i915_mmio_reg_offset(reg));
-	wa->hw_whitelist_count[ring->id]++;
+	wa->hw_whitelist_count[engine->id]++;
 
 	return 0;
 }
 
-static int gen8_init_workarounds(struct intel_engine_cs *ring)
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -857,13 +861,13 @@
 	return 0;
 }
 
-static int bdw_init_workarounds(struct intel_engine_cs *ring)
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
 	int ret;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	ret = gen8_init_workarounds(ring);
+	ret = gen8_init_workarounds(engine);
 	if (ret)
 		return ret;
 
@@ -886,13 +890,13 @@
 	return 0;
 }
 
-static int chv_init_workarounds(struct intel_engine_cs *ring)
+static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
 	int ret;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	ret = gen8_init_workarounds(ring);
+	ret = gen8_init_workarounds(engine);
 	if (ret)
 		return ret;
 
@@ -905,9 +909,9 @@
 	return 0;
 }
 
-static int gen9_init_workarounds(struct intel_engine_cs *ring)
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 	int ret;
@@ -920,8 +924,10 @@
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 		   ECOCHK_DIS_TLB);
 
+	/* WaClearFlowControlGpgpuContextSave:skl,bxt */
 	/* WaDisablePartialInstShootdown:skl,bxt */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  FLOW_CONTROL_ENABLE |
 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
 	/* Syncing dependencies between camera and graphics:skl,bxt */
@@ -947,9 +953,10 @@
 	}
 
 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
-		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-				  GEN9_ENABLE_YV12_BUGFIX);
+	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+			  GEN9_ENABLE_YV12_BUGFIX |
+			  GEN9_ENABLE_GPGPU_PREEMPTION);
 
 	/* Wa4x4STCOptimizationDisable:skl,bxt */
 	/* WaDisablePartialResolveInVc:skl,bxt */
@@ -986,21 +993,21 @@
 				    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
-	ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+	ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
 	if (ret)
 		return ret;
 
 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
-	ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
@@ -1040,13 +1047,13 @@
 	return 0;
 }
 
-static int skl_init_workarounds(struct intel_engine_cs *ring)
+static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
 	int ret;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	ret = gen9_init_workarounds(ring);
+	ret = gen9_init_workarounds(engine);
 	if (ret)
 		return ret;
 
@@ -1114,20 +1121,20 @@
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
 	/* WaDisableLSQCROPERFforOCL:skl */
-	ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
 	if (ret)
 		return ret;
 
-	return skl_tune_iz_hashing(ring);
+	return skl_tune_iz_hashing(engine);
 }
 
-static int bxt_init_workarounds(struct intel_engine_cs *ring)
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
 	int ret;
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	ret = gen9_init_workarounds(ring);
+	ret = gen9_init_workarounds(engine);
 	if (ret)
 		return ret;
 
@@ -1158,11 +1165,11 @@
 	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
 	/* WaDisableLSQCROPERFforOCL:bxt */
 	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
 		if (ret)
 			return ret;
 
-		ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
 		if (ret)
 			return ret;
 	}
@@ -1170,36 +1177,36 @@
 	return 0;
 }
 
-int init_workarounds_ring(struct intel_engine_cs *ring)
+int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	WARN_ON(ring->id != RCS);
+	WARN_ON(engine->id != RCS);
 
 	dev_priv->workarounds.count = 0;
 	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
 	if (IS_BROADWELL(dev))
-		return bdw_init_workarounds(ring);
+		return bdw_init_workarounds(engine);
 
 	if (IS_CHERRYVIEW(dev))
-		return chv_init_workarounds(ring);
+		return chv_init_workarounds(engine);
 
 	if (IS_SKYLAKE(dev))
-		return skl_init_workarounds(ring);
+		return skl_init_workarounds(engine);
 
 	if (IS_BROXTON(dev))
-		return bxt_init_workarounds(ring);
+		return bxt_init_workarounds(engine);
 
 	return 0;
 }
 
-static int init_render_ring(struct intel_engine_cs *ring)
+static int init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = init_ring_common(ring);
+	int ret = init_ring_common(engine);
 	if (ret)
 		return ret;
 
@@ -1242,14 +1249,14 @@
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_DPF(dev))
-		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
 
-	return init_workarounds_ring(ring);
+	return init_workarounds_ring(engine);
 }
 
-static void render_ring_cleanup(struct intel_engine_cs *ring)
+static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->semaphore_obj) {
@@ -1258,18 +1265,19 @@
 		dev_priv->semaphore_obj = NULL;
 	}
 
-	intel_fini_pipe_control(ring);
+	intel_fini_pipe_control(engine);
 }
 
 static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_engine_cs *signaller = signaller_req->engine;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
-	int i, ret, num_rings;
+	enum intel_engine_id id;
+	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1279,9 +1287,9 @@
 	if (ret)
 		return ret;
 
-	for_each_ring(waiter, dev_priv, i) {
+	for_each_engine_id(waiter, dev_priv, id) {
 		u32 seqno;
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1295,7 +1303,7 @@
 		intel_ring_emit(signaller, seqno);
 		intel_ring_emit(signaller, 0);
 		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->id));
+					   MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
 
@@ -1306,11 +1314,12 @@
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_engine_cs *signaller = signaller_req->engine;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
-	int i, ret, num_rings;
+	enum intel_engine_id id;
+	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1320,9 +1329,9 @@
 	if (ret)
 		return ret;
 
-	for_each_ring(waiter, dev_priv, i) {
+	for_each_engine_id(waiter, dev_priv, id) {
 		u32 seqno;
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
@@ -1334,7 +1343,7 @@
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
 		intel_ring_emit(signaller, seqno);
 		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->id));
+					   MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
 
@@ -1344,11 +1353,12 @@
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_engine_cs *signaller = signaller_req->ring;
+	struct intel_engine_cs *signaller = signaller_req->engine;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
-	int i, ret, num_rings;
+	enum intel_engine_id id;
+	int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
 	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
@@ -1359,8 +1369,8 @@
 	if (ret)
 		return ret;
 
-	for_each_ring(useless, dev_priv, i) {
-		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+	for_each_engine_id(useless, dev_priv, id) {
+		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
 		if (i915_mmio_reg_valid(mbox_reg)) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);
@@ -1389,22 +1399,23 @@
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
-	if (ring->semaphore.signal)
-		ret = ring->semaphore.signal(req, 4);
+	if (engine->semaphore.signal)
+		ret = engine->semaphore.signal(req, 4);
 	else
 		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, MI_USER_INTERRUPT);
+	__intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1429,7 +1440,7 @@
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct intel_engine_cs *waiter = waiter_req->engine;
 	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
 	int ret;
 
@@ -1455,7 +1466,7 @@
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->ring;
+	struct intel_engine_cs *waiter = waiter_req->engine;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1503,8 +1514,8 @@
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_engine_cs *engine = req->engine;
+	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1519,78 +1530,93 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+	intel_ring_emit(engine,
+			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, 0);
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(engine,
+			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, 0);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES;
-	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	PIPE_CONTROL_FLUSH(engine, scratch_addr);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+	intel_ring_emit(engine,
+			GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
-	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, 0);
-	__intel_ring_advance(ring);
+	intel_ring_emit(engine,
+			engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, 0);
+	__intel_ring_advance(engine);
 
 	return 0;
 }
 
-static u32
-gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void
+gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
-	 * ACTHD) before reading the status page. */
-	if (!lazy_coherency) {
-		struct drm_i915_private *dev_priv = ring->dev->dev_private;
-		POSTING_READ(RING_ACTHD(ring->mmio_base));
-	}
-
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	 * ACTHD) before reading the status page.
+	 *
+	 * Note that this effectively stalls the read by the time it takes to
+	 * do a memory transaction, which more or less ensures that the write
+	 * from the GPU has sufficient time to invalidate the CPU cacheline.
+	 * Alternatively we could delay the interrupt from the CS ring to give
+	 * the write time to land, but that would incur a delay after every
+	 * batch i.e. much more frequent than a delay when waiting for the
+	 * interrupt (with the same net latency).
+	 *
+	 * Also note that to prevent whole machine hangs on gen7, we have to
+	 * take the spinlock to guard against concurrent cacheline access.
+	 */
+	spin_lock_irq(&dev_priv->uncore.lock);
+	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+	spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 static u32
-ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *engine)
 {
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
 static void
-ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
 static u32
-pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *engine)
 {
-	return ring->scratch.cpu_page[0];
+	return engine->scratch.cpu_page[0];
 }
 
 static void
-pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	ring->scratch.cpu_page[0] = seqno;
+	engine->scratch.cpu_page[0] = seqno;
 }
 
 static bool
-gen5_ring_get_irq(struct intel_engine_cs *ring)
+gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
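
The hunk above splits the old lazy_coherency seqno read into two pieces: gen6_seqno_barrier(), which forces ordering with a posting read of the ring's ACTHD register under the uncore lock, and a plain ring_get_seqno() that only reads the status page. A rough sketch of how a waiter might combine them, assuming hypothetical irq_seqno_barrier and get_seqno function pointers on struct intel_engine_cs (the hunk shows only the gen6 implementations, not the vtable wiring):

/*
 * Sketch only: pair the explicit barrier with the plain status-page read.
 * The ->irq_seqno_barrier and ->get_seqno members are assumed here for
 * illustration; the patch above does not show how they are hooked up.
 */
static bool example_seqno_signaled(struct intel_engine_cs *engine, u32 target)
{
	u32 seqno;

	if (engine->irq_seqno_barrier)		/* e.g. gen6_seqno_barrier */
		engine->irq_seqno_barrier(engine);

	seqno = engine->get_seqno(engine);	/* e.g. ring_get_seqno */
	return (s32)(seqno - target) >= 0;	/* wrap-safe "has passed" check */
}
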
@@ -1598,30 +1624,30 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0)
-		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+	if (engine->irq_refcount++ == 0)
+		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *ring)
+gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0)
-		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+	if (--engine->irq_refcount == 0)
+		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-i9xx_ring_get_irq(struct intel_engine_cs *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1629,8 +1655,8 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+	if (engine->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~engine->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
@@ -1640,15 +1666,15 @@
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->irq_mask |= ring->irq_enable_mask;
+	if (--engine->irq_refcount == 0) {
+		dev_priv->irq_mask |= engine->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
@@ -1656,9 +1682,9 @@
 }
 
 static bool
-i8xx_ring_get_irq(struct intel_engine_cs *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1666,8 +1692,8 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+	if (engine->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~engine->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
 	}
@@ -1677,15 +1703,15 @@
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->irq_mask |= ring->irq_enable_mask;
+	if (--engine->irq_refcount == 0) {
+		dev_priv->irq_mask |= engine->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
 	}
@@ -1697,42 +1723,43 @@
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_FLUSH);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 	return 0;
 }
 
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(engine, MI_USER_INTERRUPT);
+	__intel_ring_advance(engine);
 
 	return 0;
 }
 
 static bool
-gen6_ring_get_irq(struct intel_engine_cs *ring)
+gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1740,14 +1767,14 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~(ring->irq_enable_mask |
+	if (engine->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && engine->id == RCS)
+			I915_WRITE_IMR(engine,
+				       ~(engine->irq_enable_mask |
 					 GT_PARITY_ERROR(dev)));
 		else
-			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1755,27 +1782,27 @@
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *ring)
+gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+	if (--engine->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && engine->id == RCS)
+			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
 		else
-			I915_WRITE_IMR(ring, ~0);
-		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+			I915_WRITE_IMR(engine, ~0);
+		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-hsw_vebox_get_irq(struct intel_engine_cs *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1783,9 +1810,9 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+	if (engine->irq_refcount++ == 0) {
+		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+		gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1793,24 +1820,24 @@
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~0);
-		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+	if (--engine->irq_refcount == 0) {
+		I915_WRITE_IMR(engine, ~0);
+		gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
-gen8_ring_get_irq(struct intel_engine_cs *ring)
+gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
@@ -1818,15 +1845,15 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS) {
-			I915_WRITE_IMR(ring,
-				       ~(ring->irq_enable_mask |
+	if (engine->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+			I915_WRITE_IMR(engine,
+				       ~(engine->irq_enable_mask |
 					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		} else {
-			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
 		}
-		POSTING_READ(RING_IMR(ring->mmio_base));
+		POSTING_READ(RING_IMR(engine->mmio_base));
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1834,21 +1861,21 @@
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *ring)
+gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = ring->dev;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS) {
-			I915_WRITE_IMR(ring,
+	if (--engine->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+			I915_WRITE_IMR(engine,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		} else {
-			I915_WRITE_IMR(ring, ~0);
+			I915_WRITE_IMR(engine, ~0);
 		}
-		POSTING_READ(RING_IMR(ring->mmio_base));
+		POSTING_READ(RING_IMR(engine->mmio_base));
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1858,20 +1885,20 @@
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
+	intel_ring_emit(engine,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, offset);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1885,8 +1912,8 @@
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	u32 cs_offset = ring->scratch.gtt_offset;
+	struct intel_engine_cs *engine = req->engine;
+	u32 cs_offset = engine->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1894,13 +1921,13 @@
 		return ret;
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(engine, cs_offset);
+	intel_ring_emit(engine, 0xdeadbeef);
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
@@ -1914,16 +1941,17 @@
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
+		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(engine,
+				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(engine, cs_offset);
+		intel_ring_emit(engine, 4096);
+		intel_ring_emit(engine, offset);
 
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_FLUSH);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_advance(engine);
 
 		/* ... and execute it. */
 		offset = cs_offset;
@@ -1933,10 +1961,10 @@
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					  0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -1946,55 +1974,55 @@
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					  0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(engine);
 
 	return 0;
 }
 
-static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 
 	if (!dev_priv->status_page_dmah)
 		return;
 
-	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
-	ring->status_page.page_addr = NULL;
+	drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+	engine->status_page.page_addr = NULL;
 }
 
-static void cleanup_status_page(struct intel_engine_cs *ring)
+static void cleanup_status_page(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *obj;
 
-	obj = ring->status_page.obj;
+	obj = engine->status_page.obj;
 	if (obj == NULL)
 		return;
 
 	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
-	ring->status_page.obj = NULL;
+	engine->status_page.obj = NULL;
 }
 
-static int init_status_page(struct intel_engine_cs *ring)
+static int init_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *obj = ring->status_page.obj;
+	struct drm_i915_gem_object *obj = engine->status_page.obj;
 
 	if (obj == NULL) {
 		unsigned flags;
 		int ret;
 
-		obj = i915_gem_alloc_object(ring->dev, 4096);
+		obj = i915_gem_alloc_object(engine->dev, 4096);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate status page\n");
 			return -ENOMEM;
@@ -2005,7 +2033,7 @@
 			goto err_unref;
 
 		flags = 0;
-		if (!HAS_LLC(ring->dev))
+		if (!HAS_LLC(engine->dev))
 			/* On g33, we cannot place HWS above 256MiB, so
 			 * restrict its pinning to the low mappable arena.
 			 * Though this restriction is not documented for
@@ -2024,32 +2052,32 @@
 			return ret;
 		}
 
-		ring->status_page.obj = obj;
+		engine->status_page.obj = obj;
 	}
 
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
-	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+	engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
 
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-			ring->name, ring->status_page.gfx_addr);
+			engine->name, engine->status_page.gfx_addr);
 
 	return 0;
 }
 
-static int init_phys_status_page(struct intel_engine_cs *ring)
+static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
-			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+			drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
 		if (!dev_priv->status_page_dmah)
 			return -ENOMEM;
 	}
 
-	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
 
 	return 0;
 }
@@ -2057,7 +2085,7 @@
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-		vunmap(ringbuf->virtual_start);
+		i915_gem_object_unpin_map(ringbuf->obj);
 	else
 		iounmap(ringbuf->virtual_start);
 	ringbuf->virtual_start = NULL;
@@ -2065,34 +2093,15 @@
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	struct page **pages;
-	void *addr;
-	int i;
-
-	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-	if (pages == NULL)
-		return NULL;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-		pages[i++] = sg_page_iter_page(&sg_iter);
-
-	addr = vmap(pages, i, 0, PAGE_KERNEL);
-	drm_free_large(pages);
-
-	return addr;
-}
-
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
+	void *addr;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
@@ -2101,15 +2110,13 @@
 			return ret;
 
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret) {
-			i915_gem_object_ggtt_unpin(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
 
-		ringbuf->virtual_start = vmap_obj(obj);
-		if (ringbuf->virtual_start == NULL) {
-			i915_gem_object_ggtt_unpin(obj);
-			return -ENOMEM;
+		addr = i915_gem_object_pin_map(obj);
+		if (IS_ERR(addr)) {
+			ret = PTR_ERR(addr);
+			goto err_unpin;
 		}
 	} else {
 		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
@@ -2118,25 +2125,27 @@
 			return ret;
 
 		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret) {
-			i915_gem_object_ggtt_unpin(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
 
 		/* Access through the GTT requires the device to be awake. */
 		assert_rpm_wakelock_held(dev_priv);
 
-		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-		if (ringbuf->virtual_start == NULL) {
-			i915_gem_object_ggtt_unpin(obj);
-			return -EINVAL;
+		addr = ioremap_wc(ggtt->mappable_base +
+				  i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+		if (addr == NULL) {
+			ret = -ENOMEM;
+			goto err_unpin;
 		}
 	}
 
+	ringbuf->virtual_start = addr;
 	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
-
 	return 0;
+
+err_unpin:
+	i915_gem_object_ggtt_unpin(obj);
+	return ret;
 }
 
 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
@@ -2179,7 +2188,7 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ring->ring = engine;
+	ring->engine = engine;
 	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
@@ -2215,37 +2224,38 @@
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
-				  struct intel_engine_cs *ring)
+				  struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
-	WARN_ON(ring->buffer);
+	WARN_ON(engine->buffer);
 
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->execlist_queue);
-	INIT_LIST_HEAD(&ring->buffers);
-	i915_gem_batch_pool_init(dev, &ring->batch_pool);
-	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+	engine->dev = dev;
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
+	INIT_LIST_HEAD(&engine->execlist_queue);
+	INIT_LIST_HEAD(&engine->buffers);
+	i915_gem_batch_pool_init(dev, &engine->batch_pool);
+	memset(engine->semaphore.sync_seqno, 0,
+	       sizeof(engine->semaphore.sync_seqno));
 
-	init_waitqueue_head(&ring->irq_queue);
+	init_waitqueue_head(&engine->irq_queue);
 
-	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
 		goto error;
 	}
-	ring->buffer = ringbuf;
+	engine->buffer = ringbuf;
 
 	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(ring);
+		ret = init_status_page(engine);
 		if (ret)
 			goto error;
 	} else {
-		WARN_ON(ring->id != RCS);
-		ret = init_phys_status_page(ring);
+		WARN_ON(engine->id != RCS);
+		ret = init_phys_status_page(engine);
 		if (ret)
 			goto error;
 	}
@@ -2253,122 +2263,76 @@
 	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-				ring->name, ret);
+				engine->name, ret);
 		intel_destroy_ringbuffer_obj(ringbuf);
 		goto error;
 	}
 
-	ret = i915_cmd_parser_init_ring(ring);
+	ret = i915_cmd_parser_init_ring(engine);
 	if (ret)
 		goto error;
 
 	return 0;
 
 error:
-	intel_cleanup_ring_buffer(ring);
+	intel_cleanup_engine(engine);
 	return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_cleanup_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = to_i915(ring->dev);
+	dev_priv = to_i915(engine->dev);
 
-	if (ring->buffer) {
-		intel_stop_ring_buffer(ring);
-		WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+	if (engine->buffer) {
+		intel_stop_engine(engine);
+		WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ringbuffer_obj(ring->buffer);
-		intel_ringbuffer_free(ring->buffer);
-		ring->buffer = NULL;
+		intel_unpin_ringbuffer_obj(engine->buffer);
+		intel_ringbuffer_free(engine->buffer);
+		engine->buffer = NULL;
 	}
 
-	if (ring->cleanup)
-		ring->cleanup(ring);
+	if (engine->cleanup)
+		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(ring->dev)) {
-		cleanup_status_page(ring);
+	if (I915_NEED_GFX_HWS(engine->dev)) {
+		cleanup_status_page(engine);
 	} else {
-		WARN_ON(ring->id != RCS);
-		cleanup_phys_status_page(ring);
+		WARN_ON(engine->id != RCS);
+		cleanup_phys_status_page(engine);
 	}
 
-	i915_cmd_parser_fini_ring(ring);
-	i915_gem_batch_pool_fini(&ring->batch_pool);
-	ring->dev = NULL;
+	i915_cmd_parser_fini_ring(engine);
+	i915_gem_batch_pool_fini(&engine->batch_pool);
+	engine->dev = NULL;
 }
 
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	struct drm_i915_gem_request *request;
-	unsigned space;
-	int ret;
-
-	if (intel_ring_space(ringbuf) >= n)
-		return 0;
-
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	list_for_each_entry(request, &ring->request_list, list) {
-		space = __intel_ring_space(request->postfix, ringbuf->tail,
-					   ringbuf->size);
-		if (space >= n)
-			break;
-	}
-
-	if (WARN_ON(&request->list == &ring->request_list))
-		return -ENOSPC;
-
-	ret = i915_wait_request(request);
-	if (ret)
-		return ret;
-
-	ringbuf->space = space;
-	return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-	uint32_t __iomem *virt;
-	int rem = ringbuf->size - ringbuf->tail;
-
-	virt = ringbuf->virtual_start + ringbuf->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ringbuf->tail = 0;
-	intel_ring_update_space(ringbuf);
-}
-
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_engine_idle(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req;
 
 	/* Wait upon the last request to be completed */
-	if (list_empty(&ring->request_list))
+	if (list_empty(&engine->request_list))
 		return 0;
 
-	req = list_entry(ring->request_list.prev,
-			struct drm_i915_gem_request,
-			list);
+	req = list_entry(engine->request_list.prev,
+			 struct drm_i915_gem_request,
+			 list);
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
-				   to_i915(ring->dev)->mm.interruptible,
+				   req->i915->mm.interruptible,
 				   NULL, NULL);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	request->ringbuf = request->ring->buffer;
+	request->ringbuf = request->engine->buffer;
 	return 0;
 }
 
@@ -2389,63 +2353,82 @@
 
 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
 {
-	WARN_ON(ringbuf->reserved_size);
-	WARN_ON(ringbuf->reserved_in_use);
-
+	GEM_BUG_ON(ringbuf->reserved_size);
 	ringbuf->reserved_size = size;
 }
 
 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
 {
-	WARN_ON(ringbuf->reserved_in_use);
-
+	GEM_BUG_ON(!ringbuf->reserved_size);
 	ringbuf->reserved_size   = 0;
-	ringbuf->reserved_in_use = false;
 }
 
 void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
 {
-	WARN_ON(ringbuf->reserved_in_use);
-
-	ringbuf->reserved_in_use = true;
-	ringbuf->reserved_tail   = ringbuf->tail;
+	GEM_BUG_ON(!ringbuf->reserved_size);
+	ringbuf->reserved_size   = 0;
 }
 
 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 {
-	WARN_ON(!ringbuf->reserved_in_use);
-	if (ringbuf->tail > ringbuf->reserved_tail) {
-		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
-		     "request reserved size too small: %d vs %d!\n",
-		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
-	} else {
-		/*
-		 * The ring was wrapped while the reserved space was in use.
-		 * That means that some unknown amount of the ring tail was
-		 * no-op filled and skipped. Thus simply adding the ring size
-		 * to the tail and doing the above space check will not work.
-		 * Rather than attempt to track how much tail was skipped,
-		 * it is much simpler to say that also skipping the sanity
-		 * check every once in a while is not a big issue.
-		 */
-	}
-
-	ringbuf->reserved_size   = 0;
-	ringbuf->reserved_in_use = false;
+	GEM_BUG_ON(ringbuf->reserved_size);
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_i915_gem_request *target;
+
+	intel_ring_update_space(ringbuf);
+	if (ringbuf->space >= bytes)
+		return 0;
+
+	/*
+	 * Space is reserved in the ringbuffer for finalising the request,
+	 * as that cannot be allowed to fail. During request finalisation,
+	 * reserved_size is set to 0 to stop the overallocation; the
+	 * assumption is that we then never need to wait (which has the
+	 * risk of failing with EINTR).
+	 *
+	 * See also i915_gem_request_alloc() and i915_add_request().
+	 */
+	GEM_BUG_ON(!ringbuf->reserved_size);
+
+	list_for_each_entry(target, &engine->request_list, list) {
+		unsigned space;
+
+		/*
+		 * The request queue is per-engine, so can contain requests
+		 * from multiple ringbuffers. Here, we must ignore any that
+		 * aren't from the ringbuffer we're considering.
+		 */
+		if (target->ringbuf != ringbuf)
+			continue;
+
+		/* Would completion of this request free enough space? */
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
+					   ringbuf->size);
+		if (space >= bytes)
+			break;
+	}
+
+	if (WARN_ON(&target->list == &engine->request_list))
+		return -ENOSPC;
+
+	return i915_wait_request(target);
+}
+
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	int remain_actual = ringbuf->size - ringbuf->tail;
-	int ret, total_bytes, wait_bytes = 0;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int bytes = num_dwords * sizeof(u32);
+	int total_bytes, wait_bytes;
 	bool need_wrap = false;
 
-	if (ringbuf->reserved_in_use)
-		total_bytes = bytes;
-	else
-		total_bytes = bytes + ringbuf->reserved_size;
+	total_bytes = bytes + ringbuf->reserved_size;
 
 	if (unlikely(bytes > remain_usable)) {
 		/*
@@ -2454,62 +2437,50 @@
 		 */
 		wait_bytes = remain_actual + total_bytes;
 		need_wrap = true;
+	} else if (unlikely(total_bytes > remain_usable)) {
+		/*
+		 * The base request will fit but the reserved space
+		 * falls off the end. So we don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of the ringbuffer.
+		 */
+		wait_bytes = remain_actual + ringbuf->reserved_size;
 	} else {
-		if (unlikely(total_bytes > remain_usable)) {
-			/*
-			 * The base request will fit but the reserved space
-			 * falls off the end. So don't need an immediate wrap
-			 * and only need to effectively wait for the reserved
-			 * size space from the start of ringbuffer.
-			 */
-			wait_bytes = remain_actual + ringbuf->reserved_size;
-		} else if (total_bytes > ringbuf->space) {
-			/* No wrapping required, just waiting. */
-			wait_bytes = total_bytes;
-		}
+		/* No wrapping required, just waiting. */
+		wait_bytes = total_bytes;
 	}
 
-	if (wait_bytes) {
-		ret = ring_wait_for_space(ring, wait_bytes);
+	if (wait_bytes > ringbuf->space) {
+		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
 
-		if (need_wrap)
-			__wrap_ring_buffer(ringbuf);
+		intel_ring_update_space(ringbuf);
+		if (unlikely(ringbuf->space < wait_bytes))
+			return -EAGAIN;
 	}
 
-	return 0;
-}
+	if (unlikely(need_wrap)) {
+		GEM_BUG_ON(remain_actual > ringbuf->space);
+		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
 
-int intel_ring_begin(struct drm_i915_gem_request *req,
-		     int num_dwords)
-{
-	struct intel_engine_cs *ring;
-	struct drm_i915_private *dev_priv;
-	int ret;
+		/* Fill the tail with MI_NOOP */
+		memset(ringbuf->virtual_start + ringbuf->tail,
+		       0, remain_actual);
+		ringbuf->tail = 0;
+		ringbuf->space -= remain_actual;
+	}
 
-	WARN_ON(req == NULL);
-	ring = req->ring;
-	dev_priv = ring->dev->dev_private;
-
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-				   dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
-	if (ret)
-		return ret;
-
-	ring->buffer->space -= num_dwords * sizeof(uint32_t);
+	ringbuf->space -= bytes;
+	GEM_BUG_ON(ringbuf->space < 0);
 	return 0;
 }
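
For reference, a minimal userspace model of the space accounting that the
reworked wait_for_space()/intel_ring_begin() pair above implements. It is a
sketch only: names are illustrative, "waiting" is reduced to returning a byte
count, and the real __intel_ring_space() additionally keeps a small gap so the
tail can never catch up with the head.

#include <stdio.h>
#include <stdbool.h>

struct ring {
	int size;		/* total ring size in bytes */
	int effective_size;	/* size minus the wrap margin */
	int head, tail;
	int reserved_size;	/* kept back for request finalisation */
};

static int ring_space(const struct ring *r)
{
	int space = r->head - r->tail;
	if (space <= 0)
		space += r->size;
	return space;
}

/* How many bytes must be free before 'bytes' can be emitted, and whether
 * the tail must first be wrapped (NOOP/zero-filled) back to offset 0. */
static int wait_bytes(const struct ring *r, int bytes, bool *need_wrap)
{
	int remain_actual = r->size - r->tail;
	int remain_usable = r->effective_size - r->tail;
	int total = bytes + r->reserved_size;

	*need_wrap = false;
	if (bytes > remain_usable) {
		/* the request itself does not fit before the end */
		*need_wrap = true;
		return remain_actual + total;
	}
	if (total > remain_usable)
		/* only the reservation spills past the end */
		return remain_actual + r->reserved_size;
	return total;	/* no wrap needed, just wait for space */
}

int main(void)
{
	struct ring r = { .size = 4096, .effective_size = 4096 - 16,
			  .head = 512, .tail = 3900, .reserved_size = 160 };
	bool wrap;
	int waited_for = wait_bytes(&r, 256, &wrap);

	printf("space=%d wait_for=%d need_wrap=%d\n",
	       ring_space(&r), waited_for, wrap);
	return 0;
}
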
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_engine_cs *engine = req->engine;
+	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2521,33 +2492,52 @@
 		return ret;
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, MI_NOOP);
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 
-	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
-		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
-		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
-		if (HAS_VEBOX(dev))
-			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
+	 * so long as the semaphore value in the register/page is greater
+	 * than the sync value), so whenever we reset the seqno we must
+	 * also reset the tracking semaphore value to 0 so that it is
+	 * always before the next request's seqno. If we don't reset
+	 * the semaphore value, then when the seqno moves backwards all
+	 * future waits will complete instantly (causing rendering corruption).
+	 */
+	if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+		if (HAS_VEBOX(dev_priv))
+			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
 	}
+	if (dev_priv->semaphore_obj) {
+		struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
+		struct page *page = i915_gem_object_get_dirty_page(obj, 0);
+		void *semaphores = kmap(page);
+		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+		kunmap(page);
+	}
+	memset(engine->semaphore.sync_seqno, 0,
+	       sizeof(engine->semaphore.sync_seqno));
 
-	ring->set_seqno(ring, seqno);
-	ring->hangcheck.seqno = seqno;
+	engine->set_seqno(engine, seqno);
+	engine->last_submitted_seqno = seqno;
+
+	engine->hangcheck.seqno = seqno;
 }
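
As a reference for the semaphore reset above, a small userspace model of the
gen8 semaphore page layout it assumes: one qword slot per (signalling engine,
waiting engine) pair, row-major by signaller, mirroring the new
GEN8_SEMAPHORE_OFFSET() macro in intel_ringbuffer.h. Clearing an engine's row
when its seqno is reset keeps every tracked value below any future seqno, so
the strictly monotonic comparison stays valid. Names here are illustrative.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUM_ENGINES	5
#define SLOT_SIZE	sizeof(uint64_t)	/* qword slots, as the header comment requires */

static uint8_t semaphore_page[NUM_ENGINES * NUM_ENGINES * SLOT_SIZE];

static size_t semaphore_offset(unsigned from, unsigned to)
{
	return (from * NUM_ENGINES + to) * SLOT_SIZE;
}

static void reset_engine_semaphores(unsigned engine_id)
{
	/* same span as the memset() in intel_ring_init_seqno() above */
	memset(semaphore_page + semaphore_offset(engine_id, 0),
	       0, NUM_ENGINES * SLOT_SIZE);
}

int main(void)
{
	reset_engine_semaphores(2);
	printf("slot (from=2, to=3) lives at offset %#zx\n",
	       semaphore_offset(2, 3));
	return 0;
}
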
 
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 				     u32 value)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        /* Every tail move must follow the sequence below */
 
@@ -2567,8 +2557,8 @@
 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_TAIL(ring, value);
-	POSTING_READ(RING_TAIL(ring->mmio_base));
+	I915_WRITE_TAIL(engine, value);
+	POSTING_READ(RING_TAIL(engine->mmio_base));
 
 	/* Let the ring send IDLE messages to the GT again,
 	 * and so let it sleep to conserve power when idle.
@@ -2580,7 +2570,7 @@
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t cmd;
 	int ret;
 
@@ -2589,7 +2579,7 @@
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(ring->dev)->gen >= 8)
+	if (INTEL_INFO(engine->dev)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2608,16 +2598,17 @@
 	if (invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(engine->dev)->gen >= 8) {
+		intel_ring_emit(engine, 0); /* upper addr */
+		intel_ring_emit(engine, 0); /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, 0);
+		intel_ring_emit(engine, MI_NOOP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 	return 0;
 }
 
@@ -2626,8 +2617,8 @@
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
-	bool ppgtt = USES_PPGTT(ring->dev) &&
+	struct intel_engine_cs *engine = req->engine;
+	bool ppgtt = USES_PPGTT(engine->dev) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2636,13 +2627,13 @@
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, lower_32_bits(offset));
+	intel_ring_emit(engine, upper_32_bits(offset));
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2652,22 +2643,22 @@
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
+	intel_ring_emit(engine,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, offset);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2677,20 +2668,20 @@
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
+	intel_ring_emit(engine,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	intel_ring_emit(engine, offset);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2700,8 +2691,8 @@
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_device *dev = engine->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2728,16 +2719,17 @@
 	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(engine, cmd);
+	intel_ring_emit(engine,
+			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_INFO(dev)->gen >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		intel_ring_emit(engine, 0); /* upper addr */
+		intel_ring_emit(engine, 0); /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(engine, 0);
+		intel_ring_emit(engine, MI_NOOP);
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
@@ -2745,14 +2737,15 @@
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->exec_id = I915_EXEC_RENDER;
-	ring->mmio_base = RENDER_RING_BASE;
+	engine->name = "render ring";
+	engine->id = RCS;
+	engine->exec_id = I915_EXEC_RENDER;
+	engine->hw_id = 0;
+	engine->mmio_base = RENDER_RING_BASE;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (i915_semaphore_is_enabled(dev)) {
@@ -2772,34 +2765,36 @@
 			}
 		}
 
-		ring->init_context = intel_rcs_ctx_init;
-		ring->add_request = gen6_add_request;
-		ring->flush = gen8_render_ring_flush;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
-		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-		ring->get_seqno = gen6_ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+		engine->init_context = intel_rcs_ctx_init;
+		engine->add_request = gen6_add_request;
+		engine->flush = gen8_render_ring_flush;
+		engine->irq_get = gen8_ring_get_irq;
+		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		engine->irq_seqno_barrier = gen6_seqno_barrier;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (i915_semaphore_is_enabled(dev)) {
 			WARN_ON(!dev_priv->semaphore_obj);
-			ring->semaphore.sync_to = gen8_ring_sync;
-			ring->semaphore.signal = gen8_rcs_signal;
-			GEN8_RING_SEMAPHORE_INIT;
+			engine->semaphore.sync_to = gen8_ring_sync;
+			engine->semaphore.signal = gen8_rcs_signal;
+			GEN8_RING_SEMAPHORE_INIT(engine);
 		}
 	} else if (INTEL_INFO(dev)->gen >= 6) {
-		ring->init_context = intel_rcs_ctx_init;
-		ring->add_request = gen6_add_request;
-		ring->flush = gen7_render_ring_flush;
+		engine->init_context = intel_rcs_ctx_init;
+		engine->add_request = gen6_add_request;
+		engine->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
-			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-		ring->get_seqno = gen6_ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+			engine->flush = gen6_render_ring_flush;
+		engine->irq_get = gen6_ring_get_irq;
+		engine->irq_put = gen6_ring_put_irq;
+		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		engine->irq_seqno_barrier = gen6_seqno_barrier;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen6_ring_sync;
-			ring->semaphore.signal = gen6_signal;
+			engine->semaphore.sync_to = gen6_ring_sync;
+			engine->semaphore.signal = gen6_signal;
 			/*
 			 * The current semaphore is only applied on pre-gen8
 			 * platform.  And there is no VCS2 ring on the pre-gen8
@@ -2807,59 +2802,59 @@
 			 * initialized as INVALID.  Gen8 will initialize the
 			 * sema between VCS2 and RCS later.
 			 */
-			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+			engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+			engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+			engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	} else if (IS_GEN5(dev)) {
-		ring->add_request = pc_render_add_request;
-		ring->flush = gen4_render_ring_flush;
-		ring->get_seqno = pc_render_get_seqno;
-		ring->set_seqno = pc_render_set_seqno;
-		ring->irq_get = gen5_ring_get_irq;
-		ring->irq_put = gen5_ring_put_irq;
-		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+		engine->add_request = pc_render_add_request;
+		engine->flush = gen4_render_ring_flush;
+		engine->get_seqno = pc_render_get_seqno;
+		engine->set_seqno = pc_render_set_seqno;
+		engine->irq_get = gen5_ring_get_irq;
+		engine->irq_put = gen5_ring_put_irq;
+		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
 					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
 	} else {
-		ring->add_request = i9xx_add_request;
+		engine->add_request = i9xx_add_request;
 		if (INTEL_INFO(dev)->gen < 4)
-			ring->flush = gen2_render_ring_flush;
+			engine->flush = gen2_render_ring_flush;
 		else
-			ring->flush = gen4_render_ring_flush;
-		ring->get_seqno = ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+			engine->flush = gen4_render_ring_flush;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (IS_GEN2(dev)) {
-			ring->irq_get = i8xx_ring_get_irq;
-			ring->irq_put = i8xx_ring_put_irq;
+			engine->irq_get = i8xx_ring_get_irq;
+			engine->irq_put = i8xx_ring_put_irq;
 		} else {
-			ring->irq_get = i9xx_ring_get_irq;
-			ring->irq_put = i9xx_ring_put_irq;
+			engine->irq_get = i9xx_ring_get_irq;
+			engine->irq_put = i9xx_ring_put_irq;
 		}
-		ring->irq_enable_mask = I915_USER_INTERRUPT;
+		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
-	ring->write_tail = ring_write_tail;
+	engine->write_tail = ring_write_tail;
 
 	if (IS_HASWELL(dev))
-		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 	else if (IS_GEN8(dev))
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
 	else if (IS_I830(dev) || IS_845G(dev))
-		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
 	else
-		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-	ring->init_hw = init_render_ring;
-	ring->cleanup = render_ring_cleanup;
+		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+	engine->init_hw = init_render_ring;
+	engine->cleanup = render_ring_cleanup;
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
@@ -2876,16 +2871,16 @@
 			return ret;
 		}
 
-		ring->scratch.obj = obj;
-		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+		engine->scratch.obj = obj;
+		engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	}
 
-	ret = intel_init_ring_buffer(dev, ring);
+	ret = intel_init_ring_buffer(dev, engine);
 	if (ret)
 		return ret;
 
 	if (INTEL_INFO(dev)->gen >= 5) {
-		ret = intel_init_pipe_control(ring);
+		ret = intel_init_pipe_control(engine);
 		if (ret)
 			return ret;
 	}
@@ -2896,75 +2891,77 @@
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
-	ring->name = "bsd ring";
-	ring->id = VCS;
-	ring->exec_id = I915_EXEC_BSD;
+	engine->name = "bsd ring";
+	engine->id = VCS;
+	engine->exec_id = I915_EXEC_BSD;
+	engine->hw_id = 1;
 
-	ring->write_tail = ring_write_tail;
+	engine->write_tail = ring_write_tail;
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ring->mmio_base = GEN6_BSD_RING_BASE;
+		engine->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
-			ring->write_tail = gen6_bsd_ring_write_tail;
-		ring->flush = gen6_bsd_ring_flush;
-		ring->add_request = gen6_add_request;
-		ring->get_seqno = gen6_ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+			engine->write_tail = gen6_bsd_ring_write_tail;
+		engine->flush = gen6_bsd_ring_flush;
+		engine->add_request = gen6_add_request;
+		engine->irq_seqno_barrier = gen6_seqno_barrier;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (INTEL_INFO(dev)->gen >= 8) {
-			ring->irq_enable_mask =
+			engine->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-			ring->irq_get = gen8_ring_get_irq;
-			ring->irq_put = gen8_ring_put_irq;
-			ring->dispatch_execbuffer =
+			engine->irq_get = gen8_ring_get_irq;
+			engine->irq_put = gen8_ring_put_irq;
+			engine->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev)) {
-				ring->semaphore.sync_to = gen8_ring_sync;
-				ring->semaphore.signal = gen8_xcs_signal;
-				GEN8_RING_SEMAPHORE_INIT;
+				engine->semaphore.sync_to = gen8_ring_sync;
+				engine->semaphore.signal = gen8_xcs_signal;
+				GEN8_RING_SEMAPHORE_INIT(engine);
 			}
 		} else {
-			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-			ring->irq_get = gen6_ring_get_irq;
-			ring->irq_put = gen6_ring_put_irq;
-			ring->dispatch_execbuffer =
+			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			engine->irq_get = gen6_ring_get_irq;
+			engine->irq_put = gen6_ring_put_irq;
+			engine->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev)) {
-				ring->semaphore.sync_to = gen6_ring_sync;
-				ring->semaphore.signal = gen6_signal;
-				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+				engine->semaphore.sync_to = gen6_ring_sync;
+				engine->semaphore.signal = gen6_signal;
+				engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+				engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+				engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+				engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+				engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+				engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+				engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+				engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+				engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+				engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 			}
 		}
 	} else {
-		ring->mmio_base = BSD_RING_BASE;
-		ring->flush = bsd_ring_flush;
-		ring->add_request = i9xx_add_request;
-		ring->get_seqno = ring_get_seqno;
-		ring->set_seqno = ring_set_seqno;
+		engine->mmio_base = BSD_RING_BASE;
+		engine->flush = bsd_ring_flush;
+		engine->add_request = i9xx_add_request;
+		engine->get_seqno = ring_get_seqno;
+		engine->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
-			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-			ring->irq_get = gen5_ring_get_irq;
-			ring->irq_put = gen5_ring_put_irq;
+			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+			engine->irq_get = gen5_ring_get_irq;
+			engine->irq_put = gen5_ring_put_irq;
 		} else {
-			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-			ring->irq_get = i9xx_ring_get_irq;
-			ring->irq_put = i9xx_ring_put_irq;
+			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			engine->irq_get = i9xx_ring_get_irq;
+			engine->irq_put = i9xx_ring_put_irq;
 		}
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 /**
@@ -2973,68 +2970,72 @@
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
-	ring->name = "bsd2 ring";
-	ring->id = VCS2;
-	ring->exec_id = I915_EXEC_BSD;
+	engine->name = "bsd2 ring";
+	engine->id = VCS2;
+	engine->exec_id = I915_EXEC_BSD;
+	engine->hw_id = 4;
 
-	ring->write_tail = ring_write_tail;
-	ring->mmio_base = GEN8_BSD2_RING_BASE;
-	ring->flush = gen6_bsd_ring_flush;
-	ring->add_request = gen6_add_request;
-	ring->get_seqno = gen6_ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask =
+	engine->write_tail = ring_write_tail;
+	engine->mmio_base = GEN8_BSD2_RING_BASE;
+	engine->flush = gen6_bsd_ring_flush;
+	engine->add_request = gen6_add_request;
+	engine->irq_seqno_barrier = gen6_seqno_barrier;
+	engine->get_seqno = ring_get_seqno;
+	engine->set_seqno = ring_set_seqno;
+	engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	ring->irq_get = gen8_ring_get_irq;
-	ring->irq_put = gen8_ring_put_irq;
-	ring->dispatch_execbuffer =
+	engine->irq_get = gen8_ring_get_irq;
+	engine->irq_put = gen8_ring_put_irq;
+	engine->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
 	if (i915_semaphore_is_enabled(dev)) {
-		ring->semaphore.sync_to = gen8_ring_sync;
-		ring->semaphore.signal = gen8_xcs_signal;
-		GEN8_RING_SEMAPHORE_INIT;
+		engine->semaphore.sync_to = gen8_ring_sync;
+		engine->semaphore.signal = gen8_xcs_signal;
+		GEN8_RING_SEMAPHORE_INIT(engine);
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+	struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
-	ring->name = "blitter ring";
-	ring->id = BCS;
-	ring->exec_id = I915_EXEC_BLT;
+	engine->name = "blitter ring";
+	engine->id = BCS;
+	engine->exec_id = I915_EXEC_BLT;
+	engine->hw_id = 2;
 
-	ring->mmio_base = BLT_RING_BASE;
-	ring->write_tail = ring_write_tail;
-	ring->flush = gen6_ring_flush;
-	ring->add_request = gen6_add_request;
-	ring->get_seqno = gen6_ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
+	engine->mmio_base = BLT_RING_BASE;
+	engine->write_tail = ring_write_tail;
+	engine->flush = gen6_ring_flush;
+	engine->add_request = gen6_add_request;
+	engine->irq_seqno_barrier = gen6_seqno_barrier;
+	engine->get_seqno = ring_get_seqno;
+	engine->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
-		ring->irq_enable_mask =
+		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->irq_get = gen8_ring_get_irq;
+		engine->irq_put = gen8_ring_put_irq;
+		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen8_ring_sync;
-			ring->semaphore.signal = gen8_xcs_signal;
-			GEN8_RING_SEMAPHORE_INIT;
+			engine->semaphore.sync_to = gen8_ring_sync;
+			engine->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT(engine);
 		}
 	} else {
-		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		engine->irq_get = gen6_ring_get_irq;
+		engine->irq_put = gen6_ring_put_irq;
+		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.signal = gen6_signal;
-			ring->semaphore.sync_to = gen6_ring_sync;
+			engine->semaphore.signal = gen6_signal;
+			engine->semaphore.sync_to = gen6_ring_sync;
 			/*
 			 * The current semaphore is only applied on pre-gen8
 			 * platform.  And there is no VCS2 ring on the pre-gen8
@@ -3042,127 +3043,129 @@
 			 * initialized as INVALID.  Gen8 will initialize the
 			 * sema between BCS and VCS2 later.
 			 */
-			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+			engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+			engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+			engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+	struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
-	ring->name = "video enhancement ring";
-	ring->id = VECS;
-	ring->exec_id = I915_EXEC_VEBOX;
+	engine->name = "video enhancement ring";
+	engine->id = VECS;
+	engine->exec_id = I915_EXEC_VEBOX;
+	engine->hw_id = 3;
 
-	ring->mmio_base = VEBOX_RING_BASE;
-	ring->write_tail = ring_write_tail;
-	ring->flush = gen6_ring_flush;
-	ring->add_request = gen6_add_request;
-	ring->get_seqno = gen6_ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
+	engine->mmio_base = VEBOX_RING_BASE;
+	engine->write_tail = ring_write_tail;
+	engine->flush = gen6_ring_flush;
+	engine->add_request = gen6_add_request;
+	engine->irq_seqno_barrier = gen6_seqno_barrier;
+	engine->get_seqno = ring_get_seqno;
+	engine->set_seqno = ring_set_seqno;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		ring->irq_enable_mask =
+		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->irq_get = gen8_ring_get_irq;
+		engine->irq_put = gen8_ring_put_irq;
+		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen8_ring_sync;
-			ring->semaphore.signal = gen8_xcs_signal;
-			GEN8_RING_SEMAPHORE_INIT;
+			engine->semaphore.sync_to = gen8_ring_sync;
+			engine->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT(engine);
 		}
 	} else {
-		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-		ring->irq_get = hsw_vebox_get_irq;
-		ring->irq_put = hsw_vebox_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		engine->irq_get = hsw_vebox_get_irq;
+		engine->irq_put = hsw_vebox_put_irq;
+		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
-			ring->semaphore.sync_to = gen6_ring_sync;
-			ring->semaphore.signal = gen6_signal;
-			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			engine->semaphore.sync_to = gen6_ring_sync;
+			engine->semaphore.signal = gen6_signal;
+			engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+			engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+			engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+			engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+			engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+			engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+			engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+			engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 		}
 	}
-	ring->init_hw = init_ring_common;
+	engine->init_hw = init_ring_common;
 
-	return intel_init_ring_buffer(dev, ring);
+	return intel_init_ring_buffer(dev, engine);
 }
 
 int
 intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
-	if (!ring->gpu_caches_dirty)
+	if (!engine->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 int
 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
 	int ret;
 
 	flush_domains = 0;
-	if (ring->gpu_caches_dirty)
+	if (engine->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
 	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
-	ring->gpu_caches_dirty = false;
+	engine->gpu_caches_dirty = false;
 	return 0;
 }
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_stop_engine(struct intel_engine_cs *engine)
 {
 	int ret;
 
-	if (!intel_ring_initialized(ring))
+	if (!intel_engine_initialized(engine))
 		return;
 
-	ret = intel_ring_idle(ring);
-	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+	ret = intel_engine_idle(engine);
+	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-			  ring->name, ret);
+			  engine->name, ret);
 
-	stop_ring(ring);
+	stop_ring(engine);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 566b0ae..ff12648 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -52,34 +52,32 @@
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
  */
-#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define gen8_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
+	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
 #define GEN8_SIGNAL_OFFSET(__ring, to)			     \
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) +	\
-	(i915_semaphore_seqno_size * (to)))
-
+	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
 #define GEN8_WAIT_OFFSET(__ring, from)			     \
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
-	(i915_semaphore_seqno_size * (__ring)->id))
+	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-#define GEN8_RING_SEMAPHORE_INIT do { \
+#define GEN8_RING_SEMAPHORE_INIT(e) do { \
 	if (!dev_priv->semaphore_obj) { \
 		break; \
 	} \
-	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
-	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
-	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
-	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
-	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
-	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+	(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
+	(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
+	(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
+	(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
+	(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
+	(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
 	} while(0)
 
 enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
-	HANGCHECK_ACTIVE_LOOP,
 	HANGCHECK_KICK,
 	HANGCHECK_HUNG,
 };
@@ -88,8 +86,8 @@
 
 struct intel_ring_hangcheck {
 	u64 acthd;
-	u64 max_acthd;
 	u32 seqno;
+	unsigned user_interrupts;
 	int score;
 	enum intel_ring_hangcheck_action action;
 	int deadlock;
@@ -101,7 +99,7 @@
 	void __iomem *virtual_start;
 	struct i915_vma *vma;
 
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct list_head link;
 
 	u32 head;
@@ -110,8 +108,6 @@
 	int size;
 	int effective_size;
 	int reserved_size;
-	int reserved_tail;
-	bool reserved_in_use;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -125,7 +121,7 @@
 };
 
 struct	intel_context;
-struct drm_i915_reg_descriptor;
+struct drm_i915_reg_table;
 
 /*
  * we use a single page to load ctx workarounds so all of these
@@ -148,17 +144,18 @@
 
 struct  intel_engine_cs {
 	const char	*name;
-	enum intel_ring_id {
+	enum intel_engine_id {
 		RCS = 0,
 		BCS,
 		VCS,
 		VCS2,	/* Keep instances of the same type engine together. */
 		VECS
 	} id;
-#define I915_NUM_RINGS 5
+#define I915_NUM_ENGINES 5
 #define _VCS(n) (VCS + (n))
 	unsigned int exec_id;
-	unsigned int guc_id;
+	unsigned int hw_id;
+	unsigned int guc_id; /* XXX same as hw_id? */
 	u32		mmio_base;
 	struct		drm_device *dev;
 	struct intel_ringbuffer *buffer;
@@ -196,8 +193,8 @@
 	 * seen value is good enough. Note that the seqno will always be
 	 * monotonic, even if not coherent.
 	 */
-	u32		(*get_seqno)(struct intel_engine_cs *ring,
-				     bool lazy_coherency);
+	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
+	u32		(*get_seqno)(struct intel_engine_cs *ring);
 	void		(*set_seqno)(struct intel_engine_cs *ring,
 				     u32 seqno);
 	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
@@ -246,16 +243,16 @@
 	 *  ie. transpose of f(x, y)
 	 */
 	struct {
-		u32	sync_seqno[I915_NUM_RINGS-1];
+		u32	sync_seqno[I915_NUM_ENGINES-1];
 
 		union {
 			struct {
 				/* our mbox written by others */
-				u32		wait[I915_NUM_RINGS];
+				u32		wait[I915_NUM_ENGINES];
 				/* mboxes this ring signals to */
-				i915_reg_t	signal[I915_NUM_RINGS];
+				i915_reg_t	signal[I915_NUM_ENGINES];
 			} mbox;
-			u64		signal_ggtt[I915_NUM_RINGS];
+			u64		signal_ggtt[I915_NUM_ENGINES];
 		};
 
 		/* AKA wait() */
@@ -268,10 +265,13 @@
 	} semaphore;
 
 	/* Execlists */
-	spinlock_t execlist_lock;
+	struct tasklet_struct irq_tasklet;
+	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
 	struct list_head execlist_queue;
 	struct list_head execlist_retired_req_list;
-	u8 next_context_status_buffer;
+	unsigned int fw_domains;
+	unsigned int next_context_status_buffer;
+	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
 	u32 ctx_desc_template;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
@@ -306,6 +306,7 @@
 	 * inspecting request list.
 	 */
 	u32 last_submitted_seqno;
+	unsigned user_interrupts;
 
 	bool gpu_caches_dirty;
 
@@ -332,15 +333,8 @@
 	/*
 	 * Table of registers allowed in commands that read/write registers.
 	 */
-	const struct drm_i915_reg_descriptor *reg_table;
-	int reg_count;
-
-	/*
-	 * Table of registers allowed in commands that read/write registers, but
-	 * only from the DRM master.
-	 */
-	const struct drm_i915_reg_descriptor *master_reg_table;
-	int master_reg_count;
+	const struct drm_i915_reg_table *reg_tables;
+	int reg_table_count;
 
 	/*
 	 * Returns the bitmask for the length field of the specified command.
@@ -356,19 +350,19 @@
 };
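
A brief sketch of the locking rule noted on execlist_lock above: the execlist
queue is also manipulated from the new irq_tasklet, so process-context users
take the lock with bottom halves disabled. The function name is made up for
illustration, and the request's execlist_link field is assumed from the
surrounding driver.

static void queue_for_execlists(struct intel_engine_cs *engine,
				struct drm_i915_gem_request *req)
{
	spin_lock_bh(&engine->execlist_lock);
	list_add_tail(&req->execlist_link, &engine->execlist_queue);
	spin_unlock_bh(&engine->execlist_lock);
}
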
 
 static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_engine_initialized(struct intel_engine_cs *engine)
 {
-	return ring->dev != NULL;
+	return engine->dev != NULL;
 }
 
 static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_engine_flag(struct intel_engine_cs *engine)
 {
-	return 1 << ring->id;
+	return 1 << engine->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
+intel_ring_sync_index(struct intel_engine_cs *engine,
 		      struct intel_engine_cs *other)
 {
 	int idx;
@@ -381,34 +375,33 @@
 	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
 	 */
 
-	idx = (other - ring) - 1;
+	idx = (other - engine) - 1;
 	if (idx < 0)
-		idx += I915_NUM_RINGS;
+		idx += I915_NUM_ENGINES;
 
 	return idx;
 }
 
 static inline void
-intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+intel_flush_status_page(struct intel_engine_cs *engine, int reg)
 {
-	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
-			       sizeof(uint32_t));
+	mb();
+	clflush(&engine->status_page.page_addr[reg]);
+	mb();
 }
 
 static inline u32
-intel_read_status_page(struct intel_engine_cs *ring,
-		       int reg)
+intel_read_status_page(struct intel_engine_cs *engine, int reg)
 {
 	/* Ensure that the compiler doesn't optimize away the load. */
-	barrier();
-	return ring->status_page.page_addr[reg];
+	return READ_ONCE(engine->status_page.page_addr[reg]);
 }
 
 static inline void
-intel_write_status_page(struct intel_engine_cs *ring,
+intel_write_status_page(struct intel_engine_cs *engine,
 			int reg, u32 value)
 {
-	ring->status_page.page_addr[reg] = value;
+	engine->status_page.page_addr[reg] = value;
 }
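
A reduced illustration of the intel_read_status_page() change above: READ_ONCE()
performs the load through a volatile pointer, so the compiler must re-read the
status page on every call rather than reusing a cached value; the old barrier()
call provided the same guarantee, now expressed per access. This is a simplified
stand-in for the kernel macro (GNU C typeof), not the real definition.

#include <stdint.h>

#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

static uint32_t *status_page;	/* stands in for engine->status_page.page_addr */

static uint32_t read_status(int reg)
{
	return READ_ONCE(status_page[reg]);
}
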
 
 /*
@@ -439,42 +432,41 @@
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_stop_engine(struct intel_engine_cs *engine);
+void intel_cleanup_engine(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
+static inline void intel_ring_emit(struct intel_engine_cs *engine,
 				   u32 data)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
 	ringbuf->tail += 4;
 }
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
 				       i915_reg_t reg)
 {
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
 }
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = engine->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
 }
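
A typical emission sequence using these helpers, as seen throughout the .c
changes above; a sketch only, with the surrounding request setup and error
handling elided and the function name invented for illustration:

static int emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);		/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);

	return 0;
}
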
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *ring);
+bool intel_engine_stopped(struct intel_engine_cs *engine);
 
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int __must_check intel_engine_idle(struct intel_engine_cs *engine);
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
-void intel_fini_pipe_control(struct intel_engine_cs *ring);
-int intel_init_pipe_control(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -482,9 +474,9 @@
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
 
-int init_workarounds_ring(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *engine);
 
 static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6e54d97..7fb1da4 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -89,6 +89,10 @@
 		return "TRANSCODER_C";
 	case POWER_DOMAIN_TRANSCODER_EDP:
 		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_TRANSCODER_DSI_A:
+		return "TRANSCODER_DSI_A";
+	case POWER_DOMAIN_TRANSCODER_DSI_C:
+		return "TRANSCODER_DSI_C";
 	case POWER_DOMAIN_PORT_DDI_A_LANES:
 		return "PORT_DDI_A_LANES";
 	case POWER_DOMAIN_PORT_DDI_B_LANES:
@@ -393,11 +397,6 @@
 	BIT(POWER_DOMAIN_MODESET) |			\
 	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
-	(POWER_DOMAIN_MASK & ~(				\
-	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
-	BIT(POWER_DOMAIN_INIT))
 
 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
@@ -415,36 +414,21 @@
 	BIT(POWER_DOMAIN_VGA) |				\
 	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
-	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-	BIT(POWER_DOMAIN_PIPE_A) |			\
-	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
-	BIT(POWER_DOMAIN_AUX_A) |			\
-	BIT(POWER_DOMAIN_PLLS) |			\
-	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
 	BIT(POWER_DOMAIN_MODESET) |			\
 	BIT(POWER_DOMAIN_AUX_A) |			\
 	BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
-	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
-	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
-	BIT(POWER_DOMAIN_INIT))
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
-
-	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
-	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
-		"DC9 already programmed to be enabled.\n");
-	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-		"DC5 still not disabled to enable DC9.\n");
-	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
-	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+		  "DC9 already programmed to be enabled.\n");
+	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+		  "DC5 still not disabled to enable DC9.\n");
+	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+	WARN_ONCE(intel_irqs_enabled(dev_priv),
+		  "Interrupts not disabled yet.\n");
 
 	 /*
 	  * TODO: check for the following to verify the conditions to enter DC9
@@ -457,11 +441,10 @@
 
 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 {
-	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
-	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
-		"DC9 already programmed to be disabled.\n");
-	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-		"DC5 still not disabled.\n");
+	WARN_ONCE(intel_irqs_enabled(dev_priv),
+		  "Interrupts not disabled yet.\n");
+	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+		  "DC5 still not disabled.\n");
 
 	 /*
 	  * TODO: check for the following to verify DC9 state was indeed
@@ -472,24 +455,6 @@
 	  */
 }
 
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
-{
-	uint32_t val, mask;
-
-	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
-
-	if (IS_BROXTON(dev_priv))
-		mask |= DC_STATE_DEBUG_MASK_CORES;
-
-	/* The below bit doesn't need to be cleared ever afterwards */
-	val = I915_READ(DC_STATE_DEBUG);
-	if ((val & mask) != mask) {
-		val |= mask;
-		I915_WRITE(DC_STATE_DEBUG, val);
-		POSTING_READ(DC_STATE_DEBUG);
-	}
-}
-
 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 				u32 state)
 {
@@ -527,10 +492,9 @@
 			      state, rewrites);
 }
 
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
-	uint32_t mask;
+	u32 mask;
 
 	mask = DC_STATE_EN_UPTO_DC5;
 	if (IS_BROXTON(dev_priv))
@@ -538,14 +502,30 @@
 	else
 		mask |= DC_STATE_EN_UPTO_DC6;
 
-	WARN_ON_ONCE(state & ~mask);
+	return mask;
+}
 
-	if (i915.enable_dc == 0)
-		state = DC_STATE_DISABLE;
-	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
-		state = DC_STATE_EN_UPTO_DC5;
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+		      dev_priv->csr.dc_state, val);
+	dev_priv->csr.dc_state = val;
+}
+
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+{
+	uint32_t val;
+	uint32_t mask;
+
+	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+		state &= dev_priv->csr.allowed_dc_mask;
 
 	val = I915_READ(DC_STATE_EN);
+	mask = gen9_dc_mask(dev_priv);
 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
 		      val & mask, state);
 
@@ -590,13 +570,9 @@
 
 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
 					SKL_DISP_PW_2);
 
-	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
-		  "Platform doesn't support DC5.\n");
-	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
@@ -606,19 +582,7 @@
 	assert_csr_loaded(dev_priv);
 }
 
-static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * During initialization, the firmware may not be loaded yet.
-	 * We still want to make sure that the DC enabling flag is cleared.
-	 */
-	if (dev_priv->power_domains.initializing)
-		return;
-
-	assert_rpm_wakelock_held(dev_priv);
-}
-
-static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc5(dev_priv);
 
@@ -629,11 +593,6 @@
 
 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
-
-	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
-		  "Platform doesn't support DC6.\n");
-	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
 		  "Backlight is not disabled.\n");
 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
@@ -642,30 +601,6 @@
 	assert_csr_loaded(dev_priv);
 }
 
-static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * During initialization, the firmware may not be loaded yet.
-	 * We still want to make sure that the DC enabling flag is cleared.
-	 */
-	if (dev_priv->power_domains.initializing)
-		return;
-
-	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
-		  "DC6 already programmed to be disabled.\n");
-}
-
-static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
-{
-	assert_can_disable_dc5(dev_priv);
-
-	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-	    i915.enable_dc != 0 && i915.enable_dc != 1)
-		assert_can_disable_dc6(dev_priv);
-
-	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-}
-
 void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc6(dev_priv);
@@ -678,13 +613,50 @@
 
 void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
-	assert_can_disable_dc6(dev_priv);
-
 	DRM_DEBUG_KMS("Disabling DC6\n");
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
+static void
+gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
+				  struct i915_power_well *power_well)
+{
+	enum skl_disp_power_wells power_well_id = power_well->data;
+	u32 val;
+	u32 mask;
+
+	mask = SKL_POWER_WELL_REQ(power_well_id);
+
+	val = I915_READ(HSW_PWR_WELL_KVMR);
+	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
+		      power_well->name))
+		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
+
+	val = I915_READ(HSW_PWR_WELL_BIOS);
+	val |= I915_READ(HSW_PWR_WELL_DEBUG);
+
+	if (!(val & mask))
+		return;
+
+	/*
+	 * DMC is known to force on the request bits for power well 1 on SKL
+	 * and BXT and the misc IO power well on SKL but we don't expect any
+	 * other request bits to be set, so WARN for those.
+	 */
+	if (power_well_id == SKL_DISP_PW_1 ||
+	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+	     power_well_id == SKL_DISP_PW_MISC_IO))
+		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
+				 "by DMC\n", power_well->name);
+	else
+		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
+			  power_well->name);
+
+	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
+	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
+}
+
 static void skl_set_power_well(struct drm_i915_private *dev_priv,
 			struct i915_power_well *power_well, bool enable)
 {
@@ -739,10 +711,6 @@
 
 		if (!is_enabled) {
 			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
-			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-				state_mask), 1))
-				DRM_ERROR("%s enable timeout\n",
-					power_well->name);
 			check_fuse_status = true;
 		}
 	} else {
@@ -751,8 +719,16 @@
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
 		}
+
+		if (IS_GEN9(dev_priv))
+			gen9_sanitize_power_well_requests(dev_priv, power_well);
 	}
 
+	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
+		     1))
+		DRM_ERROR("%s %s timeout\n",
+			  power_well->name, enable ? "enable" : "disable");
+
 	if (check_fuse_status) {
 		if (power_well->data == SKL_DISP_PW_1) {
 			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
@@ -833,32 +809,33 @@
 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 					  struct i915_power_well *power_well)
 {
-	gen9_disable_dc5_dc6(dev_priv);
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+	if (IS_BROXTON(dev_priv)) {
+		broxton_cdclk_verify_state(dev_priv);
+		broxton_ddi_phy_verify_state(dev_priv);
+	}
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-	    i915.enable_dc != 0 && i915.enable_dc != 1)
+	if (!dev_priv->csr.dmc_payload)
+		return;
+
+	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
 		skl_enable_dc6(dev_priv);
-	else
+	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
 		gen9_enable_dc5(dev_priv);
 }
 
 static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	if (power_well->count > 0) {
-		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-	} else {
-		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-		    i915.enable_dc != 0 &&
-		    i915.enable_dc != 1)
-			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-		else
-			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-	}
+	if (power_well->count > 0)
+		gen9_dc_off_power_well_enable(dev_priv, power_well);
+	else
+		gen9_dc_off_power_well_disable(dev_priv, power_well);
 }
 
 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@@ -962,6 +939,17 @@
 	return enabled;
 }
 
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * Disable trickle feed and enable pnd deadline calculation
+	 */
+	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+	I915_WRITE(CBR1_VLV, 0);
+}
+
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
 	enum pipe pipe;
@@ -984,6 +972,8 @@
 		I915_WRITE(DPLL(pipe), val);
 	}
 
+	vlv_init_display_clock_gating(dev_priv);
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	valleyview_enable_display_irqs(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -1622,34 +1612,56 @@
 	intel_runtime_pm_put(dev_priv);
 }
 
-#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
-	BIT(POWER_DOMAIN_PIPE_A) |			\
-	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
+#define HSW_DISPLAY_POWER_DOMAINS (			\
+	BIT(POWER_DOMAIN_PIPE_B) |			\
+	BIT(POWER_DOMAIN_PIPE_C) |			\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
-	BIT(POWER_DOMAIN_PORT_CRT) |			\
-	BIT(POWER_DOMAIN_PLLS) |			\
-	BIT(POWER_DOMAIN_AUX_A) |			\
-	BIT(POWER_DOMAIN_AUX_B) |			\
-	BIT(POWER_DOMAIN_AUX_C) |			\
-	BIT(POWER_DOMAIN_AUX_D) |			\
-	BIT(POWER_DOMAIN_GMBUS) |			\
-	BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS (				\
-	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
+	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
+	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_INIT))
 
-#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
-	HSW_ALWAYS_ON_POWER_DOMAINS |			\
-	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS (				\
-	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
+#define BDW_DISPLAY_POWER_DOMAINS (			\
+	BIT(POWER_DOMAIN_PIPE_B) |			\
+	BIT(POWER_DOMAIN_PIPE_C) |			\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
+	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_INIT))
 
-#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
+#define VLV_DISPLAY_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_PIPE_B) |		\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DSI) |		\
+	BIT(POWER_DOMAIN_PORT_CRT) |		\
+	BIT(POWER_DOMAIN_VGA) |			\
+	BIT(POWER_DOMAIN_AUDIO) |		\
+	BIT(POWER_DOMAIN_AUX_B) |		\
+	BIT(POWER_DOMAIN_AUX_C) |		\
+	BIT(POWER_DOMAIN_GMBUS) |		\
+	BIT(POWER_DOMAIN_INIT))
 
 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
@@ -1679,6 +1691,28 @@
 	BIT(POWER_DOMAIN_AUX_C) |		\
 	BIT(POWER_DOMAIN_INIT))
 
+#define CHV_DISPLAY_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_PIPE_B) |		\
+	BIT(POWER_DOMAIN_PIPE_C) |		\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
+	BIT(POWER_DOMAIN_PORT_DSI) |		\
+	BIT(POWER_DOMAIN_VGA) |			\
+	BIT(POWER_DOMAIN_AUDIO) |		\
+	BIT(POWER_DOMAIN_AUX_B) |		\
+	BIT(POWER_DOMAIN_AUX_C) |		\
+	BIT(POWER_DOMAIN_AUX_D) |		\
+	BIT(POWER_DOMAIN_GMBUS) |		\
+	BIT(POWER_DOMAIN_INIT))
+
 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
 	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
@@ -1746,7 +1780,7 @@
 	{
 		.name = "always-on",
 		.always_on = 1,
-		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 	},
 	{
@@ -1760,7 +1794,7 @@
 	{
 		.name = "always-on",
 		.always_on = 1,
-		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 	},
 	{
@@ -1795,7 +1829,7 @@
 	{
 		.name = "always-on",
 		.always_on = 1,
-		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.data = PUNIT_POWER_WELL_ALWAYS_ON,
 	},
@@ -1853,7 +1887,7 @@
 	{
 		.name = "always-on",
 		.always_on = 1,
-		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 	},
 	{
@@ -1863,7 +1897,7 @@
 		 * power wells don't actually exist. Pipe A power well is
 		 * required for any pipe to work.
 		 */
-		.domains = VLV_DISPLAY_POWER_DOMAINS,
+		.domains = CHV_DISPLAY_POWER_DOMAINS,
 		.data = PIPE_A,
 		.ops = &chv_pipe_power_well_ops,
 	},
@@ -1897,7 +1931,7 @@
 	{
 		.name = "always-on",
 		.always_on = 1,
-		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 		.data = SKL_DISP_PW_ALWAYS_ON,
 	},
@@ -1953,44 +1987,16 @@
 	},
 };
 
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
-{
-	struct i915_power_well *well;
-
-	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-		return;
-
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_enable(dev_priv, well);
-
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-	intel_power_well_enable(dev_priv, well);
-}
-
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
-{
-	struct i915_power_well *well;
-
-	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
-		return;
-
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-	intel_power_well_disable(dev_priv, well);
-
-	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-	intel_power_well_disable(dev_priv, well);
-}
-
 static struct i915_power_well bxt_power_wells[] = {
 	{
 		.name = "always-on",
 		.always_on = 1,
-		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+		.domains = POWER_DOMAIN_MASK,
 		.ops = &i9xx_always_on_power_well_ops,
 	},
 	{
 		.name = "power well 1",
-		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+		.domains = 0,
 		.ops = &skl_power_well_ops,
 		.data = SKL_DISP_PW_1,
 	},
@@ -2015,12 +2021,56 @@
 	if (disable_power_well >= 0)
 		return !!disable_power_well;
 
-	if (IS_BROXTON(dev_priv)) {
-		DRM_DEBUG_KMS("Disabling display power well support\n");
-		return 0;
+	return 1;
+}
+
+static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+				    int enable_dc)
+{
+	uint32_t mask;
+	int requested_dc;
+	int max_dc;
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		max_dc = 2;
+		mask = 0;
+	} else if (IS_BROXTON(dev_priv)) {
+		max_dc = 1;
+		/*
+		 * DC9 has a separate HW flow from the rest of the DC states,
+		 * not depending on the DMC firmware. It's needed by system
+		 * suspend/resume, so allow it unconditionally.
+		 */
+		mask = DC_STATE_EN_DC9;
+	} else {
+		max_dc = 0;
+		mask = 0;
 	}
 
-	return 1;
+	if (!i915.disable_power_well)
+		max_dc = 0;
+
+	if (enable_dc >= 0 && enable_dc <= max_dc) {
+		requested_dc = enable_dc;
+	} else if (enable_dc == -1) {
+		requested_dc = max_dc;
+	} else if (enable_dc > max_dc && enable_dc <= 2) {
+		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
+			      enable_dc, max_dc);
+		requested_dc = max_dc;
+	} else {
+		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+		requested_dc = max_dc;
+	}
+
+	if (requested_dc > 1)
+		mask |= DC_STATE_EN_UPTO_DC6;
+	if (requested_dc > 0)
+		mask |= DC_STATE_EN_UPTO_DC5;
+
+	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+
+	return mask;
 }
 
 #define set_power_wells(power_domains, __power_wells) ({		\
@@ -2041,6 +2091,8 @@
 
 	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
 						     i915.disable_power_well);
+	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
+							    i915.enable_dc);
 
 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
 
@@ -2050,17 +2102,17 @@
 	 * The enabling order will be from lower to higher indexed wells,
 	 * the disabling order is reversed.
 	 */
-	if (IS_HASWELL(dev_priv->dev)) {
+	if (IS_HASWELL(dev_priv)) {
 		set_power_wells(power_domains, hsw_power_wells);
-	} else if (IS_BROADWELL(dev_priv->dev)) {
+	} else if (IS_BROADWELL(dev_priv)) {
 		set_power_wells(power_domains, bdw_power_wells);
-	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		set_power_wells(power_domains, skl_power_wells);
-	} else if (IS_BROXTON(dev_priv->dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		set_power_wells(power_domains, bxt_power_wells);
-	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
+	} else if (IS_CHERRYVIEW(dev_priv)) {
 		set_power_wells(power_domains, chv_power_wells);
-	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		set_power_wells(power_domains, vlv_power_wells);
 	} else {
 		set_power_wells(power_domains, i9xx_always_on_power_well);
@@ -2120,9 +2172,10 @@
 }
 
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
-				  bool resume)
+				   bool resume)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *well;
 	uint32_t val;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -2133,7 +2186,13 @@
 
 	/* enable PG1 and Misc I/O */
 	mutex_lock(&power_domains->lock);
-	skl_pw1_misc_io_init(dev_priv);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+	intel_power_well_enable(dev_priv, well);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+	intel_power_well_enable(dev_priv, well);
+
 	mutex_unlock(&power_domains->lock);
 
 	if (!resume)
@@ -2141,13 +2200,14 @@
 
 	skl_init_cdclk(dev_priv);
 
-	if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
-		gen9_set_dc_state_debugmask(dev_priv);
+	if (dev_priv->csr.dmc_payload)
+		intel_csr_load_program(dev_priv);
 }
 
 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *well;
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -2155,8 +2215,73 @@
 
 	/* The spec doesn't call for removing the reset handshake flag */
 	/* disable PG1 and Misc I/O */
+
 	mutex_lock(&power_domains->lock);
-	skl_pw1_misc_io_fini(dev_priv);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+	intel_power_well_disable(dev_priv, well);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+	intel_power_well_disable(dev_priv, well);
+
+	mutex_unlock(&power_domains->lock);
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+			   bool resume)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *well;
+	uint32_t val;
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+	/*
+	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+	 * or else the reset will hang because there is no PCH to respond.
+	 * Move the handshake programming to initialization sequence.
+	 * Previously was left up to BIOS.
+	 */
+	val = I915_READ(HSW_NDE_RSTWRN_OPT);
+	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+	/* Enable PG1 */
+	mutex_lock(&power_domains->lock);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+	intel_power_well_enable(dev_priv, well);
+
+	mutex_unlock(&power_domains->lock);
+
+	broxton_init_cdclk(dev_priv);
+	broxton_ddi_phy_init(dev_priv);
+
+	broxton_cdclk_verify_state(dev_priv);
+	broxton_ddi_phy_verify_state(dev_priv);
+
+	if (resume && dev_priv->csr.dmc_payload)
+		intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *well;
+
+	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+	broxton_ddi_phy_uninit(dev_priv);
+	broxton_uninit_cdclk(dev_priv);
+
+	/* The spec doesn't call for removing the reset handshake flag */
+
+	/* Disable PG1 */
+	mutex_lock(&power_domains->lock);
+
+	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+	intel_power_well_disable(dev_priv, well);
+
 	mutex_unlock(&power_domains->lock);
 }
 
@@ -2291,6 +2416,8 @@
 
 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		skl_display_core_init(dev_priv, resume);
+	} else if (IS_BROXTON(dev)) {
+		bxt_display_core_init(dev_priv, resume);
 	} else if (IS_CHERRYVIEW(dev)) {
 		mutex_lock(&power_domains->lock);
 		chv_phy_control_init(dev_priv);
@@ -2328,6 +2455,8 @@
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		skl_display_core_uninit(dev_priv);
+	else if (IS_BROXTON(dev_priv))
+		bxt_display_core_uninit(dev_priv);
 }
 
 /**
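
The intel_runtime_pm.c changes above fold the scattered i915.enable_dc checks into a single
allowed_dc_mask, computed once by get_allowed_dc_mask() and cached in dev_priv->csr, which
gen9_set_dc_state() then consults. As a rough illustration of that selection flow, here is a
standalone sketch; the bit values, parameter names and the userspace framing are assumptions
for illustration, not the driver's definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the driver's DC state bits (values assumed). */
    #define DC_STATE_EN_DC9       (1 << 3)
    #define DC_STATE_EN_UPTO_DC5  (1 << 0)
    #define DC_STATE_EN_UPTO_DC6  (1 << 1)

    /* Mirror of the selection flow in get_allowed_dc_mask(): clamp the module
     * parameter to what the platform supports and turn it into a bitmask. */
    static unsigned int allowed_dc_mask(int is_skl_kbl, int is_bxt,
                                        int power_well_enabled, int enable_dc)
    {
        unsigned int mask = 0;
        int max_dc = 0, requested_dc;

        if (is_skl_kbl) {
            max_dc = 2;                     /* DC5 and DC6 */
        } else if (is_bxt) {
            max_dc = 1;                     /* DC5 only */
            mask = DC_STATE_EN_DC9;         /* DC9 allowed unconditionally */
        }

        if (!power_well_enabled)
            max_dc = 0;

        if (enable_dc >= 0 && enable_dc <= max_dc)
            requested_dc = enable_dc;
        else
            requested_dc = max_dc;          /* -1 ("auto") or out of range */

        if (requested_dc > 1)
            mask |= DC_STATE_EN_UPTO_DC6;
        if (requested_dc > 0)
            mask |= DC_STATE_EN_UPTO_DC5;

        return mask;
    }

    int main(void)
    {
        /* SKL, power wells enabled, enable_dc=-1 -> DC5|DC6 allowed. */
        printf("%#x\n", allowed_dc_mask(1, 0, 1, -1));
        return 0;
    }
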
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4ecc076..2128fae 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1398,12 +1398,10 @@
 	}
 
 	dotclock = pipe_config->port_clock;
+
 	if (pipe_config->pixel_multiplier)
 		dotclock /= pipe_config->pixel_multiplier;
 
-	if (HAS_PCH_SPLIT(dev))
-		ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 
 	/* Cross check the port pixel multiplier with the sdvo encoder state. */
@@ -2262,9 +2260,9 @@
 	struct sdvo_device_mapping *mapping;
 
 	if (sdvo->port == PORT_B)
-		mapping = &(dev_priv->sdvo_mappings[0]);
+		mapping = &dev_priv->vbt.sdvo_mappings[0];
 	else
-		mapping = &(dev_priv->sdvo_mappings[1]);
+		mapping = &dev_priv->vbt.sdvo_mappings[1];
 
 	if (mapping->initialized)
 		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2280,9 +2278,9 @@
 	u8 pin;
 
 	if (sdvo->port == PORT_B)
-		mapping = &dev_priv->sdvo_mappings[0];
+		mapping = &dev_priv->vbt.sdvo_mappings[0];
 	else
-		mapping = &dev_priv->sdvo_mappings[1];
+		mapping = &dev_priv->vbt.sdvo_mappings[1];
 
 	if (mapping->initialized &&
 	    intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@@ -2318,11 +2316,11 @@
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
 	if (sdvo->port == PORT_B) {
-		my_mapping = &dev_priv->sdvo_mappings[0];
-		other_mapping = &dev_priv->sdvo_mappings[1];
+		my_mapping = &dev_priv->vbt.sdvo_mappings[0];
+		other_mapping = &dev_priv->vbt.sdvo_mappings[1];
 	} else {
-		my_mapping = &dev_priv->sdvo_mappings[1];
-		other_mapping = &dev_priv->sdvo_mappings[0];
+		my_mapping = &dev_priv->vbt.sdvo_mappings[1];
+		other_mapping = &dev_priv->vbt.sdvo_mappings[0];
 	}
 
 	/* If the BIOS described our SDVO device, take advantage of it. */
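
For reference, the SDVO DDC bus selection kept in the hunks above decodes the bus index from
the high nibble of the VBT ddc_pin and turns it into a one-hot mask. A tiny standalone sketch
of that decode; the sample pin value is made up:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Example only: a VBT ddc_pin of 0x23 keeps the DDC bus index in the
         * high nibble (2) and the pin number in the low nibble (3). */
        uint8_t ddc_pin = 0x23;
        unsigned int ddc_bus = 1u << ((ddc_pin & 0xf0) >> 4);

        printf("ddc_bus mask: %#x\n", ddc_bus);  /* prints 0x4 */
        return 0;
    }
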
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a2582c4..0f3e230 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -193,7 +193,7 @@
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 surf_addr;
 	u32 tile_height, plane_offset, plane_size;
-	unsigned int rotation;
+	unsigned int rotation = plane_state->base.rotation;
 	int x_offset, y_offset;
 	int crtc_x = plane_state->dst.x1;
 	int crtc_y = plane_state->dst.y1;
@@ -213,7 +213,6 @@
 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
 
-	rotation = plane_state->base.rotation;
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
 	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
@@ -351,6 +350,7 @@
 	int plane = intel_plane->plane;
 	u32 sprctl;
 	u32 sprsurf_offset, linear_offset;
+	unsigned int rotation = dplane->state->rotation;
 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	int crtc_x = plane_state->dst.x1;
@@ -423,12 +423,11 @@
 	crtc_h--;
 
 	linear_offset = y * fb->pitches[0] + x * cpp;
-	sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
-						   fb->modifier[0], cpp,
-						   fb->pitches[0]);
+	sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+						   fb->pitches[0], rotation);
 	linear_offset -= sprsurf_offset;
 
-	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == BIT(DRM_ROTATE_180)) {
 		sprctl |= SP_ROTATE_180;
 
 		x += src_w;
@@ -493,6 +492,7 @@
 	enum pipe pipe = intel_plane->pipe;
 	u32 sprctl, sprscale = 0;
 	u32 sprsurf_offset, linear_offset;
+	unsigned int rotation = plane_state->base.rotation;
 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	int crtc_x = plane_state->dst.x1;
@@ -556,12 +556,11 @@
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
 	linear_offset = y * fb->pitches[0] + x * cpp;
-	sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
-						   fb->modifier[0], cpp,
-						   fb->pitches[0]);
+	sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+						   fb->pitches[0], rotation);
 	linear_offset -= sprsurf_offset;
 
-	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == BIT(DRM_ROTATE_180)) {
 		sprctl |= SPRITE_ROTATE_180;
 
 		/* HSW and BDW does this automagically in hardware */
@@ -634,6 +633,7 @@
 	int pipe = intel_plane->pipe;
 	u32 dvscntr, dvsscale;
 	u32 dvssurf_offset, linear_offset;
+	unsigned int rotation = plane_state->base.rotation;
 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	int crtc_x = plane_state->dst.x1;
@@ -693,12 +693,11 @@
 		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
 	linear_offset = y * fb->pitches[0] + x * cpp;
-	dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
-						   fb->modifier[0], cpp,
-						   fb->pitches[0]);
+	dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+						   fb->pitches[0], rotation);
 	linear_offset -= dvssurf_offset;
 
-	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+	if (rotation == BIT(DRM_ROTATE_180)) {
 		dvscntr |= DVS_ROTATE_180;
 
 		x += src_w;
@@ -1026,8 +1025,8 @@
 int
 intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
-	struct intel_plane *intel_plane;
-	struct intel_plane_state *state;
+	struct intel_plane *intel_plane = NULL;
+	struct intel_plane_state *state = NULL;
 	unsigned long possible_crtcs;
 	const uint32_t *plane_formats;
 	int num_plane_formats;
@@ -1037,13 +1036,15 @@
 		return -ENODEV;
 
 	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
-	if (!intel_plane)
-		return -ENOMEM;
+	if (!intel_plane) {
+		ret = -ENOMEM;
+		goto fail;
+	}
 
 	state = intel_create_plane_state(&intel_plane->base);
 	if (!state) {
-		kfree(intel_plane);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail;
 	}
 	intel_plane->base.state = &state->base;
 
@@ -1098,28 +1099,34 @@
 		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
 		break;
 	default:
-		kfree(intel_plane);
-		return -ENODEV;
+		MISSING_CASE(INTEL_INFO(dev)->gen);
+		ret = -ENODEV;
+		goto fail;
 	}
 
 	intel_plane->pipe = pipe;
 	intel_plane->plane = plane;
 	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
 	intel_plane->check_plane = intel_check_sprite_plane;
+
 	possible_crtcs = (1 << pipe);
+
 	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
 				       &intel_plane_funcs,
 				       plane_formats, num_plane_formats,
 				       DRM_PLANE_TYPE_OVERLAY, NULL);
-	if (ret) {
-		kfree(intel_plane);
-		goto out;
-	}
+	if (ret)
+		goto fail;
 
 	intel_create_rotation_property(dev, intel_plane);
 
 	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
-out:
+	return 0;
+
+fail:
+	kfree(state);
+	kfree(intel_plane);
+
 	return ret;
 }
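
The intel_plane_init() rework above replaces the per-branch kfree()/return pairs with a single
fail: label, which works because both pointers start out NULL and kfree(NULL) is a no-op. Below
is a minimal userspace sketch of the same pattern (free(NULL) behaves the same way); the widget
types are placeholders, not driver structures:

    #include <stdlib.h>

    struct widget_state { int dirty; };
    struct widget { struct widget_state *state; };

    /* Single-exit error handling in the style of the reworked intel_plane_init():
     * start every pointer at NULL, jump to "fail" on any error, and free
     * unconditionally there -- free(NULL), like kfree(NULL), is a no-op. */
    static int widget_init(struct widget **out)
    {
        struct widget *w = NULL;
        struct widget_state *state = NULL;
        int ret;

        w = calloc(1, sizeof(*w));
        if (!w) {
            ret = -1;
            goto fail;
        }

        state = calloc(1, sizeof(*state));
        if (!state) {
            ret = -1;
            goto fail;
        }

        w->state = state;
        *out = w;
        return 0;

    fail:
        free(state);
        free(w);
        return ret;
    }

    int main(void)
    {
        struct widget *w = NULL;
        return widget_init(&w);
    }
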
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6745bad..223129d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -326,24 +326,12 @@
 	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
 };
 
-static const struct color_conversion sdtv_csc_rgb = {
-	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
-	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
-	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
-};
-
 static const struct color_conversion hdtv_csc_yprpb = {
 	.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
 	.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
 	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
 };
 
-static const struct color_conversion hdtv_csc_rgb = {
-	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
-	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
-	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
-};
-
 static const struct video_levels component_levels = {
 	.blank = 279, .black = 279, .burst = 0,
 };
@@ -1531,47 +1519,6 @@
 	.destroy = intel_encoder_destroy,
 };
 
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the integrated TV is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the TV is present.
- */
-static int tv_is_present_in_vbt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	union child_device_config *p_child;
-	int i, ret;
-
-	if (!dev_priv->vbt.child_dev_num)
-		return 1;
-
-	ret = 0;
-	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		p_child = dev_priv->vbt.child_dev + i;
-		/*
-		 * If the device type is not TV, continue.
-		 */
-		switch (p_child->old.device_type) {
-		case DEVICE_TYPE_INT_TV:
-		case DEVICE_TYPE_TV:
-		case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
-			break;
-		default:
-			continue;
-		}
-		/* Only when the addin_offset is non-zero, it is regarded
-		 * as present.
-		 */
-		if (p_child->old.addin_offset) {
-			ret = 1;
-			break;
-		}
-	}
-	return ret;
-}
-
 void
 intel_tv_init(struct drm_device *dev)
 {
@@ -1587,13 +1534,10 @@
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
 		return;
 
-	if (!tv_is_present_in_vbt(dev)) {
+	if (!intel_bios_is_tv_present(dev_priv)) {
 		DRM_DEBUG_KMS("Integrated TV is not present.\n");
 		return;
 	}
-	/* Even if we have an encoder we may not have a connector */
-	if (!dev_priv->vbt.int_tv_support)
-		return;
 
 	/*
 	 * Sanity check the TV output by checking to see if the
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 68b6f69..4f1dfe6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -60,7 +60,11 @@
 static inline void
 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
 {
-	mod_timer_pinned(&d->timer, jiffies + 1);
+	d->wake_count++;
+	hrtimer_start_range_ns(&d->timer,
+			       ktime_set(0, NSEC_PER_MSEC),
+			       NSEC_PER_MSEC,
+			       HRTIMER_MODE_REL);
 }
 
 static inline void
@@ -107,22 +111,22 @@
 fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
-	enum forcewake_domain_id id;
 
-	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
 		fw_domain_wait_ack_clear(d);
 		fw_domain_get(d);
-		fw_domain_wait_ack(d);
 	}
+
+	for_each_fw_domain_masked(d, fw_domains, dev_priv)
+		fw_domain_wait_ack(d);
 }
 
 static void
 fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
-	enum forcewake_domain_id id;
 
-	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
 		fw_domain_put(d);
 		fw_domain_posting_read(d);
 	}
@@ -132,10 +136,9 @@
 fw_domains_posting_read(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore_forcewake_domain *d;
-	enum forcewake_domain_id id;
 
 	/* No need to do for all, just do for first found */
-	for_each_fw_domain(d, dev_priv, id) {
+	for_each_fw_domain(d, dev_priv) {
 		fw_domain_posting_read(d);
 		break;
 	}
@@ -145,12 +148,11 @@
 fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
-	enum forcewake_domain_id id;
 
 	if (dev_priv->uncore.fw_domains == 0)
 		return;
 
-	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+	for_each_fw_domain_masked(d, fw_domains, dev_priv)
 		fw_domain_reset(d);
 
 	fw_domains_posting_read(dev_priv);
@@ -204,7 +206,7 @@
 
 	/* On VLV, FIFO will be shared by both SW and HW.
 	 * So, we need to read the FREE_ENTRIES everytime */
-	if (IS_VALLEYVIEW(dev_priv->dev))
+	if (IS_VALLEYVIEW(dev_priv))
 		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
 
 	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
@@ -224,9 +226,11 @@
 	return ret;
 }
 
-static void intel_uncore_fw_release_timer(unsigned long arg)
+static enum hrtimer_restart
+intel_uncore_fw_release_timer(struct hrtimer *timer)
 {
-	struct intel_uncore_forcewake_domain *domain = (void *)arg;
+	struct intel_uncore_forcewake_domain *domain =
+	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
 	unsigned long irqflags;
 
 	assert_rpm_device_not_suspended(domain->i915);
@@ -240,6 +244,8 @@
 							  1 << domain->id);
 
 	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+
+	return HRTIMER_NORESTART;
 }
 
 void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
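
The hunks above convert the forcewake release timer from a legacy timer to an hrtimer:
fw_domain_arm_timer() now takes the wake_count reference and arms a roughly 1 ms one-shot, and
the callback returns HRTIMER_NORESTART so it never self-rearms. A minimal kernel-style sketch of
that pairing, using only the hrtimer calls shown in the diff; the embedding struct is a stand-in
for intel_uncore_forcewake_domain and the uncore locking is omitted:

    #include <linux/kernel.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    struct fw_domain_sketch {
        struct hrtimer timer;
        unsigned int wake_count;
    };

    /* One-shot callback: do the delayed release work and tell the hrtimer core
     * not to re-arm -- each arm_release() call is a single shot. */
    static enum hrtimer_restart release_cb(struct hrtimer *timer)
    {
        struct fw_domain_sketch *d =
            container_of(timer, struct fw_domain_sketch, timer);

        if (--d->wake_count == 0)
            ; /* drop the forcewake reference here (lock held in the real code) */

        return HRTIMER_NORESTART;
    }

    static void arm_release(struct fw_domain_sketch *d)
    {
        d->wake_count++;
        /* Fire roughly 1 ms from now, with 1 ms of slack allowed. */
        hrtimer_start_range_ns(&d->timer, ktime_set(0, NSEC_PER_MSEC),
                               NSEC_PER_MSEC, HRTIMER_MODE_REL);
    }

    static void init_release(struct fw_domain_sketch *d)
    {
        hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        d->timer.function = release_cb;
    }
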
@@ -248,7 +254,6 @@
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;
 	int retry_count = 100;
-	enum forcewake_domain_id id;
 	enum forcewake_domains fw = 0, active_domains;
 
 	/* Hold uncore.lock across reset to prevent any register access
@@ -258,18 +263,18 @@
 	while (1) {
 		active_domains = 0;
 
-		for_each_fw_domain(domain, dev_priv, id) {
-			if (del_timer_sync(&domain->timer) == 0)
+		for_each_fw_domain(domain, dev_priv) {
+			if (hrtimer_cancel(&domain->timer) == 0)
 				continue;
 
-			intel_uncore_fw_release_timer((unsigned long)domain);
+			intel_uncore_fw_release_timer(&domain->timer);
 		}
 
 		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-		for_each_fw_domain(domain, dev_priv, id) {
-			if (timer_pending(&domain->timer))
-				active_domains |= (1 << id);
+		for_each_fw_domain(domain, dev_priv) {
+			if (hrtimer_active(&domain->timer))
+				active_domains |= domain->mask;
 		}
 
 		if (active_domains == 0)
@@ -286,9 +291,9 @@
 
 	WARN_ON(active_domains);
 
-	for_each_fw_domain(domain, dev_priv, id)
+	for_each_fw_domain(domain, dev_priv)
 		if (domain->wake_count)
-			fw |= 1 << id;
+			fw |= domain->mask;
 
 	if (fw)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -310,21 +315,49 @@
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-static void intel_uncore_ellc_detect(struct drm_device *dev)
+static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+	const unsigned int sets[4] = { 1, 1, 2, 2 };
+	const u32 cap = dev_priv->edram_cap;
 
-	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
-	     INTEL_INFO(dev)->gen >= 9) &&
-	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
-		/* The docs do not explain exactly how the calculation can be
-		 * made. It is somewhat guessable, but for now, it's always
-		 * 128MB.
-		 * NB: We can't write IDICR yet because we do not have gt funcs
+	return EDRAM_NUM_BANKS(cap) *
+		ways[EDRAM_WAYS_IDX(cap)] *
+		sets[EDRAM_SETS_IDX(cap)] *
+		1024 * 1024;
+}
+
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
+{
+	if (!HAS_EDRAM(dev_priv))
+		return 0;
+
+	/* The capability bits needed for the size calculation are
+	 * not present before gen9, so always return 128MB.
+	 */
+	if (INTEL_GEN(dev_priv) < 9)
+		return 128 * 1024 * 1024;
+
+	return gen9_edram_size(dev_priv);
+}
+
+static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
+{
+	if (IS_HASWELL(dev_priv) ||
+	    IS_BROADWELL(dev_priv) ||
+	    INTEL_GEN(dev_priv) >= 9) {
+		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
+							HSW_EDRAM_CAP);
+
+		/* NB: We can't write IDICR yet because we do not have gt funcs
 		 * set up */
-		dev_priv->ellc_size = 128;
-		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+	} else {
+		dev_priv->edram_cap = 0;
 	}
+
+	if (HAS_EDRAM(dev_priv))
+		DRM_INFO("Found %lluMB of eDRAM\n",
+			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
 }
 
 static bool
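
gen9_edram_size() above derives the eDRAM size from HSW_EDRAM_CAP by looking up the way and set
counts in small tables and multiplying by the bank count and 1 MiB. The sketch below mirrors
that arithmetic in userspace; the bit positions chosen for the capability fields are assumptions
for illustration only (the real layout comes from the driver's EDRAM_* macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout, for illustration only. */
    #define CAP_BANKS(cap)     (((cap) >> 1) & 0xf)
    #define CAP_WAYS_IDX(cap)  (((cap) >> 5) & 0x7)
    #define CAP_SETS_IDX(cap)  (((cap) >> 8) & 0x3)

    static uint64_t edram_size(uint32_t cap)
    {
        static const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
        static const unsigned int sets[4] = { 1, 1, 2, 2 };

        /* banks * ways * sets * 1 MiB, as in gen9_edram_size() */
        return (uint64_t)CAP_BANKS(cap) * ways[CAP_WAYS_IDX(cap)] *
               sets[CAP_SETS_IDX(cap)] * 1024 * 1024;
    }

    int main(void)
    {
        uint32_t cap = (8 << 1) | (1 << 5) | (2 << 8);  /* made-up sample value */
        printf("%llu MiB\n", (unsigned long long)(edram_size(cap) >> 20));
        return 0;
    }
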
@@ -410,16 +443,15 @@
 					 enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
-	enum forcewake_domain_id id;
 
 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
 	fw_domains &= dev_priv->uncore.fw_domains;
 
-	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
 		if (domain->wake_count++)
-			fw_domains &= ~(1 << id);
+			fw_domains &= ~domain->mask;
 	}
 
 	if (fw_domains)
@@ -477,21 +509,19 @@
 					 enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
-	enum forcewake_domain_id id;
 
 	if (!dev_priv->uncore.funcs.force_wake_put)
 		return;
 
 	fw_domains &= dev_priv->uncore.fw_domains;
 
-	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
 		if (WARN_ON(domain->wake_count == 0))
 			continue;
 
 		if (--domain->wake_count)
 			continue;
 
-		domain->wake_count++;
 		fw_domain_arm_timer(domain);
 	}
 }
@@ -539,18 +569,27 @@
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore_forcewake_domain *domain;
-	enum forcewake_domain_id id;
 
 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
-	for_each_fw_domain(domain, dev_priv, id)
+	for_each_fw_domain(domain, dev_priv)
 		WARN_ON(domain->wake_count);
 }
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 
+#define __gen6_reg_read_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd; \
+	if (NEEDS_FORCE_WAKE(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else \
+		__fwd = 0; \
+	__fwd; \
+})
+
 #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
 
 #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
@@ -564,6 +603,48 @@
 	 REG_RANGE((reg), 0x22000, 0x24000) || \
 	 REG_RANGE((reg), 0x30000, 0x40000))
 
+#define __vlv_reg_read_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd = 0; \
+	if (!NEEDS_FORCE_WAKE(offset)) \
+		__fwd = 0; \
+	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_MEDIA; \
+	__fwd; \
+})
+
+static const i915_reg_t gen8_shadowed_regs[] = {
+	GEN6_RPNSWREQ,
+	GEN6_RC_VIDEO_FREQ,
+	RING_TAIL(RENDER_RING_BASE),
+	RING_TAIL(GEN6_BSD_RING_BASE),
+	RING_TAIL(VEBOX_RING_BASE),
+	RING_TAIL(BLT_RING_BASE),
+	/* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(u32 offset)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+		if (offset == gen8_shadowed_regs[i].reg)
+			return true;
+
+	return false;
+}
+
+#define __gen8_reg_write_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd; \
+	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else \
+		__fwd = 0; \
+	__fwd; \
+})
+
 #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
 	(REG_RANGE((reg), 0x2000, 0x4000) || \
 	 REG_RANGE((reg), 0x5200, 0x8000) || \
@@ -586,6 +667,34 @@
 	 REG_RANGE((reg), 0x9000, 0xB000) || \
 	 REG_RANGE((reg), 0xF000, 0x10000))
 
+#define __chv_reg_read_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd = 0; \
+	if (!NEEDS_FORCE_WAKE(offset)) \
+		__fwd = 0; \
+	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	__fwd; \
+})
+
+#define __chv_reg_write_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd = 0; \
+	if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
+		__fwd = 0; \
+	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	__fwd; \
+})
+
 #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
 	REG_RANGE((reg), 0xB00,  0x2000)
 
@@ -618,6 +727,61 @@
 	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
 	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
 
+#define SKL_NEEDS_FORCE_WAKE(reg) \
+	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_reg_read_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd; \
+	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
+		__fwd = 0; \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	else \
+		__fwd = FORCEWAKE_BLITTER; \
+	__fwd; \
+})
+
+static const i915_reg_t gen9_shadowed_regs[] = {
+	RING_TAIL(RENDER_RING_BASE),
+	RING_TAIL(GEN6_BSD_RING_BASE),
+	RING_TAIL(VEBOX_RING_BASE),
+	RING_TAIL(BLT_RING_BASE),
+	GEN6_RPNSWREQ,
+	GEN6_RC_VIDEO_FREQ,
+	/* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(u32 offset)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+		if (offset == gen9_shadowed_regs[i].reg)
+			return true;
+
+	return false;
+}
+
+#define __gen9_reg_write_fw_domains(offset) \
+({ \
+	enum forcewake_domains __fwd; \
+	if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
+		__fwd = 0; \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+		__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	else \
+		__fwd = FORCEWAKE_BLITTER; \
+	__fwd; \
+})
+
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -633,15 +797,6 @@
 		      const bool read,
 		      const bool before)
 {
-	/* XXX. We limit the auto arming traces for mmio
-	 * debugs on these platforms. There are just too many
-	 * revealed by these and CI/Bat suffers from the noise.
-	 * Please fix and then re-enable the automatic traces.
-	 */
-	if (i915.mmio_debug < 2 &&
-	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
-		return;
-
 	if (WARN(check_for_unclaimed_mmio(dev_priv),
 		 "Unclaimed register detected %s %s register 0x%x\n",
 		 before ? "before" : "after",
@@ -716,23 +871,21 @@
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 	return val
 
-static inline void __force_wake_get(struct drm_i915_private *dev_priv,
-				    enum forcewake_domains fw_domains)
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+				     enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
-	enum forcewake_domain_id id;
 
 	if (WARN_ON(!fw_domains))
 		return;
 
 	/* Ideally GCC would be constant-fold and eliminate this loop */
-	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
 		if (domain->wake_count) {
-			fw_domains &= ~(1 << id);
+			fw_domains &= ~domain->mask;
 			continue;
 		}
 
-		domain->wake_count++;
 		fw_domain_arm_timer(domain);
 	}
 
@@ -743,9 +896,11 @@
 #define __gen6_read(x) \
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	if (NEEDS_FORCE_WAKE(offset)) \
-		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+	fw_engine = __gen6_reg_read_fw_domains(offset); \
+	if (fw_engine) \
+		__force_wake_auto(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
@@ -753,16 +908,11 @@
 #define __vlv_read(x) \
 static u##x \
 vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-	enum forcewake_domains fw_engine = 0; \
+	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	if (!NEEDS_FORCE_WAKE(offset)) \
-		fw_engine = 0; \
-	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_MEDIA; \
+	fw_engine = __vlv_reg_read_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_get(dev_priv, fw_engine); \
+		__force_wake_auto(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
@@ -770,42 +920,23 @@
 #define __chv_read(x) \
 static u##x \
 chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-	enum forcewake_domains fw_engine = 0; \
+	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	if (!NEEDS_FORCE_WAKE(offset)) \
-		fw_engine = 0; \
-	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	fw_engine = __chv_reg_read_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_get(dev_priv, fw_engine); \
+		__force_wake_auto(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
 
-#define SKL_NEEDS_FORCE_WAKE(reg) \
-	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
 #define __gen9_read(x) \
 static u##x \
 gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
-		fw_engine = 0; \
-	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-	else \
-		fw_engine = FORCEWAKE_BLITTER; \
+	fw_engine = __gen9_reg_read_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_get(dev_priv, fw_engine); \
+		__force_wake_auto(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
 	GEN6_READ_FOOTER; \
 }
@@ -942,34 +1073,14 @@
 	GEN6_WRITE_FOOTER; \
 }
 
-static const i915_reg_t gen8_shadowed_regs[] = {
-	FORCEWAKE_MT,
-	GEN6_RPNSWREQ,
-	GEN6_RC_VIDEO_FREQ,
-	RING_TAIL(RENDER_RING_BASE),
-	RING_TAIL(GEN6_BSD_RING_BASE),
-	RING_TAIL(VEBOX_RING_BASE),
-	RING_TAIL(BLT_RING_BASE),
-	/* TODO: Other registers are not yet used */
-};
-
-static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
-			     i915_reg_t reg)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
-		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
-			return true;
-
-	return false;
-}
-
 #define __gen8_write(x) \
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
-	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
-		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+	fw_engine = __gen8_reg_write_fw_domains(offset); \
+	if (fw_engine) \
+		__force_wake_auto(dev_priv, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
@@ -977,66 +1088,24 @@
 #define __chv_write(x) \
 static void \
 chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
-	enum forcewake_domains fw_engine = 0; \
+	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
-	if (!NEEDS_FORCE_WAKE(offset) || \
-	    is_gen8_shadowed(dev_priv, reg)) \
-		fw_engine = 0; \
-	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	fw_engine = __chv_reg_write_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_get(dev_priv, fw_engine); \
+		__force_wake_auto(dev_priv, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
 
-static const i915_reg_t gen9_shadowed_regs[] = {
-	RING_TAIL(RENDER_RING_BASE),
-	RING_TAIL(GEN6_BSD_RING_BASE),
-	RING_TAIL(VEBOX_RING_BASE),
-	RING_TAIL(BLT_RING_BASE),
-	FORCEWAKE_BLITTER_GEN9,
-	FORCEWAKE_RENDER_GEN9,
-	FORCEWAKE_MEDIA_GEN9,
-	GEN6_RPNSWREQ,
-	GEN6_RC_VIDEO_FREQ,
-	/* TODO: Other registers are not yet used */
-};
-
-static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
-			     i915_reg_t reg)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
-		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
-			return true;
-
-	return false;
-}
-
 #define __gen9_write(x) \
 static void \
 gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
 		bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
-	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
-	    is_gen9_shadowed(dev_priv, reg)) \
-		fw_engine = 0; \
-	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER; \
-	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_MEDIA; \
-	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
-		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
-	else \
-		fw_engine = FORCEWAKE_BLITTER; \
+	fw_engine = __gen9_reg_write_fw_domains(offset); \
 	if (fw_engine) \
-		__force_wake_get(dev_priv, fw_engine); \
+		__force_wake_auto(dev_priv, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
@@ -1150,7 +1219,14 @@
 	d->i915 = dev_priv;
 	d->id = domain_id;
 
-	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
+	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
+	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
+
+	d->mask = 1 << domain_id;
+
+	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	d->timer.function = intel_uncore_fw_release_timer;
 
 	dev_priv->uncore.fw_domains |= (1 << domain_id);
 
@@ -1161,7 +1237,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+	if (INTEL_INFO(dev_priv)->gen <= 5)
 		return;
 
 	if (IS_GEN9(dev)) {
@@ -1257,7 +1333,7 @@
 
 	i915_check_vgpu(dev);
 
-	intel_uncore_ellc_detect(dev);
+	intel_uncore_edram_detect(dev_priv);
 	intel_uncore_fw_domains_init(dev);
 	__intel_uncore_early_sanitize(dev, false);
 
@@ -1437,7 +1513,7 @@
 	return (gdrst & GRDOM_RESET_STATUS) == 0;
 }
 
-static int i915_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
 {
 	/* assert reset for at least 20 usec */
 	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1454,13 +1530,13 @@
 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
 }
 
-static int g33_do_reset(struct drm_device *dev)
+static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
 {
 	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
 	return wait_for(g4x_reset_complete(dev), 500);
 }
 
-static int g4x_do_reset(struct drm_device *dev)
+static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -1490,7 +1566,7 @@
 	return 0;
 }
 
-static int ironlake_do_reset(struct drm_device *dev)
+static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -1514,75 +1590,132 @@
 	return 0;
 }
 
-static int gen6_do_reset(struct drm_device *dev)
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+				u32 hw_domain_mask)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int	ret;
-
-	/* Reset the chip */
+	int ret;
 
 	/* GEN6_GDRST is not in the gt power well, no need to check
 	 * for fifo space for the write or forcewake the chip for
 	 * the read
 	 */
-	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
-	/* Spin waiting for the device to ack the reset request */
-	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
+	/* Spin waiting for the device to ack the reset requests */
+	ret = wait_for(ACKED, 500);
+#undef ACKED
+
+	return ret;
+}
+
+/**
+ * gen6_reset_engines - reset individual engines
+ * @dev: DRM device
+ * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ *
+ * This function will reset the individual engines that are set in engine_mask.
+ * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
+ *
+ * Note: It is responsibility of the caller to handle the difference between
+ * asking full domain reset versus reset for all available individual engines.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *engine;
+	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+		[RCS] = GEN6_GRDOM_RENDER,
+		[BCS] = GEN6_GRDOM_BLT,
+		[VCS] = GEN6_GRDOM_MEDIA,
+		[VCS2] = GEN8_GRDOM_MEDIA2,
+		[VECS] = GEN6_GRDOM_VECS,
+	};
+	u32 hw_mask;
+	int ret;
+
+	if (engine_mask == ALL_ENGINES) {
+		hw_mask = GEN6_GRDOM_FULL;
+	} else {
+		hw_mask = 0;
+		for_each_engine_masked(engine, dev_priv, engine_mask)
+			hw_mask |= hw_engine_mask[engine->id];
+	}
+
+	ret = gen6_hw_domain_reset(dev_priv, hw_mask);
 
 	intel_uncore_forcewake_reset(dev, true);
 
 	return ret;
 }
 
-static int wait_for_register(struct drm_i915_private *dev_priv,
-			     i915_reg_t reg,
-			     const u32 mask,
-			     const u32 value,
-			     const unsigned long timeout_ms)
+static int wait_for_register_fw(struct drm_i915_private *dev_priv,
+				i915_reg_t reg,
+				const u32 mask,
+				const u32 value,
+				const unsigned long timeout_ms)
 {
-	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+	return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
 }
 
-static int gen8_do_reset(struct drm_device *dev)
+static int gen8_request_engine_reset(struct intel_engine_cs *engine)
+{
+	int ret;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+	ret = wait_for_register_fw(dev_priv,
+				   RING_RESET_CTL(engine->mmio_base),
+				   RESET_CTL_READY_TO_RESET,
+				   RESET_CTL_READY_TO_RESET,
+				   700);
+	if (ret)
+		DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+	return ret;
+}
+
+static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
-	int i;
 
-	for_each_ring(engine, dev_priv, i) {
-		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
-			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
-		if (wait_for_register(dev_priv,
-				      RING_RESET_CTL(engine->mmio_base),
-				      RESET_CTL_READY_TO_RESET,
-				      RESET_CTL_READY_TO_RESET,
-				      700)) {
-			DRM_ERROR("%s: reset request timeout\n", engine->name);
+	for_each_engine_masked(engine, dev_priv, engine_mask)
+		if (gen8_request_engine_reset(engine))
 			goto not_ready;
-		}
-	}
 
-	return gen6_do_reset(dev);
+	return gen6_reset_engines(dev, engine_mask);
 
 not_ready:
-	for_each_ring(engine, dev_priv, i)
-		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
-			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+	for_each_engine_masked(engine, dev_priv, engine_mask)
+		gen8_unrequest_engine_reset(engine);
 
 	return -EIO;
 }
 
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
+							  unsigned engine_mask)
 {
 	if (!i915.reset)
 		return NULL;
 
 	if (INTEL_INFO(dev)->gen >= 8)
-		return gen8_do_reset;
+		return gen8_reset_engines;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		return gen6_do_reset;
+		return gen6_reset_engines;
 	else if (IS_GEN5(dev))
 		return ironlake_do_reset;
 	else if (IS_G4X(dev))
@@ -1595,10 +1728,10 @@
 		return NULL;
 }
 
-int intel_gpu_reset(struct drm_device *dev)
+int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int (*reset)(struct drm_device *);
+	int (*reset)(struct drm_device *, unsigned);
 	int ret;
 
 	reset = intel_get_gpu_reset(dev);
@@ -1609,7 +1742,7 @@
 	 * request may be dropped and never completes (causing -EIO).
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-	ret = reset(dev);
+	ret = reset(dev, engine_mask);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;
@@ -1620,6 +1753,25 @@
 	return intel_get_gpu_reset(dev) != NULL;
 }
 
+int intel_guc_reset(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	unsigned long irqflags;
+
+	if (!i915.enable_guc_submission)
+		return -EINVAL;
+
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	return ret;
+}
+
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 {
 	return check_for_unclaimed_mmio(dev_priv);
@@ -1643,3 +1795,111 @@
 
 	return false;
 }
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+				i915_reg_t reg)
+{
+	enum forcewake_domains fw_domains;
+
+	if (intel_vgpu_active(dev_priv->dev))
+		return 0;
+
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 9:
+		fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+		break;
+	case 8:
+		if (IS_CHERRYVIEW(dev_priv))
+			fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+		else
+			fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+		break;
+	case 7:
+	case 6:
+		if (IS_VALLEYVIEW(dev_priv))
+			fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+		else
+			fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+		break;
+	default:
+		MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+	case 5: /* forcewake was introduced with gen6 */
+	case 4:
+	case 3:
+	case 2:
+		return 0;
+	}
+
+	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+	return fw_domains;
+}
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+				 i915_reg_t reg)
+{
+	enum forcewake_domains fw_domains;
+
+	if (intel_vgpu_active(dev_priv->dev))
+		return 0;
+
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 9:
+		fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+		break;
+	case 8:
+		if (IS_CHERRYVIEW(dev_priv))
+			fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+		else
+			fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+		break;
+	case 7:
+	case 6:
+		fw_domains = FORCEWAKE_RENDER;
+		break;
+	default:
+		MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+	case 5:
+	case 4:
+	case 3:
+	case 2:
+		return 0;
+	}
+
+	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+	return fw_domains;
+}
+
+/**
+ * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
+ * 				    a register
+ * @dev_priv: pointer to struct drm_i915_private
+ * @reg: register in question
+ * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
+ *
+ * Returns the set of forcewake domains that need to be taken (with, for
+ * example, intel_uncore_forcewake_get()) for the specified register to be
+ * accessible in the specified mode (read, write or read/write) with raw mmio
+ * accessors.
+ *
+ * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
+ * callers to do FIFO management on their own or risk losing writes.
+ */
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+			       i915_reg_t reg, unsigned int op)
+{
+	enum forcewake_domains fw_domains = 0;
+
+	WARN_ON(!op);
+
+	if (op & FW_REG_READ)
+		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+
+	if (op & FW_REG_WRITE)
+		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+
+	return fw_domains;
+}
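
As a rough sketch of the intended calling pattern for the new
intel_uncore_forcewake_for_reg() helper described above (the register and the
bit written below are placeholders, not part of this change; dev_priv is
assumed to be in scope):

	enum forcewake_domains fw;

	/* Ask which domains this register needs for a read/modify/write */
	fw = intel_uncore_forcewake_for_reg(dev_priv, GEN6_RP_CONTROL,
					    FW_REG_READ | FW_REG_WRITE);

	intel_uncore_forcewake_get(dev_priv, fw);
	/* Raw accessors avoid taking forcewake again for each access */
	I915_WRITE_FW(GEN6_RP_CONTROL, I915_READ_FW(GEN6_RP_CONTROL) | 0x1);
	intel_uncore_forcewake_put(dev_priv, fw);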
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
new file mode 100644
index 0000000..c15051d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -0,0 +1,844 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * This information is private to VBT parsing in intel_bios.c.
+ *
+ * Please do NOT include anywhere else.
+ */
+#ifndef _INTEL_BIOS_PRIVATE
+#error "intel_vbt_defs.h is private to intel_bios.c"
+#endif
+
+#ifndef _INTEL_VBT_DEFS_H_
+#define _INTEL_VBT_DEFS_H_
+
+#include "intel_bios.h"
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature:		VBT signature, always starts with "$VBT"
+ * @version:		Version of this structure
+ * @header_size:	Size of this structure
+ * @vbt_size:		Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum:	Checksum
+ * @reserved0:		Reserved
+ * @bdb_offset:		Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset:		Offsets of add-in data blocks from beginning of VBT
+ */
+struct vbt_header {
+	u8 signature[20];
+	u16 version;
+	u16 header_size;
+	u16 vbt_size;
+	u8 vbt_checksum;
+	u8 reserved0;
+	u32 bdb_offset;
+	u32 aim_offset[4];
+} __packed;
+
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature:		BDB signature "BIOS_DATA_BLOCK"
+ * @version:		Version of the data block definitions
+ * @header_size:	Size of this structure
+ * @bdb_size:		Size of BDB (BDB Header and data blocks)
+ */
+struct bdb_header {
+	u8 signature[16];
+	u16 version;
+	u16 header_size;
+	u16 bdb_size;
+} __packed;
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+	u8 type; /* 0 == desktop, 1 == mobile */
+	u8 relstage;
+	u8 chipset;
+	u8 lvds_present:1;
+	u8 tv_present:1;
+	u8 rsvd2:6; /* finish byte */
+	u8 rsvd3[4];
+	u8 signon[155];
+	u8 copyright[61];
+	u16 code_segment;
+	u8 dos_boot_mode;
+	u8 bandwidth_percent;
+	u8 rsvd4; /* popup memory size */
+	u8 resize_pci_bios;
+	u8 rsvd5; /* is crt already on ddc2 */
+} __packed;
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES	  1
+#define BDB_GENERAL_DEFINITIONS	  2
+#define BDB_OLD_TOGGLE_LIST	  3
+#define BDB_MODE_SUPPORT_LIST	  4
+#define BDB_GENERIC_MODE_TABLE	  5
+#define BDB_EXT_MMIO_REGS	  6
+#define BDB_SWF_IO		  7
+#define BDB_SWF_MMIO		  8
+#define BDB_PSR			  9
+#define BDB_MODE_REMOVAL_TABLE	 10
+#define BDB_CHILD_DEVICE_TABLE	 11
+#define BDB_DRIVER_FEATURES	 12
+#define BDB_DRIVER_PERSISTENCE	 13
+#define BDB_EXT_TABLE_PTRS	 14
+#define BDB_DOT_CLOCK_OVERRIDE	 15
+#define BDB_DISPLAY_SELECT	 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION	 18
+#define BDB_DISPLAY_REMOVE	 19
+#define BDB_OEM_CUSTOM		 20
+#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS	 22
+#define BDB_SDVO_PANEL_DTDS	 23
+#define BDB_SDVO_LVDS_PNP_IDS	 24
+#define BDB_SDVO_LVDS_POWER_SEQ	 25
+#define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
+#define BDB_LVDS_OPTIONS	 40
+#define BDB_LVDS_LFP_DATA_PTRS	 41
+#define BDB_LVDS_LFP_DATA	 42
+#define BDB_LVDS_BACKLIGHT	 43
+#define BDB_LVDS_POWER		 44
+#define BDB_MIPI_CONFIG		 52
+#define BDB_MIPI_SEQUENCE	 53
+#define BDB_SKIP		254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+        /* bits 1 */
+	u8 panel_fitting:2;
+	u8 flexaim:1;
+	u8 msg_enable:1;
+	u8 clear_screen:3;
+	u8 color_flip:1;
+
+        /* bits 2 */
+	u8 download_ext_vbt:1;
+	u8 enable_ssc:1;
+	u8 ssc_freq:1;
+	u8 enable_lfp_on_override:1;
+	u8 disable_ssc_ddt:1;
+	u8 rsvd7:1;
+	u8 display_clock_mode:1;
+	u8 rsvd8:1; /* finish byte */
+
+        /* bits 3 */
+	u8 disable_smooth_vision:1;
+	u8 single_dvi:1;
+	u8 rsvd9:1;
+	u8 fdi_rx_polarity_inverted:1;
+	u8 rsvd10:4; /* finish byte */
+
+        /* bits 4 */
+	u8 legacy_monitor_detect;
+
+        /* bits 5 */
+	u8 int_crt_support:1;
+	u8 int_tv_support:1;
+	u8 int_efp_support:1;
+	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
+	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
+	u8 rsvd11:3; /* finish byte */
+} __packed;
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C	0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC	0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C	0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE	0x00
+#define DEVICE_TYPE_CRT		0x01
+#define DEVICE_TYPE_TV		0x09
+#define DEVICE_TYPE_EFP		0x12
+#define DEVICE_TYPE_LFP		0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS		0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG	0x4001
+#define DEVICE_TYPE_TV_COMPOSITE	0x0209
+#define DEVICE_TYPE_TV_MACROVISION	0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE	0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE	0x0609
+#define DEVICE_TYPE_TV_SCART		0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR	0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR	0x6052
+#define DEVICE_TYPE_EFP_DVI_I		0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL	0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP	0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR	0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX	0x6162
+#define DEVICE_TYPE_LFP_PANELLINK	0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR	0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR	0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL	0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP	0x51e2
+
+#define DEVICE_CFG_NONE		0x00
+#define DEVICE_CFG_12BIT_DVOB	0x01
+#define DEVICE_CFG_12BIT_DVOC	0x02
+#define DEVICE_CFG_24BIT_DVOBC	0x09
+#define DEVICE_CFG_24BIT_DVOCB	0x0a
+#define DEVICE_CFG_DUAL_DVOB	0x11
+#define DEVICE_CFG_DUAL_DVOC	0x12
+#define DEVICE_CFG_DUAL_DVOBC	0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC	0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB	0x1a
+
+#define DEVICE_WIRE_NONE	0x00
+#define DEVICE_WIRE_DVOB	0x01
+#define DEVICE_WIRE_DVOC	0x02
+#define DEVICE_WIRE_DVOBC	0x03
+#define DEVICE_WIRE_DVOBB	0x05
+#define DEVICE_WIRE_DVOCC	0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA	0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB	0x01
+#define DEVICE_PORT_DVOC	0x02
+
+/*
+ * We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. Do not change; we rely on its size.
+ */
+struct old_child_dev_config {
+	u16 handle;
+	u16 device_type;
+	u8  device_id[10]; /* ascii string */
+	u16 addin_offset;
+	u8  dvo_port; /* See Device_PORT_* above */
+	u8  i2c_pin;
+	u8  slave_addr;
+	u8  ddc_pin;
+	u16 edid_ptr;
+	u8  dvo_cfg; /* See DEVICE_CFG_* above */
+	u8  dvo2_port;
+	u8  i2c2_pin;
+	u8  slave2_addr;
+	u8  ddc2_pin;
+	u8  capabilities;
+	u8  dvo_wiring;/* See DEVICE_WIRE_* above */
+	u8  dvo2_wiring;
+	u16 extended_type;
+	u8  dvo_function;
+} __packed;
+
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change, but at
+ * least the offsets are consistent. */
+
+struct common_child_dev_config {
+	u16 handle;
+	u16 device_type;
+	u8 not_common1[12];
+	u8 dvo_port;
+	u8 not_common2[2];
+	u8 ddc_pin;
+	u16 edid_ptr;
+	u8 dvo_cfg; /* See DEVICE_CFG_* above */
+	u8 efp_routed:1;
+	u8 lane_reversal:1;
+	u8 lspcon:1;
+	u8 iboost:1;
+	u8 hpd_invert:1;
+	u8 flag_reserved:3;
+	u8 hdmi_support:1;
+	u8 dp_support:1;
+	u8 tmds_support:1;
+	u8 support_reserved:5;
+	u8 not_common3[12];
+	u8 iboost_level;
+} __packed;
+
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+	/* This one is safe to use anywhere, but the code should still check
+	 * the BDB version. */
+	u8 raw[33];
+	/* This one should only be kept for legacy code. */
+	struct old_child_dev_config old;
+	/* This one should also be safe to use anywhere, even without version
+	 * checks. */
+	struct common_child_dev_config common;
+} __packed;
+
+struct bdb_general_definitions {
+	/* DDC GPIO */
+	u8 crt_ddc_gmbus_pin;
+
+	/* DPMS bits */
+	u8 dpms_acpi:1;
+	u8 skip_boot_crt_detect:1;
+	u8 dpms_aim:1;
+	u8 rsvd1:5; /* finish byte */
+
+	/* boot device bits */
+	u8 boot_display[2];
+	u8 child_dev_size;
+
+	/*
+	 * Device info:
+	 * If TV is present, it'll be at devices[0].
+	 * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be as
+	 * few as 4 if both TV and LVDS are missing.
+	 * The number of devices depends on the size of the general
+	 * definitions block and is obtained with the following formula:
+	 * number = (block_size - sizeof(bdb_general_definitions)) /
+	 *	     defs->child_dev_size;
+	 */
+	uint8_t devices[0];
+} __packed;
+
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK		0x3
+
+struct bdb_lvds_options {
+	u8 panel_type;
+	u8 rsvd1;
+	/* LVDS capabilities, stored in a dword */
+	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8 pfit_ratio_auto:1;
+	u8 pixel_dither:1;
+	u8 lvds_edid:1;
+	u8 rsvd2:1;
+	u8 rsvd4;
+	/* LVDS Panel channel bits stored here */
+	u32 lvds_panel_channel_bits;
+	/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+	u16 ssc_bits;
+	u16 ssc_freq;
+	u16 ssc_ddt;
+	/* Panel color depth defined here */
+	u16 panel_color_depth;
+	/* LVDS panel type bits stored here */
+	u32 dps_panel_type_bits;
+	/* LVDS backlight control type bits stored here */
+	u32 blt_control_type_bits;
+} __packed;
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+	u16 fp_timing_offset; /* offsets are from start of bdb */
+	u8 fp_table_size;
+	u16 dvo_timing_offset;
+	u8 dvo_table_size;
+	u16 panel_pnp_id_offset;
+	u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+	struct bdb_lvds_lfp_data_ptr ptr[16];
+} __packed;
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+	u16 x_res;
+	u16 y_res;
+	u32 lvds_reg;
+	u32 lvds_reg_val;
+	u32 pp_on_reg;
+	u32 pp_on_reg_val;
+	u32 pp_off_reg;
+	u32 pp_off_reg_val;
+	u32 pp_cycle_reg;
+	u32 pp_cycle_reg_val;
+	u32 pfit_reg;
+	u32 pfit_reg_val;
+	u16 terminator;
+} __packed;
+
+struct lvds_dvo_timing {
+	u16 clock;		/**< In 10khz */
+	u8 hactive_lo;
+	u8 hblank_lo;
+	u8 hblank_hi:4;
+	u8 hactive_hi:4;
+	u8 vactive_lo;
+	u8 vblank_lo;
+	u8 vblank_hi:4;
+	u8 vactive_hi:4;
+	u8 hsync_off_lo;
+	u8 hsync_pulse_width;
+	u8 vsync_pulse_width:4;
+	u8 vsync_off:4;
+	u8 rsvd0:6;
+	u8 hsync_off_hi:2;
+	u8 h_image;
+	u8 v_image;
+	u8 max_hv;
+	u8 h_border;
+	u8 v_border;
+	u8 rsvd1:3;
+	u8 digital:2;
+	u8 vsync_positive:1;
+	u8 hsync_positive:1;
+	u8 rsvd2:1;
+} __packed;
+
+struct lvds_pnp_id {
+	u16 mfg_name;
+	u16 product_code;
+	u32 serial;
+	u8 mfg_week;
+	u8 mfg_year;
+} __packed;
+
+struct bdb_lvds_lfp_data_entry {
+	struct lvds_fp_timing fp_timing;
+	struct lvds_dvo_timing dvo_timing;
+	struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+	struct bdb_lvds_lfp_data_entry data[16];
+} __packed;
+
+#define BDB_BACKLIGHT_TYPE_NONE	0
+#define BDB_BACKLIGHT_TYPE_PWM	2
+
+struct bdb_lfp_backlight_data_entry {
+	u8 type:2;
+	u8 active_low_pwm:1;
+	u8 obsolete1:5;
+	u16 pwm_freq_hz;
+	u8 min_brightness;
+	u8 obsolete2;
+	u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+	u8 entry_size;
+	struct bdb_lfp_backlight_data_entry data[16];
+	u8 level[16];
+} __packed;
+
+struct aimdb_header {
+	char signature[16];
+	char oem_device[20];
+	u16 aimdb_version;
+	u16 aimdb_header_size;
+	u16 aimdb_size;
+} __packed;
+
+struct aimdb_block {
+	u8 aimdb_id;
+	u16 aimdb_size;
+} __packed;
+
+struct vch_panel_data {
+	u16 fp_timing_offset;
+	u8 fp_timing_size;
+	u16 dvo_timing_offset;
+	u8 dvo_timing_size;
+	u16 text_fitting_offset;
+	u8 text_fitting_size;
+	u16 graphics_fitting_offset;
+	u8 graphics_fitting_size;
+} __packed;
+
+struct vch_bdb_22 {
+	struct aimdb_block aimdb_block;
+	struct vch_panel_data panels[16];
+} __packed;
+
+struct bdb_sdvo_lvds_options {
+	u8 panel_backlight;
+	u8 h40_set_panel_type;
+	u8 panel_type;
+	u8 ssc_clk_freq;
+	u16 als_low_trip;
+	u16 als_high_trip;
+	u8 sclalarcoeff_tab_row_num;
+	u8 sclalarcoeff_tab_row_size;
+	u8 coefficient[8];
+	u8 panel_misc_bits_1;
+	u8 panel_misc_bits_2;
+	u8 panel_misc_bits_3;
+	u8 panel_misc_bits_4;
+} __packed;
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
+struct bdb_driver_features {
+	u8 boot_dev_algorithm:1;
+	u8 block_display_switch:1;
+	u8 allow_display_switch:1;
+	u8 hotplug_dvo:1;
+	u8 dual_view_zoom:1;
+	u8 int15h_hook:1;
+	u8 sprite_in_clone:1;
+	u8 primary_lfp_id:1;
+
+	u16 boot_mode_x;
+	u16 boot_mode_y;
+	u8 boot_mode_bpp;
+	u8 boot_mode_refresh;
+
+	u16 enable_lfp_primary:1;
+	u16 selective_mode_pruning:1;
+	u16 dual_frequency:1;
+	u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+	u16 nt_clone_support:1;
+	u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+	u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+	u16 cui_aspect_scaling:1;
+	u16 preserve_aspect_ratio:1;
+	u16 sdvo_device_power_down:1;
+	u16 crt_hotplug:1;
+	u16 lvds_config:2;
+	u16 tv_hotplug:1;
+	u16 hdmi_config:2;
+
+	u8 static_display:1;
+	u8 reserved2:7;
+	u16 legacy_crt_max_x;
+	u16 legacy_crt_max_y;
+	u8 legacy_crt_max_refresh;
+
+	u8 hdmi_termination;
+	u8 custom_vbt_version;
+	/* Driver features data block */
+	u16 rmpm_enabled:1;
+	u16 s2ddt_enabled:1;
+	u16 dpst_enabled:1;
+	u16 bltclt_enabled:1;
+	u16 adb_enabled:1;
+	u16 drrs_enabled:1;
+	u16 grs_enabled:1;
+	u16 gpmt_enabled:1;
+	u16 tbt_enabled:1;
+	u16 psr_enabled:1;
+	u16 ips_enabled:1;
+	u16 reserved3:4;
+	u16 pc_feature_valid:1;
+} __packed;
+
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __packed;
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	struct edp_link_params link_params[16];
+	u32 sdrrs_msa_timing_delay;
+
+	/* ith bit indicates enabled/disabled for (i+1)th panel */
+	u16 edp_s3d_feature;
+	u16 edp_t3_optimization;
+	u64 edp_vswing_preemph;		/* v173 */
+} __packed;
+
+struct psr_table {
+	/* Feature bits */
+	u8 full_link:1;
+	u8 require_aux_to_wakeup:1;
+	u8 feature_bits_rsvd:6;
+
+	/* Wait times */
+	u8 idle_frames:4;
+	u8 lines_to_wait:3;
+	u8 wait_times_rsvd:1;
+
+	/* TP wake up time in multiple of 100 */
+	u16 tp1_wakeup_time;
+	u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+	struct psr_table psr_table[16];
+} __packed;
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE		(0x0<<3)
+#define   GR18_HK_LFP_STRETCH	(0x1<<3)
+#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
+#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
+#define   GR18_HK_PFIT		(0x8<<3)
+#define   GR18_HK_APM_CHANGE	(0xa<<3)
+#define   GR18_HK_MULTIPLE	(0xc<<3)
+#define GR18_USER_INT_EN	(1<<2)
+#define GR18_A0000_FLUSH_EN	(1<<1)
+#define GR18_SMM_EN		(1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT	16
+#define SWF00_XRES_SHIFT	0
+#define SWF00_RES_MASK		0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT	8
+#define SWF01_TV1_FORMAT_SHIFT	0
+#define SWF01_TV_FORMAT_MASK	0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
+#define SWF10_GTT_OVERRIDE_EN	(1<<28)
+#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE	0x0
+#define   SWF10_TOGGLE_LIST_1	0x1
+#define   SWF10_TOGGLE_LIST_2	0x2
+#define   SWF10_TOGGLE_LIST_3	0x3
+#define   SWF10_TOGGLE_LIST_4	0x4
+#define SWF10_PANNING_EN	(1<<23)
+#define SWF10_DRIVER_LOADED	(1<<22)
+#define SWF10_EXTENDED_DESKTOP	(1<<21)
+#define SWF10_EXCLUSIVE_MODE	(1<<20)
+#define SWF10_OVERLAY_EN	(1<<19)
+#define SWF10_PLANEB_HOLDOFF	(1<<18)
+#define SWF10_PLANEA_HOLDOFF	(1<<17)
+#define SWF10_VGA_HOLDOFF	(1<<16)
+#define SWF10_ACTIVE_DISP_MASK	0xffff
+#define   SWF10_PIPEB_LFP2	(1<<15)
+#define   SWF10_PIPEB_EFP2	(1<<14)
+#define   SWF10_PIPEB_TV2	(1<<13)
+#define   SWF10_PIPEB_CRT2	(1<<12)
+#define   SWF10_PIPEB_LFP	(1<<11)
+#define   SWF10_PIPEB_EFP	(1<<10)
+#define   SWF10_PIPEB_TV	(1<<9)
+#define   SWF10_PIPEB_CRT	(1<<8)
+#define   SWF10_PIPEA_LFP2	(1<<7)
+#define   SWF10_PIPEA_EFP2	(1<<6)
+#define   SWF10_PIPEA_TV2	(1<<5)
+#define   SWF10_PIPEA_CRT2	(1<<4)
+#define   SWF10_PIPEA_LFP	(1<<3)
+#define   SWF10_PIPEA_EFP	(1<<2)
+#define   SWF10_PIPEA_TV	(1<<1)
+#define   SWF10_PIPEA_CRT	(1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT	16
+#define SWF11_SV_TEST_EN	(1<<15)
+#define SWF11_IS_AGP		(1<<14)
+#define SWF11_DISPLAY_HOLDOFF	(1<<13)
+#define SWF11_DPMS_REDUCED	(1<<12)
+#define SWF11_IS_VBE_MODE	(1<<11)
+#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK		0x07
+#define   SWF11_DPMS_OFF	(1<<2)
+#define   SWF11_DPMS_SUSPEND	(1<<1)
+#define   SWF11_DPMS_STANDBY	(1<<0)
+#define   SWF11_DPMS_ON		0
+
+#define SWF14_GFX_PFIT_EN	(1<<31)
+#define SWF14_TEXT_PFIT_EN	(1<<30)
+#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN		(1<<28)
+#define SWF14_DISPLAY_HOLDOFF	(1<<27)
+#define SWF14_DISP_DETECT_EN	(1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS	(1<<24)
+#define SWF14_OS_TYPE_WIN9X	(1<<23)
+#define SWF14_OS_TYPE_WINNT	(1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK	0x00070000
+#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
+#define   SWF14_PM_ACPI		(0x3 << 16)
+#define   SWF14_PM_APM_12	(0x2 << 16)
+#define   SWF14_PM_APM_11	(0x1 << 16)
+#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
+          /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+          /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
+          /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE	0x4
+#define   SWF14_APM_SUSPEND	0x3
+#define   SWF14_APM_STANDBY	0x1
+#define   SWF14_APM_RESTORE	0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define	 DEVICE_TYPE_INT_LFP	0x1022
+#define	 DEVICE_TYPE_INT_TV	0x1009
+#define	 DEVICE_TYPE_HDMI	0x60D2
+#define	 DEVICE_TYPE_DP		0x68C6
+#define	 DEVICE_TYPE_DP_DUAL_MODE	0x60D6
+#define	 DEVICE_TYPE_eDP	0x78C6
+
+#define  DEVICE_TYPE_CLASS_EXTENSION	(1 << 15)
+#define  DEVICE_TYPE_POWER_MANAGEMENT	(1 << 14)
+#define  DEVICE_TYPE_HOTPLUG_SIGNALING	(1 << 13)
+#define  DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
+#define  DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
+#define  DEVICE_TYPE_MIPI_OUTPUT	(1 << 10)
+#define  DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
+#define  DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
+#define  DEVICE_TYPE_HIGH_SPEED_LINK	(1 << 6)
+#define  DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
+#define  DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
+#define  DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
+#define  DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
+#define  DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
+#define  DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
+	 DEVICE_TYPE_MIPI_OUTPUT | \
+	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
+	 DEVICE_TYPE_DUAL_CHANNEL | \
+	 DEVICE_TYPE_LVDS_SINGALING | \
+	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+	 DEVICE_TYPE_VIDEO_SIGNALING | \
+	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+	 DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
+	 DEVICE_TYPE_MIPI_OUTPUT | \
+	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
+	 DEVICE_TYPE_LVDS_SINGALING | \
+	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+	 DEVICE_TYPE_VIDEO_SIGNALING | \
+	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+	 DEVICE_TYPE_DIGITAL_OUTPUT | \
+	 DEVICE_TYPE_ANALOG_OUTPUT)
+
+/* define the DVO port for HDMI output type */
+#define		DVO_B		1
+#define		DVO_C		2
+#define		DVO_D		3
+
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA	0
+#define DVO_PORT_HDMIB	1
+#define DVO_PORT_HDMIC	2
+#define DVO_PORT_HDMID	3
+#define DVO_PORT_LVDS	4
+#define DVO_PORT_TV	5
+#define DVO_PORT_CRT	6
+#define DVO_PORT_DPB	7
+#define DVO_PORT_DPC	8
+#define DVO_PORT_DPD	9
+#define DVO_PORT_DPA	10
+#define DVO_PORT_DPE	11
+#define DVO_PORT_HDMIE	12
+#define DVO_PORT_MIPIA	21
+#define DVO_PORT_MIPIB	22
+#define DVO_PORT_MIPIC	23
+#define DVO_PORT_MIPID	24
+
+/* Block 52 contains the MIPI configuration block:
+ * 6 * bdb_mipi_config entries, followed by 6 pps data
+ * blocks, laid out in the struct below.
+ */
+#define MAX_MIPI_CONFIGURATIONS	6
+
+struct bdb_mipi_config {
+	struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+	struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+} __packed;
+
+/* Block 53 contains the MIPI sequences needed by the panel
+ * for enabling it. This block is variable in size and
+ * can contain a maximum of 6 sequence blocks.
+ */
+struct bdb_mipi_sequence {
+	u8 version;
+	u8 data[0];
+} __packed;
+
+enum mipi_gpio_pin_index {
+	MIPI_GPIO_UNDEFINED = 0,
+	MIPI_GPIO_PANEL_ENABLE,
+	MIPI_GPIO_BL_ENABLE,
+	MIPI_GPIO_PWM_ENABLE,
+	MIPI_GPIO_RESET_N,
+	MIPI_GPIO_PWR_DOWN_R,
+	MIPI_GPIO_STDBY_RST_N,
+	MIPI_GPIO_MAX
+};
+
+#endif /* _INTEL_VBT_DEFS_H_ */
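
A minimal sketch of how these structures are meant to be consumed by the
parser in intel_bios.c (the helper names below are illustrative only): the BDB
header is located via vbt_header.bdb_offset, and the child device count
follows the formula documented in struct bdb_general_definitions:

	/* Illustrative: locate the BDB header inside a validated VBT image */
	static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
	{
		return (const void *)vbt + vbt->bdb_offset;
	}

	/* Illustrative: number of child devices in the general definitions block */
	static unsigned int
	child_device_num(const struct bdb_general_definitions *defs, u16 block_size)
	{
		return (block_size - sizeof(*defs)) / defs->child_dev_size;
	}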
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index e26dcde..8265665 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -25,6 +25,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_of.h>
+#include <video/imx-ipu-v3.h>
 
 #include "imx-drm.h"
 
@@ -96,8 +97,8 @@
 	return NULL;
 }
 
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
-		int hsync_pin, int vsync_pin)
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+		int hsync_pin, int vsync_pin, u32 bus_flags)
 {
 	struct imx_drm_crtc_helper_funcs *helper;
 	struct imx_drm_crtc *imx_crtc;
@@ -109,14 +110,17 @@
 	helper = &imx_crtc->imx_drm_helper_funcs;
 	if (helper->set_interface_pix_fmt)
 		return helper->set_interface_pix_fmt(encoder->crtc,
-					bus_format, hsync_pin, vsync_pin);
+					bus_format, hsync_pin, vsync_pin,
+					bus_flags);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
 
 int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
 {
-	return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
+	return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
+				      DRM_BUS_FLAG_DE_HIGH |
+				      DRM_BUS_FLAG_PIXDATA_NEGEDGE);
 }
 EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
 
@@ -252,13 +256,6 @@
 	if (ret)
 		goto err_kms;
 
-	/*
-	 * with vblank_disable_allowed = true, vblank interrupt will be
-	 * disabled by drm timer once a current process gives up ownership
-	 * of vblank event. (after drm_vblank_put function is called)
-	 */
-	drm->vblank_disable_allowed = true;
-
 	platform_set_drvdata(drm->platformdev, drm);
 
 	/* Now try and bind all our sub-components */
@@ -411,7 +408,7 @@
 	.unload			= imx_drm_driver_unload,
 	.lastclose		= imx_drm_driver_lastclose,
 	.set_busid		= drm_platform_set_busid,
-	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
 	.dumb_create		= drm_gem_cma_dumb_create,
 	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
@@ -444,6 +441,13 @@
 {
 	struct device_node *np = data;
 
+	/* Special case for DI, dev->of_node may not be set yet */
+	if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
+		struct ipu_client_platformdata *pdata = dev->platform_data;
+
+		return pdata->of_node == np;
+	}
+
 	/* Special case for LDB, one device for two channels */
 	if (of_node_cmp(np->name, "lvds-channel") == 0) {
 		np = of_get_parent(np);
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index b0241b9..74320a1 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -19,7 +19,8 @@
 	int (*enable_vblank)(struct drm_crtc *crtc);
 	void (*disable_vblank)(struct drm_crtc *crtc);
 	int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
-			u32 bus_format, int hsync_pin, int vsync_pin);
+			u32 bus_format, int hsync_pin, int vsync_pin,
+			u32 bus_flags);
 	const struct drm_crtc_helper_funcs *crtc_helper_funcs;
 	const struct drm_crtc_funcs *crtc_funcs;
 };
@@ -41,8 +42,8 @@
 
 struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
 
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
-		u32 bus_format, int hsync_pin, int vsync_pin);
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+		int hsync_pin, int vsync_pin, u32 bus_flags);
 int imx_drm_set_bus_format(struct drm_encoder *encoder,
 		u32 bus_format);
 
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index a58eee5..beff793 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -25,6 +25,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <video/of_display_timing.h>
 #include <video/of_videomode.h>
 #include <linux/regmap.h>
 #include <linux/videodev2.h>
@@ -59,6 +60,7 @@
 	struct drm_encoder encoder;
 	struct drm_panel *panel;
 	struct device_node *child;
+	struct i2c_adapter *ddc;
 	int chno;
 	void *edid;
 	int edid_len;
@@ -107,6 +109,9 @@
 			return num_modes;
 	}
 
+	if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
+		imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
+
 	if (imx_ldb_ch->edid) {
 		drm_mode_connector_update_edid_property(connector,
 							imx_ldb_ch->edid);
@@ -553,7 +558,8 @@
 
 	for_each_child_of_node(np, child) {
 		struct imx_ldb_channel *channel;
-		struct device_node *port;
+		struct device_node *ddc_node;
+		struct device_node *ep;
 
 		ret = of_property_read_u32(child, "reg", &i);
 		if (ret || i < 0 || i > 1)
@@ -576,33 +582,54 @@
 		 * The output port is port@4 with an external 4-port mux or
 		 * port@2 with the internal 2-port mux.
 		 */
-		port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
-		if (port) {
-			struct device_node *endpoint, *remote;
+		ep = of_graph_get_endpoint_by_regs(child,
+						   imx_ldb->lvds_mux ? 4 : 2,
+						   -1);
+		if (ep) {
+			struct device_node *remote;
 
-			endpoint = of_get_child_by_name(port, "endpoint");
-			if (endpoint) {
-				remote = of_graph_get_remote_port_parent(endpoint);
-				if (remote)
-					channel->panel = of_drm_find_panel(remote);
-				else
-					return -EPROBE_DEFER;
-				if (!channel->panel) {
-					dev_err(dev, "panel not found: %s\n",
-						remote->full_name);
-					return -EPROBE_DEFER;
-				}
+			remote = of_graph_get_remote_port_parent(ep);
+			of_node_put(ep);
+			if (remote)
+				channel->panel = of_drm_find_panel(remote);
+			else
+				return -EPROBE_DEFER;
+			of_node_put(remote);
+			if (!channel->panel) {
+				dev_err(dev, "panel not found: %s\n",
+					remote->full_name);
+				return -EPROBE_DEFER;
 			}
 		}
 
-		edidp = of_get_property(child, "edid", &channel->edid_len);
-		if (edidp) {
-			channel->edid = kmemdup(edidp, channel->edid_len,
-						GFP_KERNEL);
-		} else if (!channel->panel) {
-			ret = of_get_drm_display_mode(child, &channel->mode, 0);
-			if (!ret)
-				channel->mode_valid = 1;
+		ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+		if (ddc_node) {
+			channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+			of_node_put(ddc_node);
+			if (!channel->ddc) {
+				dev_warn(dev, "failed to get ddc i2c adapter\n");
+				return -EPROBE_DEFER;
+			}
+		}
+
+		if (!channel->ddc) {
+			/* if no DDC available, fallback to hardcoded EDID */
+			dev_dbg(dev, "no ddc available\n");
+
+			edidp = of_get_property(child, "edid",
+						&channel->edid_len);
+			if (edidp) {
+				channel->edid = kmemdup(edidp,
+							channel->edid_len,
+							GFP_KERNEL);
+			} else if (!channel->panel) {
+				/* fallback to display-timings node */
+				ret = of_get_drm_display_mode(child,
+							      &channel->mode,
+							      OF_USE_NATIVE_MODE);
+				if (!ret)
+					channel->mode_valid = 1;
+			}
 		}
 
 		channel->bus_format = of_get_bus_format(dev, child);
@@ -647,6 +674,7 @@
 		channel->encoder.funcs->destroy(&channel->encoder);
 
 		kfree(channel->edid);
+		i2c_put_adapter(channel->ddc);
 	}
 }
 
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index ae7a9fb..baf7881 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -294,8 +294,10 @@
 
 	switch (tve->mode) {
 	case TVE_MODE_VGA:
-		imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
-					    tve->hsync_pin, tve->vsync_pin);
+		imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
+				       tve->hsync_pin, tve->vsync_pin,
+				       DRM_BUS_FLAG_DE_HIGH |
+				       DRM_BUS_FLAG_PIXDATA_NEGEDGE);
 		break;
 	case TVE_MODE_TVOUT:
 		imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dee8e8b..fc04041 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -66,6 +66,7 @@
 	struct ipu_flip_work	*flip_work;
 	int			irq;
 	u32			bus_format;
+	u32			bus_flags;
 	int			di_hsync_pin;
 	int			di_vsync_pin;
 };
@@ -271,8 +272,10 @@
 	else
 		sig_cfg.clkflags = 0;
 
-	sig_cfg.enable_pol = 1;
-	sig_cfg.clk_pol = 0;
+	sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+	/* Default to driving pixel data on negative clock edges */
+	sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+			     DRM_BUS_FLAG_PIXDATA_POSEDGE);
 	sig_cfg.bus_format = ipu_crtc->bus_format;
 	sig_cfg.v_to_h_sync = 0;
 	sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
@@ -396,11 +399,12 @@
 }
 
 static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
-		u32 bus_format, int hsync_pin, int vsync_pin)
+		u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
 {
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 
 	ipu_crtc->bus_format = bus_format;
+	ipu_crtc->bus_flags = bus_flags;
 	ipu_crtc->di_hsync_pin = hsync_pin;
 	ipu_crtc->di_vsync_pin = vsync_pin;
 
@@ -473,7 +477,7 @@
 
 	ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
 			&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
-			ipu_crtc->dev->of_node);
+			pdata->of_node);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
 		goto err_put_resources;
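
Note that with the flags passed by the imx_drm_set_bus_format() wrapper above
(DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE), the new expressions in
ipu_crtc_mode_set() evaluate to the polarities that used to be hard-coded:

	sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);	/* = 1 */
	sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
			     DRM_BUS_FLAG_PIXDATA_POSEDGE);			/* = 0 */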
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 681ec6e..a4bb441 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -38,6 +38,8 @@
 	DRM_FORMAT_RGBX8888,
 	DRM_FORMAT_BGRA8888,
 	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
 	DRM_FORMAT_YUYV,
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_YUV420,
@@ -428,7 +430,6 @@
 	if (crtc != plane->crtc)
 		dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
 				plane->crtc, crtc);
-	plane->crtc = crtc;
 
 	if (!ipu_plane->enabled)
 		ipu_plane_enable(ipu_plane);
@@ -461,7 +462,7 @@
 	kfree(ipu_plane);
 }
 
-static struct drm_plane_funcs ipu_plane_funcs = {
+static const struct drm_plane_funcs ipu_plane_funcs = {
 	.update_plane	= ipu_update_plane,
 	.disable_plane	= ipu_disable_plane,
 	.destroy	= ipu_plane_destroy,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 363e2c7..2d1fd02 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -35,7 +35,6 @@
 	void *edid;
 	int edid_len;
 	u32 bus_format;
-	int mode_valid;
 	struct drm_display_mode mode;
 	struct drm_panel *panel;
 };
@@ -68,17 +67,6 @@
 		num_modes = drm_add_edid_modes(connector, imxpd->edid);
 	}
 
-	if (imxpd->mode_valid) {
-		struct drm_display_mode *mode = drm_mode_create(connector->dev);
-
-		if (!mode)
-			return -EINVAL;
-		drm_mode_copy(mode, &imxpd->mode);
-		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
-		drm_mode_probed_add(connector, mode);
-		num_modes++;
-	}
-
 	if (np) {
 		struct drm_display_mode *mode = drm_mode_create(connector->dev);
 
@@ -115,8 +103,8 @@
 static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
-	imx_drm_set_bus_format(encoder, imxpd->bus_format);
+	imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
+			       imxpd->connector.display_info.bus_flags);
 }
 
 static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -203,7 +191,7 @@
 {
 	struct drm_device *drm = data;
 	struct device_node *np = dev->of_node;
-	struct device_node *port;
+	struct device_node *ep;
 	const u8 *edidp;
 	struct imx_parallel_display *imxpd;
 	int ret;
@@ -230,18 +218,18 @@
 	}
 
 	/* port@1 is the output port */
-	port = of_graph_get_port_by_id(np, 1);
-	if (port) {
-		struct device_node *endpoint, *remote;
+	ep = of_graph_get_endpoint_by_regs(np, 1, -1);
+	if (ep) {
+		struct device_node *remote;
 
-		endpoint = of_get_child_by_name(port, "endpoint");
-		if (endpoint) {
-			remote = of_graph_get_remote_port_parent(endpoint);
-			if (remote)
-				imxpd->panel = of_drm_find_panel(remote);
-			if (!imxpd->panel)
-				return -EPROBE_DEFER;
+		remote = of_graph_get_remote_port_parent(ep);
+		of_node_put(ep);
+		if (remote) {
+			imxpd->panel = of_drm_find_panel(remote);
+			of_node_put(remote);
 		}
+		if (!imxpd->panel)
+			return -EPROBE_DEFER;
 	}
 
 	imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
new file mode 100644
index 0000000..eeefc97
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -0,0 +1,16 @@
+config DRM_MEDIATEK
+	tristate "DRM Support for Mediatek SoCs"
+	depends on DRM
+	depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_HELPER
+	select DRM_MIPI_DSI
+	select DRM_PANEL
+	select IOMMU_DMA
+	select MEMORY
+	select MTK_SMI
+	help
+	  Choose this option if you have a Mediatek SoC.
+	  The module will be called mediatek-drm.
+	  This driver provides kernel mode setting and
+	  buffer management to userspace.
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
new file mode 100644
index 0000000..5fcf58e
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -0,0 +1,14 @@
+mediatek-drm-y := mtk_disp_ovl.o \
+		  mtk_disp_rdma.o \
+		  mtk_drm_crtc.o \
+		  mtk_drm_ddp.o \
+		  mtk_drm_ddp_comp.o \
+		  mtk_drm_drv.o \
+		  mtk_drm_fb.o \
+		  mtk_drm_gem.o \
+		  mtk_drm_plane.o \
+		  mtk_dsi.o \
+		  mtk_mipi_tx.o \
+		  mtk_dpi.o
+
+obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
new file mode 100644
index 0000000..8f62671f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_REG_OVL_INTEN			0x0004
+#define OVL_FME_CPL_INT					BIT(1)
+#define DISP_REG_OVL_INTSTA			0x0008
+#define DISP_REG_OVL_EN				0x000c
+#define DISP_REG_OVL_RST			0x0014
+#define DISP_REG_OVL_ROI_SIZE			0x0020
+#define DISP_REG_OVL_ROI_BGCLR			0x0028
+#define DISP_REG_OVL_SRC_CON			0x002c
+#define DISP_REG_OVL_CON(n)			(0x0030 + 0x20 * (n))
+#define DISP_REG_OVL_SRC_SIZE(n)		(0x0038 + 0x20 * (n))
+#define DISP_REG_OVL_OFFSET(n)			(0x003c + 0x20 * (n))
+#define DISP_REG_OVL_PITCH(n)			(0x0044 + 0x20 * (n))
+#define DISP_REG_OVL_RDMA_CTRL(n)		(0x00c0 + 0x20 * (n))
+#define DISP_REG_OVL_RDMA_GMC(n)		(0x00c8 + 0x20 * (n))
+#define DISP_REG_OVL_ADDR(n)			(0x0f40 + 0x20 * (n))
+
+#define	OVL_RDMA_MEM_GMC	0x40402020
+
+#define OVL_CON_BYTE_SWAP	BIT(24)
+#define OVL_CON_CLRFMT_RGB565	(0 << 12)
+#define OVL_CON_CLRFMT_RGB888	(1 << 12)
+#define OVL_CON_CLRFMT_RGBA8888	(2 << 12)
+#define OVL_CON_CLRFMT_ARGB8888	(3 << 12)
+#define	OVL_CON_AEN		BIT(8)
+#define	OVL_CON_ALPHA		0xff
+
+/**
+ * struct mtk_disp_ovl - DISP_OVL driver structure
+ * @ddp_comp: structure containing type enum and hardware resources
+ * @crtc: associated crtc to report vblank events to
+ */
+struct mtk_disp_ovl {
+	struct mtk_ddp_comp		ddp_comp;
+	struct drm_crtc			*crtc;
+};
+
+static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
+{
+	struct mtk_disp_ovl *priv = dev_id;
+	struct mtk_ddp_comp *ovl = &priv->ddp_comp;
+
+	/* Clear frame completion interrupt */
+	writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+
+	if (!priv->crtc)
+		return IRQ_NONE;
+
+	mtk_crtc_ddp_irq(priv->crtc, ovl);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
+				  struct drm_crtc *crtc)
+{
+	struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
+						 ddp_comp);
+
+	priv->crtc = crtc;
+	writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
+}
+
+static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
+{
+	struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
+						 ddp_comp);
+
+	priv->crtc = NULL;
+	writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
+}
+
+static void mtk_ovl_start(struct mtk_ddp_comp *comp)
+{
+	writel_relaxed(0x1, comp->regs + DISP_REG_OVL_EN);
+}
+
+static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
+{
+	writel_relaxed(0x0, comp->regs + DISP_REG_OVL_EN);
+}
+
+static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
+			   unsigned int h, unsigned int vrefresh)
+{
+	if (w != 0 && h != 0)
+		writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
+	writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+
+	writel(0x1, comp->regs + DISP_REG_OVL_RST);
+	writel(0x0, comp->regs + DISP_REG_OVL_RST);
+}
+
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+{
+	unsigned int reg;
+
+	writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+	writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
+	reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
+	reg = reg | BIT(idx);
+	writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+}
+
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+{
+	unsigned int reg;
+
+	reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
+	reg = reg & ~BIT(idx);
+	writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+
+	writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+}
+
+static unsigned int ovl_fmt_convert(unsigned int fmt)
+{
+	switch (fmt) {
+	default:
+	case DRM_FORMAT_RGB565:
+		return OVL_CON_CLRFMT_RGB565;
+	case DRM_FORMAT_BGR565:
+		return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP;
+	case DRM_FORMAT_RGB888:
+		return OVL_CON_CLRFMT_RGB888;
+	case DRM_FORMAT_BGR888:
+		return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP;
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_RGBA8888:
+		return OVL_CON_CLRFMT_ARGB8888;
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_BGRA8888:
+		return OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		return OVL_CON_CLRFMT_RGBA8888;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP;
+	}
+}
+
+static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+				 struct mtk_plane_state *state)
+{
+	struct mtk_plane_pending_state *pending = &state->pending;
+	unsigned int addr = pending->addr;
+	unsigned int pitch = pending->pitch & 0xffff;
+	unsigned int fmt = pending->format;
+	unsigned int offset = (pending->y << 16) | pending->x;
+	unsigned int src_size = (pending->height << 16) | pending->width;
+	unsigned int con;
+
+	if (!pending->enable)
+		mtk_ovl_layer_off(comp, idx);
+
+	con = ovl_fmt_convert(fmt);
+	if (idx != 0)
+		con |= OVL_CON_AEN | OVL_CON_ALPHA;
+
+	writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
+	writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
+	writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
+	writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
+	writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx));
+
+	if (pending->enable)
+		mtk_ovl_layer_on(comp, idx);
+}
+
+static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
+	.config = mtk_ovl_config,
+	.start = mtk_ovl_start,
+	.stop = mtk_ovl_stop,
+	.enable_vblank = mtk_ovl_enable_vblank,
+	.disable_vblank = mtk_ovl_disable_vblank,
+	.layer_on = mtk_ovl_layer_on,
+	.layer_off = mtk_ovl_layer_off,
+	.layer_config = mtk_ovl_layer_config,
+};
+
+static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
+			     void *data)
+{
+	struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
+	if (ret < 0) {
+		dev_err(dev, "Failed to register component %s: %d\n",
+			dev->of_node->full_name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
+				void *data)
+{
+	struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+
+	mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
+}
+
+static const struct component_ops mtk_disp_ovl_component_ops = {
+	.bind	= mtk_disp_ovl_bind,
+	.unbind = mtk_disp_ovl_unbind,
+};
+
+static int mtk_disp_ovl_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_disp_ovl *priv;
+	int comp_id;
+	int irq;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+			       IRQF_TRIGGER_NONE, dev_name(dev), priv);
+	if (ret < 0) {
+		dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+		return ret;
+	}
+
+	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+	if (comp_id < 0) {
+		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+		return comp_id;
+	}
+
+	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
+				&mtk_disp_ovl_funcs);
+	if (ret) {
+		dev_err(dev, "Failed to initialize component: %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	ret = component_add(dev, &mtk_disp_ovl_component_ops);
+	if (ret)
+		dev_err(dev, "Failed to add component: %d\n", ret);
+
+	return ret;
+}
+
+static int mtk_disp_ovl_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
+	return 0;
+}
+
+static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
+	{ .compatible = "mediatek,mt8173-disp-ovl", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
+
+struct platform_driver mtk_disp_ovl_driver = {
+	.probe		= mtk_disp_ovl_probe,
+	.remove		= mtk_disp_ovl_remove,
+	.driver		= {
+		.name	= "mediatek-disp-ovl",
+		.owner	= THIS_MODULE,
+		.of_match_table = mtk_disp_ovl_driver_dt_match,
+	},
+};
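
A small worked example of the register packing performed by
mtk_ovl_layer_config() above, for a hypothetical 256x128 layer placed at
(x, y) = (64, 32):

	unsigned int offset   = (32 << 16) | 64;	/* DISP_REG_OVL_OFFSET(idx)   = 0x00200040 */
	unsigned int src_size = (128 << 16) | 256;	/* DISP_REG_OVL_SRC_SIZE(idx) = 0x00800100 */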
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
new file mode 100644
index 0000000..5fb80cb
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_REG_RDMA_INT_ENABLE		0x0000
+#define DISP_REG_RDMA_INT_STATUS		0x0004
+#define RDMA_TARGET_LINE_INT				BIT(5)
+#define RDMA_FIFO_UNDERFLOW_INT				BIT(4)
+#define RDMA_EOF_ABNORMAL_INT				BIT(3)
+#define RDMA_FRAME_END_INT				BIT(2)
+#define RDMA_FRAME_START_INT				BIT(1)
+#define RDMA_REG_UPDATE_INT				BIT(0)
+#define DISP_REG_RDMA_GLOBAL_CON		0x0010
+#define RDMA_ENGINE_EN					BIT(0)
+#define DISP_REG_RDMA_SIZE_CON_0		0x0014
+#define DISP_REG_RDMA_SIZE_CON_1		0x0018
+#define DISP_REG_RDMA_TARGET_LINE		0x001c
+#define DISP_REG_RDMA_FIFO_CON			0x0040
+#define RDMA_FIFO_UNDERFLOW_EN				BIT(31)
+#define RDMA_FIFO_PSEUDO_SIZE(bytes)			(((bytes) / 16) << 16)
+#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes)		((bytes) / 16)
+
+/**
+ * struct mtk_disp_rdma - DISP_RDMA driver structure
+ * @ddp_comp: structure containing type enum and hardware resources
+ * @crtc: associated crtc to report irq events to
+ */
+struct mtk_disp_rdma {
+	struct mtk_ddp_comp		ddp_comp;
+	struct drm_crtc			*crtc;
+};
+
+static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
+{
+	struct mtk_disp_rdma *priv = dev_id;
+	struct mtk_ddp_comp *rdma = &priv->ddp_comp;
+
+	/* Clear frame completion interrupt */
+	writel(0x0, rdma->regs + DISP_REG_RDMA_INT_STATUS);
+
+	if (!priv->crtc)
+		return IRQ_NONE;
+
+	mtk_crtc_ddp_irq(priv->crtc, rdma);
+
+	return IRQ_HANDLED;
+}
+
+static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
+			     unsigned int mask, unsigned int val)
+{
+	unsigned int tmp = readl(comp->regs + reg);
+
+	tmp = (tmp & ~mask) | (val & mask);
+	writel(tmp, comp->regs + reg);
+}
+
+static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
+				   struct drm_crtc *crtc)
+{
+	struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
+						  ddp_comp);
+
+	priv->crtc = crtc;
+	rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
+			 RDMA_FRAME_END_INT);
+}
+
+static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
+{
+	struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
+						  ddp_comp);
+
+	priv->crtc = NULL;
+	rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+}
+
+static void mtk_rdma_start(struct mtk_ddp_comp *comp)
+{
+	rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
+			 RDMA_ENGINE_EN);
+}
+
+static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
+{
+	rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
+}
+
+static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
+			    unsigned int height, unsigned int vrefresh)
+{
+	unsigned int threshold;
+	unsigned int reg;
+
+	rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
+	rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
+
+	/*
+	 * Enable FIFO underflow since DSI and DPI can't be blocked.
+	 * Keep the FIFO pseudo size reset default of 8 KiB. Set the
+	 * output threshold to 6 microseconds with 7/6 overhead to
+	 * account for blanking, and with a pixel depth of 4 bytes:
+	 */
+	threshold = width * height * vrefresh * 4 * 7 / 1000000;
+	reg = RDMA_FIFO_UNDERFLOW_EN |
+	      RDMA_FIFO_PSEUDO_SIZE(SZ_8K) |
+	      RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
+	writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
+}
+
+static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
+	.config = mtk_rdma_config,
+	.start = mtk_rdma_start,
+	.stop = mtk_rdma_stop,
+	.enable_vblank = mtk_rdma_enable_vblank,
+	.disable_vblank = mtk_rdma_disable_vblank,
+};
+
+static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
+			      void *data)
+{
+	struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
+	if (ret < 0) {
+		dev_err(dev, "Failed to register component %s: %d\n",
+			dev->of_node->full_name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
+				 void *data)
+{
+	struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+
+	mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
+}
+
+static const struct component_ops mtk_disp_rdma_component_ops = {
+	.bind	= mtk_disp_rdma_bind,
+	.unbind = mtk_disp_rdma_unbind,
+};
+
+static int mtk_disp_rdma_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_disp_rdma *priv;
+	int comp_id;
+	int irq;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_RDMA);
+	if (comp_id < 0) {
+		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+		return comp_id;
+	}
+
+	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
+				&mtk_disp_rdma_funcs);
+	if (ret) {
+		dev_err(dev, "Failed to initialize component: %d\n", ret);
+		return ret;
+	}
+
+	/* Disable and clear pending interrupts */
+	writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_ENABLE);
+	writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_STATUS);
+
+	ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
+			       IRQF_TRIGGER_NONE, dev_name(dev), priv);
+	if (ret < 0) {
+		dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	ret = component_add(dev, &mtk_disp_rdma_component_ops);
+	if (ret)
+		dev_err(dev, "Failed to add component: %d\n", ret);
+
+	return ret;
+}
+
+static int mtk_disp_rdma_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
+
+	return 0;
+}
+
+static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
+	{ .compatible = "mediatek,mt8173-disp-rdma", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
+
+struct platform_driver mtk_disp_rdma_driver = {
+	.probe		= mtk_disp_rdma_probe,
+	.remove		= mtk_disp_rdma_remove,
+	.driver		= {
+		.name	= "mediatek-disp-rdma",
+		.owner	= THIS_MODULE,
+		.of_match_table = mtk_disp_rdma_driver_dt_match,
+	},
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
new file mode 100644
index 0000000..0186e50
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/kernel.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+
+#include "mtk_dpi_regs.h"
+#include "mtk_drm_ddp_comp.h"
+
+enum mtk_dpi_out_bit_num {
+	MTK_DPI_OUT_BIT_NUM_8BITS,
+	MTK_DPI_OUT_BIT_NUM_10BITS,
+	MTK_DPI_OUT_BIT_NUM_12BITS,
+	MTK_DPI_OUT_BIT_NUM_16BITS
+};
+
+enum mtk_dpi_out_yc_map {
+	MTK_DPI_OUT_YC_MAP_RGB,
+	MTK_DPI_OUT_YC_MAP_CYCY,
+	MTK_DPI_OUT_YC_MAP_YCYC,
+	MTK_DPI_OUT_YC_MAP_CY,
+	MTK_DPI_OUT_YC_MAP_YC
+};
+
+enum mtk_dpi_out_channel_swap {
+	MTK_DPI_OUT_CHANNEL_SWAP_RGB,
+	MTK_DPI_OUT_CHANNEL_SWAP_GBR,
+	MTK_DPI_OUT_CHANNEL_SWAP_BRG,
+	MTK_DPI_OUT_CHANNEL_SWAP_RBG,
+	MTK_DPI_OUT_CHANNEL_SWAP_GRB,
+	MTK_DPI_OUT_CHANNEL_SWAP_BGR
+};
+
+enum mtk_dpi_out_color_format {
+	MTK_DPI_COLOR_FORMAT_RGB,
+	MTK_DPI_COLOR_FORMAT_RGB_FULL,
+	MTK_DPI_COLOR_FORMAT_YCBCR_444,
+	MTK_DPI_COLOR_FORMAT_YCBCR_422,
+	MTK_DPI_COLOR_FORMAT_XV_YCC,
+	MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL,
+	MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL
+};
+
+struct mtk_dpi {
+	struct mtk_ddp_comp ddp_comp;
+	struct drm_encoder encoder;
+	void __iomem *regs;
+	struct device *dev;
+	struct clk *engine_clk;
+	struct clk *pixel_clk;
+	struct clk *tvd_clk;
+	int irq;
+	struct drm_display_mode mode;
+	enum mtk_dpi_out_color_format color_format;
+	enum mtk_dpi_out_yc_map yc_map;
+	enum mtk_dpi_out_bit_num bit_num;
+	enum mtk_dpi_out_channel_swap channel_swap;
+	bool power_sta;
+	u8 power_ctl;
+};
+
+static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
+{
+	return container_of(e, struct mtk_dpi, encoder);
+}
+
+enum mtk_dpi_polarity {
+	MTK_DPI_POLARITY_RISING,
+	MTK_DPI_POLARITY_FALLING,
+};
+
+enum mtk_dpi_power_ctl {
+	DPI_POWER_START = BIT(0),
+	DPI_POWER_ENABLE = BIT(1),
+};
+
+struct mtk_dpi_polarities {
+	enum mtk_dpi_polarity de_pol;
+	enum mtk_dpi_polarity ck_pol;
+	enum mtk_dpi_polarity hsync_pol;
+	enum mtk_dpi_polarity vsync_pol;
+};
+
+struct mtk_dpi_sync_param {
+	u32 sync_width;
+	u32 front_porch;
+	u32 back_porch;
+	bool shift_half_line;
+};
+
+struct mtk_dpi_yc_limit {
+	u16 y_top;
+	u16 y_bottom;
+	u16 c_top;
+	u16 c_bottom;
+};
+
+static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
+{
+	u32 tmp = readl(dpi->regs + offset) & ~mask;
+
+	tmp |= (val & mask);
+	writel(tmp, dpi->regs + offset);
+}
+
+static void mtk_dpi_sw_reset(struct mtk_dpi *dpi, bool reset)
+{
+	mtk_dpi_mask(dpi, DPI_RET, reset ? RST : 0, RST);
+}
+
+static void mtk_dpi_enable(struct mtk_dpi *dpi)
+{
+	mtk_dpi_mask(dpi, DPI_EN, EN, EN);
+}
+
+static void mtk_dpi_disable(struct mtk_dpi *dpi)
+{
+	mtk_dpi_mask(dpi, DPI_EN, 0, EN);
+}
+
+static void mtk_dpi_config_hsync(struct mtk_dpi *dpi,
+				 struct mtk_dpi_sync_param *sync)
+{
+	mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH,
+		     sync->sync_width << HPW, HPW_MASK);
+	mtk_dpi_mask(dpi, DPI_TGEN_HPORCH,
+		     sync->back_porch << HBP, HBP_MASK);
+	mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->front_porch << HFP,
+		     HFP_MASK);
+}
+
+static void mtk_dpi_config_vsync(struct mtk_dpi *dpi,
+				 struct mtk_dpi_sync_param *sync,
+				 u32 width_addr, u32 porch_addr)
+{
+	mtk_dpi_mask(dpi, width_addr,
+		     sync->sync_width << VSYNC_WIDTH_SHIFT,
+		     VSYNC_WIDTH_MASK);
+	mtk_dpi_mask(dpi, width_addr,
+		     sync->shift_half_line << VSYNC_HALF_LINE_SHIFT,
+		     VSYNC_HALF_LINE_MASK);
+	mtk_dpi_mask(dpi, porch_addr,
+		     sync->back_porch << VSYNC_BACK_PORCH_SHIFT,
+		     VSYNC_BACK_PORCH_MASK);
+	mtk_dpi_mask(dpi, porch_addr,
+		     sync->front_porch << VSYNC_FRONT_PORCH_SHIFT,
+		     VSYNC_FRONT_PORCH_MASK);
+}
+
+static void mtk_dpi_config_vsync_lodd(struct mtk_dpi *dpi,
+				      struct mtk_dpi_sync_param *sync)
+{
+	mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH, DPI_TGEN_VPORCH);
+}
+
+static void mtk_dpi_config_vsync_leven(struct mtk_dpi *dpi,
+				       struct mtk_dpi_sync_param *sync)
+{
+	mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_LEVEN,
+			     DPI_TGEN_VPORCH_LEVEN);
+}
+
+static void mtk_dpi_config_vsync_rodd(struct mtk_dpi *dpi,
+				      struct mtk_dpi_sync_param *sync)
+{
+	mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_RODD,
+			     DPI_TGEN_VPORCH_RODD);
+}
+
+static void mtk_dpi_config_vsync_reven(struct mtk_dpi *dpi,
+				       struct mtk_dpi_sync_param *sync)
+{
+	mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_REVEN,
+			     DPI_TGEN_VPORCH_REVEN);
+}
+
+static void mtk_dpi_config_pol(struct mtk_dpi *dpi,
+			       struct mtk_dpi_polarities *dpi_pol)
+{
+	unsigned int pol;
+
+	pol = (dpi_pol->ck_pol == MTK_DPI_POLARITY_RISING ? 0 : CK_POL) |
+	      (dpi_pol->de_pol == MTK_DPI_POLARITY_RISING ? 0 : DE_POL) |
+	      (dpi_pol->hsync_pol == MTK_DPI_POLARITY_RISING ? 0 : HSYNC_POL) |
+	      (dpi_pol->vsync_pol == MTK_DPI_POLARITY_RISING ? 0 : VSYNC_POL);
+	mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, pol,
+		     CK_POL | DE_POL | HSYNC_POL | VSYNC_POL);
+}
+
+static void mtk_dpi_config_3d(struct mtk_dpi *dpi, bool en_3d)
+{
+	mtk_dpi_mask(dpi, DPI_CON, en_3d ? TDFP_EN : 0, TDFP_EN);
+}
+
+static void mtk_dpi_config_interface(struct mtk_dpi *dpi, bool inter)
+{
+	mtk_dpi_mask(dpi, DPI_CON, inter ? INTL_EN : 0, INTL_EN);
+}
+
+static void mtk_dpi_config_fb_size(struct mtk_dpi *dpi, u32 width, u32 height)
+{
+	mtk_dpi_mask(dpi, DPI_SIZE, width << HSIZE, HSIZE_MASK);
+	mtk_dpi_mask(dpi, DPI_SIZE, height << VSIZE, VSIZE_MASK);
+}
+
+static void mtk_dpi_config_channel_limit(struct mtk_dpi *dpi,
+					 struct mtk_dpi_yc_limit *limit)
+{
+	mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit->y_bottom << Y_LIMINT_BOT,
+		     Y_LIMINT_BOT_MASK);
+	mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit->y_top << Y_LIMINT_TOP,
+		     Y_LIMINT_TOP_MASK);
+	mtk_dpi_mask(dpi, DPI_C_LIMIT, limit->c_bottom << C_LIMIT_BOT,
+		     C_LIMIT_BOT_MASK);
+	mtk_dpi_mask(dpi, DPI_C_LIMIT, limit->c_top << C_LIMIT_TOP,
+		     C_LIMIT_TOP_MASK);
+}
+
+static void mtk_dpi_config_bit_num(struct mtk_dpi *dpi,
+				   enum mtk_dpi_out_bit_num num)
+{
+	u32 val;
+
+	switch (num) {
+	case MTK_DPI_OUT_BIT_NUM_8BITS:
+		val = OUT_BIT_8;
+		break;
+	case MTK_DPI_OUT_BIT_NUM_10BITS:
+		val = OUT_BIT_10;
+		break;
+	case MTK_DPI_OUT_BIT_NUM_12BITS:
+		val = OUT_BIT_12;
+		break;
+	case MTK_DPI_OUT_BIT_NUM_16BITS:
+		val = OUT_BIT_16;
+		break;
+	default:
+		val = OUT_BIT_8;
+		break;
+	}
+	mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << OUT_BIT,
+		     OUT_BIT_MASK);
+}
+
+static void mtk_dpi_config_yc_map(struct mtk_dpi *dpi,
+				  enum mtk_dpi_out_yc_map map)
+{
+	u32 val;
+
+	switch (map) {
+	case MTK_DPI_OUT_YC_MAP_RGB:
+		val = YC_MAP_RGB;
+		break;
+	case MTK_DPI_OUT_YC_MAP_CYCY:
+		val = YC_MAP_CYCY;
+		break;
+	case MTK_DPI_OUT_YC_MAP_YCYC:
+		val = YC_MAP_YCYC;
+		break;
+	case MTK_DPI_OUT_YC_MAP_CY:
+		val = YC_MAP_CY;
+		break;
+	case MTK_DPI_OUT_YC_MAP_YC:
+		val = YC_MAP_YC;
+		break;
+	default:
+		val = YC_MAP_RGB;
+		break;
+	}
+
+	mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << YC_MAP, YC_MAP_MASK);
+}
+
+static void mtk_dpi_config_channel_swap(struct mtk_dpi *dpi,
+					enum mtk_dpi_out_channel_swap swap)
+{
+	u32 val;
+
+	switch (swap) {
+	case MTK_DPI_OUT_CHANNEL_SWAP_RGB:
+		val = SWAP_RGB;
+		break;
+	case MTK_DPI_OUT_CHANNEL_SWAP_GBR:
+		val = SWAP_GBR;
+		break;
+	case MTK_DPI_OUT_CHANNEL_SWAP_BRG:
+		val = SWAP_BRG;
+		break;
+	case MTK_DPI_OUT_CHANNEL_SWAP_RBG:
+		val = SWAP_RBG;
+		break;
+	case MTK_DPI_OUT_CHANNEL_SWAP_GRB:
+		val = SWAP_GRB;
+		break;
+	case MTK_DPI_OUT_CHANNEL_SWAP_BGR:
+		val = SWAP_BGR;
+		break;
+	default:
+		val = SWAP_RGB;
+		break;
+	}
+
+	mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << CH_SWAP, CH_SWAP_MASK);
+}
+
+static void mtk_dpi_config_yuv422_enable(struct mtk_dpi *dpi, bool enable)
+{
+	mtk_dpi_mask(dpi, DPI_CON, enable ? YUV422_EN : 0, YUV422_EN);
+}
+
+static void mtk_dpi_config_csc_enable(struct mtk_dpi *dpi, bool enable)
+{
+	mtk_dpi_mask(dpi, DPI_CON, enable ? CSC_ENABLE : 0, CSC_ENABLE);
+}
+
+static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
+{
+	mtk_dpi_mask(dpi, DPI_CON, enable ? IN_RB_SWAP : 0, IN_RB_SWAP);
+}
+
+static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
+{
+	mtk_dpi_mask(dpi, DPI_H_FRE_CON, H_FRE_2N, H_FRE_2N);
+}
+
+static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
+					enum mtk_dpi_out_color_format format)
+{
+	if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_444) ||
+	    (format == MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL)) {
+		mtk_dpi_config_yuv422_enable(dpi, false);
+		mtk_dpi_config_csc_enable(dpi, true);
+		mtk_dpi_config_swap_input(dpi, false);
+		mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_BGR);
+	} else if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_422) ||
+		   (format == MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL)) {
+		mtk_dpi_config_yuv422_enable(dpi, true);
+		mtk_dpi_config_csc_enable(dpi, true);
+		mtk_dpi_config_swap_input(dpi, true);
+		mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+	} else {
+		mtk_dpi_config_yuv422_enable(dpi, false);
+		mtk_dpi_config_csc_enable(dpi, false);
+		mtk_dpi_config_swap_input(dpi, false);
+		mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+	}
+}
+
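+/*
+ * Clarifying note (added, not in the original source): power_ctl tracks two
+ * independent requests, DPI_POWER_START from the ddp start/stop callbacks
+ * and DPI_POWER_ENABLE from the encoder enable/disable callbacks.  The
+ * hardware is brought up when either bit is set and shut down only once
+ * both are cleared, with power_sta guarding against redundant transitions.
+ */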
+static void mtk_dpi_power_off(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+{
+	dpi->power_ctl &= ~pctl;
+
+	if ((dpi->power_ctl & DPI_POWER_START) ||
+	    (dpi->power_ctl & DPI_POWER_ENABLE))
+		return;
+
+	if (!dpi->power_sta)
+		return;
+
+	mtk_dpi_disable(dpi);
+	clk_disable_unprepare(dpi->pixel_clk);
+	clk_disable_unprepare(dpi->engine_clk);
+	dpi->power_sta = false;
+}
+
+static int mtk_dpi_power_on(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+{
+	int ret;
+
+	dpi->power_ctl |= pctl;
+
+	if (!(dpi->power_ctl & DPI_POWER_START) &&
+	    !(dpi->power_ctl & DPI_POWER_ENABLE))
+		return 0;
+
+	if (dpi->power_sta)
+		return 0;
+
+	ret = clk_prepare_enable(dpi->engine_clk);
+	if (ret) {
+		dev_err(dpi->dev, "Failed to enable engine clock: %d\n", ret);
+		goto err_eng;
+	}
+
+	ret = clk_prepare_enable(dpi->pixel_clk);
+	if (ret) {
+		dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
+		goto err_pixel;
+	}
+
+	mtk_dpi_enable(dpi);
+	dpi->power_sta = true;
+	return 0;
+
+err_pixel:
+	clk_disable_unprepare(dpi->engine_clk);
+err_eng:
+	dpi->power_ctl &= ~pctl;
+	return ret;
+}
+
+static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
+				    struct drm_display_mode *mode)
+{
+	struct mtk_dpi_yc_limit limit;
+	struct mtk_dpi_polarities dpi_pol;
+	struct mtk_dpi_sync_param hsync;
+	struct mtk_dpi_sync_param vsync_lodd = { 0 };
+	struct mtk_dpi_sync_param vsync_leven = { 0 };
+	struct mtk_dpi_sync_param vsync_rodd = { 0 };
+	struct mtk_dpi_sync_param vsync_reven = { 0 };
+	unsigned long pix_rate;
+	unsigned long pll_rate;
+	unsigned int factor;
+
+	pix_rate = 1000UL * mode->clock;
+	if (mode->clock <= 74000)
+		factor = 8 * 3;
+	else
+		factor = 4 * 3;
+	pll_rate = pix_rate * factor;
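+	/*
+	 * Illustrative example (values assumed, not from the original code):
+	 * mode->clock is in kHz, so a 148500 kHz (1080p60) mode lies above
+	 * the 74000 kHz threshold, giving factor = 4 * 3 = 12 and a
+	 * requested PLL rate of 148.5 MHz * 12 = 1.782 GHz before the
+	 * actual rates are read back below.
+	 */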
+
+	dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
+		pll_rate, pix_rate);
+
+	clk_set_rate(dpi->tvd_clk, pll_rate);
+	pll_rate = clk_get_rate(dpi->tvd_clk);
+
+	pix_rate = pll_rate / factor;
+	clk_set_rate(dpi->pixel_clk, pix_rate);
+	pix_rate = clk_get_rate(dpi->pixel_clk);
+
+	dev_dbg(dpi->dev, "Got  PLL %lu Hz, pixel clock %lu Hz\n",
+		pll_rate, pix_rate);
+
+	limit.c_bottom = 0x0010;
+	limit.c_top = 0x0FE0;
+	limit.y_bottom = 0x0010;
+	limit.y_top = 0x0FE0;
+
+	dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
+	dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
+	dpi_pol.hsync_pol = mode->flags & DRM_MODE_FLAG_PHSYNC ?
+			    MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
+	dpi_pol.vsync_pol = mode->flags & DRM_MODE_FLAG_PVSYNC ?
+			    MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
+
+	hsync.sync_width = mode->hsync_end - mode->hsync_start;
+	hsync.back_porch = mode->htotal - mode->hsync_end;
+	hsync.front_porch = mode->hsync_start - mode->hdisplay;
+	hsync.shift_half_line = false;
+
+	vsync_lodd.sync_width = mode->vsync_end - mode->vsync_start;
+	vsync_lodd.back_porch = mode->vtotal - mode->vsync_end;
+	vsync_lodd.front_porch = mode->vsync_start - mode->vdisplay;
+	vsync_lodd.shift_half_line = false;
+
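+	/*
+	 * Clarifying note (added): lodd/leven/rodd/reven presumably stand
+	 * for the left/right-eye odd/even fields.  Interlaced output shifts
+	 * the even field's vsync by half a line, which is why
+	 * shift_half_line is set on the *_leven/*_reven parameters below.
+	 */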
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+	    mode->flags & DRM_MODE_FLAG_3D_MASK) {
+		vsync_leven = vsync_lodd;
+		vsync_rodd = vsync_lodd;
+		vsync_reven = vsync_lodd;
+		vsync_leven.shift_half_line = true;
+		vsync_reven.shift_half_line = true;
+	} else if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+		   !(mode->flags & DRM_MODE_FLAG_3D_MASK)) {
+		vsync_leven = vsync_lodd;
+		vsync_leven.shift_half_line = true;
+	} else if (!(mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+		   mode->flags & DRM_MODE_FLAG_3D_MASK) {
+		vsync_rodd = vsync_lodd;
+	}
+	mtk_dpi_sw_reset(dpi, true);
+	mtk_dpi_config_pol(dpi, &dpi_pol);
+
+	mtk_dpi_config_hsync(dpi, &hsync);
+	mtk_dpi_config_vsync_lodd(dpi, &vsync_lodd);
+	mtk_dpi_config_vsync_rodd(dpi, &vsync_rodd);
+	mtk_dpi_config_vsync_leven(dpi, &vsync_leven);
+	mtk_dpi_config_vsync_reven(dpi, &vsync_reven);
+
+	mtk_dpi_config_3d(dpi, !!(mode->flags & DRM_MODE_FLAG_3D_MASK));
+	mtk_dpi_config_interface(dpi, !!(mode->flags &
+					 DRM_MODE_FLAG_INTERLACE));
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		mtk_dpi_config_fb_size(dpi, mode->hdisplay, mode->vdisplay / 2);
+	else
+		mtk_dpi_config_fb_size(dpi, mode->hdisplay, mode->vdisplay);
+
+	mtk_dpi_config_channel_limit(dpi, &limit);
+	mtk_dpi_config_bit_num(dpi, dpi->bit_num);
+	mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
+	mtk_dpi_config_yc_map(dpi, dpi->yc_map);
+	mtk_dpi_config_color_format(dpi, dpi->color_format);
+	mtk_dpi_config_2n_h_fre(dpi);
+	mtk_dpi_sw_reset(dpi, false);
+
+	return 0;
+}
+
+static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
+	.destroy = mtk_dpi_encoder_destroy,
+};
+
+static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+	drm_mode_copy(&dpi->mode, adjusted_mode);
+}
+
+static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+	mtk_dpi_power_off(dpi, DPI_POWER_ENABLE);
+}
+
+static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+	mtk_dpi_power_on(dpi, DPI_POWER_ENABLE);
+	mtk_dpi_set_display_mode(dpi, &dpi->mode);
+}
+
+static int mtk_dpi_atomic_check(struct drm_encoder *encoder,
+				struct drm_crtc_state *crtc_state,
+				struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = {
+	.mode_fixup = mtk_dpi_encoder_mode_fixup,
+	.mode_set = mtk_dpi_encoder_mode_set,
+	.disable = mtk_dpi_encoder_disable,
+	.enable = mtk_dpi_encoder_enable,
+	.atomic_check = mtk_dpi_atomic_check,
+};
+
+static void mtk_dpi_start(struct mtk_ddp_comp *comp)
+{
+	struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+
+	mtk_dpi_power_on(dpi, DPI_POWER_START);
+}
+
+static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
+{
+	struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+
+	mtk_dpi_power_off(dpi, DPI_POWER_START);
+}
+
+static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
+	.start = mtk_dpi_start,
+	.stop = mtk_dpi_stop,
+};
+
+static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct mtk_dpi *dpi = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	ret = mtk_ddp_comp_register(drm_dev, &dpi->ddp_comp);
+	if (ret < 0) {
+		dev_err(dev, "Failed to register component %s: %d\n",
+			dev->of_node->full_name, ret);
+		return ret;
+	}
+
+	ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS, NULL);
+	if (ret) {
+		dev_err(dev, "Failed to initialize encoder: %d\n", ret);
+		goto err_unregister;
+	}
+	drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs);
+
+	/* Currently DPI0 is fixed to be driven by OVL1 */
+	dpi->encoder.possible_crtcs = BIT(1);
+
+	dpi->encoder.bridge->encoder = &dpi->encoder;
+	ret = drm_bridge_attach(dpi->encoder.dev, dpi->encoder.bridge);
+	if (ret) {
+		dev_err(dev, "Failed to attach bridge: %d\n", ret);
+		goto err_cleanup;
+	}
+
+	dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
+	dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+	dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
+	dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+
+	return 0;
+
+err_cleanup:
+	drm_encoder_cleanup(&dpi->encoder);
+err_unregister:
+	mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
+	return ret;
+}
+
+static void mtk_dpi_unbind(struct device *dev, struct device *master,
+			   void *data)
+{
+	struct mtk_dpi *dpi = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+
+	drm_encoder_cleanup(&dpi->encoder);
+	mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
+}
+
+static const struct component_ops mtk_dpi_component_ops = {
+	.bind = mtk_dpi_bind,
+	.unbind = mtk_dpi_unbind,
+};
+
+static int mtk_dpi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_dpi *dpi;
+	struct resource *mem;
+	struct device_node *ep, *bridge_node = NULL;
+	int comp_id;
+	int ret;
+
+	dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+	if (!dpi)
+		return -ENOMEM;
+
+	dpi->dev = dev;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dpi->regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(dpi->regs)) {
+		ret = PTR_ERR(dpi->regs);
+		dev_err(dev, "Failed to ioremap mem resource: %d\n", ret);
+		return ret;
+	}
+
+	dpi->engine_clk = devm_clk_get(dev, "engine");
+	if (IS_ERR(dpi->engine_clk)) {
+		ret = PTR_ERR(dpi->engine_clk);
+		dev_err(dev, "Failed to get engine clock: %d\n", ret);
+		return ret;
+	}
+
+	dpi->pixel_clk = devm_clk_get(dev, "pixel");
+	if (IS_ERR(dpi->pixel_clk)) {
+		ret = PTR_ERR(dpi->pixel_clk);
+		dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+		return ret;
+	}
+
+	dpi->tvd_clk = devm_clk_get(dev, "pll");
+	if (IS_ERR(dpi->tvd_clk)) {
+		ret = PTR_ERR(dpi->tvd_clk);
+		dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+		return ret;
+	}
+
+	dpi->irq = platform_get_irq(pdev, 0);
+	if (dpi->irq <= 0) {
+		dev_err(dev, "Failed to get irq: %d\n", dpi->irq);
+		return -EINVAL;
+	}
+
+	ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (ep) {
+		bridge_node = of_graph_get_remote_port_parent(ep);
+		of_node_put(ep);
+	}
+	if (!bridge_node) {
+		dev_err(dev, "Failed to find bridge node\n");
+		return -ENODEV;
+	}
+
+	dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name);
+
+	dpi->encoder.bridge = of_drm_find_bridge(bridge_node);
+	of_node_put(bridge_node);
+	if (!dpi->encoder.bridge)
+		return -EPROBE_DEFER;
+
+	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
+	if (comp_id < 0) {
+		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+		return comp_id;
+	}
+
+	ret = mtk_ddp_comp_init(dev, dev->of_node, &dpi->ddp_comp, comp_id,
+				&mtk_dpi_funcs);
+	if (ret) {
+		dev_err(dev, "Failed to initialize component: %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, dpi);
+
+	ret = component_add(dev, &mtk_dpi_component_ops);
+	if (ret) {
+		dev_err(dev, "Failed to add component: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mtk_dpi_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &mtk_dpi_component_ops);
+
+	return 0;
+}
+
+static const struct of_device_id mtk_dpi_of_ids[] = {
+	{ .compatible = "mediatek,mt8173-dpi", },
+	{}
+};
+
+struct platform_driver mtk_dpi_driver = {
+	.probe = mtk_dpi_probe,
+	.remove = mtk_dpi_remove,
+	.driver = {
+		.name = "mediatek-dpi",
+		.of_match_table = mtk_dpi_of_ids,
+	},
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
new file mode 100644
index 0000000..4b6ad47
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MTK_DPI_REGS_H
+#define __MTK_DPI_REGS_H
+
+#define DPI_EN			0x00
+#define EN				BIT(0)
+
+#define DPI_RET			0x04
+#define RST				BIT(0)
+
+#define DPI_INTEN		0x08
+#define INT_VSYNC_EN			BIT(0)
+#define INT_VDE_EN			BIT(1)
+#define INT_UNDERFLOW_EN		BIT(2)
+
+#define DPI_INTSTA		0x0C
+#define INT_VSYNC_STA			BIT(0)
+#define INT_VDE_STA			BIT(1)
+#define INT_UNDERFLOW_STA		BIT(2)
+
+#define DPI_CON			0x10
+#define BG_ENABLE			BIT(0)
+#define IN_RB_SWAP			BIT(1)
+#define INTL_EN				BIT(2)
+#define TDFP_EN				BIT(3)
+#define CLPF_EN				BIT(4)
+#define YUV422_EN			BIT(5)
+#define CSC_ENABLE			BIT(6)
+#define R601_SEL			BIT(7)
+#define EMBSYNC_EN			BIT(8)
+#define VS_LODD_EN			BIT(16)
+#define VS_LEVEN_EN			BIT(17)
+#define VS_RODD_EN			BIT(18)
+#define VS_REVEN			BIT(19)
+#define FAKE_DE_LODD			BIT(20)
+#define FAKE_DE_LEVEN			BIT(21)
+#define FAKE_DE_RODD			BIT(22)
+#define FAKE_DE_REVEN			BIT(23)
+
+#define DPI_OUTPUT_SETTING	0x14
+#define CH_SWAP				0
+#define CH_SWAP_MASK			(0x7 << 0)
+#define SWAP_RGB			0x00
+#define SWAP_GBR			0x01
+#define SWAP_BRG			0x02
+#define SWAP_RBG			0x03
+#define SWAP_GRB			0x04
+#define SWAP_BGR			0x05
+#define BIT_SWAP			BIT(3)
+#define B_MASK				BIT(4)
+#define G_MASK				BIT(5)
+#define R_MASK				BIT(6)
+#define DE_MASK				BIT(8)
+#define HS_MASK				BIT(9)
+#define VS_MASK				BIT(10)
+#define DE_POL				BIT(12)
+#define HSYNC_POL			BIT(13)
+#define VSYNC_POL			BIT(14)
+#define CK_POL				BIT(15)
+#define OEN_OFF				BIT(16)
+#define EDGE_SEL			BIT(17)
+#define OUT_BIT				18
+#define OUT_BIT_MASK			(0x3 << 18)
+#define OUT_BIT_8			0x00
+#define OUT_BIT_10			0x01
+#define OUT_BIT_12			0x02
+#define OUT_BIT_16			0x03
+#define YC_MAP				20
+#define YC_MAP_MASK			(0x7 << 20)
+#define YC_MAP_RGB			0x00
+#define YC_MAP_CYCY			0x04
+#define YC_MAP_YCYC			0x05
+#define YC_MAP_CY			0x06
+#define YC_MAP_YC			0x07
+
+#define DPI_SIZE		0x18
+#define HSIZE				0
+#define HSIZE_MASK			(0x1FFF << 0)
+#define VSIZE				16
+#define VSIZE_MASK			(0x1FFF << 16)
+
+#define DPI_DDR_SETTING		0x1C
+#define DDR_EN				BIT(0)
+#define DDDR_SEL			BIT(1)
+#define DDR_4PHASE			BIT(2)
+#define DDR_WIDTH			(0x3 << 4)
+#define DDR_PAD_MODE			(0x1 << 8)
+
+#define DPI_TGEN_HWIDTH		0x20
+#define HPW				0
+#define HPW_MASK			(0xFFF << 0)
+
+#define DPI_TGEN_HPORCH		0x24
+#define HBP				0
+#define HBP_MASK			(0xFFF << 0)
+#define HFP				16
+#define HFP_MASK			(0xFFF << 16)
+
+#define DPI_TGEN_VWIDTH		0x28
+#define DPI_TGEN_VPORCH		0x2C
+
+#define VSYNC_WIDTH_SHIFT		0
+#define VSYNC_WIDTH_MASK		(0xFFF << 0)
+#define VSYNC_HALF_LINE_SHIFT		16
+#define VSYNC_HALF_LINE_MASK		BIT(16)
+#define VSYNC_BACK_PORCH_SHIFT		0
+#define VSYNC_BACK_PORCH_MASK		(0xFFF << 0)
+#define VSYNC_FRONT_PORCH_SHIFT		16
+#define VSYNC_FRONT_PORCH_MASK		(0xFFF << 16)
+
+#define DPI_BG_HCNTL		0x30
+#define BG_RIGHT			(0x1FFF << 0)
+#define BG_LEFT				(0x1FFF << 16)
+
+#define DPI_BG_VCNTL		0x34
+#define BG_BOT				(0x1FFF << 0)
+#define BG_TOP				(0x1FFF << 16)
+
+#define DPI_BG_COLOR		0x38
+#define BG_B				(0xF << 0)
+#define BG_G				(0xF << 8)
+#define BG_R				(0xF << 16)
+
+#define DPI_FIFO_CTL		0x3C
+#define FIFO_VALID_SET			(0x1F << 0)
+#define FIFO_RST_SEL			(0x1 << 8)
+
+#define DPI_STATUS		0x40
+#define VCOUNTER			(0x1FFF << 0)
+#define DPI_BUSY			BIT(16)
+#define OUTEN				BIT(17)
+#define FIELD				BIT(20)
+#define TDLR				BIT(21)
+
+#define DPI_TMODE		0x44
+#define DPI_OEN_ON			BIT(0)
+
+#define DPI_CHECKSUM		0x48
+#define DPI_CHECKSUM_MASK		(0xFFFFFF << 0)
+#define DPI_CHECKSUM_READY		BIT(30)
+#define DPI_CHECKSUM_EN			BIT(31)
+
+#define DPI_DUMMY		0x50
+#define DPI_DUMMY_MASK			(0xFFFFFFFF << 0)
+
+#define DPI_TGEN_VWIDTH_LEVEN	0x68
+#define DPI_TGEN_VPORCH_LEVEN	0x6C
+#define DPI_TGEN_VWIDTH_RODD	0x70
+#define DPI_TGEN_VPORCH_RODD	0x74
+#define DPI_TGEN_VWIDTH_REVEN	0x78
+#define DPI_TGEN_VPORCH_REVEN	0x7C
+
+#define DPI_ESAV_VTIMING_LODD	0x80
+#define ESAV_VOFST_LODD			(0xFFF << 0)
+#define ESAV_VWID_LODD			(0xFFF << 16)
+
+#define DPI_ESAV_VTIMING_LEVEN	0x84
+#define ESAV_VOFST_LEVEN		(0xFFF << 0)
+#define ESAV_VWID_LEVEN			(0xFFF << 16)
+
+#define DPI_ESAV_VTIMING_RODD	0x88
+#define ESAV_VOFST_RODD			(0xFFF << 0)
+#define ESAV_VWID_RODD			(0xFFF << 16)
+
+#define DPI_ESAV_VTIMING_REVEN	0x8C
+#define ESAV_VOFST_REVEN		(0xFFF << 0)
+#define ESAV_VWID_REVEN			(0xFFF << 16)
+
+#define DPI_ESAV_FTIMING	0x90
+#define ESAV_FOFST_ODD			(0xFFF << 0)
+#define ESAV_FOFST_EVEN			(0xFFF << 16)
+
+#define DPI_CLPF_SETTING	0x94
+#define CLPF_TYPE			(0x3 << 0)
+#define ROUND_EN			BIT(4)
+
+#define DPI_Y_LIMIT		0x98
+#define Y_LIMINT_BOT			0
+#define Y_LIMINT_BOT_MASK		(0xFFF << 0)
+#define Y_LIMINT_TOP			16
+#define Y_LIMINT_TOP_MASK		(0xFFF << 16)
+
+#define DPI_C_LIMIT		0x9C
+#define C_LIMIT_BOT			0
+#define C_LIMIT_BOT_MASK		(0xFFF << 0)
+#define C_LIMIT_TOP			16
+#define C_LIMIT_TOP_MASK		(0xFFF << 16)
+
+#define DPI_YUV422_SETTING	0xA0
+#define UV_SWAP				BIT(0)
+#define CR_DELSEL			BIT(4)
+#define CB_DELSEL			BIT(5)
+#define Y_DELSEL			BIT(6)
+#define DE_DELSEL			BIT(7)
+
+#define DPI_EMBSYNC_SETTING	0xA4
+#define EMBSYNC_R_CR_EN			BIT(0)
+#define EMPSYNC_G_Y_EN			BIT(1)
+#define EMPSYNC_B_CB_EN			BIT(2)
+#define ESAV_F_INV			BIT(4)
+#define ESAV_V_INV			BIT(5)
+#define ESAV_H_INV			BIT(6)
+#define ESAV_CODE_MAN			BIT(8)
+#define VS_OUT_SEL			(0x7 << 12)
+
+#define DPI_ESAV_CODE_SET0	0xA8
+#define ESAV_CODE0			(0xFFF << 0)
+#define ESAV_CODE1			(0xFFF << 16)
+
+#define DPI_ESAV_CODE_SET1	0xAC
+#define ESAV_CODE2			(0xFFF << 0)
+#define ESAV_CODE3_MSB			BIT(16)
+
+#define DPI_H_FRE_CON		0xE0
+#define H_FRE_2N			BIT(25)
+#endif /* __MTK_DPI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
new file mode 100644
index 0000000..24aa3ba
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/barrier.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_drm_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_gem.h"
+#include "mtk_drm_plane.h"
+
+/**
+ * struct mtk_drm_crtc - MediaTek specific crtc structure.
+ * @base: crtc object.
+ * @enabled: records whether crtc_enable succeeded
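+ * @pending_needs_vblank: whether a vblank event must be sent once the
+ *                        pending update has been applied
+ * @event: pending vblank event waiting to be sent to userspace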
+ * @planes: array of 4 mtk_drm_plane structures, one for each overlay plane
+ * @pending_planes: whether any plane has pending changes to be applied
+ * @config_regs: memory mapped mmsys configuration register space
+ * @mutex: handle to one of the ten disp_mutex streams
+ * @ddp_comp_nr: number of components in ddp_comp
+ * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
+ */
+struct mtk_drm_crtc {
+	struct drm_crtc			base;
+	bool				enabled;
+
+	bool				pending_needs_vblank;
+	struct drm_pending_vblank_event	*event;
+
+	struct mtk_drm_plane		planes[OVL_LAYER_NR];
+	bool				pending_planes;
+
+	void __iomem			*config_regs;
+	struct mtk_disp_mutex		*mutex;
+	unsigned int			ddp_comp_nr;
+	struct mtk_ddp_comp		**ddp_comp;
+};
+
+struct mtk_crtc_state {
+	struct drm_crtc_state		base;
+
+	bool				pending_config;
+	unsigned int			pending_width;
+	unsigned int			pending_height;
+	unsigned int			pending_vrefresh;
+};
+
+static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
+{
+	return container_of(c, struct mtk_drm_crtc, base);
+}
+
+static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
+{
+	return container_of(s, struct mtk_crtc_state, base);
+}
+
+static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+{
+	struct drm_crtc *crtc = &mtk_crtc->base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+	drm_crtc_vblank_put(crtc);
+	mtk_crtc->event = NULL;
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+{
+	drm_crtc_handle_vblank(&mtk_crtc->base);
+	if (mtk_crtc->pending_needs_vblank) {
+		mtk_drm_crtc_finish_page_flip(mtk_crtc);
+		mtk_crtc->pending_needs_vblank = false;
+	}
+}
+
+static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	int i;
+
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
+
+	mtk_disp_mutex_put(mtk_crtc->mutex);
+
+	drm_crtc_cleanup(crtc);
+}
+
+static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
+{
+	struct mtk_crtc_state *state;
+
+	if (crtc->state) {
+		if (crtc->state->mode_blob)
+			drm_property_unreference_blob(crtc->state->mode_blob);
+
+		state = to_mtk_crtc_state(crtc->state);
+		memset(state, 0, sizeof(*state));
+	} else {
+		state = kzalloc(sizeof(*state), GFP_KERNEL);
+		if (!state)
+			return;
+		crtc->state = &state->base;
+	}
+
+	state->base.crtc = crtc;
+}
+
+static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct mtk_crtc_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+	WARN_ON(state->base.crtc != crtc);
+	state->base.crtc = crtc;
+
+	return &state->base;
+}
+
+static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
+				       struct drm_crtc_state *state)
+{
+	__drm_atomic_helper_crtc_destroy_state(state);
+	kfree(to_mtk_crtc_state(state));
+}
+
+static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	/* Nothing to do here, but this callback is mandatory. */
+	return true;
+}
+
+static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
+
+	state->pending_width = crtc->mode.hdisplay;
+	state->pending_height = crtc->mode.vdisplay;
+	state->pending_vrefresh = crtc->mode.vrefresh;
+	wmb();	/* Make sure the above parameters are set before update */
+	state->pending_config = true;
+}
+
+int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+	struct mtk_drm_private *priv = drm->dev_private;
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]);
+	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+
+	mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+
+	return 0;
+}
+
+void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+	struct mtk_drm_private *priv = drm->dev_private;
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]);
+	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+
+	mtk_ddp_comp_disable_vblank(ovl);
+}
+
+static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
+{
+	int ret;
+	int i;
+
+	DRM_DEBUG_DRIVER("%s\n", __func__);
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+		ret = clk_enable(mtk_crtc->ddp_comp[i]->clk);
+		if (ret) {
+			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	while (--i >= 0)
+		clk_disable(mtk_crtc->ddp_comp[i]->clk);
+	return ret;
+}
+
+static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
+{
+	int i;
+
+	DRM_DEBUG_DRIVER("%s\n", __func__);
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		clk_disable(mtk_crtc->ddp_comp[i]->clk);
+}
+
+static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+{
+	struct drm_crtc *crtc = &mtk_crtc->base;
+	unsigned int width, height, vrefresh;
+	int ret;
+	int i;
+
+	DRM_DEBUG_DRIVER("%s\n", __func__);
+	if (WARN_ON(!crtc->state))
+		return -EINVAL;
+
+	width = crtc->state->adjusted_mode.hdisplay;
+	height = crtc->state->adjusted_mode.vdisplay;
+	vrefresh = crtc->state->adjusted_mode.vrefresh;
+
+	ret = pm_runtime_get_sync(crtc->dev->dev);
+	if (ret < 0) {
+		DRM_ERROR("Failed to enable power domain: %d\n", ret);
+		return ret;
+	}
+
+	ret = mtk_disp_mutex_prepare(mtk_crtc->mutex);
+	if (ret < 0) {
+		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
+		goto err_pm_runtime_put;
+	}
+
+	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
+	if (ret < 0) {
+		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
+		goto err_mutex_unprepare;
+	}
+
+	DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
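+	/*
+	 * Clarifying note (added): each adjacent pair of components in the
+	 * path is connected through the mmsys routing registers, and every
+	 * component is attached to the display mutex so that, as assumed
+	 * here, their register updates are applied together per frame.
+	 */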
+	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
+		mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
+					 mtk_crtc->ddp_comp[i]->id,
+					 mtk_crtc->ddp_comp[i + 1]->id);
+		mtk_disp_mutex_add_comp(mtk_crtc->mutex,
+					mtk_crtc->ddp_comp[i]->id);
+	}
+	mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+	mtk_disp_mutex_enable(mtk_crtc->mutex);
+
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+
+		mtk_ddp_comp_config(comp, width, height, vrefresh);
+		mtk_ddp_comp_start(comp);
+	}
+
+	/* Initially configure all planes */
+	for (i = 0; i < OVL_LAYER_NR; i++) {
+		struct drm_plane *plane = &mtk_crtc->planes[i].base;
+		struct mtk_plane_state *plane_state;
+
+		plane_state = to_mtk_plane_state(plane->state);
+		mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
+					  plane_state);
+	}
+
+	return 0;
+
+err_mutex_unprepare:
+	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+err_pm_runtime_put:
+	pm_runtime_put(crtc->dev->dev);
+	return ret;
+}
+
+static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+{
+	struct drm_device *drm = mtk_crtc->base.dev;
+	int i;
+
+	DRM_DEBUG_DRIVER("%s\n", __func__);
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+					   mtk_crtc->ddp_comp[i]->id);
+	mtk_disp_mutex_disable(mtk_crtc->mutex);
+	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
+		mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
+					      mtk_crtc->ddp_comp[i]->id,
+					      mtk_crtc->ddp_comp[i + 1]->id);
+		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+					   mtk_crtc->ddp_comp[i]->id);
+	}
+	mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+	mtk_crtc_ddp_clk_disable(mtk_crtc);
+	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+
+	pm_runtime_put(drm->dev);
+}
+
+static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	int ret;
+
+	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
+
+	ret = mtk_smi_larb_get(ovl->larb_dev);
+	if (ret) {
+		DRM_ERROR("Failed to get larb: %d\n", ret);
+		return;
+	}
+
+	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
+	if (ret) {
+		mtk_smi_larb_put(ovl->larb_dev);
+		return;
+	}
+
+	drm_crtc_vblank_on(crtc);
+	mtk_crtc->enabled = true;
+}
+
+static void mtk_drm_crtc_disable(struct drm_crtc *crtc)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+	int i;
+
+	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
+	if (!mtk_crtc->enabled)
+		return;
+
+	/* Set all pending plane state to disabled */
+	for (i = 0; i < OVL_LAYER_NR; i++) {
+		struct drm_plane *plane = &mtk_crtc->planes[i].base;
+		struct mtk_plane_state *plane_state;
+
+		plane_state = to_mtk_plane_state(plane->state);
+		plane_state->pending.enable = false;
+		plane_state->pending.config = true;
+	}
+	mtk_crtc->pending_planes = true;
+
+	/* Wait for planes to be disabled */
+	drm_crtc_wait_one_vblank(crtc);
+
+	drm_crtc_vblank_off(crtc);
+	mtk_crtc_ddp_hw_fini(mtk_crtc);
+	mtk_smi_larb_put(ovl->larb_dev);
+
+	mtk_crtc->enabled = false;
+}
+
+static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
+{
+	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+
+	if (mtk_crtc->event && state->base.event)
+		DRM_ERROR("new event while there is still a pending event\n");
+
+	if (state->base.event) {
+		state->base.event->pipe = drm_crtc_index(crtc);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		mtk_crtc->event = state->base.event;
+		state->base.event = NULL;
+	}
+}
+
+static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	unsigned int pending_planes = 0;
+	int i;
+
+	if (mtk_crtc->event)
+		mtk_crtc->pending_needs_vblank = true;
+	for (i = 0; i < OVL_LAYER_NR; i++) {
+		struct drm_plane *plane = &mtk_crtc->planes[i].base;
+		struct mtk_plane_state *plane_state;
+
+		plane_state = to_mtk_plane_state(plane->state);
+		if (plane_state->pending.dirty) {
+			plane_state->pending.config = true;
+			plane_state->pending.dirty = false;
+			pending_planes |= BIT(i);
+		}
+	}
+	if (pending_planes)
+		mtk_crtc->pending_planes = true;
+}
+
+static const struct drm_crtc_funcs mtk_crtc_funcs = {
+	.set_config		= drm_atomic_helper_set_config,
+	.page_flip		= drm_atomic_helper_page_flip,
+	.destroy		= mtk_drm_crtc_destroy,
+	.reset			= mtk_drm_crtc_reset,
+	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
+	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
+	.mode_fixup	= mtk_drm_crtc_mode_fixup,
+	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
+	.enable		= mtk_drm_crtc_enable,
+	.disable	= mtk_drm_crtc_disable,
+	.atomic_begin	= mtk_drm_crtc_atomic_begin,
+	.atomic_flush	= mtk_drm_crtc_atomic_flush,
+};
+
+static int mtk_drm_crtc_init(struct drm_device *drm,
+			     struct mtk_drm_crtc *mtk_crtc,
+			     struct drm_plane *primary,
+			     struct drm_plane *cursor, unsigned int pipe)
+{
+	int ret;
+
+	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
+					&mtk_crtc_funcs, NULL);
+	if (ret)
+		goto err_cleanup_crtc;
+
+	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);
+
+	return 0;
+
+err_cleanup_crtc:
+	drm_crtc_cleanup(&mtk_crtc->base);
+	return ret;
+}
+
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
+	unsigned int i;
+
+	/*
+	 * TODO: instead of updating the registers here, we should prepare
+	 * working registers in atomic_commit and let the hardware command
+	 * queue update module registers on vblank.
+	 */
+	if (state->pending_config) {
+		mtk_ddp_comp_config(ovl, state->pending_width,
+				    state->pending_height,
+				    state->pending_vrefresh);
+
+		state->pending_config = false;
+	}
+
+	if (mtk_crtc->pending_planes) {
+		for (i = 0; i < OVL_LAYER_NR; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i].base;
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			if (plane_state->pending.config) {
+				mtk_ddp_comp_layer_config(ovl, i, plane_state);
+				plane_state->pending.config = false;
+			}
+		}
+		mtk_crtc->pending_planes = false;
+	}
+
+	mtk_drm_finish_page_flip(mtk_crtc);
+}
+
+int mtk_drm_crtc_create(struct drm_device *drm_dev,
+			const enum mtk_ddp_comp_id *path, unsigned int path_len)
+{
+	struct mtk_drm_private *priv = drm_dev->dev_private;
+	struct device *dev = drm_dev->dev;
+	struct mtk_drm_crtc *mtk_crtc;
+	enum drm_plane_type type;
+	unsigned int zpos;
+	int pipe = priv->num_pipes;
+	int ret;
+	int i;
+
+	for (i = 0; i < path_len; i++) {
+		enum mtk_ddp_comp_id comp_id = path[i];
+		struct device_node *node;
+
+		node = priv->comp_node[comp_id];
+		if (!node) {
+			dev_info(dev,
+				 "Not creating crtc %d because component %d is disabled or missing\n",
+				 pipe, comp_id);
+			return 0;
+		}
+	}
+
+	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
+	if (!mtk_crtc)
+		return -ENOMEM;
+
+	mtk_crtc->config_regs = priv->config_regs;
+	mtk_crtc->ddp_comp_nr = path_len;
+	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
+						sizeof(*mtk_crtc->ddp_comp),
+						GFP_KERNEL);
+	if (!mtk_crtc->ddp_comp)
+		return -ENOMEM;
+
+	mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
+	if (IS_ERR(mtk_crtc->mutex)) {
+		ret = PTR_ERR(mtk_crtc->mutex);
+		dev_err(dev, "Failed to get mutex: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+		enum mtk_ddp_comp_id comp_id = path[i];
+		struct mtk_ddp_comp *comp;
+		struct device_node *node;
+
+		node = priv->comp_node[comp_id];
+		comp = priv->ddp_comp[comp_id];
+		if (!comp) {
+			dev_err(dev, "Component %s not initialized\n",
+				node->full_name);
+			ret = -ENODEV;
+			goto unprepare;
+		}
+
+		ret = clk_prepare(comp->clk);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare clock for component %s: %d\n",
+				node->full_name, ret);
+			goto unprepare;
+		}
+
+		mtk_crtc->ddp_comp[i] = comp;
+	}
+
+	for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+		type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
+				(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
+						DRM_PLANE_TYPE_OVERLAY;
+		ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
+				     BIT(pipe), type, zpos);
+		if (ret)
+			goto unprepare;
+	}
+
+	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0].base,
+				&mtk_crtc->planes[1].base, pipe);
+	if (ret < 0)
+		goto unprepare;
+
+	priv->crtc[pipe] = &mtk_crtc->base;
+	priv->num_pipes++;
+
+	return 0;
+
+unprepare:
+	while (--i >= 0)
+		clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
new file mode 100644
index 0000000..81e5566
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_CRTC_H
+#define MTK_DRM_CRTC_H
+
+#include <drm/drm_crtc.h>
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_plane.h"
+
+#define OVL_LAYER_NR	4
+
+int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe);
+void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe);
+void mtk_drm_crtc_check_flush(struct drm_crtc *crtc);
+void mtk_drm_crtc_commit(struct drm_crtc *crtc);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+int mtk_drm_crtc_create(struct drm_device *drm_dev,
+			const enum mtk_ddp_comp_id *path,
+			unsigned int path_len);
+
+#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
new file mode 100644
index 0000000..17ba935
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN	0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN	0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN		0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN	0x04c
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN	0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN	0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN	0x088
+#define DISP_REG_CONFIG_DPI_SEL_IN		0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN	0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0		0x100
+
+#define DISP_REG_MUTEX_EN(n)	(0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n)	(0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(n)	(0x2c + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(n)	(0x30 + 0x20 * (n))
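+/*
+ * Each of the ten display mutexes uses a 0x20-byte register stride; e.g.
+ * for mutex 3 (illustrative) EN is at 0x80, RST at 0x88, MOD at 0x8c and
+ * SOF at 0x90.  (Comment added for clarity, not in the original header.)
+ */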
+
+#define MUTEX_MOD_DISP_OVL0		BIT(11)
+#define MUTEX_MOD_DISP_OVL1		BIT(12)
+#define MUTEX_MOD_DISP_RDMA0		BIT(13)
+#define MUTEX_MOD_DISP_RDMA1		BIT(14)
+#define MUTEX_MOD_DISP_RDMA2		BIT(15)
+#define MUTEX_MOD_DISP_WDMA0		BIT(16)
+#define MUTEX_MOD_DISP_WDMA1		BIT(17)
+#define MUTEX_MOD_DISP_COLOR0		BIT(18)
+#define MUTEX_MOD_DISP_COLOR1		BIT(19)
+#define MUTEX_MOD_DISP_AAL		BIT(20)
+#define MUTEX_MOD_DISP_GAMMA		BIT(21)
+#define MUTEX_MOD_DISP_UFOE		BIT(22)
+#define MUTEX_MOD_DISP_PWM0		BIT(23)
+#define MUTEX_MOD_DISP_PWM1		BIT(24)
+#define MUTEX_MOD_DISP_OD		BIT(25)
+
+#define MUTEX_SOF_SINGLE_MODE		0
+#define MUTEX_SOF_DSI0			1
+#define MUTEX_SOF_DSI1			2
+#define MUTEX_SOF_DPI0			3
+
+#define OVL0_MOUT_EN_COLOR0		0x1
+#define OD_MOUT_EN_RDMA0		0x1
+#define UFOE_MOUT_EN_DSI0		0x1
+#define COLOR0_SEL_IN_OVL0		0x1
+#define OVL1_MOUT_EN_COLOR1		0x1
+#define GAMMA_MOUT_EN_RDMA1		0x1
+#define RDMA1_MOUT_DPI0			0x2
+#define DPI0_SEL_IN_RDMA1		0x1
+#define COLOR1_SEL_IN_OVL1		0x1
+
+struct mtk_disp_mutex {
+	int id;
+	bool claimed;
+};
+
+struct mtk_ddp {
+	struct device			*dev;
+	struct clk			*clk;
+	void __iomem			*regs;
+	struct mtk_disp_mutex		mutex[10];
+};
+
+static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = {
+	[DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL,
+	[DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0,
+	[DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1,
+	[DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA,
+	[DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD,
+	[DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0,
+	[DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1,
+	[DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0,
+	[DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1,
+	[DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0,
+	[DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1,
+	[DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2,
+	[DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE,
+	[DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0,
+	[DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1,
+};
+
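+/*
+ * Clarifying note (added): the "mout" registers select which downstream
+ * component(s) a block's output is routed to, while the "sel_in" registers
+ * select which upstream block feeds a component's input.  Only the pairs
+ * handled below need explicit routing; for every other link these helpers
+ * return 0 and mtk_ddp_add_comp_to_path() leaves the registers untouched.
+ */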
+static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
+				    enum mtk_ddp_comp_id next,
+				    unsigned int *addr)
+{
+	unsigned int value;
+
+	if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+		*addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
+		value = OVL0_MOUT_EN_COLOR0;
+	} else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
+		*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+		value = OD_MOUT_EN_RDMA0;
+	} else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
+		*addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
+		value = UFOE_MOUT_EN_DSI0;
+	} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+		*addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
+		value = OVL1_MOUT_EN_COLOR1;
+	} else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
+		*addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
+		value = GAMMA_MOUT_EN_RDMA1;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN;
+		value = RDMA1_MOUT_DPI0;
+	} else {
+		value = 0;
+	}
+
+	return value;
+}
+
+static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
+				   enum mtk_ddp_comp_id next,
+				   unsigned int *addr)
+{
+	unsigned int value;
+
+	if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+		*addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
+		value = COLOR0_SEL_IN_OVL0;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+		*addr = DISP_REG_CONFIG_DPI_SEL_IN;
+		value = DPI0_SEL_IN_RDMA1;
+	} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+		*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
+		value = COLOR1_SEL_IN_OVL1;
+	} else {
+		value = 0;
+	}
+
+	return value;
+}
+
+void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
+			      enum mtk_ddp_comp_id cur,
+			      enum mtk_ddp_comp_id next)
+{
+	unsigned int addr, value, reg;
+
+	value = mtk_ddp_mout_en(cur, next, &addr);
+	if (value) {
+		reg = readl_relaxed(config_regs + addr) | value;
+		writel_relaxed(reg, config_regs + addr);
+	}
+
+	value = mtk_ddp_sel_in(cur, next, &addr);
+	if (value) {
+		reg = readl_relaxed(config_regs + addr) | value;
+		writel_relaxed(reg, config_regs + addr);
+	}
+}
+
+void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
+				   enum mtk_ddp_comp_id cur,
+				   enum mtk_ddp_comp_id next)
+{
+	unsigned int addr, value, reg;
+
+	value = mtk_ddp_mout_en(cur, next, &addr);
+	if (value) {
+		reg = readl_relaxed(config_regs + addr) & ~value;
+		writel_relaxed(reg, config_regs + addr);
+	}
+
+	value = mtk_ddp_sel_in(cur, next, &addr);
+	if (value) {
+		reg = readl_relaxed(config_regs + addr) & ~value;
+		writel_relaxed(reg, config_regs + addr);
+	}
+}
+
+struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
+{
+	struct mtk_ddp *ddp = dev_get_drvdata(dev);
+
+	if (id >= 10)
+		return ERR_PTR(-EINVAL);
+	if (ddp->mutex[id].claimed)
+		return ERR_PTR(-EBUSY);
+
+	ddp->mutex[id].claimed = true;
+
+	return &ddp->mutex[id];
+}
+
+void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+
+	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+	mutex->claimed = false;
+}
+
+int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+	return clk_prepare_enable(ddp->clk);
+}
+
+void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+	clk_disable_unprepare(ddp->clk);
+}
+
+void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
+			     enum mtk_ddp_comp_id id)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+	unsigned int reg;
+
+	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+	switch (id) {
+	case DDP_COMPONENT_DSI0:
+		reg = MUTEX_SOF_DSI0;
+		break;
+	case DDP_COMPONENT_DSI1:
+		reg = MUTEX_SOF_DSI0;
+		break;
+	case DDP_COMPONENT_DPI0:
+		reg = MUTEX_SOF_DPI0;
+		break;
+	default:
+		reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+		reg |= mutex_mod[id];
+		writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+		return;
+	}
+
+	writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+}
+
+void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
+				enum mtk_ddp_comp_id id)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+	unsigned int reg;
+
+	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+	switch (id) {
+	case DDP_COMPONENT_DSI0:
+	case DDP_COMPONENT_DSI1:
+	case DDP_COMPONENT_DPI0:
+		writel_relaxed(MUTEX_SOF_SINGLE_MODE,
+			       ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+		break;
+	default:
+		reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+		reg &= ~mutex_mod[id];
+		writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+		break;
+	}
+}
+
+void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+
+	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+	writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+}
+
+void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
+{
+	struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+					   mutex[mutex->id]);
+
+	WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+	writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+}
+
+static int mtk_ddp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_ddp *ddp;
+	struct resource *regs;
+	int i;
+
+	ddp = devm_kzalloc(dev, sizeof(*ddp), GFP_KERNEL);
+	if (!ddp)
+		return -ENOMEM;
+
+	for (i = 0; i < 10; i++)
+		ddp->mutex[i].id = i;
+
+	ddp->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ddp->clk)) {
+		dev_err(dev, "Failed to get clock\n");
+		return PTR_ERR(ddp->clk);
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ddp->regs = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(ddp->regs)) {
+		dev_err(dev, "Failed to map mutex registers\n");
+		return PTR_ERR(ddp->regs);
+	}
+
+	platform_set_drvdata(pdev, ddp);
+
+	return 0;
+}
+
+static int mtk_ddp_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id ddp_driver_dt_match[] = {
+	{ .compatible = "mediatek,mt8173-disp-mutex" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
+
+struct platform_driver mtk_ddp_driver = {
+	.probe		= mtk_ddp_probe,
+	.remove		= mtk_ddp_remove,
+	.driver		= {
+		.name	= "mediatek-ddp",
+		.owner	= THIS_MODULE,
+		.of_match_table = ddp_driver_dt_match,
+	},
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
new file mode 100644
index 0000000..92c1175
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_DDP_H
+#define MTK_DRM_DDP_H
+
+#include "mtk_drm_ddp_comp.h"
+
+struct regmap;
+struct device;
+struct mtk_disp_mutex;
+
+void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
+			      enum mtk_ddp_comp_id cur,
+			      enum mtk_ddp_comp_id next);
+void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
+				   enum mtk_ddp_comp_id cur,
+				   enum mtk_ddp_comp_id next);
+
+struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
+int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
+			     enum mtk_ddp_comp_id id);
+void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
+				enum mtk_ddp_comp_id id);
+void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
+
+#endif /* MTK_DRM_DDP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
new file mode 100644
index 0000000..3970fcf
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Authors:
+ *	YT Shen <yt.shen@mediatek.com>
+ *	CK Hu <ck.hu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include "mtk_drm_drv.h"
+#include "mtk_drm_plane.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_OD_EN				0x0000
+#define DISP_OD_INTEN				0x0008
+#define DISP_OD_INTSTA				0x000c
+#define DISP_OD_CFG				0x0020
+#define DISP_OD_SIZE				0x0030
+
+#define DISP_REG_UFO_START			0x0000
+
+#define DISP_COLOR_CFG_MAIN			0x0400
+#define DISP_COLOR_START			0x0c00
+#define DISP_COLOR_WIDTH			0x0c50
+#define DISP_COLOR_HEIGHT			0x0c54
+
+#define	OD_RELAY_MODE		BIT(0)
+
+#define	UFO_BYPASS		BIT(2)
+
+#define	COLOR_BYPASS_ALL	BIT(7)
+#define	COLOR_SEQ_SEL		BIT(13)
+
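+/*
+ * Minimal setup for the COLOR, OD and UFOE engines below: program the frame
+ * size where needed and put the blocks into bypass/relay mode so that pixels
+ * pass through unmodified.
+ */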
+static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
+			     unsigned int h, unsigned int vrefresh)
+{
+	writel(w, comp->regs + DISP_COLOR_WIDTH);
+	writel(h, comp->regs + DISP_COLOR_HEIGHT);
+}
+
+static void mtk_color_start(struct mtk_ddp_comp *comp)
+{
+	writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+	       comp->regs + DISP_COLOR_CFG_MAIN);
+	writel(0x1, comp->regs + DISP_COLOR_START);
+}
+
+static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
+			  unsigned int h, unsigned int vrefresh)
+{
+	writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
+}
+
+static void mtk_od_start(struct mtk_ddp_comp *comp)
+{
+	writel(OD_RELAY_MODE, comp->regs + DISP_OD_CFG);
+	writel(1, comp->regs + DISP_OD_EN);
+}
+
+static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
+{
+	writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
+}
+
+static const struct mtk_ddp_comp_funcs ddp_color = {
+	.config = mtk_color_config,
+	.start = mtk_color_start,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_od = {
+	.config = mtk_od_config,
+	.start = mtk_od_start,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_ufoe = {
+	.start = mtk_ufoe_start,
+};
+
+static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
+	[MTK_DISP_OVL] = "ovl",
+	[MTK_DISP_RDMA] = "rdma",
+	[MTK_DISP_WDMA] = "wdma",
+	[MTK_DISP_COLOR] = "color",
+	[MTK_DISP_AAL] = "aal",
+	[MTK_DISP_GAMMA] = "gamma",
+	[MTK_DISP_UFOE] = "ufoe",
+	[MTK_DSI] = "dsi",
+	[MTK_DPI] = "dpi",
+	[MTK_DISP_PWM] = "pwm",
+	[MTK_DISP_MUTEX] = "mutex",
+	[MTK_DISP_OD] = "od",
+};
+
+struct mtk_ddp_comp_match {
+	enum mtk_ddp_comp_type type;
+	int alias_id;
+	const struct mtk_ddp_comp_funcs *funcs;
+};
+
+static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
+	[DDP_COMPONENT_AAL]	= { MTK_DISP_AAL,	0, NULL },
+	[DDP_COMPONENT_COLOR0]	= { MTK_DISP_COLOR,	0, &ddp_color },
+	[DDP_COMPONENT_COLOR1]	= { MTK_DISP_COLOR,	1, &ddp_color },
+	[DDP_COMPONENT_DPI0]	= { MTK_DPI,		0, NULL },
+	[DDP_COMPONENT_DSI0]	= { MTK_DSI,		0, NULL },
+	[DDP_COMPONENT_DSI1]	= { MTK_DSI,		1, NULL },
+	[DDP_COMPONENT_GAMMA]	= { MTK_DISP_GAMMA,	0, NULL },
+	[DDP_COMPONENT_OD]	= { MTK_DISP_OD,	0, &ddp_od },
+	[DDP_COMPONENT_OVL0]	= { MTK_DISP_OVL,	0, NULL },
+	[DDP_COMPONENT_OVL1]	= { MTK_DISP_OVL,	1, NULL },
+	[DDP_COMPONENT_PWM0]	= { MTK_DISP_PWM,	0, NULL },
+	[DDP_COMPONENT_RDMA0]	= { MTK_DISP_RDMA,	0, NULL },
+	[DDP_COMPONENT_RDMA1]	= { MTK_DISP_RDMA,	1, NULL },
+	[DDP_COMPONENT_RDMA2]	= { MTK_DISP_RDMA,	2, NULL },
+	[DDP_COMPONENT_UFOE]	= { MTK_DISP_UFOE,	0, &ddp_ufoe },
+	[DDP_COMPONENT_WDMA0]	= { MTK_DISP_WDMA,	0, NULL },
+	[DDP_COMPONENT_WDMA1]	= { MTK_DISP_WDMA,	1, NULL },
+};
+
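+/*
+ * Map a device tree node to a DDP component ID, using the node's alias index
+ * to distinguish between multiple instances of the same component type. If no
+ * alias is set, the first component of the matching type is returned.
+ */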
+int mtk_ddp_comp_get_id(struct device_node *node,
+			enum mtk_ddp_comp_type comp_type)
+{
+	int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_ddp_matches); i++) {
+		if (comp_type == mtk_ddp_matches[i].type &&
+		    (id < 0 || id == mtk_ddp_matches[i].alias_id))
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
+		      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
+		      const struct mtk_ddp_comp_funcs *funcs)
+{
+	enum mtk_ddp_comp_type type;
+	struct device_node *larb_node;
+	struct platform_device *larb_pdev;
+
+	if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
+		return -EINVAL;
+
+	comp->id = comp_id;
+	comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
+
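+	/*
+	 * Nothing to map for DPI0, DSI0 and PWM0: their registers, clock and
+	 * interrupt are handled by their respective drivers.
+	 */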
+	if (comp_id == DDP_COMPONENT_DPI0 ||
+	    comp_id == DDP_COMPONENT_DSI0 ||
+	    comp_id == DDP_COMPONENT_PWM0) {
+		comp->regs = NULL;
+		comp->clk = NULL;
+		comp->irq = 0;
+		return 0;
+	}
+
+	comp->regs = of_iomap(node, 0);
+	comp->irq = of_irq_get(node, 0);
+	comp->clk = of_clk_get(node, 0);
+	if (IS_ERR(comp->clk))
+		comp->clk = NULL;
+
+	type = mtk_ddp_matches[comp_id].type;
+
+	/* Only DMA capable components need the LARB property */
+	comp->larb_dev = NULL;
+	if (type != MTK_DISP_OVL &&
+	    type != MTK_DISP_RDMA &&
+	    type != MTK_DISP_WDMA)
+		return 0;
+
+	larb_node = of_parse_phandle(node, "mediatek,larb", 0);
+	if (!larb_node) {
+		dev_err(dev,
+			"Missing mediatek,larb phandle in %s node\n",
+			node->full_name);
+		return -EINVAL;
+	}
+
+	larb_pdev = of_find_device_by_node(larb_node);
+	if (!larb_pdev) {
+		dev_warn(dev, "Waiting for larb device %s\n",
+			 larb_node->full_name);
+		of_node_put(larb_node);
+		return -EPROBE_DEFER;
+	}
+	of_node_put(larb_node);
+
+	comp->larb_dev = &larb_pdev->dev;
+
+	return 0;
+}
+
+int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp)
+{
+	struct mtk_drm_private *private = drm->dev_private;
+
+	if (private->ddp_comp[comp->id])
+		return -EBUSY;
+
+	private->ddp_comp[comp->id] = comp;
+	return 0;
+}
+
+void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp)
+{
+	struct mtk_drm_private *private = drm->dev_private;
+
+	private->ddp_comp[comp->id] = NULL;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
new file mode 100644
index 0000000..6b13ba9
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_DDP_COMP_H
+#define MTK_DRM_DDP_COMP_H
+
+#include <linux/io.h>
+
+struct device;
+struct device_node;
+struct drm_crtc;
+struct drm_device;
+struct mtk_plane_state;
+
+enum mtk_ddp_comp_type {
+	MTK_DISP_OVL,
+	MTK_DISP_RDMA,
+	MTK_DISP_WDMA,
+	MTK_DISP_COLOR,
+	MTK_DISP_AAL,
+	MTK_DISP_GAMMA,
+	MTK_DISP_UFOE,
+	MTK_DSI,
+	MTK_DPI,
+	MTK_DISP_PWM,
+	MTK_DISP_MUTEX,
+	MTK_DISP_OD,
+	MTK_DDP_COMP_TYPE_MAX,
+};
+
+enum mtk_ddp_comp_id {
+	DDP_COMPONENT_AAL,
+	DDP_COMPONENT_COLOR0,
+	DDP_COMPONENT_COLOR1,
+	DDP_COMPONENT_DPI0,
+	DDP_COMPONENT_DSI0,
+	DDP_COMPONENT_DSI1,
+	DDP_COMPONENT_GAMMA,
+	DDP_COMPONENT_OD,
+	DDP_COMPONENT_OVL0,
+	DDP_COMPONENT_OVL1,
+	DDP_COMPONENT_PWM0,
+	DDP_COMPONENT_PWM1,
+	DDP_COMPONENT_RDMA0,
+	DDP_COMPONENT_RDMA1,
+	DDP_COMPONENT_RDMA2,
+	DDP_COMPONENT_UFOE,
+	DDP_COMPONENT_WDMA0,
+	DDP_COMPONENT_WDMA1,
+	DDP_COMPONENT_ID_MAX,
+};
+
+struct mtk_ddp_comp;
+
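+/*
+ * Optional per-component callbacks; the mtk_ddp_comp_* helpers below only
+ * invoke a callback if the component implements it.
+ */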
+struct mtk_ddp_comp_funcs {
+	void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
+		       unsigned int h, unsigned int vrefresh);
+	void (*start)(struct mtk_ddp_comp *comp);
+	void (*stop)(struct mtk_ddp_comp *comp);
+	void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
+	void (*disable_vblank)(struct mtk_ddp_comp *comp);
+	void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
+	void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
+	void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
+			     struct mtk_plane_state *state);
+};
+
+struct mtk_ddp_comp {
+	struct clk *clk;
+	void __iomem *regs;
+	int irq;
+	struct device *larb_dev;
+	enum mtk_ddp_comp_id id;
+	const struct mtk_ddp_comp_funcs *funcs;
+};
+
+static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
+				       unsigned int w, unsigned int h,
+				       unsigned int vrefresh)
+{
+	if (comp->funcs && comp->funcs->config)
+		comp->funcs->config(comp, w, h, vrefresh);
+}
+
+static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->start)
+		comp->funcs->start(comp);
+}
+
+static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->stop)
+		comp->funcs->stop(comp);
+}
+
+static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp,
+					      struct drm_crtc *crtc)
+{
+	if (comp->funcs && comp->funcs->enable_vblank)
+		comp->funcs->enable_vblank(comp, crtc);
+}
+
+static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->disable_vblank)
+		comp->funcs->disable_vblank(comp);
+}
+
+static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
+					 unsigned int idx)
+{
+	if (comp->funcs && comp->funcs->layer_on)
+		comp->funcs->layer_on(comp, idx);
+}
+
+static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
+					  unsigned int idx)
+{
+	if (comp->funcs && comp->funcs->layer_off)
+		comp->funcs->layer_off(comp, idx);
+}
+
+static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
+					     unsigned int idx,
+					     struct mtk_plane_state *state)
+{
+	if (comp->funcs && comp->funcs->layer_config)
+		comp->funcs->layer_config(comp, idx, state);
+}
+
+int mtk_ddp_comp_get_id(struct device_node *node,
+			enum mtk_ddp_comp_type comp_type);
+int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
+		      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
+		      const struct mtk_ddp_comp_funcs *funcs);
+int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
+void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
+
+#endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
new file mode 100644
index 0000000..b1223d5
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: YT SHEN <yt.shen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/iommu.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_drm_fb.h"
+#include "mtk_drm_gem.h"
+
+#define DRIVER_NAME "mediatek"
+#define DRIVER_DESC "Mediatek SoC DRM"
+#define DRIVER_DATE "20150513"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static void mtk_atomic_schedule(struct mtk_drm_private *private,
+				struct drm_atomic_state *state)
+{
+	private->commit.state = state;
+	schedule_work(&private->commit.work);
+}
+
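+/* Wait for the exclusive fence of each plane's framebuffer before committing. */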
+static void mtk_atomic_wait_for_fences(struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
+	int i;
+
+	for_each_plane_in_state(state, plane, plane_state, i)
+		mtk_fb_wait(plane->state->fb);
+}
+
+static void mtk_atomic_complete(struct mtk_drm_private *private,
+				struct drm_atomic_state *state)
+{
+	struct drm_device *drm = private->drm;
+
+	mtk_atomic_wait_for_fences(state);
+
+	drm_atomic_helper_commit_modeset_disables(drm, state);
+	drm_atomic_helper_commit_planes(drm, state, false);
+	drm_atomic_helper_commit_modeset_enables(drm, state);
+	drm_atomic_helper_wait_for_vblanks(drm, state);
+	drm_atomic_helper_cleanup_planes(drm, state);
+	drm_atomic_state_free(state);
+}
+
+static void mtk_atomic_work(struct work_struct *work)
+{
+	struct mtk_drm_private *private = container_of(work,
+			struct mtk_drm_private, commit.work);
+
+	mtk_atomic_complete(private, private->commit.state);
+}
+
+static int mtk_atomic_commit(struct drm_device *drm,
+			     struct drm_atomic_state *state,
+			     bool async)
+{
+	struct mtk_drm_private *private = drm->dev_private;
+	int ret;
+
+	ret = drm_atomic_helper_prepare_planes(drm, state);
+	if (ret)
+		return ret;
+
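+	/*
+	 * Serialize commits and make sure any previously scheduled asynchronous
+	 * commit has completed before swapping in the new state.
+	 */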
+	mutex_lock(&private->commit.lock);
+	flush_work(&private->commit.work);
+
+	drm_atomic_helper_swap_state(drm, state);
+
+	if (async)
+		mtk_atomic_schedule(private, state);
+	else
+		mtk_atomic_complete(private, state);
+
+	mutex_unlock(&private->commit.lock);
+
+	return 0;
+}
+
+static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
+	.fb_create = mtk_drm_mode_fb_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = mtk_atomic_commit,
+};
+
+static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
+	DDP_COMPONENT_OVL0,
+	DDP_COMPONENT_COLOR0,
+	DDP_COMPONENT_AAL,
+	DDP_COMPONENT_OD,
+	DDP_COMPONENT_RDMA0,
+	DDP_COMPONENT_UFOE,
+	DDP_COMPONENT_DSI0,
+	DDP_COMPONENT_PWM0,
+};
+
+static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
+	DDP_COMPONENT_OVL1,
+	DDP_COMPONENT_COLOR1,
+	DDP_COMPONENT_GAMMA,
+	DDP_COMPONENT_RDMA1,
+	DDP_COMPONENT_DPI0,
+};
+
+static int mtk_drm_kms_init(struct drm_device *drm)
+{
+	struct mtk_drm_private *private = drm->dev_private;
+	struct platform_device *pdev;
+	struct device_node *np;
+	int ret;
+
+	if (!iommu_present(&platform_bus_type))
+		return -EPROBE_DEFER;
+
+	pdev = of_find_device_by_node(private->mutex_node);
+	if (!pdev) {
+		dev_err(drm->dev, "Waiting for disp-mutex device %s\n",
+			private->mutex_node->full_name);
+		of_node_put(private->mutex_node);
+		return -EPROBE_DEFER;
+	}
+	private->mutex_dev = &pdev->dev;
+
+	drm_mode_config_init(drm);
+
+	drm->mode_config.min_width = 64;
+	drm->mode_config.min_height = 64;
+
+	/*
+	 * Set the maximum width and height to the default value (4096x4096).
+	 * These values are used to check the framebuffer size limitation
+	 * in drm_mode_addfb().
+	 */
+	drm->mode_config.max_width = 4096;
+	drm->mode_config.max_height = 4096;
+	drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
+
+	ret = component_bind_all(drm->dev, drm);
+	if (ret)
+		goto err_config_cleanup;
+
+	/*
+	 * We currently support two fixed data streams, each optional,
+	 * and each statically assigned to a crtc:
+	 * OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
+	 */
+	ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main));
+	if (ret < 0)
+		goto err_component_unbind;
+	/* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */
+	ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext));
+	if (ret < 0)
+		goto err_component_unbind;
+
+	/* Use OVL device for all DMA memory allocations */
+	np = private->comp_node[mtk_ddp_main[0]] ?:
+	     private->comp_node[mtk_ddp_ext[0]];
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		ret = -ENODEV;
+		dev_err(drm->dev, "Need at least one OVL device\n");
+		goto err_component_unbind;
+	}
+
+	private->dma_dev = &pdev->dev;
+
+	/*
+	 * We don't use the drm_irq_install() helpers provided by the DRM
+	 * core, so we need to set this manually in order to allow the
+	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+	 */
+	drm->irq_enabled = true;
+	ret = drm_vblank_init(drm, MAX_CRTC);
+	if (ret < 0)
+		goto err_component_unbind;
+
+	drm_kms_helper_poll_init(drm);
+	drm_mode_config_reset(drm);
+
+	return 0;
+
+err_component_unbind:
+	component_unbind_all(drm->dev, drm);
+err_config_cleanup:
+	drm_mode_config_cleanup(drm);
+
+	return ret;
+}
+
+static void mtk_drm_kms_deinit(struct drm_device *drm)
+{
+	drm_kms_helper_poll_fini(drm);
+
+	drm_vblank_cleanup(drm);
+	component_unbind_all(drm->dev, drm);
+	drm_mode_config_cleanup(drm);
+}
+
+static const struct file_operations mtk_drm_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = mtk_drm_gem_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+};
+
+static struct drm_driver mtk_drm_driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
+
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = mtk_drm_crtc_enable_vblank,
+	.disable_vblank = mtk_drm_crtc_disable_vblank,
+
+	.gem_free_object = mtk_drm_gem_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.dumb_create = mtk_drm_gem_dumb_create,
+	.dumb_map_offset = mtk_drm_gem_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
+	.gem_prime_mmap = mtk_drm_gem_mmap_buf,
+	.fops = &mtk_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int mtk_drm_bind(struct device *dev)
+{
+	struct mtk_drm_private *private = dev_get_drvdata(dev);
+	struct drm_device *drm;
+	int ret;
+
+	drm = drm_dev_alloc(&mtk_drm_driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	drm_dev_set_unique(drm, dev_name(dev));
+
+	drm->dev_private = private;
+	private->drm = drm;
+
+	ret = mtk_drm_kms_init(drm);
+	if (ret < 0)
+		goto err_free;
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto err_deinit;
+
+	ret = drm_connector_register_all(drm);
+	if (ret < 0)
+		goto err_unregister;
+
+	return 0;
+
+err_unregister:
+	drm_dev_unregister(drm);
+err_deinit:
+	mtk_drm_kms_deinit(drm);
+err_free:
+	drm_dev_unref(drm);
+	return ret;
+}
+
+static void mtk_drm_unbind(struct device *dev)
+{
+	struct mtk_drm_private *private = dev_get_drvdata(dev);
+
+	drm_put_dev(private->drm);
+	private->drm = NULL;
+}
+
+static const struct component_master_ops mtk_drm_ops = {
+	.bind		= mtk_drm_bind,
+	.unbind		= mtk_drm_unbind,
+};
+
+static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+	{ .compatible = "mediatek,mt8173-disp-ovl",   .data = (void *)MTK_DISP_OVL },
+	{ .compatible = "mediatek,mt8173-disp-rdma",  .data = (void *)MTK_DISP_RDMA },
+	{ .compatible = "mediatek,mt8173-disp-wdma",  .data = (void *)MTK_DISP_WDMA },
+	{ .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
+	{ .compatible = "mediatek,mt8173-disp-aal",   .data = (void *)MTK_DISP_AAL },
+	{ .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA },
+	{ .compatible = "mediatek,mt8173-disp-ufoe",  .data = (void *)MTK_DISP_UFOE },
+	{ .compatible = "mediatek,mt8173-dsi",        .data = (void *)MTK_DSI },
+	{ .compatible = "mediatek,mt8173-dpi",        .data = (void *)MTK_DPI },
+	{ .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
+	{ .compatible = "mediatek,mt8173-disp-pwm",   .data = (void *)MTK_DISP_PWM },
+	{ .compatible = "mediatek,mt8173-disp-od",    .data = (void *)MTK_DISP_OD },
+	{ }
+};
+
+static int mtk_drm_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_drm_private *private;
+	struct resource *mem;
+	struct device_node *node;
+	struct component_match *match = NULL;
+	int ret;
+	int i;
+
+	private = devm_kzalloc(dev, sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	mutex_init(&private->commit.lock);
+	INIT_WORK(&private->commit.work, mtk_atomic_work);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	private->config_regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(private->config_regs)) {
+		ret = PTR_ERR(private->config_regs);
+		dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
+			ret);
+		return ret;
+	}
+
+	/* Iterate over sibling DISP function blocks */
+	for_each_child_of_node(dev->of_node->parent, node) {
+		const struct of_device_id *of_id;
+		enum mtk_ddp_comp_type comp_type;
+		int comp_id;
+
+		of_id = of_match_node(mtk_ddp_comp_dt_ids, node);
+		if (!of_id)
+			continue;
+
+		if (!of_device_is_available(node)) {
+			dev_dbg(dev, "Skipping disabled component %s\n",
+				node->full_name);
+			continue;
+		}
+
+		comp_type = (enum mtk_ddp_comp_type)of_id->data;
+
+		if (comp_type == MTK_DISP_MUTEX) {
+			private->mutex_node = of_node_get(node);
+			continue;
+		}
+
+		comp_id = mtk_ddp_comp_get_id(node, comp_type);
+		if (comp_id < 0) {
+			dev_warn(dev, "Skipping unknown component %s\n",
+				 node->full_name);
+			continue;
+		}
+
+		private->comp_node[comp_id] = of_node_get(node);
+
+		/*
+		 * Currently only the OVL, RDMA, DSI, and DPI blocks have
+		 * separate component platform drivers and initialize their own
+		 * DDP component structure. The others are initialized here.
+		 */
+		if (comp_type == MTK_DISP_OVL ||
+		    comp_type == MTK_DISP_RDMA ||
+		    comp_type == MTK_DSI ||
+		    comp_type == MTK_DPI) {
+			dev_info(dev, "Adding component match for %s\n",
+				 node->full_name);
+			component_match_add(dev, &match, compare_of, node);
+		} else {
+			struct mtk_ddp_comp *comp;
+
+			comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+			if (!comp) {
+				ret = -ENOMEM;
+				goto err_node;
+			}
+
+			ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+			if (ret)
+				goto err_node;
+
+			private->ddp_comp[comp_id] = comp;
+		}
+	}
+
+	if (!private->mutex_node) {
+		dev_err(dev, "Failed to find disp-mutex node\n");
+		ret = -ENODEV;
+		goto err_node;
+	}
+
+	pm_runtime_enable(dev);
+
+	platform_set_drvdata(pdev, private);
+
+	ret = component_master_add_with_match(dev, &mtk_drm_ops, match);
+	if (ret)
+		goto err_pm;
+
+	return 0;
+
+err_pm:
+	pm_runtime_disable(dev);
+err_node:
+	of_node_put(private->mutex_node);
+	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+		of_node_put(private->comp_node[i]);
+	return ret;
+}
+
+static int mtk_drm_remove(struct platform_device *pdev)
+{
+	struct mtk_drm_private *private = platform_get_drvdata(pdev);
+	struct drm_device *drm = private->drm;
+	int i;
+
+	drm_connector_unregister_all(drm);
+	drm_dev_unregister(drm);
+	mtk_drm_kms_deinit(drm);
+	drm_dev_unref(drm);
+
+	component_master_del(&pdev->dev, &mtk_drm_ops);
+	pm_runtime_disable(&pdev->dev);
+	of_node_put(private->mutex_node);
+	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+		of_node_put(private->comp_node[i]);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_drm_sys_suspend(struct device *dev)
+{
+	struct mtk_drm_private *private = dev_get_drvdata(dev);
+	struct drm_device *drm = private->drm;
+
+	drm_kms_helper_poll_disable(drm);
+
+	private->suspend_state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(private->suspend_state)) {
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(private->suspend_state);
+	}
+
+	DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
+	return 0;
+}
+
+static int mtk_drm_sys_resume(struct device *dev)
+{
+	struct mtk_drm_private *private = dev_get_drvdata(dev);
+	struct drm_device *drm = private->drm;
+
+	drm_atomic_helper_resume(drm, private->suspend_state);
+	drm_kms_helper_poll_enable(drm);
+
+	DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
+			 mtk_drm_sys_resume);
+
+static const struct of_device_id mtk_drm_of_ids[] = {
+	{ .compatible = "mediatek,mt8173-mmsys", },
+	{ }
+};
+
+static struct platform_driver mtk_drm_platform_driver = {
+	.probe	= mtk_drm_probe,
+	.remove	= mtk_drm_remove,
+	.driver	= {
+		.name	= "mediatek-drm",
+		.of_match_table = mtk_drm_of_ids,
+		.pm     = &mtk_drm_pm_ops,
+	},
+};
+
+static struct platform_driver * const mtk_drm_drivers[] = {
+	&mtk_ddp_driver,
+	&mtk_disp_ovl_driver,
+	&mtk_disp_rdma_driver,
+	&mtk_dpi_driver,
+	&mtk_drm_platform_driver,
+	&mtk_dsi_driver,
+	&mtk_mipi_tx_driver,
+};
+
+static int __init mtk_drm_init(void)
+{
+	int ret;
+	int i;
+
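+	/* Register all sub-drivers; unwind the already registered ones on failure. */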
+	for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) {
+		ret = platform_driver_register(mtk_drm_drivers[i]);
+		if (ret < 0) {
+			pr_err("Failed to register %s driver: %d\n",
+			       mtk_drm_drivers[i]->driver.name, ret);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	while (--i >= 0)
+		platform_driver_unregister(mtk_drm_drivers[i]);
+
+	return ret;
+}
+
+static void __exit mtk_drm_exit(void)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--)
+		platform_driver_unregister(mtk_drm_drivers[i]);
+}
+
+module_init(mtk_drm_init);
+module_exit(mtk_drm_exit);
+
+MODULE_AUTHOR("YT SHEN <yt.shen@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek SoC DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
new file mode 100644
index 0000000..aa93894
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_DRV_H
+#define MTK_DRM_DRV_H
+
+#include <linux/io.h>
+#include "mtk_drm_ddp_comp.h"
+
+#define MAX_CRTC	2
+#define MAX_CONNECTOR	2
+
+struct device;
+struct device_node;
+struct drm_crtc;
+struct drm_device;
+struct drm_fb_helper;
+struct drm_property;
+struct regmap;
+
+struct mtk_drm_private {
+	struct drm_device *drm;
+	struct device *dma_dev;
+
+	struct drm_crtc *crtc[MAX_CRTC];
+	unsigned int num_pipes;
+
+	struct device_node *mutex_node;
+	struct device *mutex_dev;
+	void __iomem *config_regs;
+	struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
+	struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+
+	struct {
+		struct drm_atomic_state *state;
+		struct work_struct work;
+		struct mutex lock;
+	} commit;
+
+	struct drm_atomic_state *suspend_state;
+};
+
+extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_ovl_driver;
+extern struct platform_driver mtk_disp_rdma_driver;
+extern struct platform_driver mtk_dpi_driver;
+extern struct platform_driver mtk_dsi_driver;
+extern struct platform_driver mtk_mipi_tx_driver;
+
+#endif /* MTK_DRM_DRV_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
new file mode 100644
index 0000000..147df85
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "mtk_drm_drv.h"
+#include "mtk_drm_fb.h"
+#include "mtk_drm_gem.h"
+
+/*
+ * MediaTek specific framebuffer structure.
+ *
+ * @base: drm framebuffer object.
+ * @gem_obj: GEM object backing the single supported plane.
+ */
+struct mtk_drm_fb {
+	struct drm_framebuffer	base;
+	/* For now we only support a single plane */
+	struct drm_gem_object	*gem_obj;
+};
+
+#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
+
+struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
+{
+	struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
+
+	return mtk_fb->gem_obj;
+}
+
+static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
+				    struct drm_file *file_priv,
+				    unsigned int *handle)
+{
+	struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
+
+	return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
+}
+
+static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+	struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
+
+	drm_framebuffer_cleanup(fb);
+
+	drm_gem_object_unreference_unlocked(mtk_fb->gem_obj);
+
+	kfree(mtk_fb);
+}
+
+static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
+	.create_handle = mtk_drm_fb_create_handle,
+	.destroy = mtk_drm_fb_destroy,
+};
+
+static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+					const struct drm_mode_fb_cmd2 *mode,
+					struct drm_gem_object *obj)
+{
+	struct mtk_drm_fb *mtk_fb;
+	int ret;
+
+	if (drm_format_num_planes(mode->pixel_format) != 1)
+		return ERR_PTR(-EINVAL);
+
+	mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
+	if (!mtk_fb)
+		return ERR_PTR(-ENOMEM);
+
+	drm_helper_mode_fill_fb_struct(&mtk_fb->base, mode);
+
+	mtk_fb->gem_obj = obj;
+
+	ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+	if (ret) {
+		DRM_ERROR("failed to initialize framebuffer\n");
+		kfree(mtk_fb);
+		return ERR_PTR(ret);
+	}
+
+	return mtk_fb;
+}
+
+/*
+ * Wait for any exclusive fence in fb's gem object's reservation object.
+ *
+ * Returns -ERESTARTSYS if interrupted, else 0.
+ */
+int mtk_fb_wait(struct drm_framebuffer *fb)
+{
+	struct drm_gem_object *gem;
+	struct reservation_object *resv;
+	long ret;
+
+	if (!fb)
+		return 0;
+
+	gem = mtk_fb_get_gem_obj(fb);
+	if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
+		return 0;
+
+	resv = gem->dma_buf->resv;
+	ret = reservation_object_wait_timeout_rcu(resv, false, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	/* MAX_SCHEDULE_TIMEOUT on success, -ERESTARTSYS if interrupted */
+	if (WARN_ON(ret < 0))
+		return ret;
+
+	return 0;
+}
+
+struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
+					       struct drm_file *file,
+					       const struct drm_mode_fb_cmd2 *cmd)
+{
+	struct mtk_drm_fb *mtk_fb;
+	struct drm_gem_object *gem;
+	unsigned int width = cmd->width;
+	unsigned int height = cmd->height;
+	unsigned int size, bpp;
+	int ret;
+
+	if (drm_format_num_planes(cmd->pixel_format) != 1)
+		return ERR_PTR(-EINVAL);
+
+	gem = drm_gem_object_lookup(file, cmd->handles[0]);
+	if (!gem)
+		return ERR_PTR(-ENOENT);
+
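+	/* Make sure the GEM object is large enough for the requested framebuffer. */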
+	bpp = drm_format_plane_cpp(cmd->pixel_format, 0);
+	size = (height - 1) * cmd->pitches[0] + width * bpp;
+	size += cmd->offsets[0];
+
+	if (gem->size < size) {
+		ret = -EINVAL;
+		goto unreference;
+	}
+
+	mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+	if (IS_ERR(mtk_fb)) {
+		ret = PTR_ERR(mtk_fb);
+		goto unreference;
+	}
+
+	return &mtk_fb->base;
+
+unreference:
+	drm_gem_object_unreference_unlocked(gem);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
new file mode 100644
index 0000000..9b2ae34
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_FB_H
+#define MTK_DRM_FB_H
+
+struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
+int mtk_fb_wait(struct drm_framebuffer *fb);
+struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
+					       struct drm_file *file,
+					       const struct drm_mode_fb_cmd2 *cmd);
+
+#endif /* MTK_DRM_FB_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
new file mode 100644
index 0000000..fa2ec0c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/dma-buf.h>
+
+#include "mtk_drm_drv.h"
+#include "mtk_drm_gem.h"
+
+static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
+						unsigned long size)
+{
+	struct mtk_drm_gem_obj *mtk_gem_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+
+	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
+	if (!mtk_gem_obj)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize gem object\n");
+		kfree(mtk_gem_obj);
+		return ERR_PTR(ret);
+	}
+
+	return mtk_gem_obj;
+}
+
+struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
+					   size_t size, bool alloc_kmap)
+{
+	struct mtk_drm_private *priv = dev->dev_private;
+	struct mtk_drm_gem_obj *mtk_gem;
+	struct drm_gem_object *obj;
+	int ret;
+
+	mtk_gem = mtk_drm_gem_init(dev, size);
+	if (IS_ERR(mtk_gem))
+		return ERR_CAST(mtk_gem);
+
+	obj = &mtk_gem->base;
+
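+	/*
+	 * Allocate write-combined DMA memory; skip the kernel mapping unless
+	 * the caller asked for one via alloc_kmap.
+	 */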
+	init_dma_attrs(&mtk_gem->dma_attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);
+
+	if (!alloc_kmap)
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);
+
+	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
+					  &mtk_gem->dma_addr, GFP_KERNEL,
+					  &mtk_gem->dma_attrs);
+	if (!mtk_gem->cookie) {
+		DRM_ERROR("failed to allocate %zx byte dma buffer\n", obj->size);
+		ret = -ENOMEM;
+		goto err_gem_free;
+	}
+
+	if (alloc_kmap)
+		mtk_gem->kvaddr = mtk_gem->cookie;
+
+	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
+			 mtk_gem->cookie, &mtk_gem->dma_addr,
+			 size);
+
+	return mtk_gem;
+
+err_gem_free:
+	drm_gem_object_release(obj);
+	kfree(mtk_gem);
+	return ERR_PTR(ret);
+}
+
+void mtk_drm_gem_free_object(struct drm_gem_object *obj)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct mtk_drm_private *priv = obj->dev->dev_private;
+
+	if (mtk_gem->sg)
+		drm_prime_gem_destroy(obj, mtk_gem->sg);
+	else
+		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
+			       mtk_gem->dma_addr, &mtk_gem->dma_attrs);
+
+	/* release file pointer to gem object. */
+	drm_gem_object_release(obj);
+
+	kfree(mtk_gem);
+}
+
+int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct mtk_drm_gem_obj *mtk_gem;
+	int ret;
+
+	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	args->size = args->pitch * args->height;
+
+	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+	if (IS_ERR(mtk_gem))
+		return PTR_ERR(mtk_gem);
+
+	/*
+	 * Allocate an ID in the IDR table where the object is registered;
+	 * the handle returned to userspace is this ID.
+	 */
+	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
+	if (ret)
+		goto err_handle_create;
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(&mtk_gem->base);
+
+	return 0;
+
+err_handle_create:
+	mtk_drm_gem_free_object(&mtk_gem->base);
+	return ret;
+}
+
+int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+				struct drm_device *dev, uint32_t handle,
+				uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = drm_gem_object_lookup(file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		return -EINVAL;
+	}
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto out;
+
+	*offset = drm_vma_node_offset_addr(&obj->vma_node);
+	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
+
+out:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
+				   struct vm_area_struct *vma)
+
+{
+	int ret;
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct mtk_drm_private *priv = obj->dev->dev_private;
+
+	/*
+	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
+	 * the VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+	 */
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_pgoff = 0;
+
+	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
+			     mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+
+int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap_obj(obj, obj->size, vma);
+	if (ret)
+		return ret;
+
+	return mtk_drm_gem_object_mmap(obj, vma);
+}
+
+int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	obj = vma->vm_private_data;
+
+	return mtk_drm_gem_object_mmap(obj, vma);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents, and the sg_table itself must be freed by
+ *       the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct mtk_drm_private *priv = obj->dev->dev_private;
+	struct sg_table *sgt;
+	int ret;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
+				    mtk_gem->dma_addr, obj->size,
+				    &mtk_gem->dma_attrs);
+	if (ret) {
+		DRM_ERROR("failed to allocate sgt, %d\n", ret);
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
+
+	return sgt;
+}
+
+struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
+			struct dma_buf_attachment *attach, struct sg_table *sg)
+{
+	struct mtk_drm_gem_obj *mtk_gem;
+	int ret;
+	struct scatterlist *s;
+	unsigned int i;
+	dma_addr_t expected;
+
+	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(mtk_gem))
+		return ERR_CAST(mtk_gem);
+
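+	/*
+	 * Only a single DMA address is stored per buffer, so the imported
+	 * sg_table must describe one contiguous DMA range.
+	 */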
+	expected = sg_dma_address(sg->sgl);
+	for_each_sg(sg->sgl, s, sg->nents, i) {
+		if (sg_dma_address(s) != expected) {
+			DRM_ERROR("sg_table is not contiguous");
+			ret = -EINVAL;
+			goto err_gem_free;
+		}
+		expected = sg_dma_address(s) + sg_dma_len(s);
+	}
+
+	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
+	mtk_gem->sg = sg;
+
+	return &mtk_gem->base;
+
+err_gem_free:
+	kfree(mtk_gem);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
new file mode 100644
index 0000000..3a2a562
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_DRM_GEM_H_
+#define _MTK_DRM_GEM_H_
+
+#include <drm/drm_gem.h>
+
+/*
+ * MediaTek DRM buffer structure.
+ *
+ * @base: the embedded GEM object; a handle to it is created by
+ *	drm_gem_handle_create().
+ * @cookie: the return value of dma_alloc_attrs(), kept for dma_free_attrs().
+ * @kvaddr: kernel virtual address of the buffer (only set when allocated
+ *	with alloc_kmap).
+ * @dma_addr: DMA address of the buffer.
+ * @dma_attrs: DMA attributes used to allocate the buffer.
+ * @sg: imported sg_table, set when the object comes from a PRIME import.
+ *
+ * The object is exposed to userspace through its GEM handle, which is how
+ * userspace accesses the buffer.
+ */
+struct mtk_drm_gem_obj {
+	struct drm_gem_object	base;
+	void			*cookie;
+	void			*kvaddr;
+	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	struct sg_table		*sg;
+};
+
+#define to_mtk_gem_obj(x)	container_of(x, struct mtk_drm_gem_obj, base)
+
+void mtk_drm_gem_free_object(struct drm_gem_object *gem);
+struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
+					   bool alloc_kmap);
+int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+				struct drm_device *dev, uint32_t handle,
+				uint64_t *offset);
+int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
+			 struct vm_area_struct *vma);
+struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
+			struct dma_buf_attachment *attach, struct sg_table *sg);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
new file mode 100644
index 0000000..51bc898
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: CK Hu <ck.hu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_drm_fb.h"
+#include "mtk_drm_gem.h"
+#include "mtk_drm_plane.h"
+
+static const u32 formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB565,
+};
+
+static void mtk_plane_enable(struct mtk_drm_plane *mtk_plane, bool enable,
+			     dma_addr_t addr, struct drm_rect *dest)
+{
+	struct drm_plane *plane = &mtk_plane->base;
+	struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+	unsigned int pitch, format;
+	int x, y;
+
+	if (WARN_ON(!plane->state || (enable && !plane->state->fb)))
+		return;
+
+	if (plane->state->fb) {
+		pitch = plane->state->fb->pitches[0];
+		format = plane->state->fb->pixel_format;
+	} else {
+		pitch = 0;
+		format = DRM_FORMAT_RGBA8888;
+	}
+
+	x = plane->state->crtc_x;
+	y = plane->state->crtc_y;
+
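+	/*
+	 * Clip planes that extend past the top/left edge of the CRTC by
+	 * advancing the scanout address instead of moving the plane.
+	 */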
+	if (x < 0) {
+		addr -= x * 4;
+		x = 0;
+	}
+
+	if (y < 0) {
+		addr -= y * pitch;
+		y = 0;
+	}
+
+	state->pending.enable = enable;
+	state->pending.pitch = pitch;
+	state->pending.format = format;
+	state->pending.addr = addr;
+	state->pending.x = x;
+	state->pending.y = y;
+	state->pending.width = dest->x2 - dest->x1;
+	state->pending.height = dest->y2 - dest->y1;
+	wmb(); /* Make sure the above parameters are set before update */
+	state->pending.dirty = true;
+}
+
+static void mtk_plane_reset(struct drm_plane *plane)
+{
+	struct mtk_plane_state *state;
+
+	if (plane->state) {
+		if (plane->state->fb)
+			drm_framebuffer_unreference(plane->state->fb);
+
+		state = to_mtk_plane_state(plane->state);
+		memset(state, 0, sizeof(*state));
+	} else {
+		state = kzalloc(sizeof(*state), GFP_KERNEL);
+		if (!state)
+			return;
+		plane->state = &state->base;
+	}
+
+	state->base.plane = plane;
+	state->pending.format = DRM_FORMAT_RGB565;
+}
+
+static struct drm_plane_state *mtk_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct mtk_plane_state *old_state = to_mtk_plane_state(plane->state);
+	struct mtk_plane_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+
+	WARN_ON(state->base.plane != plane);
+
+	state->pending = old_state->pending;
+
+	return &state->base;
+}
+
+static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
+					struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(to_mtk_plane_state(state));
+}
+
+static const struct drm_plane_funcs mtk_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = drm_plane_cleanup,
+	.reset = mtk_plane_reset,
+	.atomic_duplicate_state = mtk_plane_duplicate_state,
+	.atomic_destroy_state = mtk_drm_plane_destroy_state,
+};
+
+static int mtk_plane_atomic_check(struct drm_plane *plane,
+				  struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_crtc_state *crtc_state;
+	bool visible;
+	struct drm_rect dest = {
+		.x1 = state->crtc_x,
+		.y1 = state->crtc_y,
+		.x2 = state->crtc_x + state->crtc_w,
+		.y2 = state->crtc_y + state->crtc_h,
+	};
+	struct drm_rect src = {
+		/* 16.16 fixed point */
+		.x1 = state->src_x,
+		.y1 = state->src_y,
+		.x2 = state->src_x + state->src_w,
+		.y2 = state->src_y + state->src_h,
+	};
+	struct drm_rect clip = { 0, };
+
+	if (!fb)
+		return 0;
+
+	if (!mtk_fb_get_gem_obj(fb)) {
+		DRM_DEBUG_KMS("buffer is null\n");
+		return -EFAULT;
+	}
+
+	if (!state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	clip.x2 = crtc_state->mode.hdisplay;
+	clip.y2 = crtc_state->mode.vdisplay;
+
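+	/* Plane scaling is not supported. */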
+	return drm_plane_helper_check_update(plane, state->crtc, fb,
+					     &src, &dest, &clip,
+					     DRM_PLANE_HELPER_NO_SCALING,
+					     DRM_PLANE_HELPER_NO_SCALING,
+					     true, true, &visible);
+}
+
+static void mtk_plane_atomic_update(struct drm_plane *plane,
+				    struct drm_plane_state *old_state)
+{
+	struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+	struct drm_crtc *crtc = state->base.crtc;
+	struct drm_gem_object *gem;
+	struct mtk_drm_gem_obj *mtk_gem;
+	struct mtk_drm_plane *mtk_plane = to_mtk_plane(plane);
+	struct drm_rect dest = {
+		.x1 = state->base.crtc_x,
+		.y1 = state->base.crtc_y,
+		.x2 = state->base.crtc_x + state->base.crtc_w,
+		.y2 = state->base.crtc_y + state->base.crtc_h,
+	};
+	struct drm_rect clip = { 0, };
+
+	if (!crtc)
+		return;
+
+	clip.x2 = state->base.crtc->state->mode.hdisplay;
+	clip.y2 = state->base.crtc->state->mode.vdisplay;
+	drm_rect_intersect(&dest, &clip);
+
+	gem = mtk_fb_get_gem_obj(state->base.fb);
+	mtk_gem = to_mtk_gem_obj(gem);
+	mtk_plane_enable(mtk_plane, true, mtk_gem->dma_addr, &dest);
+}
+
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+				     struct drm_plane_state *old_state)
+{
+	struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+	state->pending.enable = false;
+	wmb(); /* Make sure the above parameter is set before update */
+	state->pending.dirty = true;
+}
+
+static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
+	.atomic_check = mtk_plane_atomic_check,
+	.atomic_update = mtk_plane_atomic_update,
+	.atomic_disable = mtk_plane_atomic_disable,
+};
+
+int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
+		   unsigned long possible_crtcs, enum drm_plane_type type,
+		   unsigned int zpos)
+{
+	int err;
+
+	err = drm_universal_plane_init(dev, &mtk_plane->base, possible_crtcs,
+				       &mtk_plane_funcs, formats,
+				       ARRAY_SIZE(formats), type, NULL);
+	if (err) {
+		DRM_ERROR("failed to initialize plane\n");
+		return err;
+	}
+
+	drm_plane_helper_add(&mtk_plane->base, &mtk_plane_helper_funcs);
+	mtk_plane->idx = zpos;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
new file mode 100644
index 0000000..72a7b3e
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: CK Hu <ck.hu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_DRM_PLANE_H_
+#define _MTK_DRM_PLANE_H_
+
+#include <drm/drm_crtc.h>
+#include <linux/types.h>
+
+struct mtk_drm_plane {
+	struct drm_plane		base;
+	unsigned int			idx;
+};
+
+struct mtk_plane_pending_state {
+	bool				config;
+	bool				enable;
+	dma_addr_t			addr;
+	unsigned int			pitch;
+	unsigned int			format;
+	unsigned int			x;
+	unsigned int			y;
+	unsigned int			width;
+	unsigned int			height;
+	bool				dirty;
+};
+
+struct mtk_plane_state {
+	struct drm_plane_state		base;
+	struct mtk_plane_pending_state	pending;
+};
+
+static inline struct mtk_drm_plane *to_mtk_plane(struct drm_plane *plane)
+{
+	return container_of(plane, struct mtk_drm_plane, base);
+}
+
+static inline struct mtk_plane_state *
+to_mtk_plane_state(struct drm_plane_state *state)
+{
+	return container_of(state, struct mtk_plane_state, base);
+}
+
+int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
+		   unsigned long possible_crtcs, enum drm_plane_type type,
+		   unsigned int zpos);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
new file mode 100644
index 0000000..7695591
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <video/videomode.h>
+
+#include "mtk_drm_ddp_comp.h"
+
+#define DSI_VIDEO_FIFO_DEPTH	(1920 / 4)
+#define DSI_HOST_FIFO_DEPTH	64
+
+#define DSI_START		0x00
+
+#define DSI_CON_CTRL		0x10
+#define DSI_RESET			BIT(0)
+#define DSI_EN				BIT(1)
+
+#define DSI_MODE_CTRL		0x14
+#define MODE				(3)
+#define CMD_MODE			0
+#define SYNC_PULSE_MODE			1
+#define SYNC_EVENT_MODE			2
+#define BURST_MODE			3
+#define FRM_MODE			BIT(16)
+#define MIX_MODE			BIT(17)
+
+#define DSI_TXRX_CTRL		0x18
+#define VC_NUM				(2 << 0)
+#define LANE_NUM			(0xf << 2)
+#define DIS_EOT				BIT(6)
+#define NULL_EN				BIT(7)
+#define TE_FREERUN			BIT(8)
+#define EXT_TE_EN			BIT(9)
+#define EXT_TE_EDGE			BIT(10)
+#define MAX_RTN_SIZE			(0xf << 12)
+#define HSTX_CKLP_EN			BIT(16)
+
+#define DSI_PSCTRL		0x1c
+#define DSI_PS_WC			0x3fff
+#define DSI_PS_SEL			(3 << 16)
+#define PACKED_PS_16BIT_RGB565		(0 << 16)
+#define LOOSELY_PS_18BIT_RGB666		(1 << 16)
+#define PACKED_PS_18BIT_RGB666		(2 << 16)
+#define PACKED_PS_24BIT_RGB888		(3 << 16)
+
+#define DSI_VSA_NL		0x20
+#define DSI_VBP_NL		0x24
+#define DSI_VFP_NL		0x28
+#define DSI_VACT_NL		0x2C
+#define DSI_HSA_WC		0x50
+#define DSI_HBP_WC		0x54
+#define DSI_HFP_WC		0x58
+
+#define DSI_HSTX_CKL_WC		0x64
+
+#define DSI_PHY_LCCON		0x104
+#define LC_HS_TX_EN			BIT(0)
+#define LC_ULPM_EN			BIT(1)
+#define LC_WAKEUP_EN			BIT(2)
+
+#define DSI_PHY_LD0CON		0x108
+#define LD0_HS_TX_EN			BIT(0)
+#define LD0_ULPM_EN			BIT(1)
+#define LD0_WAKEUP_EN			BIT(2)
+
+#define DSI_PHY_TIMECON0	0x110
+#define LPX				(0xff << 0)
+#define HS_PRPR				(0xff << 8)
+#define HS_ZERO				(0xff << 16)
+#define HS_TRAIL			(0xff << 24)
+
+#define DSI_PHY_TIMECON1	0x114
+#define TA_GO				(0xff << 0)
+#define TA_SURE				(0xff << 8)
+#define TA_GET				(0xff << 16)
+#define DA_HS_EXIT			(0xff << 24)
+
+#define DSI_PHY_TIMECON2	0x118
+#define CONT_DET			(0xff << 0)
+#define CLK_ZERO			(0xff << 16)
+#define CLK_TRAIL			(0xff << 24)
+
+#define DSI_PHY_TIMECON3	0x11c
+#define CLK_HS_PRPR			(0xff << 0)
+#define CLK_HS_POST			(0xff << 8)
+#define CLK_HS_EXIT			(0xff << 16)
+
+#define NS_TO_CYCLE(n, c)    ((n) / (c) + (((n) % (c)) ? 1 : 0))
+
+struct phy;
+
+struct mtk_dsi {
+	struct mtk_ddp_comp ddp_comp;
+	struct device *dev;
+	struct mipi_dsi_host host;
+	struct drm_encoder encoder;
+	struct drm_connector conn;
+	struct drm_panel *panel;
+	struct drm_bridge *bridge;
+	struct phy *phy;
+
+	void __iomem *regs;
+
+	struct clk *engine_clk;
+	struct clk *digital_clk;
+	struct clk *hs_clk;
+
+	u32 data_rate;
+
+	unsigned long mode_flags;
+	enum mipi_dsi_pixel_format format;
+	unsigned int lanes;
+	struct videomode vm;
+	int refcount;
+	bool enabled;
+};
+
+static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
+{
+	return container_of(e, struct mtk_dsi, encoder);
+}
+
+static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
+{
+	return container_of(c, struct mtk_dsi, conn);
+}
+
+static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
+{
+	return container_of(h, struct mtk_dsi, host);
+}
+
+static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
+{
+	u32 temp = readl(dsi->regs + offset);
+
+	writel((temp & ~mask) | (data & mask), dsi->regs + offset);
+}
+
+static void dsi_phy_timconfig(struct mtk_dsi *dsi)
+{
+	u32 timcon0, timcon1, timcon2, timcon3;
+	unsigned int ui, cycle_time;
+	unsigned int lpx;
+
+	ui = 1000 / dsi->data_rate + 0x01;
+	cycle_time = 8000 / dsi->data_rate + 0x01;
+	lpx = 5;
+
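+	/*
+	 * With data_rate in MHz, ui is roughly one unit interval in ns and
+	 * cycle_time one byte clock in ns, both rounded up. The words below
+	 * pack the DSI_PHY_TIMECON0..3 fields defined above; for example,
+	 * timcon0 holds HS_TRAIL (bits 24-31), HS_ZERO (16-23),
+	 * HS_PRPR (8-15) and LPX (0-7).
+	 */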
+	timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx;
+	timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 |
+		  (4 * lpx);
+	timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
+		  (NS_TO_CYCLE(0x150, cycle_time) << 16);
+	timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 |
+		   NS_TO_CYCLE(0x40, cycle_time);
+
+	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
+	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
+	writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
+	writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
+}
+
+static void mtk_dsi_enable(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
+}
+
+static void mtk_dsi_disable(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
+}
+
+static void mtk_dsi_reset(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
+	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
+}
+
+static int mtk_dsi_poweron(struct mtk_dsi *dsi)
+{
+	struct device *dev = dsi->dev;
+	int ret;
+
+	if (++dsi->refcount != 1)
+		return 0;
+
+	/*
+	 * data_rate = (pixel_clock / 1000) * pixel_depth * mipi_ratio;
+	 * pixel_clock is in kHz and data_rate in MHz, hence the division
+	 * by 1000. mipi_ratio is the MIPI clock coefficient used to balance
+	 * the pixel clock on the MIPI bus; we set mipi_ratio to 1.05.
+	 */
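+	/*
+	 * For example, a 65000 kHz pixel clock gives
+	 * 65000 * 3 * 21 / 10000 = 409 MHz after integer truncation.
+	 */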
+	dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10);
+
+	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set data rate: %d\n", ret);
+		goto err_refcount;
+	}
+
+	phy_power_on(dsi->phy);
+
+	ret = clk_prepare_enable(dsi->engine_clk);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
+		goto err_phy_power_off;
+	}
+
+	ret = clk_prepare_enable(dsi->digital_clk);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable digital clock: %d\n", ret);
+		goto err_disable_engine_clk;
+	}
+
+	mtk_dsi_enable(dsi);
+	mtk_dsi_reset(dsi);
+	dsi_phy_timconfig(dsi);
+
+	return 0;
+
+err_disable_engine_clk:
+	clk_disable_unprepare(dsi->engine_clk);
+err_phy_power_off:
+	phy_power_off(dsi->phy);
+err_refcount:
+	dsi->refcount--;
+	return ret;
+}
+
+static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
+	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
+}
+
+static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
+	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
+	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
+}
+
+static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
+	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
+}
+
+static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
+{
+	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
+	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
+	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
+}
+
+static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
+{
+	u32 tmp_reg1;
+
+	tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
+	return (tmp_reg1 & LC_HS_TX_EN) == LC_HS_TX_EN;
+}
+
+static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
+{
+	if (enter && !dsi_clk_hs_state(dsi))
+		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
+	else if (!enter && dsi_clk_hs_state(dsi))
+		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
+}
+
+static void dsi_set_mode(struct mtk_dsi *dsi)
+{
+	u32 vid_mode = CMD_MODE;
+
+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		vid_mode = SYNC_PULSE_MODE;
+
+		if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+		    !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+			vid_mode = BURST_MODE;
+	}
+
+	writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
+}
+
+static void dsi_ps_control_vact(struct mtk_dsi *dsi)
+{
+	struct videomode *vm = &dsi->vm;
+	u32 dsi_buf_bpp, ps_wc;
+	u32 ps_bpp_mode;
+
+	if (dsi->format == MIPI_DSI_FMT_RGB565)
+		dsi_buf_bpp = 2;
+	else
+		dsi_buf_bpp = 3;
+
+	ps_wc = vm->hactive * dsi_buf_bpp;
+	ps_bpp_mode = ps_wc;
+
+	switch (dsi->format) {
+	case MIPI_DSI_FMT_RGB888:
+		ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
+		break;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
+		break;
+	}
+
+	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+	writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
+	writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
+}
+
+static void dsi_rxtx_control(struct mtk_dsi *dsi)
+{
+	u32 tmp_reg;
+
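+	/*
+	 * The switch below programs the LANE_NUM field (0xf << 2) as a
+	 * per-lane enable mask, e.g. two lanes -> 0x3 << 2.
+	 */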
+	switch (dsi->lanes) {
+	case 1:
+		tmp_reg = 1 << 2;
+		break;
+	case 2:
+		tmp_reg = 3 << 2;
+		break;
+	case 3:
+		tmp_reg = 7 << 2;
+		break;
+	case 4:
+		tmp_reg = 0xf << 2;
+		break;
+	default:
+		tmp_reg = 0xf << 2;
+		break;
+	}
+
+	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+}
+
+static void dsi_ps_control(struct mtk_dsi *dsi)
+{
+	unsigned int dsi_tmp_buf_bpp;
+	u32 tmp_reg;
+
+	switch (dsi->format) {
+	case MIPI_DSI_FMT_RGB888:
+		tmp_reg = PACKED_PS_24BIT_RGB888;
+		dsi_tmp_buf_bpp = 3;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		tmp_reg = LOOSELY_PS_18BIT_RGB666;
+		dsi_tmp_buf_bpp = 3;
+		break;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		tmp_reg = PACKED_PS_18BIT_RGB666;
+		dsi_tmp_buf_bpp = 3;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		tmp_reg = PACKED_PS_16BIT_RGB565;
+		dsi_tmp_buf_bpp = 2;
+		break;
+	default:
+		tmp_reg = PACKED_PS_24BIT_RGB888;
+		dsi_tmp_buf_bpp = 3;
+		break;
+	}
+
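+	/*
+	 * '*' binds tighter than '&', so this masks (hactive * bpp) with
+	 * DSI_PS_WC (bits 0-13); adding it to the format bits selected above
+	 * (bits 16 and up) is equivalent to OR-ing the two fields.
+	 */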
+	tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
+	writel(tmp_reg, dsi->regs + DSI_PSCTRL);
+}
+
+static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
+{
+	unsigned int horizontal_sync_active_byte;
+	unsigned int horizontal_backporch_byte;
+	unsigned int horizontal_frontporch_byte;
+	unsigned int dsi_tmp_buf_bpp;
+
+	struct videomode *vm = &dsi->vm;
+
+	if (dsi->format == MIPI_DSI_FMT_RGB565)
+		dsi_tmp_buf_bpp = 2;
+	else
+		dsi_tmp_buf_bpp = 3;
+
+	writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
+	writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
+	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
+	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+
+	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
+
+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+		horizontal_backporch_byte =
+			(vm->hback_porch * dsi_tmp_buf_bpp - 10);
+	else
+		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
+			dsi_tmp_buf_bpp - 10);
+
+	horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
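+	/*
+	 * For example, an RGB888 mode with hsync_len = 44 gives an HSA word
+	 * count of 44 * 3 - 10 = 122 bytes.
+	 */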
+
+	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
+	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
+	writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+
+	dsi_ps_control(dsi);
+}
+
+static void mtk_dsi_start(struct mtk_dsi *dsi)
+{
+	writel(0, dsi->regs + DSI_START);
+	writel(1, dsi->regs + DSI_START);
+}
+
+static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
+{
+	if (WARN_ON(dsi->refcount == 0))
+		return;
+
+	if (--dsi->refcount != 0)
+		return;
+
+	dsi_lane0_ulp_mode_enter(dsi);
+	dsi_clk_ulp_mode_enter(dsi);
+
+	mtk_dsi_disable(dsi);
+
+	clk_disable_unprepare(dsi->engine_clk);
+	clk_disable_unprepare(dsi->digital_clk);
+
+	phy_power_off(dsi->phy);
+}
+
+static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
+{
+	int ret;
+
+	if (dsi->enabled)
+		return;
+
+	if (dsi->panel) {
+		if (drm_panel_prepare(dsi->panel)) {
+			DRM_ERROR("failed to prepare the panel\n");
+			return;
+		}
+	}
+
+	ret = mtk_dsi_poweron(dsi);
+	if (ret < 0) {
+		DRM_ERROR("failed to power on dsi\n");
+		return;
+	}
+
+	dsi_rxtx_control(dsi);
+
+	dsi_clk_ulp_mode_leave(dsi);
+	dsi_lane0_ulp_mode_leave(dsi);
+	dsi_clk_hs_mode(dsi, 0);
+	dsi_set_mode(dsi);
+
+	dsi_ps_control_vact(dsi);
+	dsi_config_vdo_timing(dsi);
+
+	dsi_set_mode(dsi);
+	dsi_clk_hs_mode(dsi, 1);
+
+	mtk_dsi_start(dsi);
+
+	dsi->enabled = true;
+}
+
+static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
+{
+	if (!dsi->enabled)
+		return;
+
+	if (dsi->panel) {
+		if (drm_panel_disable(dsi->panel)) {
+			DRM_ERROR("failed to disable the panel\n");
+			return;
+		}
+	}
+
+	mtk_dsi_poweroff(dsi);
+
+	dsi->enabled = false;
+}
+
+static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
+	.destroy = mtk_dsi_encoder_destroy,
+};
+
+static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted)
+{
+	struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+
+	dsi->vm.pixelclock = adjusted->clock;
+	dsi->vm.hactive = adjusted->hdisplay;
+	dsi->vm.hback_porch = adjusted->htotal - adjusted->hsync_end;
+	dsi->vm.hfront_porch = adjusted->hsync_start - adjusted->hdisplay;
+	dsi->vm.hsync_len = adjusted->hsync_end - adjusted->hsync_start;
+
+	dsi->vm.vactive = adjusted->vdisplay;
+	dsi->vm.vback_porch = adjusted->vtotal - adjusted->vsync_end;
+	dsi->vm.vfront_porch = adjusted->vsync_start - adjusted->vdisplay;
+	dsi->vm.vsync_len = adjusted->vsync_end - adjusted->vsync_start;
+}
+
+static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+
+	mtk_output_dsi_disable(dsi);
+}
+
+static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+
+	mtk_output_dsi_enable(dsi);
+}
+
+static enum drm_connector_status mtk_dsi_connector_detect(
+	struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
+{
+	struct mtk_dsi *dsi = connector_to_dsi(connector);
+
+	return drm_panel_get_modes(dsi->panel);
+}
+
+static struct drm_encoder *mtk_dsi_connector_best_encoder(
+		struct drm_connector *connector)
+{
+	struct mtk_dsi *dsi = connector_to_dsi(connector);
+
+	return &dsi->encoder;
+}
+
+static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
+	.mode_fixup = mtk_dsi_encoder_mode_fixup,
+	.mode_set = mtk_dsi_encoder_mode_set,
+	.disable = mtk_dsi_encoder_disable,
+	.enable = mtk_dsi_encoder_enable,
+};
+
+static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = mtk_dsi_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs
+	mtk_dsi_connector_helper_funcs = {
+	.get_modes = mtk_dsi_connector_get_modes,
+	.best_encoder = mtk_dsi_connector_best_encoder,
+};
+
+static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
+				 struct drm_encoder *encoder)
+{
+	int ret;
+
+	if (!bridge)
+		return -ENOENT;
+
+	encoder->bridge = bridge;
+	bridge->encoder = encoder;
+	ret = drm_bridge_attach(encoder->dev, bridge);
+	if (ret) {
+		DRM_ERROR("Failed to attach bridge to drm\n");
+		encoder->bridge = NULL;
+		bridge->encoder = NULL;
+	}
+
+	return ret;
+}
+
+static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+	int ret;
+
+	ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
+				 DRM_MODE_CONNECTOR_DSI);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector\n");
+		return ret;
+	}
+
+	drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
+
+	dsi->conn.dpms = DRM_MODE_DPMS_OFF;
+	drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);
+
+	if (dsi->panel) {
+		ret = drm_panel_attach(dsi->panel, &dsi->conn);
+		if (ret) {
+			DRM_ERROR("Failed to attach panel to drm\n");
+			goto err_connector_cleanup;
+		}
+	}
+
+	return 0;
+
+err_connector_cleanup:
+	drm_connector_cleanup(&dsi->conn);
+	return ret;
+}
+
+static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+	int ret;
+
+	ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
+			       DRM_MODE_ENCODER_DSI, NULL);
+	if (ret) {
+		DRM_ERROR("Failed to initialize encoder\n");
+		return ret;
+	}
+	drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);
+
+	/*
+	 * Currently, each display data path is statically assigned to a CRTC:
+	 * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
+	 */
+	dsi->encoder.possible_crtcs = 1;
+
+	/* If there's a bridge, attach to it and let it create the connector */
+	ret = mtk_drm_attach_bridge(dsi->bridge, &dsi->encoder);
+	if (ret) {
+		/* Otherwise create our own connector and attach to a panel */
+		ret = mtk_dsi_create_connector(drm, dsi);
+		if (ret)
+			goto err_encoder_cleanup;
+	}
+
+	return 0;
+
+err_encoder_cleanup:
+	drm_encoder_cleanup(&dsi->encoder);
+	return ret;
+}
+
+static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
+{
+	drm_encoder_cleanup(&dsi->encoder);
+	/* Skip connector cleanup if creation was delegated to the bridge */
+	if (dsi->conn.dev)
+		drm_connector_cleanup(&dsi->conn);
+}
+
+static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
+{
+	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+
+	mtk_dsi_poweron(dsi);
+}
+
+static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
+{
+	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+
+	mtk_dsi_poweroff(dsi);
+}
+
+static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
+	.start = mtk_dsi_ddp_start,
+	.stop = mtk_dsi_ddp_stop,
+};
+
+static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
+			       struct mipi_dsi_device *device)
+{
+	struct mtk_dsi *dsi = host_to_dsi(host);
+
+	dsi->lanes = device->lanes;
+	dsi->format = device->format;
+	dsi->mode_flags = device->mode_flags;
+
+	if (dsi->conn.dev)
+		drm_helper_hpd_irq_event(dsi->conn.dev);
+
+	return 0;
+}
+
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+			       struct mipi_dsi_device *device)
+{
+	struct mtk_dsi *dsi = host_to_dsi(host);
+
+	if (dsi->conn.dev)
+		drm_helper_hpd_irq_event(dsi->conn.dev);
+
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops mtk_dsi_ops = {
+	.attach = mtk_dsi_host_attach,
+	.detach = mtk_dsi_host_detach,
+};
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	int ret;
+	struct drm_device *drm = data;
+	struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+	ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
+	if (ret < 0) {
+		dev_err(dev, "Failed to register component %s: %d\n",
+			dev->of_node->full_name, ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_host_register(&dsi->host);
+	if (ret < 0) {
+		dev_err(dev, "failed to register DSI host: %d\n", ret);
+		goto err_ddp_comp_unregister;
+	}
+
+	ret = mtk_dsi_create_conn_enc(drm, dsi);
+	if (ret) {
+		DRM_ERROR("Encoder create failed with %d\n", ret);
+		goto err_unregister;
+	}
+
+	return 0;
+
+err_unregister:
+	mipi_dsi_host_unregister(&dsi->host);
+err_ddp_comp_unregister:
+	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
+	return ret;
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+			   void *data)
+{
+	struct drm_device *drm = data;
+	struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+	mtk_dsi_destroy_conn_enc(dsi);
+	mipi_dsi_host_unregister(&dsi->host);
+	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+	.bind = mtk_dsi_bind,
+	.unbind = mtk_dsi_unbind,
+};
+
+static int mtk_dsi_probe(struct platform_device *pdev)
+{
+	struct mtk_dsi *dsi;
+	struct device *dev = &pdev->dev;
+	struct device_node *remote_node, *endpoint;
+	struct resource *regs;
+	int comp_id;
+	int ret;
+
+	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	dsi->host.ops = &mtk_dsi_ops;
+	dsi->host.dev = dev;
+
+	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (endpoint) {
+		remote_node = of_graph_get_remote_port_parent(endpoint);
+		if (!remote_node) {
+			dev_err(dev, "No panel connected\n");
+			return -ENODEV;
+		}
+
+		dsi->bridge = of_drm_find_bridge(remote_node);
+		dsi->panel = of_drm_find_panel(remote_node);
+		of_node_put(remote_node);
+		if (!dsi->bridge && !dsi->panel) {
+			dev_info(dev, "Waiting for bridge or panel driver\n");
+			return -EPROBE_DEFER;
+		}
+	}
+
+	dsi->engine_clk = devm_clk_get(dev, "engine");
+	if (IS_ERR(dsi->engine_clk)) {
+		ret = PTR_ERR(dsi->engine_clk);
+		dev_err(dev, "Failed to get engine clock: %d\n", ret);
+		return ret;
+	}
+
+	dsi->digital_clk = devm_clk_get(dev, "digital");
+	if (IS_ERR(dsi->digital_clk)) {
+		ret = PTR_ERR(dsi->digital_clk);
+		dev_err(dev, "Failed to get digital clock: %d\n", ret);
+		return ret;
+	}
+
+	dsi->hs_clk = devm_clk_get(dev, "hs");
+	if (IS_ERR(dsi->hs_clk)) {
+		ret = PTR_ERR(dsi->hs_clk);
+		dev_err(dev, "Failed to get hs clock: %d\n", ret);
+		return ret;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->regs = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(dsi->regs)) {
+		ret = PTR_ERR(dsi->regs);
+		dev_err(dev, "Failed to ioremap memory: %d\n", ret);
+		return ret;
+	}
+
+	dsi->phy = devm_phy_get(dev, "dphy");
+	if (IS_ERR(dsi->phy)) {
+		ret = PTR_ERR(dsi->phy);
+		dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
+		return ret;
+	}
+
+	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
+	if (comp_id < 0) {
+		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+		return comp_id;
+	}
+
+	ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
+				&mtk_dsi_funcs);
+	if (ret) {
+		dev_err(dev, "Failed to initialize component: %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, dsi);
+
+	return component_add(&pdev->dev, &mtk_dsi_component_ops);
+}
+
+static int mtk_dsi_remove(struct platform_device *pdev)
+{
+	struct mtk_dsi *dsi = platform_get_drvdata(pdev);
+
+	mtk_output_dsi_disable(dsi);
+	component_del(&pdev->dev, &mtk_dsi_component_ops);
+
+	return 0;
+}
+
+static const struct of_device_id mtk_dsi_of_match[] = {
+	{ .compatible = "mediatek,mt8173-dsi" },
+	{ },
+};
+
+struct platform_driver mtk_dsi_driver = {
+	.probe = mtk_dsi_probe,
+	.remove = mtk_dsi_remove,
+	.driver = {
+		.name = "mtk-dsi",
+		.of_match_table = mtk_dsi_of_match,
+	},
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
new file mode 100644
index 0000000..cf8f38d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+#define MIPITX_DSI_CON		0x00
+#define RG_DSI_LDOCORE_EN		BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN		BIT(1)
+#define RG_DSI_BCLK_SEL			(3 << 2)
+#define RG_DSI_LD_IDX_SEL		(7 << 4)
+#define RG_DSI_PHYCLK_SEL		(2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL		BIT(10)
+#define RG_DSI_LPTX_CLMP_EN		BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE	0x04
+#define MIPITX_DSI_DATA_LANE0	0x08
+#define MIPITX_DSI_DATA_LANE1	0x0c
+#define MIPITX_DSI_DATA_LANE2	0x10
+#define MIPITX_DSI_DATA_LANE3	0x14
+#define RG_DSI_LNTx_LDOOUT_EN		BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN		BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1		BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2		BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS		BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS		BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS		BIT(6)
+#define RG_DSI_LNTx_RT_CODE		(0xf << 8)
+
+#define MIPITX_DSI_TOP_CON	0x40
+#define RG_DSI_LNT_INTR_EN		BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN		BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN		BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN		BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE		(0xf << 4)
+#define RG_DSI_LNT_AIO_SEL		(7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN		BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN		BIT(12)
+#define RG_DSI_PRESERVE			(7 << 13)
+
+#define MIPITX_DSI_BG_CON	0x44
+#define RG_DSI_BG_CORE_EN		BIT(0)
+#define RG_DSI_BG_CKEN			BIT(1)
+#define RG_DSI_BG_DIV			(0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE		BIT(4)
+#define RG_DSI_VOUT_MSK			(0x3ffff << 5)
+#define RG_DSI_V12_SEL			(7 << 5)
+#define RG_DSI_V10_SEL			(7 << 8)
+#define RG_DSI_V072_SEL			(7 << 11)
+#define RG_DSI_V04_SEL			(7 << 14)
+#define RG_DSI_V032_SEL			(7 << 17)
+#define RG_DSI_V02_SEL			(7 << 20)
+#define RG_DSI_BG_R1_TRIM		(0xf << 24)
+#define RG_DSI_BG_R2_TRIM		(0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0	0x50
+#define RG_DSI_MPPLL_PLL_EN		BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK		(0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV		(3 << 1)
+#define RG_DSI_MPPLL_TXDIV0		(3 << 3)
+#define RG_DSI_MPPLL_TXDIV1		(3 << 5)
+#define RG_DSI_MPPLL_POSDIV		(7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN		BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN		BIT(11)
+#define RG_DSI_MPPLL_VOD_EN		BIT(12)
+
+#define MIPITX_DSI_PLL_CON1	0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN		BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT	BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN		BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD	(0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2	0x58
+
+#define MIPITX_DSI_PLL_PWR	0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON		BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN		BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK	BIT(8)
+
+#define MIPITX_DSI_SW_CTRL	0x80
+#define SW_CTRL_EN			BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0	0x84
+#define SW_LNTC_LPTX_PRE_OE		BIT(0)
+#define SW_LNTC_LPTX_OE			BIT(1)
+#define SW_LNTC_LPTX_P			BIT(2)
+#define SW_LNTC_LPTX_N			BIT(3)
+#define SW_LNTC_HSTX_PRE_OE		BIT(4)
+#define SW_LNTC_HSTX_OE			BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK		BIT(6)
+#define SW_LNT0_LPTX_PRE_OE		BIT(7)
+#define SW_LNT0_LPTX_OE			BIT(8)
+#define SW_LNT0_LPTX_P			BIT(9)
+#define SW_LNT0_LPTX_N			BIT(10)
+#define SW_LNT0_HSTX_PRE_OE		BIT(11)
+#define SW_LNT0_HSTX_OE			BIT(12)
+#define SW_LNT0_LPRX_EN			BIT(13)
+#define SW_LNT1_LPTX_PRE_OE		BIT(14)
+#define SW_LNT1_LPTX_OE			BIT(15)
+#define SW_LNT1_LPTX_P			BIT(16)
+#define SW_LNT1_LPTX_N			BIT(17)
+#define SW_LNT1_HSTX_PRE_OE		BIT(18)
+#define SW_LNT1_HSTX_OE			BIT(19)
+#define SW_LNT2_LPTX_PRE_OE		BIT(20)
+#define SW_LNT2_LPTX_OE			BIT(21)
+#define SW_LNT2_LPTX_P			BIT(22)
+#define SW_LNT2_LPTX_N			BIT(23)
+#define SW_LNT2_HSTX_PRE_OE		BIT(24)
+#define SW_LNT2_HSTX_OE			BIT(25)
+
+struct mtk_mipi_tx {
+	struct device *dev;
+	void __iomem *regs;
+	unsigned int data_rate;
+	struct clk_hw pll_hw;
+	struct clk *pll;
+};
+
+static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+{
+	return container_of(hw, struct mtk_mipi_tx, pll_hw);
+}
+
+static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+				   u32 bits)
+{
+	u32 temp = readl(mipi_tx->regs + offset);
+
+	writel(temp & ~bits, mipi_tx->regs + offset);
+}
+
+static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+				 u32 bits)
+{
+	u32 temp = readl(mipi_tx->regs + offset);
+
+	writel(temp | bits, mipi_tx->regs + offset);
+}
+
+static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+				    u32 mask, u32 data)
+{
+	u32 temp = readl(mipi_tx->regs + offset);
+
+	writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
+}
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+	unsigned int txdiv, txdiv0, txdiv1;
+	u64 pcw;
+
+	dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+	if (mipi_tx->data_rate >= 500000000) {
+		txdiv = 1;
+		txdiv0 = 0;
+		txdiv1 = 0;
+	} else if (mipi_tx->data_rate >= 250000000) {
+		txdiv = 2;
+		txdiv0 = 1;
+		txdiv1 = 0;
+	} else if (mipi_tx->data_rate >= 125000000) {
+		txdiv = 4;
+		txdiv0 = 2;
+		txdiv1 = 0;
+	} else if (mipi_tx->data_rate > 62000000) {
+		txdiv = 8;
+		txdiv0 = 2;
+		txdiv1 = 1;
+	} else if (mipi_tx->data_rate >= 50000000) {
+		txdiv = 16;
+		txdiv0 = 2;
+		txdiv1 = 2;
+	} else {
+		return -EINVAL;
+	}
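+
+	/*
+	 * txdiv = 2^txdiv0 * 2^txdiv1, so the branches above select an
+	 * overall divider of 1, 2, 4, 8 or 16; rates below 50 MHz are
+	 * rejected.
+	 */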
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+				RG_DSI_VOUT_MSK |
+				RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+				(4 << 20) | (4 << 17) | (4 << 14) |
+				(4 << 11) | (4 << 8) | (4 << 5) |
+				RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+	usleep_range(30, 100);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+				RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+				(8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+			     RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+				RG_DSI_MPPLL_SDM_PWR_ON |
+				RG_DSI_MPPLL_SDM_ISO_EN,
+				RG_DSI_MPPLL_SDM_PWR_ON);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+			       RG_DSI_MPPLL_PLL_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+				RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+				RG_DSI_MPPLL_PREDIV,
+				(txdiv0 << 3) | (txdiv1 << 5));
+
+	/*
+	 * PLL PCW config
+	 * PCW bits 24~30: integer part of pcw
+	 * PCW bits 0~23: fractional part of pcw
+	 * pcw = data_rate * 4 * txdiv / (ref_clk * 2);
+	 * the post divider is 4, hence data_rate * 4.
+	 * ref_clk is 26 MHz.
+	 */
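+	/*
+	 * For example, data_rate = 450 MHz selects txdiv = 2 above, so
+	 * pcw = 450e6 * 2 * 2 / 26e6 ~= 69.23, i.e. integer part 69 in
+	 * bits 24-30 of MIPITX_DSI_PLL_CON2.
+	 */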
+	pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+		      26000000);
+	writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+			     RG_DSI_MPPLL_SDM_FRA_EN);
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+	usleep_range(20, 100);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+			       RG_DSI_MPPLL_SDM_SSC_EN);
+
+	return 0;
+}
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+	dev_dbg(mipi_tx->dev, "unprepare\n");
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+			       RG_DSI_MPPLL_PLL_EN);
+
+	mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+				RG_DSI_MPPLL_SDM_ISO_EN |
+				RG_DSI_MPPLL_SDM_PWR_ON,
+				RG_DSI_MPPLL_SDM_ISO_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+			       RG_DSI_LNT_HS_BIAS_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+			       RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+			       RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+			       RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long *prate)
+{
+	return clamp_val(rate, 50000000, 1250000000);
+}
+
+static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+	dev_dbg(mipi_tx->dev, "set rate: %lu Hz\n", rate);
+
+	mipi_tx->data_rate = rate;
+
+	return 0;
+}
+
+static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+						 unsigned long parent_rate)
+{
+	struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+	return mipi_tx->data_rate;
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+	.prepare = mtk_mipi_tx_pll_prepare,
+	.unprepare = mtk_mipi_tx_pll_unprepare,
+	.round_rate = mtk_mipi_tx_pll_round_rate,
+	.set_rate = mtk_mipi_tx_pll_set_rate,
+	.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static int mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+	unsigned int reg;
+
+	for (reg = MIPITX_DSI_CLOCK_LANE;
+	     reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+		mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+	mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+			       RG_DSI_PAD_TIE_LOW_EN);
+
+	return 0;
+}
+
+static int mtk_mipi_tx_power_on(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+	int ret;
+
+	/* Power up core and enable PLL */
+	ret = clk_prepare_enable(mipi_tx->pll);
+	if (ret < 0)
+		return ret;
+
+	/* Enable DSI Lane LDO outputs, disable pad tie low */
+	mtk_mipi_tx_power_on_signal(phy);
+
+	return 0;
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+	unsigned int reg;
+
+	mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+			     RG_DSI_PAD_TIE_LOW_EN);
+
+	for (reg = MIPITX_DSI_CLOCK_LANE;
+	     reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+		mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+static int mtk_mipi_tx_power_off(struct phy *phy)
+{
+	struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+	/* Enable pad tie low, disable DSI Lane LDO outputs */
+	mtk_mipi_tx_power_off_signal(phy);
+
+	/* Disable PLL and power down core */
+	clk_disable_unprepare(mipi_tx->pll);
+
+	return 0;
+}
+
+static const struct phy_ops mtk_mipi_tx_ops = {
+	.power_on = mtk_mipi_tx_power_on,
+	.power_off = mtk_mipi_tx_power_off,
+	.owner = THIS_MODULE,
+};
+
+static int mtk_mipi_tx_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_mipi_tx *mipi_tx;
+	struct resource *mem;
+	struct clk *ref_clk;
+	const char *ref_clk_name;
+	struct clk_init_data clk_init = {
+		.ops = &mtk_mipi_tx_pll_ops,
+		.num_parents = 1,
+		.parent_names = (const char * const *)&ref_clk_name,
+		.flags = CLK_SET_RATE_GATE,
+	};
+	struct phy *phy;
+	struct phy_provider *phy_provider;
+	int ret;
+
+	mipi_tx = devm_kzalloc(dev, sizeof(*mipi_tx), GFP_KERNEL);
+	if (!mipi_tx)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mipi_tx->regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(mipi_tx->regs)) {
+		ret = PTR_ERR(mipi_tx->regs);
+		dev_err(dev, "Failed to get memory resource: %d\n", ret);
+		return ret;
+	}
+
+	ref_clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ref_clk)) {
+		ret = PTR_ERR(ref_clk);
+		dev_err(dev, "Failed to get reference clock: %d\n", ret);
+		return ret;
+	}
+	ref_clk_name = __clk_get_name(ref_clk);
+
+	ret = of_property_read_string(dev->of_node, "clock-output-names",
+				      &clk_init.name);
+	if (ret < 0) {
+		dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+		return ret;
+	}
+
+	mipi_tx->pll_hw.init = &clk_init;
+	mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
+	if (IS_ERR(mipi_tx->pll)) {
+		ret = PTR_ERR(mipi_tx->pll);
+		dev_err(dev, "Failed to register PLL: %d\n", ret);
+		return ret;
+	}
+
+	phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
+	if (IS_ERR(phy)) {
+		ret = PTR_ERR(phy);
+		dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret);
+		return ret;
+	}
+	phy_set_drvdata(phy, mipi_tx);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(phy_provider)) {
+		ret = PTR_ERR(phy_provider);
+		return ret;
+	}
+
+	mipi_tx->dev = dev;
+
+	return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+				   mipi_tx->pll);
+}
+
+static int mtk_mipi_tx_remove(struct platform_device *pdev)
+{
+	of_clk_del_provider(pdev->dev.of_node);
+	return 0;
+}
+
+static const struct of_device_id mtk_mipi_tx_match[] = {
+	{ .compatible = "mediatek,mt8173-mipi-tx", },
+	{},
+};
+
+struct platform_driver mtk_mipi_tx_driver = {
+	.probe = mtk_mipi_tx_probe,
+	.remove = mtk_mipi_tx_remove,
+	.driver = {
+		.name = "mediatek-mipi-tx",
+		.of_match_table = mtk_mipi_tx_match,
+	},
+};
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index a7bf6a9..2ac3fcb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -75,7 +75,7 @@
 		return 0;
 	}
 
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index b0af774..ebb470f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -116,10 +116,8 @@
 
 static int __init mgag200_init(void)
 {
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && mgag200_modeset == -1)
 		return -EINVAL;
-#endif
 
 	if (mgag200_modeset == 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 205b280..3e02ac2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -281,7 +281,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
 	if (ret) {
 		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9147444..615cbb0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -53,7 +53,7 @@
 	struct mga_framebuffer *mga_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
@@ -358,7 +358,7 @@
 	struct drm_gem_object *obj;
 	struct mgag200_bo *bo;
 
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 14e64e0..d347dca 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -182,7 +182,7 @@
 			}
 		}
 
-		fvv = pllreffreq * testn / testm;
+		fvv = pllreffreq * (n + 1) / (m + 1);
 		fvv = (fvv - 800000) / 50000;
 
 		if (fvv > 15)
@@ -202,6 +202,14 @@
 	WREG_DAC(MGA1064_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_PIX_PLLC_N, n);
 	WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+	if (mdev->unique_rev_id >= 0x04) {
+		WREG_DAC(0x1a, 0x09);
+		msleep(20);
+		WREG_DAC(0x1a, 0x01);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05108b5..9d5083d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -245,6 +245,8 @@
 	.verify_access = mgag200_bo_verify_access,
 	.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
 	.io_mem_free = &mgag200_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 215495c..167a497 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -23,6 +23,13 @@
 	  that can be parsed by envytools demsm tool.  If enabled, register
 	  logging can be switched on via msm.reglog=y module param.
 
+config DRM_MSM_HDMI_HDCP
+	bool "Enable HDMI HDCP support in MSM DRM driver"
+	depends on DRM_MSM && QCOM_SCM
+	default y
+	help
+	  Choose this option to enable the HDCP state machine.
+
 config DRM_MSM_DSI
 	bool "Enable DSI support in MSM DRM driver"
 	depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ddb4c9d..60cb026 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -10,7 +10,6 @@
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
 	hdmi/hdmi_connector.o \
-	hdmi/hdmi_hdcp.o \
 	hdmi/hdmi_i2c.o \
 	hdmi/hdmi_phy.o \
 	hdmi/hdmi_phy_8960.o \
@@ -40,8 +39,10 @@
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
 	msm_atomic.o \
+	msm_debugfs.o \
 	msm_drv.o \
 	msm_fb.o \
+	msm_fence.o \
 	msm_gem.o \
 	msm_gem_prime.o \
 	msm_gem_submit.o \
@@ -56,6 +57,8 @@
 msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
 msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+
 msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
 			mdp/mdp4/mdp4_dsi_encoder.o \
 			dsi/dsi_cfg.o \
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 4951172..2aec27d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -120,8 +120,8 @@
 	/* reset ringbuffer: */
 	gpu->rb->cur = gpu->rb->start;
 
-	/* reset completed fence seqno, just discard anything pending: */
-	adreno_gpu->memptrs->fence = gpu->submitted_fence;
+	/* reset completed fence seqno: */
+	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
 	adreno_gpu->memptrs->rptr  = 0;
 	adreno_gpu->memptrs->wptr  = 0;
 
@@ -133,7 +133,7 @@
 	}
 }
 
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -168,7 +168,7 @@
 		OUT_PKT2(ring);
 
 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
-	OUT_RING(ring, submit->fence);
+	OUT_RING(ring, submit->fence->seqno);
 
 	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
 		/* Flush HLSQ lazy updates to make sure there is nothing
@@ -185,7 +185,7 @@
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 	OUT_RING(ring, CACHE_FLUSH_TS);
 	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
-	OUT_RING(ring, submit->fence);
+	OUT_RING(ring, submit->fence->seqno);
 
 	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
 	OUT_PKT3(ring, CP_INTERRUPT, 1);
@@ -212,8 +212,6 @@
 #endif
 
 	gpu->funcs->flush(gpu);
-
-	return 0;
 }
 
 void adreno_flush(struct msm_gpu *gpu)
@@ -254,7 +252,7 @@
 			adreno_gpu->rev.patchid);
 
 	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-			gpu->submitted_fence);
+			gpu->fctx->last_fence);
 	seq_printf(m, "rptr:     %d\n", get_rptr(adreno_gpu));
 	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
 	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
@@ -295,7 +293,7 @@
 			adreno_gpu->rev.patchid);
 
 	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-			gpu->submitted_fence);
+			gpu->fctx->last_fence);
 	printk("rptr:     %d\n", get_rptr(adreno_gpu));
 	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
 	printk("rb wptr:  %d\n", get_wptr(gpu->rb));
@@ -410,7 +408,7 @@
 	}
 
 	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-	if (!adreno_gpu->memptrs) {
+	if (IS_ERR(adreno_gpu->memptrs)) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 1d07511..a54f6e0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -238,7 +238,7 @@
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
 void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		struct msm_file_private *ctx);
 void adreno_flush(struct msm_gpu *gpu);
 void adreno_idle(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 749fbb2..03f115f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -41,8 +41,6 @@
 /* Regulators for DSI devices */
 struct dsi_reg_entry {
 	char name[32];
-	int min_voltage;
-	int max_voltage;
 	int enable_load;
 	int disable_load;
 };
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index e58e9b9..93c1ee0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -22,9 +22,9 @@
 	.reg_cfg = {
 		.num = 3,
 		.regs = {
-			{"vdda", 1200000, 1200000, 100000, 100},
-			{"avdd", 3000000, 3000000, 110000, 100},
-			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"avdd", 110000, 100},	/* 3.0 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
 		},
 	},
 	.bus_clk_names = dsi_v2_bus_clk_names,
@@ -40,10 +40,10 @@
 	.reg_cfg = {
 		.num = 4,
 		.regs = {
-			{"gdsc", -1, -1, -1, -1},
-			{"vdd", 3000000, 3000000, 150000, 100},
-			{"vdda", 1200000, 1200000, 100000, 100},
-			{"vddio", 1800000, 1800000, 100000, 100},
+			{"gdsc", -1, -1},
+			{"vdd", 150000, 100},	/* 3.0 V */
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
 		},
 	},
 	.bus_clk_names = dsi_6g_bus_clk_names,
@@ -59,9 +59,9 @@
 	.reg_cfg = {
 		.num = 3,
 		.regs = {
-			{"gdsc", -1, -1, -1, -1},
-			{"vdda", 1200000, 1200000, 100000, 100},
-			{"vddio", 1800000, 1800000, 100000, 100},
+			{"gdsc", -1, -1},
+			{"vdda", 100000, 100},	/* 1.2 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
 		},
 	},
 	.bus_clk_names = dsi_8916_bus_clk_names,
@@ -73,13 +73,13 @@
 	.reg_cfg = {
 		.num = 7,
 		.regs = {
-			{"gdsc", -1, -1, -1, -1},
-			{"vdda", 1250000, 1250000, 100000, 100},
-			{"vddio", 1800000, 1800000, 100000, 100},
-			{"vcca", 1000000, 1000000, 10000, 100},
-			{"vdd", 1800000, 1800000, 100000, 100},
-			{"lab_reg", -1, -1, -1, -1},
-			{"ibb_reg", -1, -1, -1, -1},
+			{"gdsc", -1, -1},
+			{"vdda", 100000, 100},	/* 1.25 V */
+			{"vddio", 100000, 100},	/* 1.8 V */
+			{"vcca", 10000, 100},	/* 1.0 V */
+			{"vdd", 100000, 100},	/* 1.8 V */
+			{"lab_reg", -1, -1},
+			{"ibb_reg", -1, -1},
 		},
 	},
 	.bus_clk_names = dsi_6g_bus_clk_names,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4282ec6..a3e47ad8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -325,18 +325,6 @@
 		return ret;
 	}
 
-	for (i = 0; i < num; i++) {
-		if (regulator_can_change_voltage(s[i].consumer)) {
-			ret = regulator_set_voltage(s[i].consumer,
-				regs[i].min_voltage, regs[i].max_voltage);
-			if (ret < 0) {
-				pr_err("regulator %d set voltage failed, %d\n",
-					i, ret);
-				return ret;
-			}
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 58ba7ec..c8d1f19 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -198,9 +198,13 @@
 
 static void dsi_mgr_connector_destroy(struct drm_connector *connector)
 {
+	struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+
 	DBG("");
-	drm_connector_unregister(connector);
+
 	drm_connector_cleanup(connector);
+
+	kfree(dsi_connector);
 }
 
 static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
@@ -538,12 +542,9 @@
 	struct dsi_connector *dsi_connector;
 	int ret, i;
 
-	dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
-				sizeof(*dsi_connector), GFP_KERNEL);
-	if (!dsi_connector) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
+	if (!dsi_connector)
+		return ERR_PTR(-ENOMEM);
 
 	dsi_connector->id = id;
 
@@ -552,7 +553,7 @@
 	ret = drm_connector_init(msm_dsi->dev, connector,
 			&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
 	if (ret)
-		goto fail;
+		return ERR_PTR(ret);
 
 	drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
 
@@ -565,21 +566,11 @@
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	ret = drm_connector_register(connector);
-	if (ret)
-		goto fail;
-
 	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
 		drm_mode_connector_attach_encoder(connector,
 						msm_dsi->encoders[i]);
 
 	return connector;
-
-fail:
-	if (connector)
-		dsi_mgr_connector_destroy(connector);
-
-	return ERR_PTR(ret);
 }
 
 /* initialize bridge */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 91a95fb..e2f42d8 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -177,19 +177,6 @@
 		return ret;
 	}
 
-	for (i = 0; i < num; i++) {
-		if (regulator_can_change_voltage(s[i].consumer)) {
-			ret = regulator_set_voltage(s[i].consumer,
-				regs[i].min_voltage, regs[i].max_voltage);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set voltage failed, %d\n",
-					i, ret);
-				return ret;
-			}
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 2e9ba11..f4bc11a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -138,8 +138,8 @@
 	.reg_cfg = {
 		.num = 2,
 		.regs = {
-			{"vddio", 1800000, 1800000, 100000, 100},
-			{"vcca", 1000000, 1000000, 10000, 100},
+			{"vddio", 100000, 100},	/* 1.8 V */
+			{"vcca", 10000, 100},	/* 1.0 V */
 		},
 	},
 	.ops = {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index edf7411..96d1852 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -138,7 +138,7 @@
 	.reg_cfg = {
 		.num = 1,
 		.regs = {
-			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vddio", 100000, 100},	/* 1.8 V */
 		},
 	},
 	.ops = {
@@ -153,7 +153,7 @@
 	.reg_cfg = {
 		.num = 1,
 		.regs = {
-			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vddio", 100000, 100},	/* 1.8 V */
 		},
 	},
 	.ops = {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 197b039..213355a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -185,7 +185,7 @@
 	.reg_cfg = {
 		.num = 1,
 		.regs = {
-			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vddio", 100000, 100},	/* 1.8 V */
 		},
 	},
 	.ops = {
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index b4d1b46..72360cd 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -37,7 +37,7 @@
 	struct edp_connector *edp_connector = to_edp_connector(connector);
 
 	DBG("");
-	drm_connector_unregister(connector);
+
 	drm_connector_cleanup(connector);
 
 	kfree(edp_connector);
@@ -124,10 +124,8 @@
 	int ret;
 
 	edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
-	if (!edp_connector) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!edp_connector)
+		return ERR_PTR(-ENOMEM);
 
 	edp_connector->edp = edp;
 
@@ -136,7 +134,7 @@
 	ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
 			DRM_MODE_CONNECTOR_eDP);
 	if (ret)
-		goto fail;
+		return ERR_PTR(ret);
 
 	drm_connector_helper_add(connector, &edp_connector_helper_funcs);
 
@@ -147,17 +145,7 @@
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	ret = drm_connector_register(connector);
-	if (ret)
-		goto fail;
-
 	drm_mode_connector_attach_encoder(connector, edp->encoder);
 
 	return connector;
-
-fail:
-	if (connector)
-		edp_connector_destroy(connector);
-
-	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 81200e9..149bfe7 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -21,8 +21,6 @@
 #include "edp.h"
 #include "edp.xml.h"
 
-#define VDDA_MIN_UV		1800000	/* uV units */
-#define VDDA_MAX_UV		1800000	/* uV units */
 #define VDDA_UA_ON_LOAD		100000	/* uA units */
 #define VDDA_UA_OFF_LOAD	100	/* uA units */
 
@@ -67,7 +65,7 @@
 	void __iomem *base;
 
 	/* regulators */
-	struct regulator *vdda_vreg;
+	struct regulator *vdda_vreg;	/* 1.8 V */
 	struct regulator *lvl_vreg;
 
 	/* clocks */
@@ -302,21 +300,24 @@
 static int edp_regulator_init(struct edp_ctrl *ctrl)
 {
 	struct device *dev = &ctrl->pdev->dev;
+	int ret;
 
 	DBG("");
 	ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
-	if (IS_ERR(ctrl->vdda_vreg)) {
-		pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__,
-				PTR_ERR(ctrl->vdda_vreg));
+	ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
+				ret);
 		ctrl->vdda_vreg = NULL;
-		return PTR_ERR(ctrl->vdda_vreg);
+		return ret;
 	}
 	ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
-	if (IS_ERR(ctrl->lvl_vreg)) {
-		pr_err("Could not get lvl-vdd reg, %ld",
-				PTR_ERR(ctrl->lvl_vreg));
+	ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
+	if (ret) {
+		pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
+				ret);
 		ctrl->lvl_vreg = NULL;
-		return PTR_ERR(ctrl->lvl_vreg);
+		return ret;
 	}
 
 	return 0;
@@ -326,12 +327,6 @@
 {
 	int ret;
 
-	ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV);
-	if (ret) {
-		pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret);
-		goto vdda_set_fail;
-	}
-
 	ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
 	if (ret < 0) {
 		pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 65428cf..bc7ba0b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -243,10 +243,21 @@
 /*
  * hdcp
  */
+#ifdef CONFIG_DRM_MSM_HDMI_HDCP
 struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
 void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
 void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
 void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
 void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+#else
+static inline struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
+{
+	return ERR_PTR(-ENXIO);
+}
+static inline void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) {}
+static inline void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+#endif
 
 #endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 26129bf..b15d726 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -112,6 +112,9 @@
 		for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
 			struct hdmi_gpio_data gpio = config->gpios[i];
 
+			if (gpio.num == -1)
+				continue;
+
 			if (gpio.output) {
 				int value = gpio.value ? 0 : 1;
 
@@ -126,8 +129,10 @@
 
 	return 0;
 err:
-	while (i--)
-		gpio_free(config->gpios[i].num);
+	while (i--) {
+		if (config->gpios[i].num != -1)
+			gpio_free(config->gpios[i].num);
+	}
 
 	return ret;
 }
@@ -341,7 +346,6 @@
 
 	hdp_disable(hdmi_connector);
 
-	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
 	kfree(hdmi_connector);
@@ -433,10 +437,8 @@
 	int ret;
 
 	hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
-	if (!hdmi_connector) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!hdmi_connector)
+		return ERR_PTR(-ENOMEM);
 
 	hdmi_connector->hdmi = hdmi;
 	INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
@@ -453,21 +455,13 @@
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	drm_connector_register(connector);
-
 	ret = hpd_enable(hdmi_connector);
 	if (ret) {
 		dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
-		goto fail;
+		return ERR_PTR(ret);
 	}
 
 	drm_mode_connector_attach_encoder(connector, hdmi->encoder);
 
 	return connector;
-
-fail:
-	if (connector)
-		hdmi_connector_destroy(connector);
-
-	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index e233acf..9527daf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -121,7 +121,7 @@
 		if (!file || (event->base.file_priv == file)) {
 			mdp4_crtc->event = NULL;
 			DBG("%s: send event: %p", mdp4_crtc->name, event);
-			drm_send_vblank_event(dev, mdp4_crtc->id, event);
+			drm_crtc_send_vblank_event(crtc, event);
 		}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -427,7 +427,7 @@
 	}
 
 	if (handle) {
-		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
+		cursor_bo = drm_gem_object_lookup(file_priv, handle);
 		if (!cursor_bo)
 			return -ENOENT;
 	} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 76e1dfb..67442d5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -50,30 +50,6 @@
 
 	mdp4_kms->rev = minor;
 
-	if (mdp4_kms->dsi_pll_vdda) {
-		if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
-			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
-					1200000, 1200000);
-			if (ret) {
-				dev_err(dev->dev,
-					"failed to set dsi_pll_vdda voltage: %d\n", ret);
-				goto out;
-			}
-		}
-	}
-
-	if (mdp4_kms->dsi_pll_vddio) {
-		if (mdp4_kms->rev == 2) {
-			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
-					1800000, 1800000);
-			if (ret) {
-				dev_err(dev->dev,
-					"failed to set dsi_pll_vddio voltage: %d\n", ret);
-				goto out;
-			}
-		}
-	}
-
 	if (mdp4_kms->rev > 1) {
 		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
 		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
@@ -485,16 +461,6 @@
 		goto fail;
 	}
 
-	mdp4_kms->dsi_pll_vdda =
-			devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
-	if (IS_ERR(mdp4_kms->dsi_pll_vdda))
-		mdp4_kms->dsi_pll_vdda = NULL;
-
-	mdp4_kms->dsi_pll_vddio =
-			devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
-	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
-		mdp4_kms->dsi_pll_vddio = NULL;
-
 	/* NOTE: driver for this regulator still missing upstream.. use
 	 * _get_exclusive() and ignore the error if it does not exist
 	 * (and hope that the bootloader left it on for us)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index b282871..c5d045d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -37,8 +37,6 @@
 
 	void __iomem *mmio;
 
-	struct regulator *dsi_pll_vdda;
-	struct regulator *dsi_pll_vddio;
 	struct regulator *vdd;
 
 	struct clk *clk;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index e73e174..2648cd7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -48,7 +48,6 @@
 	struct mdp4_lvds_connector *mdp4_lvds_connector =
 			to_mdp4_lvds_connector(connector);
 
-	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
 	kfree(mdp4_lvds_connector);
@@ -121,13 +120,10 @@
 {
 	struct drm_connector *connector = NULL;
 	struct mdp4_lvds_connector *mdp4_lvds_connector;
-	int ret;
 
 	mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
-	if (!mdp4_lvds_connector) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!mdp4_lvds_connector)
+		return ERR_PTR(-ENOMEM);
 
 	mdp4_lvds_connector->encoder = encoder;
 	mdp4_lvds_connector->panel_node = panel_node;
@@ -143,15 +139,7 @@
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	drm_connector_register(connector);
-
 	drm_mode_connector_attach_encoder(connector, encoder);
 
 	return connector;
-
-fail:
-	if (connector)
-		mdp4_lvds_connector_destroy(connector);
-
-	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 9673b95..88fe256 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -149,7 +149,7 @@
 		if (!file || (event->base.file_priv == file)) {
 			mdp5_crtc->event = NULL;
 			DBG("%s: send event: %p", mdp5_crtc->name, event);
-			drm_send_vblank_event(dev, mdp5_crtc->id, event);
+			drm_crtc_send_vblank_event(crtc, event);
 		}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -518,7 +518,7 @@
 		goto set_cursor;
 	}
 
-	cursor_bo = drm_gem_object_lookup(dev, file, handle);
+	cursor_bo = drm_gem_object_lookup(file, handle);
 	if (!cursor_bo)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 1c2caff..b4a8aa4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -105,6 +105,12 @@
 			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4,
 			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+	FMT(XBGR8888, 8, 8, 8, 8,  2, 0, 1, 3,  false,   true,  4,  4,
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+	FMT(RGBX8888, 8, 8, 8, 8,  3, 1, 0, 2,  false,   true,  4,  4,
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+	FMT(BGRX8888, 8, 8, 8, 8,  3, 2, 0, 1,  false,   true,  4,  4,
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3,
 			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 7eb253b..e3892c2 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -18,16 +18,16 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_gem.h"
+#include "msm_fence.h"
 
 struct msm_commit {
 	struct drm_device *dev;
 	struct drm_atomic_state *state;
-	uint32_t fence;
-	struct msm_fence_cb fence_cb;
+	struct work_struct work;
 	uint32_t crtc_mask;
 };
 
-static void fence_cb(struct msm_fence_cb *cb);
+static void commit_worker(struct work_struct *work);
 
 /* block until specified crtcs are no longer pending update, and
  * atomically mark them as pending update
@@ -69,11 +69,7 @@
 	c->dev = state->dev;
 	c->state = state;
 
-	/* TODO we might need a way to indicate to run the cb on a
-	 * different wq so wait_for_vblanks() doesn't block retiring
-	 * bo's..
-	 */
-	INIT_FENCE_CB(&c->fence_cb, fence_cb);
+	INIT_WORK(&c->work, commit_worker);
 
 	return c;
 }
@@ -114,13 +110,15 @@
 /* The (potentially) asynchronous part of the commit.  At this point
  * nothing can fail short of armageddon.
  */
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *c, bool async)
 {
 	struct drm_atomic_state *state = c->state;
 	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 
+	drm_atomic_helper_wait_for_fences(dev, state);
+
 	kms->funcs->prepare_commit(kms, state);
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -153,17 +151,9 @@
 	commit_destroy(c);
 }
 
-static void fence_cb(struct msm_fence_cb *cb)
+static void commit_worker(struct work_struct *work)
 {
-	struct msm_commit *c =
-			container_of(cb, struct msm_commit, fence_cb);
-	complete_commit(c);
-}
-
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
-{
-	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
-	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+	complete_commit(container_of(work, struct msm_commit, work), true);
 }
 
 int msm_atomic_check(struct drm_device *dev,
@@ -190,21 +180,20 @@
  * drm_atomic_helper_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * object. This can still fail when e.g. the framebuffer reservation fails.
  *
  * RETURNS
  * Zero for success or -errno.
  */
 int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool async)
+		struct drm_atomic_state *state, bool nonblock)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	int nplanes = dev->mode_config.num_total_plane;
 	int ncrtcs = dev->mode_config.num_crtc;
-	ktime_t timeout;
 	struct msm_commit *c;
 	int i, ret;
 
@@ -238,8 +227,12 @@
 		if (!plane)
 			continue;
 
-		if ((plane->state->fb != new_state->fb) && new_state->fb)
-			add_fb(c, new_state->fb);
+		if ((plane->state->fb != new_state->fb) && new_state->fb) {
+			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+			struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+		}
 	}
 
 	/*
@@ -276,17 +269,12 @@
 	 * current layout.
 	 */
 
-	if (async) {
-		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+	if (nonblock) {
+		queue_work(priv->atomic_wq, &c->work);
 		return 0;
 	}
 
-	timeout = ktime_add_ms(ktime_get(), 1000);
-
-	/* uninterruptible wait */
-	msm_wait_fence(dev, c->fence, &timeout, false);
-
-	complete_commit(c);
+	complete_commit(c, false);
 
 	return 0;
 
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
new file mode 100644
index 0000000..663f2b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gpu *gpu = priv->gpu;
+
+	if (gpu) {
+		seq_printf(m, "%s Status:\n", gpu->name);
+		gpu->funcs->show(gpu, m);
+	}
+
+	return 0;
+}
+
+static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gpu *gpu = priv->gpu;
+
+	if (gpu) {
+		seq_printf(m, "Active Objects (%s):\n", gpu->name);
+		msm_gem_describe_objects(&gpu->active_list, m);
+	}
+
+	seq_printf(m, "Inactive Objects:\n");
+	msm_gem_describe_objects(&priv->inactive_list, m);
+
+	return 0;
+}
+
+static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+}
+
+static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+	if (priv->fbdev) {
+		seq_printf(m, "fbcon ");
+		fbdev_fb = priv->fbdev->fb;
+		msm_framebuffer_describe(fbdev_fb, m);
+	}
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		if (fb == fbdev_fb)
+			continue;
+
+		seq_printf(m, "user ");
+		msm_framebuffer_describe(fb, m);
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+static int show_locked(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int (*show)(struct drm_device *dev, struct seq_file *m) =
+			node->info_ent->data;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ret = show(dev, m);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+		{"gpu", show_locked, 0, msm_gpu_show},
+		{"gem", show_locked, 0, msm_gem_show},
+		{ "mm", show_locked, 0, msm_mm_show },
+		{ "fb", show_locked, 0, msm_fb_show },
+};
+
+static int late_init_minor(struct drm_minor *minor)
+{
+	int ret;
+
+	if (!minor)
+		return 0;
+
+	ret = msm_rd_debugfs_init(minor);
+	if (ret) {
+		dev_err(minor->dev->dev, "could not install rd debugfs\n");
+		return ret;
+	}
+
+	ret = msm_perf_debugfs_init(minor);
+	if (ret) {
+		dev_err(minor->dev->dev, "could not install perf debugfs\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int msm_debugfs_late_init(struct drm_device *dev)
+{
+	int ret;
+	ret = late_init_minor(dev->primary);
+	if (ret)
+		return ret;
+	ret = late_init_minor(dev->render);
+	if (ret)
+		return ret;
+	ret = late_init_minor(dev->control);
+	return ret;
+}
+
+int msm_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	ret = drm_debugfs_create_files(msm_debugfs_list,
+			ARRAY_SIZE(msm_debugfs_list),
+			minor->debugfs_root, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install msm_debugfs_list\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void msm_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(msm_debugfs_list,
+			ARRAY_SIZE(msm_debugfs_list), minor);
+	if (!minor->dev->dev_private)
+		return;
+	msm_rd_debugfs_cleanup(minor);
+	msm_perf_debugfs_cleanup(minor);
+}
+#endif
+
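
msm_debugfs_init() and msm_debugfs_cleanup() are no longer static, so they can be hooked up through the standard drm_driver debugfs callbacks instead of being buried in msm_drv.c. A minimal sketch of that hookup (only the relevant fields are shown; msm_driver_sketch is a placeholder name):

static struct drm_driver msm_driver_sketch = {
	/* ... other driver ops ... */
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
};
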
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
new file mode 100644
index 0000000..6110c97
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DEBUGFS_H__
+#define __MSM_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+#endif /* __MSM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c03b967..9c65409 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -16,6 +16,8 @@
  */
 
 #include "msm_drv.h"
+#include "msm_debugfs.h"
+#include "msm_fence.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
 
@@ -173,13 +175,11 @@
 	return 0;
 }
 
-/*
- * DRM operations:
- */
-
-static int msm_unload(struct drm_device *dev)
+static int msm_drm_uninit(struct device *dev)
 {
-	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
@@ -195,31 +195,37 @@
 		kfree(vbl_ev);
 	}
 
-	drm_kms_helper_poll_fini(dev);
+	drm_kms_helper_poll_fini(ddev);
+
+	drm_connector_unregister_all(ddev);
+
+	drm_dev_unregister(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (fbdev && priv->fbdev)
-		msm_fbdev_free(dev);
+		msm_fbdev_free(ddev);
 #endif
-	drm_mode_config_cleanup(dev);
-	drm_vblank_cleanup(dev);
+	drm_mode_config_cleanup(ddev);
 
-	pm_runtime_get_sync(dev->dev);
-	drm_irq_uninstall(dev);
-	pm_runtime_put_sync(dev->dev);
+	pm_runtime_get_sync(dev);
+	drm_irq_uninstall(ddev);
+	pm_runtime_put_sync(dev);
 
 	flush_workqueue(priv->wq);
 	destroy_workqueue(priv->wq);
 
+	flush_workqueue(priv->atomic_wq);
+	destroy_workqueue(priv->atomic_wq);
+
 	if (kms) {
-		pm_runtime_disable(dev->dev);
+		pm_runtime_disable(dev);
 		kms->funcs->destroy(kms);
 	}
 
 	if (gpu) {
-		mutex_lock(&dev->struct_mutex);
+		mutex_lock(&ddev->struct_mutex);
 		gpu->funcs->pm_suspend(gpu);
-		mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&ddev->struct_mutex);
 		gpu->funcs->destroy(gpu);
 	}
 
@@ -227,13 +233,14 @@
 		DEFINE_DMA_ATTRS(attrs);
 		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 		drm_mm_takedown(&priv->vram.mm);
-		dma_free_attrs(dev->dev, priv->vram.size, NULL,
-				priv->vram.paddr, &attrs);
+		dma_free_attrs(dev, priv->vram.size, NULL,
+			       priv->vram.paddr, &attrs);
 	}
 
-	component_unbind_all(dev->dev, dev);
+	component_unbind_all(dev, ddev);
 
-	dev->dev_private = NULL;
+	ddev->dev_private = NULL;
+	drm_dev_unref(ddev);
 
 	kfree(priv);
 
@@ -321,50 +328,60 @@
 	return ret;
 }
 
-static int msm_load(struct drm_device *dev, unsigned long flags)
+static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 {
-	struct platform_device *pdev = dev->platformdev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev;
 	struct msm_drm_private *priv;
 	struct msm_kms *kms;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		dev_err(dev->dev, "failed to allocate private data\n");
+	ddev = drm_dev_alloc(drv, dev);
+	if (!ddev) {
+		dev_err(dev, "failed to allocate drm_device\n");
 		return -ENOMEM;
 	}
 
-	dev->dev_private = priv;
+	platform_set_drvdata(pdev, ddev);
+	ddev->platformdev = pdev;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		drm_dev_unref(ddev);
+		return -ENOMEM;
+	}
+
+	ddev->dev_private = priv;
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
-	init_waitqueue_head(&priv->fence_event);
+	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
 	init_waitqueue_head(&priv->pending_crtcs_event);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->fence_cbs);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
 	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
 	spin_lock_init(&priv->vblank_ctrl.lock);
 
-	drm_mode_config_init(dev);
-
-	platform_set_drvdata(pdev, dev);
+	drm_mode_config_init(ddev);
 
 	/* Bind all our sub-components: */
-	ret = component_bind_all(dev->dev, dev);
-	if (ret)
+	ret = component_bind_all(dev, ddev);
+	if (ret) {
+		kfree(priv);
+		drm_dev_unref(ddev);
 		return ret;
+	}
 
-	ret = msm_init_vram(dev);
+	ret = msm_init_vram(ddev);
 	if (ret)
 		goto fail;
 
 	switch (get_mdp_ver(pdev)) {
 	case 4:
-		kms = mdp4_kms_init(dev);
+		kms = mdp4_kms_init(ddev);
 		break;
 	case 5:
-		kms = mdp5_kms_init(dev);
+		kms = mdp5_kms_init(ddev);
 		break;
 	default:
 		kms = ERR_PTR(-ENODEV);
@@ -378,7 +395,7 @@
 		 * and (for example) use dmabuf/prime to share buffers with
 		 * imx drm driver on iMX5
 		 */
-		dev_err(dev->dev, "failed to load kms\n");
+		dev_err(dev, "failed to load kms\n");
 		ret = PTR_ERR(kms);
 		goto fail;
 	}
@@ -386,50 +403,64 @@
 	priv->kms = kms;
 
 	if (kms) {
-		pm_runtime_enable(dev->dev);
+		pm_runtime_enable(dev);
 		ret = kms->funcs->hw_init(kms);
 		if (ret) {
-			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+			dev_err(dev, "kms hw init failed: %d\n", ret);
 			goto fail;
 		}
 	}
 
-	dev->mode_config.funcs = &mode_config_funcs;
+	ddev->mode_config.funcs = &mode_config_funcs;
 
-	ret = drm_vblank_init(dev, priv->num_crtcs);
+	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
-		dev_err(dev->dev, "failed to initialize vblank\n");
+		dev_err(dev, "failed to initialize vblank\n");
 		goto fail;
 	}
 
-	pm_runtime_get_sync(dev->dev);
-	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
-	pm_runtime_put_sync(dev->dev);
+	pm_runtime_get_sync(dev);
+	ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
+	pm_runtime_put_sync(dev);
 	if (ret < 0) {
-		dev_err(dev->dev, "failed to install IRQ handler\n");
+		dev_err(dev, "failed to install IRQ handler\n");
 		goto fail;
 	}
 
-	drm_mode_config_reset(dev);
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (fbdev)
-		priv->fbdev = msm_fbdev_init(dev);
-#endif
-
-	ret = msm_debugfs_late_init(dev);
+	ret = drm_dev_register(ddev, 0);
 	if (ret)
 		goto fail;
 
-	drm_kms_helper_poll_init(dev);
+	ret = drm_connector_register_all(ddev);
+	if (ret) {
+		dev_err(dev, "failed to register connectors\n");
+		goto fail;
+	}
+
+	drm_mode_config_reset(ddev);
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	if (fbdev)
+		priv->fbdev = msm_fbdev_init(ddev);
+#endif
+
+	ret = msm_debugfs_late_init(ddev);
+	if (ret)
+		goto fail;
+
+	drm_kms_helper_poll_init(ddev);
 
 	return 0;
 
 fail:
-	msm_unload(dev);
+	msm_drm_uninit(dev);
 	return ret;
 }
 
+/*
+ * DRM operations:
+ */
+
 static void load_gpu(struct drm_device *dev)
 {
 	static DEFINE_MUTEX(init_lock);
@@ -465,7 +496,6 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
-	struct msm_kms *kms = priv->kms;
 
 	mutex_lock(&dev->struct_mutex);
 	if (ctx == priv->lastctx)
@@ -536,265 +566,6 @@
 }
 
 /*
- * DRM debugfs:
- */
-
-#ifdef CONFIG_DEBUG_FS
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu = priv->gpu;
-
-	if (gpu) {
-		seq_printf(m, "%s Status:\n", gpu->name);
-		gpu->funcs->show(gpu, m);
-	}
-
-	return 0;
-}
-
-static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu = priv->gpu;
-
-	if (gpu) {
-		seq_printf(m, "Active Objects (%s):\n", gpu->name);
-		msm_gem_describe_objects(&gpu->active_list, m);
-	}
-
-	seq_printf(m, "Inactive Objects:\n");
-	msm_gem_describe_objects(&priv->inactive_list, m);
-
-	return 0;
-}
-
-static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
-{
-	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
-}
-
-static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_framebuffer *fb, *fbdev_fb = NULL;
-
-	if (priv->fbdev) {
-		seq_printf(m, "fbcon ");
-		fbdev_fb = priv->fbdev->fb;
-		msm_framebuffer_describe(fbdev_fb, m);
-	}
-
-	mutex_lock(&dev->mode_config.fb_lock);
-	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
-		if (fb == fbdev_fb)
-			continue;
-
-		seq_printf(m, "user ");
-		msm_framebuffer_describe(fb, m);
-	}
-	mutex_unlock(&dev->mode_config.fb_lock);
-
-	return 0;
-}
-
-static int show_locked(struct seq_file *m, void *arg)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	int (*show)(struct drm_device *dev, struct seq_file *m) =
-			node->info_ent->data;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	ret = show(dev, m);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-static struct drm_info_list msm_debugfs_list[] = {
-		{"gpu", show_locked, 0, msm_gpu_show},
-		{"gem", show_locked, 0, msm_gem_show},
-		{ "mm", show_locked, 0, msm_mm_show },
-		{ "fb", show_locked, 0, msm_fb_show },
-};
-
-static int late_init_minor(struct drm_minor *minor)
-{
-	int ret;
-
-	if (!minor)
-		return 0;
-
-	ret = msm_rd_debugfs_init(minor);
-	if (ret) {
-		dev_err(minor->dev->dev, "could not install rd debugfs\n");
-		return ret;
-	}
-
-	ret = msm_perf_debugfs_init(minor);
-	if (ret) {
-		dev_err(minor->dev->dev, "could not install perf debugfs\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-int msm_debugfs_late_init(struct drm_device *dev)
-{
-	int ret;
-	ret = late_init_minor(dev->primary);
-	if (ret)
-		return ret;
-	ret = late_init_minor(dev->render);
-	if (ret)
-		return ret;
-	ret = late_init_minor(dev->control);
-	return ret;
-}
-
-static int msm_debugfs_init(struct drm_minor *minor)
-{
-	struct drm_device *dev = minor->dev;
-	int ret;
-
-	ret = drm_debugfs_create_files(msm_debugfs_list,
-			ARRAY_SIZE(msm_debugfs_list),
-			minor->debugfs_root, minor);
-
-	if (ret) {
-		dev_err(dev->dev, "could not install msm_debugfs_list\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static void msm_debugfs_cleanup(struct drm_minor *minor)
-{
-	drm_debugfs_remove_files(msm_debugfs_list,
-			ARRAY_SIZE(msm_debugfs_list), minor);
-	if (!minor->dev->dev_private)
-		return;
-	msm_rd_debugfs_cleanup(minor);
-	msm_perf_debugfs_cleanup(minor);
-}
-#endif
-
-/*
- * Fences:
- */
-
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
-		ktime_t *timeout , bool interruptible)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	int ret;
-
-	if (!priv->gpu)
-		return 0;
-
-	if (fence > priv->gpu->submitted_fence) {
-		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-				fence, priv->gpu->submitted_fence);
-		return -EINVAL;
-	}
-
-	if (!timeout) {
-		/* no-wait: */
-		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
-	} else {
-		ktime_t now = ktime_get();
-		unsigned long remaining_jiffies;
-
-		if (ktime_compare(*timeout, now) < 0) {
-			remaining_jiffies = 0;
-		} else {
-			ktime_t rem = ktime_sub(*timeout, now);
-			struct timespec ts = ktime_to_timespec(rem);
-			remaining_jiffies = timespec_to_jiffies(&ts);
-		}
-
-		if (interruptible)
-			ret = wait_event_interruptible_timeout(priv->fence_event,
-				fence_completed(dev, fence),
-				remaining_jiffies);
-		else
-			ret = wait_event_timeout(priv->fence_event,
-				fence_completed(dev, fence),
-				remaining_jiffies);
-
-		if (ret == 0) {
-			DBG("timeout waiting for fence: %u (completed: %u)",
-					fence, priv->completed_fence);
-			ret = -ETIMEDOUT;
-		} else if (ret != -ERESTARTSYS) {
-			ret = 0;
-		}
-	}
-
-	return ret;
-}
-
-int msm_queue_fence_cb(struct drm_device *dev,
-		struct msm_fence_cb *cb, uint32_t fence)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&cb->work.entry)) {
-		ret = -EINVAL;
-	} else if (fence > priv->completed_fence) {
-		cb->fence = fence;
-		list_add_tail(&cb->work.entry, &priv->fence_cbs);
-	} else {
-		queue_work(priv->wq, &cb->work);
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-/* called from workqueue */
-void msm_update_fence(struct drm_device *dev, uint32_t fence)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-
-	mutex_lock(&dev->struct_mutex);
-	priv->completed_fence = max(fence, priv->completed_fence);
-
-	while (!list_empty(&priv->fence_cbs)) {
-		struct msm_fence_cb *cb;
-
-		cb = list_first_entry(&priv->fence_cbs,
-				struct msm_fence_cb, work.entry);
-
-		if (cb->fence > priv->completed_fence)
-			break;
-
-		list_del_init(&cb->work.entry);
-		queue_work(priv->wq, &cb->work);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	wake_up_all(&priv->fence_event);
-}
-
-void __msm_fence_worker(struct work_struct *work)
-{
-	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
-	cb->func(cb);
-}
-
-/*
  * DRM ioctls:
  */
 
@@ -851,7 +622,7 @@
 		return -EINVAL;
 	}
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -869,7 +640,7 @@
 	struct drm_gem_object *obj;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -890,7 +661,7 @@
 	if (args->pad)
 		return -EINVAL;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = drm_gem_object_lookup(file, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -904,6 +675,7 @@
 static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_msm_wait_fence *args = data;
 	ktime_t timeout = to_ktime(args->timeout);
 
@@ -912,7 +684,10 @@
 		return -EINVAL;
 	}
 
-	return msm_wait_fence(dev, args->fence, &timeout, true);
+	if (!priv->gpu)
+		return 0;
+
+	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
@@ -952,8 +727,6 @@
 				DRIVER_RENDER |
 				DRIVER_ATOMIC |
 				DRIVER_MODESET,
-	.load               = msm_load,
-	.unload             = msm_unload,
 	.open               = msm_open,
 	.preclose           = msm_preclose,
 	.lastclose          = msm_lastclose,
@@ -1053,12 +826,12 @@
 
 static int msm_drm_bind(struct device *dev)
 {
-	return drm_platform_init(&msm_driver, to_platform_device(dev));
+	return msm_drm_init(dev, &msm_driver);
 }
 
 static void msm_drm_unbind(struct device *dev)
 {
-	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+	msm_drm_uninit(dev);
 }
 
 static const struct component_master_ops msm_drm_ops = {
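
msm_drm_init()/msm_drm_uninit() replace the old .load/.unload midlayer path that drm_platform_init() and drm_put_dev() used to drive: the driver now allocates the drm_device itself from the component-master bind callback and registers it only once the hardware side is up. The bare shape of that pattern, with example_driver standing in for a real struct drm_driver:

static int example_bind(struct device *dev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&example_driver, dev);	/* returns NULL on failure in this kernel */
	if (!ddev)
		return -ENOMEM;

	/* set up driver-private state, KMS, vblank, IRQs ... */

	ret = drm_dev_register(ddev, 0);	/* device becomes visible to userspace */
	if (ret)
		drm_dev_unref(ddev);
	return ret;
}
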
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 870dbe5..5b2963f 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -49,6 +49,8 @@
 struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
+struct msm_fence_context;
+struct msm_fence_cb;
 
 #define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
 
@@ -100,9 +102,6 @@
 
 	struct drm_fb_helper *fbdev;
 
-	uint32_t next_fence, completed_fence;
-	wait_queue_head_t fence_event;
-
 	struct msm_rd_state *rd;
 	struct msm_perf_state *perf;
 
@@ -110,9 +109,7 @@
 	struct list_head inactive_list;
 
 	struct workqueue_struct *wq;
-
-	/* callbacks deferred until bo is inactive: */
-	struct list_head fence_cbs;
+	struct workqueue_struct *atomic_wq;
 
 	/* crtcs pending async atomic updates: */
 	uint32_t pending_crtcs;
@@ -157,33 +154,14 @@
 	uint32_t pixel_format;
 };
 
-/* callback from wq once fence has passed: */
-struct msm_fence_cb {
-	struct work_struct work;
-	uint32_t fence;
-	void (*func)(struct msm_fence_cb *cb);
-};
-
-void __msm_fence_worker(struct work_struct *work);
-
-#define INIT_FENCE_CB(_cb, _func)  do {                     \
-		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
-		(_cb)->func = _func;                         \
-	} while (0)
-
 int msm_atomic_check(struct drm_device *dev,
 		     struct drm_atomic_state *state);
 int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool async);
+		struct drm_atomic_state *state, bool nonblock);
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
-		ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct drm_device *dev,
-		struct msm_fence_cb *cb, uint32_t fence);
-void msm_update_fence(struct drm_device *dev, uint32_t fence);
-
+void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 
@@ -213,13 +191,12 @@
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
-		struct msm_fence_cb *cb);
+int msm_gem_sync_object(struct drm_gem_object *obj,
+		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool write, uint32_t fence);
+		struct msm_gpu *gpu, bool exclusive, struct fence *fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
-		ktime_t *timeout);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -227,7 +204,7 @@
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
-		uint32_t size, struct sg_table *sgt);
+		struct dma_buf *dmabuf, struct sg_table *sgt);
 
 int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
@@ -303,12 +280,6 @@
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
-static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	return priv->completed_fence >= fence;
-}
-
 static inline int align_pitch(int width, int bpp)
 {
 	int bytespp = (bpp + 7) / 8;
@@ -327,5 +298,20 @@
 /* for conditionally setting boolean flag(s): */
 #define COND(bool, val) ((bool) ? (val) : 0)
 
+static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+{
+	ktime_t now = ktime_get();
+	unsigned long remaining_jiffies;
+
+	if (ktime_compare(*timeout, now) < 0) {
+		remaining_jiffies = 0;
+	} else {
+		ktime_t rem = ktime_sub(*timeout, now);
+		struct timespec ts = ktime_to_timespec(rem);
+		remaining_jiffies = timespec_to_jiffies(&ts);
+	}
+
+	return remaining_jiffies;
+}
 
 #endif /* __MSM_DRV_H__ */
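
timeout_to_jiffies() factors out the absolute-deadline-to-relative-jiffies conversion that previously lived inside msm_wait_fence(): userspace passes an absolute ktime deadline (see to_ktime() in msm_ioctl_wait_fence()), while wait_event_*_timeout() and reservation_object_wait_timeout_rcu() want a remaining-jiffies count, clamped to zero once the deadline has passed. A small usage sketch; wq, done and args are stand-ins for a real waitqueue, wake-up condition and ioctl args:

	ktime_t deadline = to_ktime(args->timeout);	/* absolute time from userspace */
	long ret;

	ret = wait_event_interruptible_timeout(wq, done,
			timeout_to_jiffies(&deadline));
	if (ret == 0)
		ret = -ETIMEDOUT;	/* deadline expired before the condition fired */
	else if (ret > 0)
		ret = 0;		/* condition fired in time; negative means interrupted */
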
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index a474d6c..461dc8b 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -77,7 +77,7 @@
 
 	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
 			fb->width, fb->height, (char *)&fb->pixel_format,
-			fb->refcount.refcount.counter, fb->base.id);
+			drm_framebuffer_read_refcount(fb), fb->base.id);
 
 	for (i = 0; i < n; i++) {
 		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
@@ -145,8 +145,7 @@
 	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
 
 	for (i = 0; i < n; i++) {
-		bos[i] = drm_gem_object_lookup(dev, file,
-				mode_cmd->handles[i]);
+		bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
 		if (!bos[i]) {
 			ret = -ENXIO;
 			goto out_unref;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf..c6cf837 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -159,6 +159,10 @@
 	dev->mode_config.fb_base = paddr;
 
 	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+	if (IS_ERR(fbi->screen_base)) {
+		ret = PTR_ERR(fbi->screen_base);
+		goto fail_unlock;
+	}
 	fbi->screen_size = fbdev->bo->size;
 	fbi->fix.smem_start = paddr;
 	fbi->fix.smem_len = fbdev->bo->size;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
new file mode 100644
index 0000000..a9b9b1c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fence.h>
+
+#include "msm_drv.h"
+#include "msm_fence.h"
+
+
+struct msm_fence_context *
+msm_fence_context_alloc(struct drm_device *dev, const char *name)
+{
+	struct msm_fence_context *fctx;
+
+	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return ERR_PTR(-ENOMEM);
+
+	fctx->dev = dev;
+	fctx->name = name;
+	fctx->context = fence_context_alloc(1);
+	init_waitqueue_head(&fctx->event);
+	spin_lock_init(&fctx->spinlock);
+
+	return fctx;
+}
+
+void msm_fence_context_free(struct msm_fence_context *fctx)
+{
+	kfree(fctx);
+}
+
+static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
+{
+	return (int32_t)(fctx->completed_fence - fence) >= 0;
+}
+
+/* legacy path for WAIT_FENCE ioctl: */
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+		ktime_t *timeout, bool interruptible)
+{
+	int ret;
+
+	if (fence > fctx->last_fence) {
+		DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+				fctx->name, fence, fctx->last_fence);
+		return -EINVAL;
+	}
+
+	if (!timeout) {
+		/* no-wait: */
+		ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
+	} else {
+		unsigned long remaining_jiffies = timeout_to_jiffies(timeout);
+
+		if (interruptible)
+			ret = wait_event_interruptible_timeout(fctx->event,
+				fence_completed(fctx, fence),
+				remaining_jiffies);
+		else
+			ret = wait_event_timeout(fctx->event,
+				fence_completed(fctx, fence),
+				remaining_jiffies);
+
+		if (ret == 0) {
+			DBG("timeout waiting for fence: %u (completed: %u)",
+					fence, fctx->completed_fence);
+			ret = -ETIMEDOUT;
+		} else if (ret != -ERESTARTSYS) {
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/* called from workqueue */
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
+{
+	spin_lock(&fctx->spinlock);
+	fctx->completed_fence = max(fence, fctx->completed_fence);
+	spin_unlock(&fctx->spinlock);
+
+	wake_up_all(&fctx->event);
+}
+
+struct msm_fence {
+	struct msm_fence_context *fctx;
+	struct fence base;
+};
+
+static inline struct msm_fence *to_msm_fence(struct fence *fence)
+{
+	return container_of(fence, struct msm_fence, base);
+}
+
+static const char *msm_fence_get_driver_name(struct fence *fence)
+{
+	return "msm";
+}
+
+static const char *msm_fence_get_timeline_name(struct fence *fence)
+{
+	struct msm_fence *f = to_msm_fence(fence);
+	return f->fctx->name;
+}
+
+static bool msm_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static bool msm_fence_signaled(struct fence *fence)
+{
+	struct msm_fence *f = to_msm_fence(fence);
+	return fence_completed(f->fctx, f->base.seqno);
+}
+
+static void msm_fence_release(struct fence *fence)
+{
+	struct msm_fence *f = to_msm_fence(fence);
+	kfree_rcu(f, base.rcu);
+}
+
+static const struct fence_ops msm_fence_ops = {
+	.get_driver_name = msm_fence_get_driver_name,
+	.get_timeline_name = msm_fence_get_timeline_name,
+	.enable_signaling = msm_fence_enable_signaling,
+	.signaled = msm_fence_signaled,
+	.wait = fence_default_wait,
+	.release = msm_fence_release,
+};
+
+struct fence *
+msm_fence_alloc(struct msm_fence_context *fctx)
+{
+	struct msm_fence *f;
+
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return ERR_PTR(-ENOMEM);
+
+	f->fctx = fctx;
+
+	fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+			fctx->context, ++fctx->last_fence);
+
+	return &f->base;
+}
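
fence_completed() compares seqnos through a signed 32-bit difference instead of a plain completed >= fence, so the check stays correct after the per-context counter wraps around. The same idiom in isolation (the constants in the comment are only an illustration):

#include <stdbool.h>
#include <stdint.h>

/* true if 'fence' is at or before 'completed' on the timeline,
 * even when the 32-bit counter has wrapped in between */
static inline bool seqno_completed(uint32_t completed, uint32_t fence)
{
	return (int32_t)(completed - fence) >= 0;
}

/* e.g. completed = 0x00000002, fence = 0xfffffffe: the unsigned
 * difference wraps to 4, a small positive value, so the fence is
 * still reported as completed; a plain >= comparison would say no. */
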
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
new file mode 100644
index 0000000..ceb5b3d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_FENCE_H__
+#define __MSM_FENCE_H__
+
+#include "msm_drv.h"
+
+struct msm_fence_context {
+	struct drm_device *dev;
+	const char *name;
+	unsigned context;
+	/* last_fence == completed_fence --> no pending work */
+	uint32_t last_fence;          /* last assigned fence */
+	uint32_t completed_fence;     /* last completed fence */
+	wait_queue_head_t event;
+	spinlock_t spinlock;
+};
+
+struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
+		const char *name);
+void msm_fence_context_free(struct msm_fence_context *fctx);
+
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+		ktime_t *timeout, bool interruptible);
+int msm_queue_fence_cb(struct msm_fence_context *fctx,
+		struct msm_fence_cb *cb, uint32_t fence);
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
+
+struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
+
+#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3cedb8d..69836f5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -21,6 +21,7 @@
 #include <linux/pfn_t.h>
 
 #include "msm_drv.h"
+#include "msm_fence.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_mmu.h"
@@ -373,7 +374,7 @@
 	int ret = 0;
 
 	/* GEM does all our handle to object mapping */
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto fail;
@@ -397,6 +398,8 @@
 			return ERR_CAST(pages);
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+		if (msm_obj->vaddr == NULL)
+			return ERR_PTR(-ENOMEM);
 	}
 	return msm_obj->vaddr;
 }
@@ -410,27 +413,62 @@
 	return ret;
 }
 
-/* setup callback for when bo is no longer busy..
- * TODO probably want to differentiate read vs write..
- */
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
-		struct msm_fence_cb *cb)
+/* must be called before _move_to_active().. */
+int msm_gem_sync_object(struct drm_gem_object *obj,
+		struct msm_fence_context *fctx, bool exclusive)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	uint32_t fence = msm_gem_fence(msm_obj,
-			MSM_PREP_READ | MSM_PREP_WRITE);
-	return msm_queue_fence_cb(obj->dev, cb, fence);
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	int i, ret;
+
+	if (!exclusive) {
+		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
+		 * which makes this a slightly strange place to call it.  OTOH this
+		 * is a convenient can-fail point to hook it in.  (And similar to
+		 * how etnaviv and nouveau handle this.)
+		 */
+		ret = reservation_object_reserve_shared(msm_obj->resv);
+		if (ret)
+			return ret;
+	}
+
+	fobj = reservation_object_get_list(msm_obj->resv);
+	if (!fobj || (fobj->shared_count == 0)) {
+		fence = reservation_object_get_excl(msm_obj->resv);
+		/* don't need to wait on our own fences, since ring is fifo */
+		if (fence && (fence->context != fctx->context)) {
+			ret = fence_wait(fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (!exclusive || !fobj)
+		return 0;
+
+	for (i = 0; i < fobj->shared_count; i++) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(msm_obj->resv));
+		if (fence->context != fctx->context) {
+			ret = fence_wait(fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool write, uint32_t fence)
+		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	msm_obj->gpu = gpu;
-	if (write)
-		msm_obj->write_fence = fence;
+	if (exclusive)
+		reservation_object_add_excl_fence(msm_obj->resv, fence);
 	else
-		msm_obj->read_fence = fence;
+		reservation_object_add_shared_fence(msm_obj->resv, fence);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -444,30 +482,30 @@
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	msm_obj->gpu = NULL;
-	msm_obj->read_fence = 0;
-	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
-	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
+	bool write = !!(op & MSM_PREP_WRITE);
 
-	if (is_active(msm_obj)) {
-		uint32_t fence = msm_gem_fence(msm_obj, op);
+	if (op & MSM_PREP_NOSYNC) {
+		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
+			return -EBUSY;
+	} else {
+		int ret;
 
-		if (op & MSM_PREP_NOSYNC)
-			timeout = NULL;
-
-		ret = msm_wait_fence(dev, fence, timeout, true);
+		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+				true, timeout_to_jiffies(timeout));
+		if (ret <= 0)
+			return ret == 0 ? -ETIMEDOUT : ret;
 	}
 
 	/* TODO cache maintenance */
 
-	return ret;
+	return 0;
 }
 
 int msm_gem_cpu_fini(struct drm_gem_object *obj)
@@ -477,18 +515,46 @@
 }
 
 #ifdef CONFIG_DEBUG_FS
+static void describe_fence(struct fence *fence, const char *type,
+		struct seq_file *m)
+{
+	if (!fence_is_signaled(fence))
+		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+				fence->ops->get_driver_name(fence),
+				fence->ops->get_timeline_name(fence),
+				fence->seqno);
+}
+
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
-	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct reservation_object *robj = msm_obj->resv;
+	struct reservation_object_list *fobj;
+	struct fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-			msm_obj->read_fence, msm_obj->write_fence,
 			obj->name, obj->refcount.refcount.counter,
 			off, msm_obj->vaddr, obj->size);
+
+	rcu_read_lock();
+	fobj = rcu_dereference(robj->fence);
+	if (fobj) {
+		unsigned int i, shared_count = fobj->shared_count;
+
+		for (i = 0; i < shared_count; i++) {
+			fence = rcu_dereference(fobj->shared[i]);
+			describe_fence(fence, "Shared", m);
+		}
+	}
+
+	fence = rcu_dereference(robj->fence_excl);
+	if (fence)
+		describe_fence(fence, "Exclusive", m);
+	rcu_read_unlock();
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -583,6 +649,7 @@
 
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
+		struct reservation_object *resv,
 		struct drm_gem_object **obj)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -622,8 +689,12 @@
 
 	msm_obj->flags = flags;
 
-	msm_obj->resv = &msm_obj->_resv;
-	reservation_object_init(msm_obj->resv);
+	if (resv) {
+		msm_obj->resv = resv;
+	} else {
+		msm_obj->resv = &msm_obj->_resv;
+		reservation_object_init(msm_obj->resv);
+	}
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
@@ -643,7 +714,7 @@
 
 	size = PAGE_ALIGN(size);
 
-	ret = msm_gem_new_impl(dev, size, flags, &obj);
+	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
 	if (ret)
 		goto fail;
 
@@ -665,10 +736,11 @@
 }
 
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
-		uint32_t size, struct sg_table *sgt)
+		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
 	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj;
+	uint32_t size;
 	int ret, npages;
 
 	/* if we don't have IOMMU, don't bother pretending we can import: */
@@ -677,9 +749,9 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	size = PAGE_ALIGN(size);
+	size = PAGE_ALIGN(dmabuf->size);
 
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 6fc59bf..9facd4b 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -39,7 +39,6 @@
 	 */
 	struct list_head mm_list;
 	struct msm_gpu *gpu;     /* non-null if active */
-	uint32_t read_fence, write_fence;
 
 	/* Transiently in the process of submit ioctl, objects associated
 	 * with the submit are on submit->bo_list.. this only lasts for
@@ -73,19 +72,6 @@
 	return msm_obj->gpu != NULL;
 }
 
-static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
-		uint32_t op)
-{
-	uint32_t fence = 0;
-
-	if (op & MSM_PREP_READ)
-		fence = msm_obj->write_fence;
-	if (op & MSM_PREP_WRITE)
-		fence = max(fence, msm_obj->read_fence);
-
-	return fence;
-}
-
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -99,8 +85,9 @@
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
-	uint32_t fence;
-	bool valid;
+	struct fence *fence;
+	struct pid *pid;    /* submitting process */
+	bool valid;         /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
 	struct {
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 121975b..6b90890 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -55,7 +55,7 @@
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
-	return msm_gem_import(dev, attach->dmabuf->size, sg);
+	return msm_gem_import(dev, attach->dmabuf, sg);
 }
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 23d2528..eb4bb8b 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -24,7 +24,7 @@
  */
 
 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID    0x8000
+#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
 #define BO_LOCKED   0x4000
 #define BO_PINNED   0x2000
 
@@ -35,21 +35,33 @@
 	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
 
 	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	if (submit) {
-		submit->dev = dev;
-		submit->gpu = gpu;
+	if (!submit)
+		return NULL;
 
-		/* initially, until copy_from_user() and bo lookup succeeds: */
-		submit->nr_bos = 0;
-		submit->nr_cmds = 0;
+	submit->dev = dev;
+	submit->gpu = gpu;
+	submit->fence = NULL;
+	submit->pid = get_pid(task_pid(current));
 
-		INIT_LIST_HEAD(&submit->bo_list);
-		ww_acquire_init(&submit->ticket, &reservation_ww_class);
-	}
+	/* initially, until copy_from_user() and bo lookup succeeds: */
+	submit->nr_bos = 0;
+	submit->nr_cmds = 0;
+
+	INIT_LIST_HEAD(&submit->node);
+	INIT_LIST_HEAD(&submit->bo_list);
+	ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
 	return submit;
 }
 
+void msm_gem_submit_free(struct msm_gem_submit *submit)
+{
+	fence_put(submit->fence);
+	list_del(&submit->node);
+	put_pid(submit->pid);
+	kfree(submit);
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -65,6 +77,11 @@
 		void __user *userptr =
 			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
+		/* make sure we don't have garbage flags, in case we hit
+		 * error path before flags is initialized:
+		 */
+		submit->bos[i].flags = 0;
+
 		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
 		if (ret) {
 			ret = -EFAULT;
@@ -131,16 +148,13 @@
 }
 
 /* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_lock_objects(struct msm_gem_submit *submit)
 {
 	int contended, slow_locked = -1, i, ret = 0;
 
 retry:
-	submit->valid = true;
-
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		uint32_t iova;
 
 		if (slow_locked == i)
 			slow_locked = -1;
@@ -154,30 +168,6 @@
 				goto fail;
 			submit->bos[i].flags |= BO_LOCKED;
 		}
-
-
-		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
-
-		/* this would break the logic in the fail path.. there is no
-		 * reason for this to happen, but just to be on the safe side
-		 * let's notice if this starts happening in the future:
-		 */
-		WARN_ON(ret == -EDEADLK);
-
-		if (ret)
-			goto fail;
-
-		submit->bos[i].flags |= BO_PINNED;
-
-		if (iova == submit->bos[i].iova) {
-			submit->bos[i].flags |= BO_VALID;
-		} else {
-			submit->bos[i].iova = iova;
-			submit->bos[i].flags &= ~BO_VALID;
-			submit->valid = false;
-		}
 	}
 
 	ww_acquire_done(&submit->ticket);
@@ -206,6 +196,54 @@
 	return ret;
 }
 
+static int submit_fence_sync(struct msm_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+
+		ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int submit_pin_objects(struct msm_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	submit->valid = true;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		uint32_t iova;
+
+		/* if locking succeeded, pin bo: */
+		ret = msm_gem_get_iova_locked(&msm_obj->base,
+				submit->gpu->id, &iova);
+
+		if (ret)
+			break;
+
+		submit->bos[i].flags |= BO_PINNED;
+
+		if (iova == submit->bos[i].iova) {
+			submit->bos[i].flags |= BO_VALID;
+		} else {
+			submit->bos[i].iova = iova;
+			/* iova changed, so address in cmdstream is not valid: */
+			submit->bos[i].flags &= ~BO_VALID;
+			submit->valid = false;
+		}
+	}
+
+	return ret;
+}
+
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
 		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
 {
@@ -297,7 +335,7 @@
 	return 0;
 }
 
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gem_submit *submit)
 {
 	unsigned i;
 
@@ -344,7 +382,15 @@
 	if (ret)
 		goto out;
 
-	ret = submit_validate_objects(submit);
+	ret = submit_lock_objects(submit);
+	if (ret)
+		goto out;
+
+	ret = submit_fence_sync(submit);
+	if (ret)
+		goto out;
+
+	ret = submit_pin_objects(submit);
 	if (ret)
 		goto out;
 
@@ -410,10 +456,12 @@
 
 	ret = msm_gpu_submit(gpu, submit, ctx);
 
-	args->fence = submit->fence;
+	args->fence = submit->fence->seqno;
 
 out:
-	submit_cleanup(submit, !!ret);
+	submit_cleanup(submit);
+	if (ret)
+		msm_gem_submit_free(submit);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
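
submit_fence_sync() applies, per buffer, roughly the usual reservation-object rule via msm_gem_sync_object(): a buffer the submit writes must wait for all existing fences, a buffer it only reads waits only for the exclusive (last write) fence, and fences from the GPU's own context are skipped because the ring already orders them. The same reader/writer rule expressed with the generic blocking helper, as a sketch only (the driver open-codes the loop precisely so it can skip its own context); submit_bo_flags is an assumed per-bo flags value:

	bool write = submit_bo_flags & MSM_SUBMIT_BO_WRITE;
	long ret;

	/* writers wait for exclusive + shared fences, readers only for the exclusive one */
	ret = reservation_object_wait_timeout_rcu(msm_obj->resv,
			write /* wait_all */, true /* interruptible */,
			MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;
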
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6b02ada..36ed53e 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -18,6 +18,7 @@
 #include "msm_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
+#include "msm_fence.h"
 
 
 /*
@@ -265,22 +266,38 @@
  * Hangcheck detection for locked gpu:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
+static void retire_submits(struct msm_gpu *gpu);
 
 static void recover_worker(struct work_struct *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 	struct drm_device *dev = gpu->dev;
+	struct msm_gem_submit *submit;
+	uint32_t fence = gpu->funcs->last_fence(gpu);
 
-	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+	msm_update_fence(gpu->fctx, fence + 1);
 
 	mutex_lock(&dev->struct_mutex);
-	if (msm_gpu_active(gpu)) {
-		struct msm_gem_submit *submit;
-		uint32_t fence = gpu->funcs->last_fence(gpu);
 
+	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+	list_for_each_entry(submit, &gpu->submit_list, node) {
+		if (submit->fence->seqno == (fence + 1)) {
+			struct task_struct *task;
+
+			rcu_read_lock();
+			task = pid_task(submit->pid, PIDTYPE_PID);
+			if (task) {
+				dev_err(dev->dev, "%s: offending task: %s\n",
+						gpu->name, task->comm);
+			}
+			rcu_read_unlock();
+			break;
+		}
+	}
+
+	if (msm_gpu_active(gpu)) {
 		/* retire completed submits, plus the one that hung: */
-		retire_submits(gpu, fence + 1);
+		retire_submits(gpu);
 
 		inactive_cancel(gpu);
 		gpu->funcs->recover(gpu);
@@ -290,6 +307,7 @@
 			gpu->funcs->submit(gpu, submit, NULL);
 		}
 	}
+
 	mutex_unlock(&dev->struct_mutex);
 
 	msm_gpu_retire(gpu);
@@ -312,7 +330,7 @@
 	if (fence != gpu->hangcheck_fence) {
 		/* some progress has been made.. ya! */
 		gpu->hangcheck_fence = fence;
-	} else if (fence < gpu->submitted_fence) {
+	} else if (fence < gpu->fctx->last_fence) {
 		/* no progress and not done.. hung! */
 		gpu->hangcheck_fence = fence;
 		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
@@ -320,12 +338,12 @@
 		dev_err(dev->dev, "%s:     completed fence: %u\n",
 				gpu->name, fence);
 		dev_err(dev->dev, "%s:     submitted fence: %u\n",
-				gpu->name, gpu->submitted_fence);
+				gpu->name, gpu->fctx->last_fence);
 		queue_work(priv->wq, &gpu->recover_work);
 	}
 
 	/* if still more pending work, reset the hangcheck timer: */
-	if (gpu->submitted_fence > gpu->hangcheck_fence)
+	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
 		hangcheck_timer_reset(gpu);
 
 	/* workaround for missing irq: */
@@ -431,7 +449,22 @@
  * Cmdstream submission/retirement:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
+static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+	int i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		/* move to inactive: */
+		msm_gem_move_to_inactive(&msm_obj->base);
+		msm_gem_put_iova(&msm_obj->base, gpu->id);
+		drm_gem_object_unreference(&msm_obj->base);
+	}
+
+	msm_gem_submit_free(submit);
+}
+
+static void retire_submits(struct msm_gpu *gpu)
 {
 	struct drm_device *dev = gpu->dev;
 
@@ -443,9 +476,8 @@
 		submit = list_first_entry(&gpu->submit_list,
 				struct msm_gem_submit, node);
 
-		if (submit->fence <= fence) {
-			list_del(&submit->node);
-			kfree(submit);
+		if (fence_is_signaled(submit->fence)) {
+			retire_submit(gpu, submit);
 		} else {
 			break;
 		}
@@ -458,29 +490,10 @@
 	struct drm_device *dev = gpu->dev;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
-	msm_update_fence(gpu->dev, fence);
+	msm_update_fence(gpu->fctx, fence);
 
 	mutex_lock(&dev->struct_mutex);
-
-	retire_submits(gpu, fence);
-
-	while (!list_empty(&gpu->active_list)) {
-		struct msm_gem_object *obj;
-
-		obj = list_first_entry(&gpu->active_list,
-				struct msm_gem_object, mm_list);
-
-		if ((obj->read_fence <= fence) &&
-				(obj->write_fence <= fence)) {
-			/* move to inactive: */
-			msm_gem_move_to_inactive(&obj->base);
-			msm_gem_put_iova(&obj->base, gpu->id);
-			drm_gem_object_unreference(&obj->base);
-		} else {
-			break;
-		}
-	}
-
+	retire_submits(gpu);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!msm_gpu_active(gpu))
@@ -505,9 +518,12 @@
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	submit->fence = ++priv->next_fence;
-
-	gpu->submitted_fence = submit->fence;
+	submit->fence = msm_fence_alloc(gpu->fctx);
+	if (IS_ERR(submit->fence)) {
+		ret = PTR_ERR(submit->fence);
+		submit->fence = NULL;
+		return ret;
+	}
 
 	inactive_cancel(gpu);
 
@@ -515,40 +531,34 @@
 
 	msm_rd_dump_submit(submit);
 
-	gpu->submitted_fence = submit->fence;
-
 	update_sw_cntrs(gpu);
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		uint32_t iova;
 
 		/* can't happen yet.. but when we add 2d support we'll have
 		 * to deal w/ cross-ring synchronization:
 		 */
 		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
 
-		if (!is_active(msm_obj)) {
-			uint32_t iova;
-
-			/* ring takes a reference to the bo and iova: */
-			drm_gem_object_reference(&msm_obj->base);
-			msm_gem_get_iova_locked(&msm_obj->base,
-					submit->gpu->id, &iova);
-		}
-
-		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+		/* submit takes a reference to the bo and iova until retired: */
+		drm_gem_object_reference(&msm_obj->base);
+		msm_gem_get_iova_locked(&msm_obj->base,
+				submit->gpu->id, &iova);
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
 	}
 
-	ret = gpu->funcs->submit(gpu, submit, ctx);
+	gpu->funcs->submit(gpu, submit, ctx);
 	priv->lastctx = ctx;
 
 	hangcheck_timer_reset(gpu);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -580,6 +590,12 @@
 	gpu->funcs = funcs;
 	gpu->name = name;
 	gpu->inactive = true;
+	gpu->fctx = msm_fence_context_alloc(drm, name);
+	if (IS_ERR(gpu->fctx)) {
+		ret = PTR_ERR(gpu->fctx);
+		gpu->fctx = NULL;
+		goto fail;
+	}
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
@@ -700,4 +716,7 @@
 
 	if (gpu->mmu)
 		gpu->mmu->funcs->destroy(gpu->mmu);
+
+	if (gpu->fctx)
+		msm_fence_context_free(gpu->fctx);
 }
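
Note on the error handling introduced above: the submit path now gets its fence from
msm_fence_alloc() and unwinds via IS_ERR()/PTR_ERR() instead of bumping a bare counter.
A minimal sketch of that ERR_PTR convention (from <linux/err.h>) follows; alloc_thing()
and struct thing are hypothetical stand-ins, not part of the msm driver.

	#include <linux/err.h>
	#include <linux/slab.h>

	struct thing { int dummy; };

	static struct thing *alloc_thing(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
		return t;
	}

	static int use_thing(void)
	{
		struct thing *t = alloc_thing();

		if (IS_ERR(t))
			return PTR_ERR(t);	/* recover the errno, as the submit path above does */
		kfree(t);
		return 0;
	}
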
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bbe85a..c902283 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -22,6 +22,7 @@
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
+#include "msm_fence.h"
 #include "msm_ringbuffer.h"
 
 struct msm_gem_submit;
@@ -46,7 +47,7 @@
 	int (*hw_init)(struct msm_gpu *gpu);
 	int (*pm_suspend)(struct msm_gpu *gpu);
 	int (*pm_resume)(struct msm_gpu *gpu);
-	int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 			struct msm_file_private *ctx);
 	void (*flush)(struct msm_gpu *gpu);
 	void (*idle)(struct msm_gpu *gpu);
@@ -77,13 +78,15 @@
 	const struct msm_gpu_perfcntr *perfcntrs;
 	uint32_t num_perfcntrs;
 
+	/* ringbuffer: */
 	struct msm_ringbuffer *rb;
 	uint32_t rb_iova;
 
 	/* list of GEM active objects: */
 	struct list_head active_list;
 
-	uint32_t submitted_fence;
+	/* fencing: */
+	struct msm_fence_context *fctx;
 
 	/* is gpu powered/active? */
 	int active_cnt;
@@ -125,7 +128,7 @@
 
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-	return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+	return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
 }
 
 /* Perf-Counters:
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9a78c48..0857710 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -296,7 +296,7 @@
 
 	n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
 			TASK_COMM_LEN, current->comm, task_pid_nr(current),
-			submit->fence);
+			submit->fence->seqno);
 
 	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
 
@@ -312,6 +312,9 @@
 		struct msm_gem_object *obj = submit->bos[idx].obj;
 		const char *buf = msm_gem_vaddr_locked(&obj->base);
 
+		if (IS_ERR(buf))
+			continue;
+
 		buf += iova - submit->bos[idx].iova;
 
 		rd_write_section(rd, RD_GPUADDR,
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b90..42f5359 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,6 +40,10 @@
 	}
 
 	ring->start = msm_gem_vaddr_locked(ring->bo);
+	if (IS_ERR(ring->start)) {
+		ret = PTR_ERR(ring->start);
+		goto fail;
+	}
 	ring->end   = ring->start + (size / 4);
 	ring->cur   = ring->start;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 82bd465..a555681 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -23,7 +23,7 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "hw.h"
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 55ccbf0..6f318c5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -28,8 +28,9 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
+#include "nouveau_ttm.h"
 #include "nouveau_bo.h"
 #include "nouveau_gem.h"
 #include "nouveau_encoder.h"
@@ -995,7 +996,7 @@
 	if (width != 64 || height != 64)
 		return -EINVAL;
 
-	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+	gem = drm_gem_object_lookup(file_priv, buffer_handle);
 	if (!gem)
 		return -ENOENT;
 	cursor = nouveau_gem_object(gem);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index 4e61173..c83116a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,6 +1,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_mode.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_crtc.h"
 #include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index b48eec3..b6cc776 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -27,7 +27,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 05bfd15..c2947ef 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -27,7 +27,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index b4a6bc4..aea81a5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "hw.h"
 #include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 6c9a1e8..7030307 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -1,6 +1,6 @@
 #ifndef __NV04_DISPLAY_H__
 #define __NV04_DISPLAY_H__
-
+#include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
 #include "nouveau_display.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 956a833..74856a8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -23,7 +23,7 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "hw.h"
 
 #include <subdev/bios/pll.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index aeebdd4..ec444ea 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -27,7 +27,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 
 #include "nouveau_bo.h"
 #include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 903c473..2b83b2c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -26,7 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 #include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 54e9fb9e..477a8d0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -25,7 +25,7 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 163317d..a665b78 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -26,7 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 4993a86..126a85c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -7,6 +7,7 @@
 	NVKM_SUBDEV_PCI,
 	NVKM_SUBDEV_VBIOS,
 	NVKM_SUBDEV_DEVINIT,
+	NVKM_SUBDEV_TOP,
 	NVKM_SUBDEV_IBUS,
 	NVKM_SUBDEV_GPIO,
 	NVKM_SUBDEV_I2C,
@@ -15,9 +16,9 @@
 	NVKM_SUBDEV_MC,
 	NVKM_SUBDEV_BUS,
 	NVKM_SUBDEV_TIMER,
+	NVKM_SUBDEV_INSTMEM,
 	NVKM_SUBDEV_FB,
 	NVKM_SUBDEV_LTC,
-	NVKM_SUBDEV_INSTMEM,
 	NVKM_SUBDEV_MMU,
 	NVKM_SUBDEV_BAR,
 	NVKM_SUBDEV_PMU,
@@ -131,6 +132,7 @@
 	struct nvkm_secboot *secboot;
 	struct nvkm_therm *therm;
 	struct nvkm_timer *timer;
+	struct nvkm_top *top;
 	struct nvkm_volt *volt;
 
 	struct nvkm_engine *bsp;
@@ -200,6 +202,7 @@
 	int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
 	int (*therm   )(struct nvkm_device *, int idx, struct nvkm_therm **);
 	int (*timer   )(struct nvkm_device *, int idx, struct nvkm_timer **);
+	int (*top     )(struct nvkm_device *, int idx, struct nvkm_top **);
 	int (*volt    )(struct nvkm_device *, int idx, struct nvkm_volt **);
 
 	int (*bsp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 48bf128..9ebfd87 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -38,11 +38,9 @@
 };
 
 int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
-		     int index, u32 pmc_enable, bool enable,
-		     struct nvkm_engine *);
+		     int index, bool enable, struct nvkm_engine *);
 int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
-		     int index, u32 pmc_enable, bool enable,
-		     struct nvkm_engine **);
+		     int index, bool enable, struct nvkm_engine **);
 struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
 void nvkm_engine_unref(struct nvkm_engine **);
 void nvkm_engine_tile(struct nvkm_engine *, int region);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 3b5dc9c6..57adefa 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -6,7 +6,6 @@
 	const struct nvkm_subdev_func *func;
 	struct nvkm_device *device;
 	enum nvkm_devidx index;
-	u32 pmc_enable;
 	struct mutex mutex;
 	u32 debug;
 
@@ -24,7 +23,7 @@
 
 extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
 void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
-		      int index, u32 pmc_enable, struct nvkm_subdev *);
+		      int index, struct nvkm_subdev *);
 void nvkm_subdev_del(struct nvkm_subdev **);
 int  nvkm_subdev_preinit(struct nvkm_subdev *);
 int  nvkm_subdev_init(struct nvkm_subdev *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 81c0bc6..e6baf03 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -40,7 +40,6 @@
 		u32 *data;
 		u32  size;
 	} data;
-	u32 pmc_enable;
 	void (*init)(struct nvkm_falcon *);
 	void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
 	struct nvkm_sclass sclass[];
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 3128d21..b1fcc41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -15,7 +15,6 @@
 		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
 struct nvkm_xtensa_func {
-	u32 pmc_enable;
 	u32 fifo_val;
 	u32 unkd28;
 	struct nvkm_sclass sclass[];
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index db10c11..c5a6ebd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -25,7 +25,8 @@
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
 
 struct nvbios_ocfg {
-	u16 match;
+	u8  proto;
+	u8  flags;
 	u16 clkcmp[2];
 };
 
@@ -33,7 +34,7 @@
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 193626c..709d786 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -7,6 +7,7 @@
 	const struct nvkm_devinit_func *func;
 	struct nvkm_subdev subdev;
 	bool post;
+	bool force_post;
 };
 
 u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 85ab72c..0a734fd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -55,6 +55,9 @@
 		struct nvkm_fb_tile region[16];
 		int regions;
 	} tile;
+
+	struct nvkm_memory *mmu_rd;
+	struct nvkm_memory *mmu_wr;
 };
 
 bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
@@ -87,6 +90,7 @@
 int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index 530c621..3c2ddd9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -3,15 +3,13 @@
 
 #include <core/subdev.h>
 
-struct nkvm_iccsense_rail;
 struct nvkm_iccsense {
 	struct nvkm_subdev subdev;
-	u8 rail_count;
 	bool data_valid;
-	struct nvkm_iccsense_rail *rails;
+	struct list_head sensors;
+	struct list_head rails;
 };
 
 int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
-int nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx);
 int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
 #endif
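
The array/rail_count pair is replaced here by standard list_head bookkeeping, which is why
nouveau_hwmon later only needs list_empty(&iccsense->rails). A small sketch of that pattern
using plain <linux/list.h>; struct example_rail is an illustrative stand-in, not the real
nvkm_iccsense_rail layout.

	#include <linux/list.h>
	#include <linux/slab.h>

	struct example_rail {
		struct list_head head;
		int id;
	};

	static int add_rail(struct list_head *rails, int id)
	{
		struct example_rail *rail = kzalloc(sizeof(*rail), GFP_KERNEL);

		if (!rail)
			return -ENOMEM;
		rail->id = id;
		list_add_tail(&rail->head, rails);	/* append, iccsense->rails style */
		return 0;
	}

	static bool has_rails(struct list_head *rails)
	{
		return !list_empty(rails);		/* what the hwmon check now does */
	}
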
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 4de05e7..2e80682 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -10,12 +10,18 @@
 void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
 void nvkm_mc_intr_unarm(struct nvkm_mc *);
 void nvkm_mc_intr_rearm(struct nvkm_mc *);
+void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
 void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
 
 int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv17_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int g84_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
new file mode 100644
index 0000000..8fb575a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_TOP_H__
+#define __NVKM_TOP_H__
+#include <core/subdev.h>
+
+struct nvkm_top {
+	const struct nvkm_top_func *func;
+	struct nvkm_subdev subdev;
+	struct list_head device;
+};
+
+u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+
+int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a59e524..eb7de48 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -29,7 +29,7 @@
 #include <nvif/cla06f.h>
 #include <nvif/unpack.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_gem.h"
 #include "nouveau_chan.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index cdf5227..db76b94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -6,7 +6,7 @@
 #include <drm/drm_edid.h>
 #include <acpi/video.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_acpi.h"
 
 #define NOUVEAU_DSM_LED 0x02
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 89eb460..f5101be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -32,7 +32,7 @@
 
 #include <linux/backlight.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 4dca65a..a1570b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -24,7 +24,7 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2cdaea5..5e3f3e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -30,7 +30,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
@@ -312,7 +312,7 @@
 	bool force = false, evict = false;
 	int ret;
 
-	ret = ttm_bo_reserve(bo, false, false, false, NULL);
+	ret = ttm_bo_reserve(bo, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -385,7 +385,7 @@
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret, ref;
 
-	ret = ttm_bo_reserve(bo, false, false, false, NULL);
+	ret = ttm_bo_reserve(bo, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -420,7 +420,7 @@
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -1322,7 +1322,7 @@
 	}
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
@@ -1611,6 +1611,8 @@
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 struct nvkm_vma *
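
All of the ttm_bo_reserve() call sites in this file (and in nouveau_gem.c/nouveau_display.c
below) move from five to four arguments. A minimal sketch of the resulting
reserve/modify/unreserve pattern, assuming the 4.7-era TTM and nouveau headers and the
parameter meanings implied by these hunks (interruptible, no_wait, ww ticket):

	static int touch_bo(struct nouveau_bo *nvbo)
	{
		int ret;

		/* not interruptible, do wait, no ww_acquire_ctx ticket */
		ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
		if (ret)
			return ret;

		/* ... operate on the reserved buffer object ... */

		ttm_bo_unreserve(&nvbo->bo);
		return 0;
	}
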
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 879655c..b1d2527 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -34,7 +34,7 @@
 /*XXX*/
 #include <core/client.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_bo.h"
 #include "nouveau_chan.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e81aefe..c108408 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -34,7 +34,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "nouveau_reg.h"
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "dispnv04/hw.h"
 #include "nouveau_acpi.h"
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 3d0dc19..411c12c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -32,7 +32,7 @@
 #include <nvif/class.h>
 #include <nvif/if0001.h>
 #include "nouveau_debugfs.h"
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 
 static int
 nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index b8c03ff..eab5881 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -5,7 +5,7 @@
 
 #if defined(CONFIG_DEBUG_FS)
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 
 struct nouveau_debugfs {
 	struct nvif_object ctrl;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7ce7fa5..7c77f96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -279,7 +279,7 @@
 	struct drm_gem_object *gem;
 	int ret = -ENOMEM;
 
-	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (!gem)
 		return ERR_PTR(-ENOENT);
 
@@ -296,7 +296,7 @@
 err:
 	kfree(nouveau_fb);
 err_unref:
-	drm_gem_object_unreference(gem);
+	drm_gem_object_unreference_unlocked(gem);
 	return ERR_PTR(ret);
 }
 
@@ -739,7 +739,7 @@
 	}
 
 	mutex_lock(&cli->mutex);
-	ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
+	ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
 	if (ret)
 		goto fail_unpin;
 
@@ -753,7 +753,7 @@
 	if (new_bo != old_bo) {
 		ttm_bo_unreserve(&new_bo->bo);
 
-		ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+		ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
 		if (ret)
 			goto fail_unpin;
 	}
@@ -916,7 +916,7 @@
 {
 	struct drm_gem_object *gem;
 
-	gem = drm_gem_object_lookup(dev, file_priv, handle);
+	gem = drm_gem_object_lookup(file_priv, handle);
 	if (gem) {
 		struct nouveau_bo *bo = nouveau_gem_object(gem);
 		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
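
The lookup calls in this hunk (and throughout the rest of the patch) switch to the
drm_gem_object_lookup() form that takes only the file and handle. A short sketch of the
whole lookup/use/unreference cycle as these hunks use it; lookup_example() is a
hypothetical caller, not a nouveau function.

	static int lookup_example(struct drm_file *file_priv, u32 handle)
	{
		struct drm_gem_object *gem;

		gem = drm_gem_object_lookup(file_priv, handle);	/* dev argument dropped */
		if (!gem)
			return -ENOENT;

		/* ... use the object ... */

		drm_gem_object_unreference_unlocked(gem);	/* drop the lookup reference */
		return 0;
	}
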
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 5a57d8b..24273ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -3,7 +3,7 @@
 
 #include <subdev/mmu.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 
 struct nouveau_framebuffer {
 	struct drm_framebuffer base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index d168c63..2634a1a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,7 +24,7 @@
  *
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 
 void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index e17e15e..87d52d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_dp_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d06877d..11f8dd9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -44,7 +44,7 @@
 #include <nvif/cla06f.h>
 #include <nvif/if0004.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
@@ -1083,10 +1083,8 @@
 	nouveau_display_options();
 
 	if (nouveau_modeset == -1) {
-#ifdef CONFIG_VGA_CONSOLE
 		if (vgacon_text_force())
 			nouveau_modeset = 0;
-#endif
 	}
 
 	if (!nouveau_modeset)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
similarity index 98%
rename from drivers/gpu/drm/nouveau/nouveau_drm.h
rename to drivers/gpu/drm/nouveau/nouveau_drv.h
index 5c363ed..822a021 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1,5 +1,5 @@
-#ifndef __NOUVEAU_DRMCLI_H__
-#define __NOUVEAU_DRMCLI_H__
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
 
 #define DRIVER_AUTHOR		"Nouveau Project"
 #define DRIVER_EMAIL		"nouveau@lists.freedesktop.org"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 59f27e7..300ea03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -43,7 +43,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_bo.h"
 #include "nouveau_fbcon.h"
@@ -386,8 +386,6 @@
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
-
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
 		ret = PTR_ERR(info);
@@ -426,8 +424,6 @@
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	mutex_unlock(&dev->struct_mutex);
-
 	if (chan)
 		nouveau_fbcon_accel_init(dev);
 	nouveau_fbcon_zfill(dev, fbcon);
@@ -441,7 +437,6 @@
 	return 0;
 
 out_unlock:
-	mutex_unlock(&dev->struct_mutex);
 	if (chan)
 		nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
 	nouveau_bo_unmap(nvbo);
@@ -557,6 +552,7 @@
 	if (ret)
 		goto fini;
 
+	fbcon->helper.fbdev->pixmap.buf_align = 4;
 	return 0;
 
 fini:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 9a8c5b7..4bb9ab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -34,7 +34,7 @@
 #include <nvif/notify.h>
 #include <nvif/event.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0865c4..72e2399 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,7 +24,7 @@
  *
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 #include "nouveau_abi16.h"
@@ -71,7 +71,7 @@
 	if (!cli->vm)
 		return 0;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return ret;
 
@@ -126,7 +126,7 @@
 	list_del(&vma->head);
 
 	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, true, false, false);
+		ttm_bo_wait(&nvbo->bo, false, false);
 	else if (fobj && fobj->shared_count == 1)
 		fence = rcu_dereference_protected(fobj->shared[0],
 						reservation_object_held(resv));
@@ -156,7 +156,7 @@
 	if (!cli->vm)
 		return;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return;
 
@@ -368,7 +368,6 @@
 	      int nr_buffers, struct validate_op *op)
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct drm_device *dev = chan->drm->dev;
 	int trycnt = 0;
 	int ret, i;
 	struct nouveau_bo *res_bo = NULL;
@@ -388,7 +387,7 @@
 		struct drm_gem_object *gem;
 		struct nouveau_bo *nvbo;
 
-		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
+		gem = drm_gem_object_lookup(file_priv, b->handle);
 		if (!gem) {
 			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 			ret = -ENOENT;
@@ -409,7 +408,7 @@
 			break;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
 		if (ret) {
 			list_splice_tail_init(&vram_list, &op->list);
 			list_splice_tail_init(&gart_list, &op->list);
@@ -651,7 +650,7 @@
 				data |= r->vor;
 		}
 
-		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
+		ret = ttm_bo_wait(&nvbo->bo, false, false);
 		if (ret) {
 			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -864,7 +863,7 @@
 	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
 	int ret;
 
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	gem = drm_gem_object_lookup(file_priv, req->handle);
 	if (!gem)
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
@@ -896,7 +895,7 @@
 	struct drm_gem_object *gem;
 	struct nouveau_bo *nvbo;
 
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	gem = drm_gem_object_lookup(file_priv, req->handle);
 	if (!gem)
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
@@ -914,7 +913,7 @@
 	struct drm_gem_object *gem;
 	int ret;
 
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	gem = drm_gem_object_lookup(file_priv, req->handle);
 	if (!gem)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index e4049fa..7e32da2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -3,7 +3,7 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_bo.h"
 
 #define nouveau_bo_tile_layout(nvbo)				\
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 67edd2f5..1ff4166 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -31,7 +31,7 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_hwmon.h"
 
 #include <nvkm/subdev/iccsense.h>
@@ -689,7 +689,7 @@
 			goto error;
 	}
 
-	if (iccsense && iccsense->data_valid && iccsense->rail_count) {
+	if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) {
 		ret = sysfs_create_group(&hwmon_dev->kobj,
 					 &hwmon_power_attrgroup);
 		if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 55eb942..15f0925 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -36,7 +36,7 @@
 #include <nvif/event.h>
 #include <nvif/ioctl.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_usif.h"
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index f41056d..a90d727 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -21,7 +21,7 @@
  */
 #ifndef __NOUVEAU_PLATFORM_H__
 #define __NOUVEAU_PLATFORM_H__
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 
 extern struct platform_driver nouveau_platform_driver;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index dd32ad6..a0a9704 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include <linux/dma-buf.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_gem.h"
 
 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 8c3053a..db35ab5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,7 +1,7 @@
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_ttm.h"
 
 struct nouveau_sgdma_be {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d2e7d20..bcee914 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,7 +24,7 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index e9f52ef..675e9e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_usif.h"
 #include "nouveau_abi16.h"
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index af89c36..c6a180a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -4,7 +4,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_acpi.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_vga.h"
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 789dc29..7d9248b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,7 +22,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
@@ -82,7 +82,6 @@
 	uint32_t fg;
 	uint32_t bg;
 	uint32_t dsize;
-	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
 	int ret;
 
@@ -93,9 +92,6 @@
 	if (ret)
 		return ret;
 
-	width = ALIGN(image->width, 8);
-	dsize = ALIGN(width * image->height, 32) >> 5;
-
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
 		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@
 			 ((image->dx + image->width) & 0xffff));
 	OUT_RING(chan, bg);
 	OUT_RING(chan, fg);
-	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
+	dsize = ALIGN(image->width * image->height, 32) >> 5;
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;
 
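The new dsize expression above (and the matching dwords computation in the nv50/nvc0
fbcon variants further down) simply counts how many 32-bit words are needed to hold
width * height bits of 1bpp glyph data, rounded up. A worked example with a hypothetical
10x16 glyph, using a userspace reimplementation of the kernel ALIGN() macro:

	#include <stdio.h>
	#include <stdint.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		uint32_t width = 10, height = 16;		/* hypothetical glyph size */
		uint32_t bits = width * height;			/* 160 bits of 1bpp data */
		uint32_t dsize = ALIGN(bits, 32) >> 5;		/* ALIGN(160, 32) = 160 -> 5 dwords */

		printf("%u bits -> %u dwords\n", bits, dsize);	/* prints: 160 bits -> 5 dwords */
		return 0;
	}
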
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 3022d24..1915b7b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 2c35213..4e3de34 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nv10_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6a141c9..7d5e562 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -26,7 +26,7 @@
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nv10_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a43445c..3ffc2b0 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -39,7 +39,7 @@
 #include <nvif/cl507d.h>
 #include <nvif/cl507e.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_gem.h"
 #include "nouveau_connector.h"
@@ -1305,7 +1305,6 @@
 		     uint32_t handle, uint32_t width, uint32_t height)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
 	struct drm_gem_object *gem = NULL;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
@@ -1314,7 +1313,7 @@
 		if (width != 64 || height != 64)
 			return -EINVAL;
 
-		gem = drm_gem_object_lookup(dev, file_priv, handle);
+		gem = drm_gem_object_lookup(file_priv, handle);
 		if (unlikely(!gem))
 			return -ENOENT;
 		nvbo = nouveau_gem_object(gem);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e05499d..1aeb698 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
@@ -95,7 +95,7 @@
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
 	struct nouveau_channel *chan = drm->channel;
-	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
 	int ret;
@@ -107,9 +107,6 @@
 	if (ret)
 		return ret;
 
-	width = ALIGN(image->width, 32);
-	dwords = (width * image->height) >> 5;
-
 	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@
 	OUT_RING(chan, 0);
 	OUT_RING(chan, image->dy);
 
+	dwords = ALIGN(image->width * image->height, 32) >> 5;
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 3695ccce..4d6f202 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -26,7 +26,7 @@
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nv10_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 412c5be..18bde9d 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index c97395b..839f4c8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
@@ -95,7 +95,7 @@
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
 	struct nouveau_channel *chan = drm->channel;
-	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
 	int ret;
@@ -107,9 +107,6 @@
 	if (ret)
 		return ret;
 
-	width = ALIGN(image->width, 32);
-	dwords = (width * image->height) >> 5;
-
 	BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@
 	OUT_RING  (chan, 0);
 	OUT_RING  (chan, image->dy);
 
+	dwords = ALIGN(image->width * image->height, 32) >> 5;
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index becf19a..b797757 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 8a7bae7..ee8e583 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -137,11 +137,10 @@
 
 int
 nvkm_engine_ctor(const struct nvkm_engine_func *func,
-		 struct nvkm_device *device, int index, u32 pmc_enable,
-		 bool enable, struct nvkm_engine *engine)
+		 struct nvkm_device *device, int index, bool enable,
+		 struct nvkm_engine *engine)
 {
-	nvkm_subdev_ctor(&nvkm_engine_func, device, index,
-			 pmc_enable, &engine->subdev);
+	nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev);
 	engine->func = func;
 
 	if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
@@ -155,11 +154,10 @@
 
 int
 nvkm_engine_new_(const struct nvkm_engine_func *func,
-		 struct nvkm_device *device, int index, u32 pmc_enable,
-		 bool enable, struct nvkm_engine **pengine)
+		 struct nvkm_device *device, int index, bool enable,
+		 struct nvkm_engine **pengine)
 {
 	if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_engine_ctor(func, device, index, pmc_enable,
-				enable, *pengine);
+	return nvkm_engine_ctor(func, device, index, enable, *pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 3bf08cb..b185578 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -24,6 +24,7 @@
 #include <core/subdev.h>
 #include <core/device.h>
 #include <core/option.h>
+#include <subdev/mc.h>
 
 static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
 
@@ -50,6 +51,7 @@
 	[NVKM_SUBDEV_SECBOOT ] = "secboot",
 	[NVKM_SUBDEV_THERM   ] = "therm",
 	[NVKM_SUBDEV_TIMER   ] = "tmr",
+	[NVKM_SUBDEV_TOP     ] = "top",
 	[NVKM_SUBDEV_VOLT    ] = "volt",
 	[NVKM_ENGINE_BSP     ] = "bsp",
 	[NVKM_ENGINE_CE0     ] = "ce0",
@@ -89,7 +91,6 @@
 {
 	struct nvkm_device *device = subdev->device;
 	const char *action = suspend ? "suspend" : "fini";
-	u32 pmc_enable = subdev->pmc_enable;
 	s64 time;
 
 	nvkm_trace(subdev, "%s running...\n", action);
@@ -104,11 +105,7 @@
 		}
 	}
 
-	if (pmc_enable) {
-		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
-		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
-		nvkm_rd32(device, 0x000200);
-	}
+	nvkm_mc_reset(device->mc, subdev->index);
 
 	time = ktime_to_us(ktime_get()) - time;
 	nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
@@ -193,14 +190,13 @@
 
 void
 nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
-		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 struct nvkm_device *device, int index,
 		 struct nvkm_subdev *subdev)
 {
 	const char *name = nvkm_subdev_name[index];
 	subdev->func = func;
 	subdev->device = device;
 	subdev->index = index;
-	subdev->pmc_enable = pmc_enable;
 
 	__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
 	subdev->debug = nvkm_dbgopt(device->dbgopt, name);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 3ef0107..8e2e24a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_xtensa_func
 g84_bsp = {
-	.pmc_enable = 0x04008000,
 	.fifo_val = 0x1111,
 	.unkd28 = 0x90044,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 92a9f35..ad9f855 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -40,7 +40,6 @@
 	.code.size = sizeof(gf100_ce_code),
 	.data.data = gf100_ce_data,
 	.data.size = sizeof(gf100_ce_data),
-	.pmc_enable = 0x00000040,
 	.init = gf100_ce_init,
 	.intr = gt215_ce_intr,
 	.sclass = {
@@ -55,7 +54,6 @@
 	.code.size = sizeof(gf100_ce_code),
 	.data.data = gf100_ce_data,
 	.data.size = sizeof(gf100_ce_data),
-	.pmc_enable = 0x00000080,
 	.init = gf100_ce_init,
 	.intr = gt215_ce_intr,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index e2b944d..9e0b53a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -97,17 +97,5 @@
 gk104_ce_new(struct nvkm_device *device, int index,
 	     struct nvkm_engine **pengine)
 {
-	if (index == NVKM_ENGINE_CE0) {
-		return nvkm_engine_new_(&gk104_ce, device, index,
-					0x00000040, true, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE1) {
-		return nvkm_engine_new_(&gk104_ce, device, index,
-					0x00000080, true, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE2) {
-		return nvkm_engine_new_(&gk104_ce, device, index,
-					0x00200000, true, pengine);
-	}
-	return -ENODEV;
+	return nvkm_engine_new_(&gk104_ce, device, index, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
index 4c2f429..c0df7da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -39,17 +39,5 @@
 gm107_ce_new(struct nvkm_device *device, int index,
 	     struct nvkm_engine **pengine)
 {
-	if (index == NVKM_ENGINE_CE0) {
-		return nvkm_engine_new_(&gm107_ce, device, index,
-					0x00000040, true, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE1) {
-		return nvkm_engine_new_(&gm107_ce, device, index,
-					0x00000080, true, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE2) {
-		return nvkm_engine_new_(&gm107_ce, device, index,
-					0x00200000, true, pengine);
-	}
-	return -ENODEV;
+	return nvkm_engine_new_(&gm107_ce, device, index, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index 13f07b3..c6fa8b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -38,17 +38,5 @@
 gm200_ce_new(struct nvkm_device *device, int index,
 	     struct nvkm_engine **pengine)
 {
-	if (index == NVKM_ENGINE_CE0) {
-		return nvkm_engine_new_(&gm200_ce, device, index,
-					0x00000040, true, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE1) {
-		return nvkm_engine_new_(&gm200_ce, device, index,
-					0x00000080, true, pengine);
-	} else
-	if (index == NVKM_ENGINE_CE2) {
-		return nvkm_engine_new_(&gm200_ce, device, index,
-					0x00200000, true, pengine);
-	}
-	return -ENODEV;
+	return nvkm_engine_new_(&gm200_ce, device, index, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index 402dcbc..63ac51a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -67,7 +67,6 @@
 	.code.size = sizeof(gt215_ce_code),
 	.data.data = gt215_ce_data,
 	.data.size = sizeof(gt215_ce_data),
-	.pmc_enable = 0x00802000,
 	.intr = gt215_ce_intr,
 	.sclass = {
 		{ -1, -1, GT212_DMA },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index bfd0162..68ffb52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -130,6 +130,5 @@
 g84_cipher_new(struct nvkm_device *device, int index,
 	       struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&g84_cipher, device, index,
-				0x00004000, true, pengine);
+	return nvkm_engine_new_(&g84_cipher, device, index, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9f32c87..4572deb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -146,7 +146,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv11_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -190,7 +190,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -212,7 +212,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -256,7 +256,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -278,7 +278,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -300,7 +300,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -322,7 +322,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -344,7 +344,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -366,7 +366,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -388,7 +388,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -411,7 +411,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -434,7 +434,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -456,7 +456,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv04_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv04_pci_new,
 	.timer = nv04_timer_new,
@@ -479,7 +479,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -505,7 +505,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv41_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -531,7 +531,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv41_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -557,7 +557,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv41_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -609,7 +609,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv04_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -661,7 +661,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv41_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -687,7 +687,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv41_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -739,7 +739,7 @@
 	.gpio = nv10_gpio_new,
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
-	.mc = nv04_mc_new,
+	.mc = nv17_mc_new,
 	.mmu = nv41_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
@@ -926,7 +926,7 @@
 	.gpio = nv50_gpio_new,
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = nv50_mc_new,
+	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
@@ -958,7 +958,7 @@
 	.gpio = nv50_gpio_new,
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = nv50_mc_new,
+	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
@@ -990,7 +990,7 @@
 	.gpio = nv50_gpio_new,
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = nv50_mc_new,
+	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g84_pci_new,
@@ -1022,7 +1022,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = nv50_mc_new,
+	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1054,7 +1054,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = nv50_mc_new,
+	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1118,7 +1118,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = nv50_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
+	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1150,7 +1150,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
+	.mc = gt215_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1184,7 +1184,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
+	.mc = gt215_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1217,7 +1217,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
+	.mc = gt215_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1314,7 +1314,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.imem = nv50_instmem_new,
-	.mc = g98_mc_new,
+	.mc = gt215_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = g94_pci_new,
@@ -1676,13 +1676,14 @@
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
-	.mc = gf100_mc_new,
+	.mc = gk104_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1714,13 +1715,14 @@
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
-	.mc = gf100_mc_new,
+	.mc = gk104_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1752,13 +1754,14 @@
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
-	.mc = gf100_mc_new,
+	.mc = gk104_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk104_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1789,6 +1792,7 @@
 	.mmu = gf100_mmu_new,
 	.pmu = gk20a_pmu_new,
 	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
 	.volt = gk20a_volt_new,
 	.ce[2] = gk104_ce_new,
 	.dma = gf119_dma_new,
@@ -1814,13 +1818,14 @@
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
-	.mc = gf100_mc_new,
+	.mc = gk104_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk110_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1851,13 +1856,14 @@
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
-	.mc = gf100_mc_new,
+	.mc = gk104_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gk110_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1895,6 +1901,7 @@
 	.pmu = gk208_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1932,6 +1939,7 @@
 	.pmu = gk208_pmu_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gk104_ce_new,
 	.ce[1] = gk104_ce_new,
@@ -1969,6 +1977,41 @@
 	.pmu = gm107_pmu_new,
 	.therm = gm107_therm_new,
 	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.volt = gk104_volt_new,
+	.ce[0] = gm107_ce_new,
+	.ce[2] = gm107_ce_new,
+	.disp = gm107_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm107_fifo_new,
+	.gr = gm107_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv118_chipset = {
+	.name = "GM108",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gm107_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gk104_pci_new,
+	.pmu = gm107_pmu_new,
+	.therm = gm107_therm_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gm107_ce_new,
 	.ce[2] = gm107_ce_new,
@@ -1986,7 +2029,7 @@
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
-	.fb = gm107_fb_new,
+	.fb = gm200_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
@@ -2001,6 +2044,7 @@
 	.pmu = gm107_pmu_new,
 	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gm200_ce_new,
 	.ce[1] = gm200_ce_new,
@@ -2019,7 +2063,7 @@
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
-	.fb = gm107_fb_new,
+	.fb = gm200_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
@@ -2034,6 +2078,7 @@
 	.pmu = gm107_pmu_new,
 	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gm200_ce_new,
 	.ce[1] = gm200_ce_new,
@@ -2052,7 +2097,7 @@
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
-	.fb = gm107_fb_new,
+	.fb = gm200_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
@@ -2067,6 +2112,7 @@
 	.pmu = gm107_pmu_new,
 	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
 	.volt = gk104_volt_new,
 	.ce[0] = gm200_ce_new,
 	.ce[1] = gm200_ce_new,
@@ -2093,6 +2139,7 @@
 	.mmu = gf100_mmu_new,
 	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
 	.ce[2] = gm200_ce_new,
 	.volt = gm20b_volt_new,
 	.dma = gf119_dma_new,
@@ -2150,6 +2197,7 @@
 	_(SECBOOT , device->secboot , &device->secboot->subdev);
 	_(THERM   , device->therm   , &device->therm->subdev);
 	_(TIMER   , device->timer   , &device->timer->subdev);
+	_(TOP     , device->top     , &device->top->subdev);
 	_(VOLT    , device->volt    , &device->volt->subdev);
 #undef _
 	default:
@@ -2523,6 +2571,7 @@
 		case 0x106: device->chip = &nv106_chipset; break;
 		case 0x108: device->chip = &nv108_chipset; break;
 		case 0x117: device->chip = &nv117_chipset; break;
+		case 0x118: device->chip = &nv118_chipset; break;
 		case 0x120: device->chip = &nv120_chipset; break;
 		case 0x124: device->chip = &nv124_chipset; break;
 		case 0x126: device->chip = &nv126_chipset; break;
@@ -2604,6 +2653,7 @@
 		_(NVKM_SUBDEV_SECBOOT ,  secboot);
 		_(NVKM_SUBDEV_THERM   ,    therm);
 		_(NVKM_SUBDEV_TIMER   ,    timer);
+		_(NVKM_SUBDEV_TOP     ,      top);
 		_(NVKM_SUBDEV_VOLT    ,     volt);
 		_(NVKM_ENGINE_BSP     ,      bsp);
 		_(NVKM_ENGINE_CE0     ,    ce[0]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e80f6ab..1a06ac1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -22,6 +22,7 @@
 #include <subdev/pmu.h>
 #include <subdev/therm.h>
 #include <subdev/timer.h>
+#include <subdev/top.h>
 #include <subdev/volt.h>
 #include <subdev/secboot.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index a74c5dd..e2a64ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -18,6 +18,7 @@
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 785fa76..1efe91b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -298,8 +298,7 @@
 	disp->func = func;
 	disp->head.nr = heads;
 
-	ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
-			       true, &disp->engine);
+	ret = nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index f031466..5dd3438 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,6 +76,7 @@
 	mask |= 0x0001 << or;
 	mask |= 0x0100 << head;
 
+
 	list_for_each_entry(outp, &disp->base.outp, head) {
 		if ((outp->info.hasht & 0xff) == type &&
 		    (outp->info.hashm & mask) == mask) {
@@ -155,25 +156,21 @@
 	if (!outp)
 		return NULL;
 
+	*conf = (ctrl & 0x00000f00) >> 8;
 	switch (outp->info.type) {
 	case DCB_OUTPUT_TMDS:
-		*conf = (ctrl & 0x00000f00) >> 8;
 		if (*conf == 5)
 			*conf |= 0x0100;
 		break;
 	case DCB_OUTPUT_LVDS:
-		*conf = disp->sor.lvdsconf;
+		*conf |= disp->sor.lvdsconf;
 		break;
-	case DCB_OUTPUT_DP:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		break;
-	case DCB_OUTPUT_ANALOG:
 	default:
-		*conf = 0x00ff;
 		break;
 	}
 
-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+				 &ver, &hdr, &cnt, &len, &info2);
 	if (data && id < 0xff) {
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index b694414..f4b9cf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -36,7 +36,7 @@
 	.outp.internal.crt = nv50_dac_output_new,
 	.outp.internal.tmds = nv50_sor_output_new,
 	.outp.internal.lvds = nv50_sor_output_new,
-	.outp.internal.dp = gf119_sor_dp_new,
+	.outp.internal.dp = gm107_sor_dp_new,
 	.dac.nr = 3,
 	.dac.power = nv50_dac_power,
 	.dac.sense = nv50_dac_sense,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 4226d21..fcb1b0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -387,22 +387,17 @@
 	if (!outp)
 		return NULL;
 
+	*conf = (ctrl & 0x00000f00) >> 8;
 	if (outp->info.location == 0) {
 		switch (outp->info.type) {
 		case DCB_OUTPUT_TMDS:
-			*conf = (ctrl & 0x00000f00) >> 8;
 			if (*conf == 5)
 				*conf |= 0x0100;
 			break;
 		case DCB_OUTPUT_LVDS:
-			*conf = disp->sor.lvdsconf;
+			*conf |= disp->sor.lvdsconf;
 			break;
-		case DCB_OUTPUT_DP:
-			*conf = (ctrl & 0x00000f00) >> 8;
-			break;
-		case DCB_OUTPUT_ANALOG:
 		default:
-			*conf = 0x00ff;
 			break;
 		}
 	} else {
@@ -410,7 +405,8 @@
 		pclk = pclk / 2;
 	}
 
-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+				 &ver, &hdr, &cnt, &len, &info2);
 	if (data && id < 0xff) {
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index e9067ba..4e983f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -62,7 +62,12 @@
 int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
 		     struct nvkm_output **);
 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
 
-int  gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
-		      struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index b4b41b1..22706c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,8 +40,7 @@
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-	const u32 loff = gf119_sor_loff(outp);
-	nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
 	return 0;
 }
 
@@ -64,7 +63,7 @@
 	return 0;
 }
 
-static int
+int
 gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644
index 0000000..37790b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 soff = outp->base.or * 0x800;
+	const u32 data = 0x01010101 * pattern;
+	if (outp->base.info.sorconf.link & 1)
+		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+	else
+		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+	return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+	.pattern = gm107_sor_dp_pattern,
+	.lnk_pwr = g94_sor_dp_lnk_pwr,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
+	.drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+		 struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 2cfbef9..c44fa7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -57,19 +57,6 @@
 }
 
 static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-	const u32 soff = gm200_sor_soff(outp);
-	const u32 data = 0x01010101 * pattern;
-	if (outp->base.info.sorconf.link & 1)
-		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
-	else
-		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
-	return 0;
-}
-
-static int
 gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
@@ -129,7 +116,7 @@
 
 static const struct nvkm_output_dp_func
 gm200_sor_dp_func = {
-	.pattern = gm200_sor_dp_pattern,
+	.pattern = gm107_sor_dp_pattern,
 	.lnk_pwr = gm200_sor_dp_lnk_pwr,
 	.lnk_ctl = gf119_sor_dp_lnk_ctl,
 	.drv_ctl = gm200_sor_dp_drv_ctl,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index 9769fc0..f11ebdd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -152,6 +152,5 @@
 		return -ENOMEM;
 	dma->func = func;
 
-	return nvkm_engine_ctor(&nvkm_dma, device, index,
-				0, true, &dma->engine);
+	return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 7400060..2e7b4e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -348,6 +348,6 @@
 	falcon->data.size = func->data.size;
 	*pengine = &falcon->engine;
 
-	return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
+	return nvkm_engine_ctor(&nvkm_falcon, device, index,
 				enable, &falcon->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index cfc7d57..1c9682a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -178,6 +178,17 @@
 	const struct nvkm_fifo_chan_oclass *sclass;
 	int c = 0;
 
+	if (fifo->func->class_get) {
+		int ret = fifo->func->class_get(fifo, index, &sclass);
+		if (ret == 0) {
+			oclass->base = sclass->base;
+			oclass->engn = sclass;
+			*class = &nvkm_fifo_class;
+			return 0;
+		}
+		return ret;
+	}
+
 	while ((sclass = fifo->func->chan[c])) {
 		if (c++ == index) {
 			oclass->base = sclass->base;
@@ -261,8 +272,7 @@
 		fifo->nr = nr;
 	bitmap_clear(fifo->mask, 0, fifo->nr);
 
-	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
-			       true, &fifo->engine);
+	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 68acb36..743f3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -25,21 +25,36 @@
 #include "changk104.h"
 
 #include <core/client.h>
-#include <core/enum.h>
 #include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <subdev/top.h>
 #include <engine/sw.h>
 
 #include <nvif/class.h>
 
-void
+static int
+gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+		     const struct nvkm_fifo_chan_oclass **psclass)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	int c = 0;
+
+	while ((*psclass = fifo->func->chan[c])) {
+		if (c++ == index)
+			return 0;
+	}
+
+	return c;
+}
+
+static void
 gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->engine.subdev.device;
 	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }
 
-void
+static void
 gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->engine.subdev.device;
@@ -267,111 +282,6 @@
 	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
 }
 
-static const struct nvkm_enum
-gk104_fifo_fault_engine[] = {
-	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
-	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
-	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
-	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
-	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
-	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
-	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
-	{ 0x13, "PERF" },
-	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
-	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
-	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
-	{ 0x17, "PMU" },
-	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
-	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
-	{}
-};
-
-static const struct nvkm_enum
-gk104_fifo_fault_reason[] = {
-	{ 0x00, "PDE" },
-	{ 0x01, "PDE_SIZE" },
-	{ 0x02, "PTE" },
-	{ 0x03, "VA_LIMIT_VIOLATION" },
-	{ 0x04, "UNBOUND_INST_BLOCK" },
-	{ 0x05, "PRIV_VIOLATION" },
-	{ 0x06, "RO_VIOLATION" },
-	{ 0x07, "WO_VIOLATION" },
-	{ 0x08, "PITCH_MASK_VIOLATION" },
-	{ 0x09, "WORK_CREATION" },
-	{ 0x0a, "UNSUPPORTED_APERTURE" },
-	{ 0x0b, "COMPRESSION_FAILURE" },
-	{ 0x0c, "UNSUPPORTED_KIND" },
-	{ 0x0d, "REGION_VIOLATION" },
-	{ 0x0e, "BOTH_PTES_VALID" },
-	{ 0x0f, "INFO_TYPE_POISONED" },
-	{}
-};
-
-static const struct nvkm_enum
-gk104_fifo_fault_hubclient[] = {
-	{ 0x00, "VIP" },
-	{ 0x01, "CE0" },
-	{ 0x02, "CE1" },
-	{ 0x03, "DNISO" },
-	{ 0x04, "FE" },
-	{ 0x05, "FECS" },
-	{ 0x06, "HOST" },
-	{ 0x07, "HOST_CPU" },
-	{ 0x08, "HOST_CPU_NB" },
-	{ 0x09, "ISO" },
-	{ 0x0a, "MMU" },
-	{ 0x0b, "MSPDEC" },
-	{ 0x0c, "MSPPP" },
-	{ 0x0d, "MSVLD" },
-	{ 0x0e, "NISO" },
-	{ 0x0f, "P2P" },
-	{ 0x10, "PD" },
-	{ 0x11, "PERF" },
-	{ 0x12, "PMU" },
-	{ 0x13, "RASTERTWOD" },
-	{ 0x14, "SCC" },
-	{ 0x15, "SCC_NB" },
-	{ 0x16, "SEC" },
-	{ 0x17, "SSYNC" },
-	{ 0x18, "GR_CE" },
-	{ 0x19, "CE2" },
-	{ 0x1a, "XV" },
-	{ 0x1b, "MMU_NB" },
-	{ 0x1c, "MSENC" },
-	{ 0x1d, "DFALCON" },
-	{ 0x1e, "SKED" },
-	{ 0x1f, "AFALCON" },
-	{}
-};
-
-static const struct nvkm_enum
-gk104_fifo_fault_gpcclient[] = {
-	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
-	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
-	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
-	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
-	{ 0x0c, "RAST" },
-	{ 0x0d, "GCC" },
-	{ 0x0e, "GPCCS" },
-	{ 0x0f, "PROP_0" },
-	{ 0x10, "PROP_1" },
-	{ 0x11, "PROP_2" },
-	{ 0x12, "PROP_3" },
-	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
-	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
-	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
-	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
-	{ 0x1f, "GPM" },
-	{ 0x20, "LTP_UTLB_0" },
-	{ 0x21, "LTP_UTLB_1" },
-	{ 0x22, "LTP_UTLB_2" },
-	{ 0x23, "LTP_UTLB_3" },
-	{ 0x24, "GPC_RGG_UTLB" },
-	{}
-};
-
 static void
 gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 {
@@ -390,14 +300,14 @@
 	struct nvkm_engine *engine = NULL;
 	struct nvkm_fifo_chan *chan;
 	unsigned long flags;
-	char gpcid[8] = "";
+	char gpcid[8] = "", en[16] = "";
 
-	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
-	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
+	er = nvkm_enum_find(fifo->func->fault.reason, reason);
+	eu = nvkm_enum_find(fifo->func->fault.engine, unit);
 	if (hub) {
-		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
+		ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
 	} else {
-		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
+		ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
 		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
@@ -418,13 +328,27 @@
 		}
 	}
 
+	if (eu == NULL) {
+		enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+		if (engidx < NVKM_SUBDEV_NR) {
+			const char *src = nvkm_subdev_name[engidx];
+			char *dst = en;
+			do {
+				*dst++ = toupper(*src++);
+			} while(*src);
+			engine = nvkm_device_engine(device, engidx);
+		}
+	} else {
+		snprintf(en, sizeof(en), "%s", eu->name);
+	}
+
 	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
 
 	nvkm_error(subdev,
 		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
 		   "reason %02x [%s] on channel %d [%010llx %s]\n",
 		   write ? "write" : "read", (u64)vahi << 32 | valo,
-		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   unit, en, client, gpcid, ec ? ec->name : "",
 		   reason, er ? er->name : "", chan ? chan->chid : -1,
 		   (u64)inst << 12,
 		   chan ? chan->object.client->name : "unknown");
@@ -557,7 +481,7 @@
 	nvkm_fifo_uevent(&fifo->base);
 }
 
-void
+static void
 gk104_fifo_intr(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -649,7 +573,7 @@
 	}
 }
 
-void
+static void
 gk104_fifo_fini(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -659,13 +583,15 @@
 	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
 
-int
+static int
 gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int ret, i;
+	struct nvkm_top *top = device->top;
+	int engn, runl, pbid, ret, i, j;
+	enum nvkm_devidx engidx;
 	u32 *map;
 
 	/* Determine number of PBDMAs by checking valid enable bits. */
@@ -680,86 +606,26 @@
 	for (i = 0; i < fifo->pbdma_nr; i++)
 		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
 
-	/* Read device topology from HW. */
-	for (i = 0; i < 64; i++) {
-		int type = -1, pbid = -1, engidx = -1;
-		int engn = -1, runl = -1, intr = -1, mcen = -1;
-		int fault = -1, j;
-		u32 data, addr = 0;
-
-		do {
-			data = nvkm_rd32(device, 0x022700 + (i * 0x04));
-			nvkm_trace(subdev, "%02x: %08x\n", i, data);
-			switch (data & 0x00000003) {
-			case 0x00000000: /* NOT_VALID */
-				continue;
-			case 0x00000001: /* DATA */
-				addr  = (data & 0x00fff000);
-				fault = (data & 0x000000f8) >> 3;
-				break;
-			case 0x00000002: /* ENUM */
-				if (data & 0x00000020)
-					engn = (data & 0x3c000000) >> 26;
-				if (data & 0x00000010)
-					runl = (data & 0x01e00000) >> 21;
-				if (data & 0x00000008)
-					intr = (data & 0x000f8000) >> 15;
-				if (data & 0x00000004)
-					mcen = (data & 0x00003e00) >> 9;
-				break;
-			case 0x00000003: /* ENGINE_TYPE */
-				type = (data & 0x7ffffffc) >> 2;
-				break;
-			}
-		} while ((data & 0x80000000) && ++i < 64);
-
-		if (!data)
-			continue;
-
+	/* Determine runlist configuration from topology device info. */
+	i = 0;
+	while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
 		/* Determine which PBDMA handles requests for this engine. */
-		for (j = 0; runl >= 0 && j < fifo->pbdma_nr; j++) {
+		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
 			if (map[j] & (1 << runl)) {
 				pbid = j;
 				break;
 			}
 		}
 
-		/* Translate engine type to NVKM engine identifier. */
-		switch (type) {
-		case 0x00000000: engidx = NVKM_ENGINE_GR; break;
-		case 0x00000001: engidx = NVKM_ENGINE_CE0; break;
-		case 0x00000002: engidx = NVKM_ENGINE_CE1; break;
-		case 0x00000003: engidx = NVKM_ENGINE_CE2; break;
-		case 0x00000008: engidx = NVKM_ENGINE_MSPDEC; break;
-		case 0x00000009: engidx = NVKM_ENGINE_MSPPP; break;
-		case 0x0000000a: engidx = NVKM_ENGINE_MSVLD; break;
-		case 0x0000000b: engidx = NVKM_ENGINE_MSENC; break;
-		case 0x0000000c: engidx = NVKM_ENGINE_VIC; break;
-		case 0x0000000d: engidx = NVKM_ENGINE_SEC; break;
-		case 0x0000000e: engidx = NVKM_ENGINE_NVENC0; break;
-		case 0x0000000f: engidx = NVKM_ENGINE_NVENC1; break;
-		case 0x00000010: engidx = NVKM_ENGINE_NVDEC; break;
-			break;
-		default:
-			break;
-		}
+		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
+			   engn, runl, pbid);
 
-		nvkm_debug(subdev, "%02x (%8s): engine %2d runlist %2d "
-				   "pbdma %2d intr %2d reset %2d "
-				   "fault %2d addr %06x\n", type,
-			   engidx < 0 ? NULL : nvkm_subdev_name[engidx],
-			   engn, runl, pbid, intr, mcen, fault, addr);
-
-		/* Mark the engine as supported if everything checks out. */
-		if (engn >= 0 && runl >= 0) {
-			fifo->engine[engn].engine = engidx < 0 ? NULL :
-				nvkm_device_engine(device, engidx);
-			fifo->engine[engn].runl = runl;
-			fifo->engine[engn].pbid = pbid;
-			fifo->engine_nr = max(fifo->engine_nr, engn + 1);
-			fifo->runlist[runl].engm |= 1 << engn;
-			fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
-		}
+		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
+		fifo->engine[engn].runl = runl;
+		fifo->engine[engn].pbid = pbid;
+		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
+		fifo->runlist[runl].engm |= 1 << engn;
+		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
 	}
 
 	kfree(map);
@@ -796,7 +662,7 @@
 	return 0;
 }
 
-void
+static void
 gk104_fifo_init(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -825,7 +691,7 @@
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
 }
 
-void *
+static void *
 gk104_fifo_dtor(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
@@ -842,22 +708,8 @@
 	return fifo;
 }
 
-int
-gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
-		int index, int nr, struct nvkm_fifo **pfifo)
-{
-	struct gk104_fifo *fifo;
-
-	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
-		return -ENOMEM;
-	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
-	*pfifo = &fifo->base;
-
-	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
-}
-
 static const struct nvkm_fifo_func
-gk104_fifo = {
+gk104_fifo_ = {
 	.dtor = gk104_fifo_dtor,
 	.oneinit = gk104_fifo_oneinit,
 	.init = gk104_fifo_init,
@@ -865,6 +717,145 @@
 	.intr = gk104_fifo_intr,
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
+	.class_get = gk104_fifo_class_get,
+};
+
+int
+gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
+		int index, int nr, struct nvkm_fifo **pfifo)
+{
+	struct gk104_fifo *fifo;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	fifo->func = func;
+	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
+	*pfifo = &fifo->base;
+
+	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
+}
+
+const struct nvkm_enum
+gk104_fifo_fault_engine[] = {
+	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
+	{ 0x01, "DISPLAY" },
+	{ 0x02, "CAPTURE" },
+	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x06, "SCHED" },
+	{ 0x07, "HOST0" },
+	{ 0x08, "HOST1" },
+	{ 0x09, "HOST2" },
+	{ 0x0a, "HOST3" },
+	{ 0x0b, "HOST4" },
+	{ 0x0c, "HOST5" },
+	{ 0x0d, "HOST6" },
+	{ 0x0e, "HOST7" },
+	{ 0x0f, "HOSTSR" },
+	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
+	{ 0x13, "PERF" },
+	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
+	{ 0x17, "PMU" },
+	{ 0x18, "PTP" },
+	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
+	{}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_reason[] = {
+	{ 0x00, "PDE" },
+	{ 0x01, "PDE_SIZE" },
+	{ 0x02, "PTE" },
+	{ 0x03, "VA_LIMIT_VIOLATION" },
+	{ 0x04, "UNBOUND_INST_BLOCK" },
+	{ 0x05, "PRIV_VIOLATION" },
+	{ 0x06, "RO_VIOLATION" },
+	{ 0x07, "WO_VIOLATION" },
+	{ 0x08, "PITCH_MASK_VIOLATION" },
+	{ 0x09, "WORK_CREATION" },
+	{ 0x0a, "UNSUPPORTED_APERTURE" },
+	{ 0x0b, "COMPRESSION_FAILURE" },
+	{ 0x0c, "UNSUPPORTED_KIND" },
+	{ 0x0d, "REGION_VIOLATION" },
+	{ 0x0e, "BOTH_PTES_VALID" },
+	{ 0x0f, "INFO_TYPE_POISONED" },
+	{}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_hubclient[] = {
+	{ 0x00, "VIP" },
+	{ 0x01, "CE0" },
+	{ 0x02, "CE1" },
+	{ 0x03, "DNISO" },
+	{ 0x04, "FE" },
+	{ 0x05, "FECS" },
+	{ 0x06, "HOST" },
+	{ 0x07, "HOST_CPU" },
+	{ 0x08, "HOST_CPU_NB" },
+	{ 0x09, "ISO" },
+	{ 0x0a, "MMU" },
+	{ 0x0b, "MSPDEC" },
+	{ 0x0c, "MSPPP" },
+	{ 0x0d, "MSVLD" },
+	{ 0x0e, "NISO" },
+	{ 0x0f, "P2P" },
+	{ 0x10, "PD" },
+	{ 0x11, "PERF" },
+	{ 0x12, "PMU" },
+	{ 0x13, "RASTERTWOD" },
+	{ 0x14, "SCC" },
+	{ 0x15, "SCC_NB" },
+	{ 0x16, "SEC" },
+	{ 0x17, "SSYNC" },
+	{ 0x18, "GR_CE" },
+	{ 0x19, "CE2" },
+	{ 0x1a, "XV" },
+	{ 0x1b, "MMU_NB" },
+	{ 0x1c, "MSENC" },
+	{ 0x1d, "DFALCON" },
+	{ 0x1e, "SKED" },
+	{ 0x1f, "AFALCON" },
+	{}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_gpcclient[] = {
+	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+	{ 0x0c, "RAST" },
+	{ 0x0d, "GCC" },
+	{ 0x0e, "GPCCS" },
+	{ 0x0f, "PROP_0" },
+	{ 0x10, "PROP_1" },
+	{ 0x11, "PROP_2" },
+	{ 0x12, "PROP_3" },
+	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+	{ 0x1f, "GPM" },
+	{ 0x20, "LTP_UTLB_0" },
+	{ 0x21, "LTP_UTLB_1" },
+	{ 0x22, "LTP_UTLB_2" },
+	{ 0x23, "LTP_UTLB_3" },
+	{ 0x24, "GPC_RGG_UTLB" },
+	{}
+};
+
+static const struct gk104_fifo_func
+gk104_fifo = {
+	.fault.engine = gk104_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gk104_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 9e5d00b..679f3ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -3,10 +3,12 @@
 #define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
 #include "priv.h"
 
+#include <core/enum.h>
 #include <subdev/mmu.h>
 
 struct gk104_fifo_chan;
 struct gk104_fifo {
+	const struct gk104_fifo_func *func;
 	struct nvkm_fifo base;
 
 	struct {
@@ -39,15 +41,19 @@
 	} user;
 };
 
-int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+struct gk104_fifo_func {
+	struct {
+		const struct nvkm_enum *engine;
+		const struct nvkm_enum *reason;
+		const struct nvkm_enum *hubclient;
+		const struct nvkm_enum *gpcclient;
+	} fault;
+
+	const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
 		    int index, int nr, struct nvkm_fifo **);
-void *gk104_fifo_dtor(struct nvkm_fifo *);
-int gk104_fifo_oneinit(struct nvkm_fifo *);
-void gk104_fifo_init(struct nvkm_fifo *);
-void gk104_fifo_fini(struct nvkm_fifo *);
-void gk104_fifo_intr(struct nvkm_fifo *);
-void gk104_fifo_uevent_init(struct nvkm_fifo *);
-void gk104_fifo_uevent_fini(struct nvkm_fifo *);
 void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
@@ -70,4 +76,11 @@
 		return 0;
 	}
 }
+
+extern const struct nvkm_enum gk104_fifo_fault_engine[];
+extern const struct nvkm_enum gk104_fifo_fault_reason[];
+extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
+extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
+
+extern const struct nvkm_enum gm107_fifo_fault_engine[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index 41307fc..b2f8ab7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -24,15 +24,12 @@
 #include "gk104.h"
 #include "changk104.h"
 
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
 gk110_fifo = {
-	.dtor = gk104_fifo_dtor,
-	.oneinit = gk104_fifo_oneinit,
-	.init = gk104_fifo_init,
-	.fini = gk104_fifo_fini,
-	.intr = gk104_fifo_intr,
-	.uevent_init = gk104_fifo_uevent_init,
-	.uevent_fini = gk104_fifo_uevent_fini,
+	.fault.engine = gk104_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gk110_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index ce01c1a7..160617d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -24,15 +24,12 @@
 #include "gk104.h"
 #include "changk104.h"
 
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
 gk208_fifo = {
-	.dtor = gk104_fifo_dtor,
-	.oneinit = gk104_fifo_oneinit,
-	.init = gk104_fifo_init,
-	.fini = gk104_fifo_fini,
-	.intr = gk104_fifo_intr,
-	.uevent_init = gk104_fifo_uevent_init,
-	.uevent_fini = gk104_fifo_uevent_fini,
+	.fault.engine = gk104_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gk104_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b47fe98..be9f5c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -22,15 +22,12 @@
 #include "gk104.h"
 #include "changk104.h"
 
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
 gk20a_fifo = {
-	.dtor = gk104_fifo_dtor,
-	.oneinit = gk104_fifo_oneinit,
-	.init = gk104_fifo_init,
-	.fini = gk104_fifo_fini,
-	.intr = gk104_fifo_intr,
-	.uevent_init = gk104_fifo_uevent_init,
-	.uevent_fini = gk104_fifo_uevent_fini,
+	.fault.engine = gk104_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gk104_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 6d59d65..bd1ff87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -24,15 +24,35 @@
 #include "gk104.h"
 #include "changk104.h"
 
-static const struct nvkm_fifo_func
+const struct nvkm_enum
+gm107_fifo_fault_engine[] = {
+	{ 0x01, "DISPLAY" },
+	{ 0x02, "CAPTURE" },
+	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x06, "SCHED" },
+	{ 0x07, "HOST0" },
+	{ 0x08, "HOST1" },
+	{ 0x09, "HOST2" },
+	{ 0x0a, "HOST3" },
+	{ 0x0b, "HOST4" },
+	{ 0x0c, "HOST5" },
+	{ 0x0d, "HOST6" },
+	{ 0x0e, "HOST7" },
+	{ 0x0f, "HOSTSR" },
+	{ 0x13, "PERF" },
+	{ 0x17, "PMU" },
+	{ 0x18, "PTP" },
+	{}
+};
+
+static const struct gk104_fifo_func
 gm107_fifo = {
-	.dtor = gk104_fifo_dtor,
-	.oneinit = gk104_fifo_oneinit,
-	.init = gk104_fifo_init,
-	.fini = gk104_fifo_fini,
-	.intr = gk104_fifo_intr,
-	.uevent_init = gk104_fifo_uevent_init,
-	.uevent_fini = gk104_fifo_uevent_fini,
+	.fault.engine = gm107_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gk110_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 4bdd430..b069f78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -24,15 +24,12 @@
 #include "gk104.h"
 #include "changk104.h"
 
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
 gm200_fifo = {
-	.dtor = gk104_fifo_dtor,
-	.oneinit = gk104_fifo_oneinit,
-	.init = gk104_fifo_init,
-	.fini = gk104_fifo_fini,
-	.intr = gk104_fifo_intr,
-	.uevent_init = gk104_fifo_uevent_init,
-	.uevent_fini = gk104_fifo_uevent_fini,
+	.fault.engine = gm107_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gm200_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 4c91d4a..2ed87c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -22,15 +22,12 @@
 #include "gk104.h"
 #include "changk104.h"
 
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
 gm20b_fifo = {
-	.dtor = gk104_fifo_dtor,
-	.oneinit = gk104_fifo_oneinit,
-	.init = gk104_fifo_init,
-	.fini = gk104_fifo_fini,
-	.intr = gk104_fifo_intr,
-	.uevent_init = gk104_fifo_uevent_init,
-	.uevent_fini = gk104_fifo_uevent_fini,
+	.fault.engine = gm107_fifo_fault_engine,
+	.fault.reason = gk104_fifo_fault_reason,
+	.fault.hubclient = gk104_fifo_fault_hubclient,
+	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.chan = {
 		&gm200_fifo_gpfifo_oclass,
 		NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index cb1432e..f6dfb37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -7,6 +7,7 @@
 		   int index, int nr, struct nvkm_fifo *);
 void nvkm_fifo_uevent(struct nvkm_fifo *);
 
+struct nvkm_fifo_chan_oclass;
 struct nvkm_fifo_func {
 	void *(*dtor)(struct nvkm_fifo *);
 	int (*oneinit)(struct nvkm_fifo *);
@@ -17,6 +18,8 @@
 	void (*start)(struct nvkm_fifo *, unsigned long *);
 	void (*uevent_init)(struct nvkm_fifo *);
 	void (*uevent_fini)(struct nvkm_fifo *);
+	int (*class_get)(struct nvkm_fifo *, int index,
+			 const struct nvkm_fifo_chan_oclass **);
 	const struct nvkm_fifo_chan_oclass *chan[];
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index 090765f..467065d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -128,9 +128,8 @@
 
 int
 nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
-	     int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
+	     int index, bool enable, struct nvkm_gr *gr)
 {
 	gr->func = func;
-	return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
-				enable, &gr->engine);
+	return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 56f392d..b02d8f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1181,20 +1181,20 @@
 
 	/* GPC_BROADCAST */
 	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
-				 gr->magic_not_rop_nr);
+				     gr->screen_tile_row_offset);
 	for (i = 0; i < 6; i++)
 		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
 	nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
-				 gr->magic_not_rop_nr | data2[0]);
+				     gr->screen_tile_row_offset | data2[0]);
 	nvkm_wr32(device, 0x419be4, data2[1]);
 	for (i = 0; i < 6; i++)
 		nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
 
 	/* UNK78xx */
 	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
-				 gr->magic_not_rop_nr);
+				     gr->screen_tile_row_offset);
 	for (i = 0; i < 6; i++)
 		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
@@ -1238,6 +1238,7 @@
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	u32 idle_timeout;
 
 	nvkm_mc_unk260(device->mc, 0);
 
@@ -1247,7 +1248,7 @@
 	gf100_gr_mmio(gr, grctx->tpc);
 	gf100_gr_mmio(gr, grctx->ppc);
 
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->bundle(info);
 	grctx->pagepool(info);
@@ -1261,7 +1262,7 @@
 	gf100_grctx_generate_r406800(gr);
 
 	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, 0x00000400);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device->mc, 1);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3c86739..ac895ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -81,8 +81,6 @@
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
 void gk104_grctx_generate_unkn(struct gf100_gr *);
 void gk104_grctx_generate_r418bb8(struct gf100_gr *);
-void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
-
 
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
@@ -98,7 +96,6 @@
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
 extern const struct gf100_grctx_func gm200_grctx;
-void gm200_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gm200_grctx_generate_tpcid(struct gf100_gr *);
 void gm200_grctx_generate_405b60(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 74de7a9..f521de1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -223,6 +223,7 @@
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	u32 idle_timeout;
 	int i;
 
 	nvkm_mc_unk260(device->mc, 0);
@@ -233,7 +234,7 @@
 	gf100_gr_mmio(gr, grctx->tpc);
 	gf100_gr_mmio(gr, grctx->ppc);
 
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->bundle(info);
 	grctx->pagepool(info);
@@ -250,7 +251,7 @@
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
 	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, 0x00000400);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device->mc, 1);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index a843e36..9ba3377 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -924,38 +924,30 @@
 
 	/* GPC_BROADCAST */
 	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
-				 gr->magic_not_rop_nr);
+				     gr->screen_tile_row_offset);
 	for (i = 0; i < 6; i++)
 		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
 	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
-				 gr->magic_not_rop_nr | data2[0]);
+				     gr->screen_tile_row_offset | data2[0]);
 	nvkm_wr32(device, 0x41bfe4, data2[1]);
 	for (i = 0; i < 6; i++)
 		nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
 
 	/* UNK78xx */
 	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
-				 gr->magic_not_rop_nr);
+				     gr->screen_tile_row_offset);
 	for (i = 0; i < 6; i++)
 		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 fbp_count = nvkm_rd32(device, 0x120074);
-	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
-}
-
-void
 gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	u32 idle_timeout;
 	int i;
 
 	nvkm_mc_unk260(device->mc, 0);
@@ -966,7 +958,7 @@
 	gf100_gr_mmio(gr, grctx->tpc);
 	gf100_gr_mmio(gr, grctx->ppc);
 
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->bundle(info);
 	grctx->pagepool(info);
@@ -982,11 +974,10 @@
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
-	gk104_grctx_generate_rop_active_fbps(gr);
 	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
 
 	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, 0x00000400);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device->mc, 1);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index ad0a6cf..da7c35a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -29,15 +29,14 @@
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	int idle_timeout_save;
+	u32 idle_timeout;
 	int i;
 
 	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
 
 	gf100_gr_wait_idle(gr);
 
-	idle_timeout_save = nvkm_rd32(device, 0x404154);
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->attrib(info);
 
@@ -53,13 +52,11 @@
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gk104_grctx_generate_rop_active_fbps(gr);
-
 	nvkm_mask(device, 0x5044b0, 0x08000000, 0x08000000);
 
 	gf100_gr_wait_idle(gr);
 
-	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_wait_idle(gr);
 
 	gf100_gr_mthd(gr, gr->fuc_method);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 95f59e3..6d3c501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -920,13 +920,15 @@
 			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
+			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+				continue;
 			mmio_wr32(info, o + 0xc0, bs);
 			mmio_wr32(info, o + 0xf4, bo);
 			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, as);
 			mmio_wr32(info, o + 0xf8, ao);
 			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
-			mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
+			mmio_wr32(info, u, ((bs / 3) << 16) | bs);
 		}
 	}
 }
@@ -957,6 +959,7 @@
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	u32 idle_timeout;
 	int i;
 
 	gf100_gr_mmio(gr, grctx->hub);
@@ -965,7 +968,7 @@
 	gf100_gr_mmio(gr, grctx->tpc);
 	gf100_gr_mmio(gr, grctx->ppc);
 
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->bundle(info);
 	grctx->pagepool(info);
@@ -984,10 +987,8 @@
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gk104_grctx_generate_rop_active_fbps(gr);
-
 	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, 0x00000400);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
 
 	nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index e586699..db209d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -33,7 +33,7 @@
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
-	for (tpc = 0, id = 0; tpc < 4; tpc++) {
+	for (tpc = 0, id = 0; tpc < TPC_MAX_PER_GPC; tpc++) {
 		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 			if (tpc < gr->tpc_nr[gpc]) {
 				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
@@ -45,15 +45,6 @@
 	}
 }
 
-static void
-gm200_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
-	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
-}
-
 void
 gm200_grctx_generate_405b60(struct gf100_gr *gr)
 {
@@ -86,17 +77,17 @@
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
-void
+static void
 gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 tmp;
+	u32 idle_timeout, tmp;
 	int i;
 
 	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
 
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->bundle(info);
 	grctx->pagepool(info);
@@ -113,8 +104,6 @@
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gm200_grctx_generate_rop_active_fbps(gr);
-
 	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
 		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
 	nvkm_wr32(device, 0x4041c4, tmp);
@@ -122,7 +111,7 @@
 	gm200_grctx_generate_405b60(gr);
 
 	gf100_gr_icmd(gr, gr->fuc_bundle);
-	nvkm_wr32(device, 0x404154, 0x00000800);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, gr->fuc_method);
 
 	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index a8827ef..e5702e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -40,15 +40,14 @@
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	int idle_timeout_save;
+	u32 idle_timeout;
 	int i, tmp;
 
 	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
 
 	gf100_gr_wait_idle(gr);
 
-	idle_timeout_save = nvkm_rd32(device, 0x404154);
-	nvkm_wr32(device, 0x404154, 0x00000000);
+	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->attrib(info);
 
@@ -63,7 +62,6 @@
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gk104_grctx_generate_rop_active_fbps(gr);
 	nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
 
 	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
@@ -74,7 +72,7 @@
 
 	gf100_gr_wait_idle(gr);
 
-	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_wait_idle(gr);
 
 	gf100_gr_mthd(gr, gr->fuc_method);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index dc60509..4984b00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -291,12 +291,13 @@
 // Main program loop, very simple, sleeps until woken up by the interrupt
 // handler, pulls a command from the queue and executes its handler
 //
-main:
-	bset $flags $p0
+wait:
 	sleep $p0
+	bset $flags $p0
+main:
 	mov $r13 #cmd_queue
 	call(queue_get)
-	bra $p1 #main
+	bra $p1 #wait
 
 	// 0x0000-0x0003 are all context transfers
 	cmpu b32 $r14 0x04
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 5f4ddfe..8cb240b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -370,9 +370,10 @@
 	0xf11f29f0,
 	0xf0080007,
 	0x02d00203,
-/* 0x04bb: main */
+/* 0x04bb: wait */
 	0xf404bd00,
-	0x28f40031,
+	0x31f40028,
+/* 0x04c1: main */
 	0x1cd7f000,
 	0xf43921f4,
 	0xe4b0f401,
@@ -384,10 +385,10 @@
 	0x0018fe05,
 	0x05b421f5,
 /* 0x04eb: main_not_ctx_xfer */
-	0x94d30ef4,
+	0x94d90ef4,
 	0xf5f010ef,
 	0x7e21f501,
-	0xc60ef403,
+	0xcc0ef403,
 /* 0x04f8: ih */
 	0x80f900f9,
 	0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 03381b1..550d6ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -397,9 +397,10 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0002,
-/* 0x0508: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0508: wait */
+	0xf40028f4,
+/* 0x050e: main */
+	0xd7f00031,
 	0x3921f424,
 	0xb0f401f4,
 	0x18f404e4,
@@ -409,13 +410,13 @@
 	0xfd01e4b6,
 	0x18fe051e,
 	0x0121f500,
-	0xd30ef406,
+	0xd90ef406,
 /* 0x0538: main_not_ctx_xfer */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef4037e,
 /* 0x0545: ih */
-	0xf900f9c6,
+	0xf900f9cc,
 	0x0188fe80,
 	0x90f980f9,
 	0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 99d9b48..271b59d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -397,9 +397,10 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0002,
-/* 0x0508: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0508: wait */
+	0xf40028f4,
+/* 0x050e: main */
+	0xd7f00031,
 	0x3921f424,
 	0xb0f401f4,
 	0x18f404e4,
@@ -409,13 +410,13 @@
 	0xfd01e4b6,
 	0x18fe051e,
 	0x0121f500,
-	0xd30ef406,
+	0xd90ef406,
 /* 0x0538: main_not_ctx_xfer */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef4037e,
 /* 0x0545: ih */
-	0xf900f9c6,
+	0xf900f9cc,
 	0x0188fe80,
 	0x90f980f9,
 	0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index f726769..73b4a32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -397,9 +397,10 @@
 	0x300007f1,
 	0xd00203f0,
 	0x04bd0002,
-/* 0x0508: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0508: wait */
+	0xf40028f4,
+/* 0x050e: main */
+	0xd7f00031,
 	0x3921f424,
 	0xb0f401f4,
 	0x18f404e4,
@@ -409,13 +410,13 @@
 	0xfd01e4b6,
 	0x18fe051e,
 	0x0121f500,
-	0xd30ef406,
+	0xd90ef406,
 /* 0x0538: main_not_ctx_xfer */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef4037e,
 /* 0x0545: ih */
-	0xf900f9c6,
+	0xf900f9cc,
 	0x0188fe80,
 	0x90f980f9,
 	0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 387d1fa..0181698 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -349,9 +349,10 @@
 	0x801f29f0,
 	0xf6023000,
 	0x04bd0002,
-/* 0x0448: main */
-	0xf40031f4,
-	0x240d0028,
+/* 0x0448: wait */
+	0xf40028f4,
+/* 0x044e: main */
+	0x240d0031,
 	0x0000377e,
 	0xb0f401f4,
 	0x18f404e4,
@@ -362,10 +363,10 @@
 	0x0018fe05,
 	0x00051f7e,
 /* 0x0477: main_not_ctx_xfer */
-	0x94d40ef4,
+	0x94da0ef4,
 	0xf5f010ef,
 	0x02f87e01,
-	0xc70ef400,
+	0xcd0ef400,
 /* 0x0484: ih */
 	0x80f900f9,
 	0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index fa9f3c0..eca007f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -427,9 +427,10 @@
 	0x1f29f024,
 	0x02300080,
 	0xbd0002f6,
-/* 0x0571: main */
-	0x0031f404,
-	0x0d0028f4,
+/* 0x0571: wait */
+	0x0028f404,
+/* 0x0577: main */
+	0x0d0031f4,
 	0x00377e24,
 	0xf401f400,
 	0xf404e4b0,
@@ -439,13 +440,13 @@
 	0xfd01e4b6,
 	0x18fe051e,
 	0x06487e00,
-	0xd40ef400,
+	0xda0ef400,
 /* 0x05a0: main_not_ctx_xfer */
 	0xf010ef94,
 	0xf87e01f5,
 	0x0ef40002,
 /* 0x05ad: ih */
-	0xf900f9c7,
+	0xf900f9cd,
 	0x0188fe80,
 	0x90f980f9,
 	0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
index e3a2fb3..4d416d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
@@ -218,13 +218,14 @@
 // Main program loop, very simple, sleeps until woken up by the interrupt
 // handler, pulls a command from the queue and executes its handler
 //
-main:
+wait:
 	// sleep until we have something to do
-	bset $flags $p0
 	sleep $p0
+	bset $flags $p0
+main:
 	mov $r13 #cmd_queue
 	call(queue_get)
-	bra $p1 #main
+	bra $p1 #wait
 
 	// context switch, requested by GPU?
 	cmpu b32 $r14 0x4001
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 397921a..8015b40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -584,9 +584,10 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0564: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0564: wait */
+	0xf40028f4,
+/* 0x056a: main */
+	0xd7f00031,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
@@ -650,7 +651,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xff080ef5,
+	0xff0e0ef5,
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
@@ -675,12 +676,12 @@
 	0xf501f5f0,
 	0xf5037e21,
 /* 0x06b3: main_done */
-	0xbdfeb50e,
+	0xbdfebb0e,
 	0x1f29f024,
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0002,
-	0xfea00ef5,
+	0xfea60ef5,
 /* 0x06c8: ih */
 	0x80f900f9,
 	0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 50c9716..2af90ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -584,9 +584,10 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0564: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0564: wait */
+	0xf40028f4,
+/* 0x056a: main */
+	0xd7f00031,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
@@ -650,7 +651,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xff080ef5,
+	0xff0e0ef5,
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
@@ -675,12 +676,12 @@
 	0xf501f5f0,
 	0xf5037e21,
 /* 0x06b3: main_done */
-	0xbdfeb50e,
+	0xbdfebb0e,
 	0x1f29f024,
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0002,
-	0xfea00ef5,
+	0xfea60ef5,
 /* 0x06c8: ih */
 	0x80f900f9,
 	0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 125824b..e8b8c1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -584,9 +584,10 @@
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0564: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0564: wait */
+	0xf40028f4,
+/* 0x056a: main */
+	0xd7f00031,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
@@ -650,7 +651,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xff080ef5,
+	0xff0e0ef5,
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
@@ -675,12 +676,12 @@
 	0xf501f5f0,
 	0xf5037e21,
 /* 0x06b3: main_done */
-	0xbdfeb50e,
+	0xbdfebb0e,
 	0x1f29f024,
 	0x080007f1,
 	0xd00203f0,
 	0x04bd0002,
-	0xfea00ef5,
+	0xfea60ef5,
 /* 0x06c8: ih */
 	0x80f900f9,
 	0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 0a1b8c0..f4ed2fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -584,9 +584,10 @@
 	0x300007f1,
 	0xd00203f0,
 	0x04bd0001,
-/* 0x0564: main */
-	0xf40031f4,
-	0xd7f00028,
+/* 0x0564: wait */
+	0xf40028f4,
+/* 0x056a: main */
+	0xd7f00031,
 	0x3921f410,
 	0xb1f401f4,
 	0xf54001e4,
@@ -650,7 +651,7 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-	0xff080ef5,
+	0xff0e0ef5,
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
@@ -675,12 +676,12 @@
 	0xf501f5f0,
 	0xf5037e21,
 /* 0x06b3: main_done */
-	0xbdfeb50e,
+	0xbdfebb0e,
 	0x1f29f024,
 	0x300007f1,
 	0xd00203f0,
 	0x04bd0002,
-	0xfea00ef5,
+	0xfea60ef5,
 /* 0x06c8: ih */
 	0x80f900f9,
 	0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index 16869d0..ed48897 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -531,9 +531,10 @@
 	0x1f19f014,
 	0x02300080,
 	0xbd0001f6,
-/* 0x0491: main */
-	0x0031f404,
-	0x0d0028f4,
+/* 0x0491: wait */
+	0x0028f404,
+/* 0x0497: main */
+	0x0d0031f4,
 	0x00377e10,
 	0xf401f400,
 	0x4001e4b1,
@@ -590,7 +591,7 @@
 	0x09f60217,
 	0xf504bd00,
 /* 0x056b: main_not_ctx_switch */
-	0xb0ff2a0e,
+	0xb0ff300e,
 	0x1bf401e4,
 	0x7ef2b20c,
 	0xf4000820,
@@ -612,11 +613,11 @@
 	0x7e01f5f0,
 	0xf50002f8,
 /* 0x05b7: main_done */
-	0xbdfede0e,
+	0xbdfee40e,
 	0x1f29f024,
 	0x02300080,
 	0xbd0002f6,
-	0xcc0ef504,
+	0xd20ef504,
 /* 0x05c9: ih */
 	0xf900f9fe,
 	0x0188fe80,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index d6343d2..5c90518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -531,9 +531,10 @@
 	0x1f19f014,
 	0x02300080,
 	0xbd0001f6,
-/* 0x0491: main */
-	0x0031f404,
-	0x0d0028f4,
+/* 0x0491: wait */
+	0x0028f404,
+/* 0x0497: main */
+	0x0d0031f4,
 	0x00377e10,
 	0xf401f400,
 	0x4001e4b1,
@@ -590,7 +591,7 @@
 	0x09f60217,
 	0xf504bd00,
 /* 0x056b: main_not_ctx_switch */
-	0xb0ff2a0e,
+	0xb0ff300e,
 	0x1bf401e4,
 	0x7ef2b20c,
 	0xf4000820,
@@ -612,11 +613,11 @@
 	0x7e01f5f0,
 	0xf50002f8,
 /* 0x05b7: main_done */
-	0xbdfede0e,
+	0xbdfee40e,
 	0x1f29f024,
 	0x02300080,
 	0xbd0002f6,
-	0xcc0ef504,
+	0xd20ef504,
 /* 0x05c9: ih */
 	0xf900f9fe,
 	0x0188fe80,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index b2de290..ae9ab5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -702,6 +702,13 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+int
+gf100_gr_rops(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	return (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
+}
+
 void
 gf100_gr_zbc_init(struct gf100_gr *gr)
 {
@@ -942,22 +949,41 @@
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
-	{ 0x00, "NO_ERROR" },
-	{ 0x01, "STACK_MISMATCH" },
+	{ 0x01, "STACK_ERROR" },
+	{ 0x02, "API_STACK_ERROR" },
+	{ 0x03, "RET_EMPTY_STACK_ERROR" },
+	{ 0x04, "PC_WRAP" },
 	{ 0x05, "MISALIGNED_PC" },
-	{ 0x08, "MISALIGNED_GPR" },
-	{ 0x09, "INVALID_OPCODE" },
-	{ 0x0d, "GPR_OUT_OF_BOUNDS" },
-	{ 0x0e, "MEM_OUT_OF_BOUNDS" },
-	{ 0x0f, "UNALIGNED_MEM_ACCESS" },
+	{ 0x06, "PC_OVERFLOW" },
+	{ 0x07, "MISALIGNED_IMMC_ADDR" },
+	{ 0x08, "MISALIGNED_REG" },
+	{ 0x09, "ILLEGAL_INSTR_ENCODING" },
+	{ 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+	{ 0x0b, "ILLEGAL_INSTR_PARAM" },
+	{ 0x0c, "INVALID_CONST_ADDR" },
+	{ 0x0d, "OOR_REG" },
+	{ 0x0e, "OOR_ADDR" },
+	{ 0x0f, "MISALIGNED_ADDR" },
 	{ 0x10, "INVALID_ADDR_SPACE" },
-	{ 0x11, "INVALID_PARAM" },
+	{ 0x11, "ILLEGAL_INSTR_PARAM2" },
+	{ 0x12, "INVALID_CONST_ADDR_LDC" },
+	{ 0x13, "GEOMETRY_SM_ERROR" },
+	{ 0x14, "DIVERGENT" },
+	{ 0x15, "WARP_EXIT" },
 	{}
 };
 
 static const struct nvkm_bitfield gf100_mp_global_error[] = {
+	{ 0x00000001, "SM_TO_SM_FAULT" },
+	{ 0x00000002, "L1_ERROR" },
 	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
-	{ 0x00000008, "OUT_OF_STACK_SPACE" },
+	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+	{ 0x00000010, "BPT_INT" },
+	{ 0x00000020, "BPT_PAUSE" },
+	{ 0x00000040, "SINGLE_STEP_COMPLETE" },
+	{ 0x20000000, "ECC_SEC_ERROR" },
+	{ 0x40000000, "ECC_DED_ERROR" },
+	{ 0x80000000, "TIMEOUT" },
 	{}
 };
 
@@ -1609,32 +1635,12 @@
 {
 	struct gf100_gr *gr = gf100_gr(base);
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int ret, i, j;
+	int i, j;
 
 	nvkm_pmu_pgob(device->pmu, false);
 
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
-			      &gr->unk4188b4);
-	if (ret)
-		return ret;
-
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
-			      &gr->unk4188b8);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(gr->unk4188b4);
-	for (i = 0; i < 0x1000; i += 4)
-		nvkm_wo32(gr->unk4188b4, i, 0x00000010);
-	nvkm_done(gr->unk4188b4);
-
-	nvkm_kmap(gr->unk4188b8);
-	for (i = 0; i < 0x1000; i += 4)
-		nvkm_wo32(gr->unk4188b8, i, 0x00000010);
-	nvkm_done(gr->unk4188b8);
-
-	gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
-	gr->gpc_nr =  nvkm_rd32(device, 0x409604) & 0x0000001f;
+	gr->rop_nr = gr->func->rops(gr);
+	gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
 	for (i = 0; i < gr->gpc_nr; i++) {
 		gr->tpc_nr[i]  = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
 		gr->tpc_total += gr->tpc_nr[i];
@@ -1651,38 +1657,38 @@
 	switch (device->chipset) {
 	case 0xc0:
 		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
-			gr->magic_not_rop_nr = 0x07;
+			gr->screen_tile_row_offset = 0x07;
 		} else
 		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
-			gr->magic_not_rop_nr = 0x05;
+			gr->screen_tile_row_offset = 0x05;
 		} else
 		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
-			gr->magic_not_rop_nr = 0x06;
+			gr->screen_tile_row_offset = 0x06;
 		}
 		break;
 	case 0xc3: /* 450, 4/0/0/0, 2 */
-		gr->magic_not_rop_nr = 0x03;
+		gr->screen_tile_row_offset = 0x03;
 		break;
 	case 0xc4: /* 460, 3/4/0/0, 4 */
-		gr->magic_not_rop_nr = 0x01;
+		gr->screen_tile_row_offset = 0x01;
 		break;
 	case 0xc1: /* 2/0/0/0, 1 */
-		gr->magic_not_rop_nr = 0x01;
+		gr->screen_tile_row_offset = 0x01;
 		break;
 	case 0xc8: /* 4/4/3/4, 5 */
-		gr->magic_not_rop_nr = 0x06;
+		gr->screen_tile_row_offset = 0x06;
 		break;
 	case 0xce: /* 4/4/0/0, 4 */
-		gr->magic_not_rop_nr = 0x03;
+		gr->screen_tile_row_offset = 0x03;
 		break;
 	case 0xcf: /* 4/0/0/0, 3 */
-		gr->magic_not_rop_nr = 0x03;
+		gr->screen_tile_row_offset = 0x03;
 		break;
 	case 0xd7:
 	case 0xd9: /* 1/0/0/0, 1 */
 	case 0xea: /* gk20a */
 	case 0x12b: /* gm20b */
-		gr->magic_not_rop_nr = 0x01;
+		gr->screen_tile_row_offset = 0x01;
 		break;
 	}
 
@@ -1729,8 +1735,6 @@
 	gf100_gr_dtor_init(gr->fuc_sw_ctx);
 	gf100_gr_dtor_init(gr->fuc_sw_nonctx);
 
-	nvkm_memory_del(&gr->unk4188b8);
-	nvkm_memory_del(&gr->unk4188b4);
 	return gr;
 }
 
@@ -1776,7 +1780,7 @@
 	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
 				    func->fecs.ucode == NULL);
 
-	ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
+	ret = nvkm_gr_ctor(&gf100_gr_, device, index,
 			   gr->firmware || func->fecs.ucode != NULL,
 			   &gr->base);
 	if (ret)
@@ -1815,6 +1819,7 @@
 gf100_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
@@ -1827,8 +1832,8 @@
 	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -1851,9 +1856,9 @@
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			gr->tpc_total);
+							 gr->tpc_total);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
@@ -1946,6 +1951,7 @@
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.grctx = &gf100_grctx,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index f0c6acb..2b98abd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -31,7 +31,8 @@
 #include <subdev/mmu.h>
 
 #define GPC_MAX 32
-#define TPC_MAX (GPC_MAX * 8)
+#define TPC_MAX_PER_GPC 8
+#define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC)
 
 #define ROP_BCAST(r)      (0x408800 + (r))
 #define ROP_UNIT(u, r)    (0x410000 + (u) * 0x400 + (r))
@@ -100,15 +101,12 @@
 	u8 ppc_mask[GPC_MAX];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
-	struct nvkm_memory *unk4188b4;
-	struct nvkm_memory *unk4188b8;
-
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];
 	u32  size;
 	u32 *data;
 
-	u8 magic_not_rop_nr;
+	u8 screen_tile_row_offset;
 };
 
 int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
@@ -121,6 +119,8 @@
 	void (*dtor)(struct gf100_gr *);
 	int (*init)(struct gf100_gr *);
 	void (*init_gpc_mmu)(struct gf100_gr *);
+	void (*init_rop_active_fbps)(struct gf100_gr *);
+	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -129,18 +129,23 @@
 	struct {
 		struct gf100_gr_ucode *ucode;
 	} gpccs;
+	int (*rops)(struct gf100_gr *);
 	int ppc_nr;
 	const struct gf100_grctx_func *grctx;
 	struct nvkm_sclass sclass[];
 };
 
 int gf100_gr_init(struct gf100_gr *);
+int gf100_gr_rops(struct gf100_gr *);
 
 int gk104_gr_init(struct gf100_gr *);
+void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
+void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
 
 int gk20a_gr_init(struct gf100_gr *);
 
 int gm200_gr_init(struct gf100_gr *);
+int gm200_gr_rops(struct gf100_gr *);
 
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 8f253e0..d736dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -118,6 +118,7 @@
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.grctx = &gf104_grctx,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 815a5aa..2f0d244 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -109,6 +109,7 @@
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.grctx = &gf108_grctx,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index d081ee4..d1d942e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -90,6 +90,7 @@
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.grctx = &gf110_grctx,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index d8e8af4..70335f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -126,6 +126,7 @@
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gf117_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 01faf9a..8d8e4ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -181,6 +181,7 @@
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.grctx = &gf119_grctx,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index abf5492..ec22da6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/fb.h>
+
 #include <nvif/class.h>
 
 /*******************************************************************************
@@ -177,10 +179,35 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gk104_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x120074);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+void
+gk104_gr_init_ppc_exceptions(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int gpc, ppc;
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+				continue;
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		}
+	}
+}
+
 int
 gk104_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
@@ -193,8 +220,8 @@
 	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -218,15 +245,17 @@
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			gr->tpc_total);
+							 gr->tpc_total);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
 	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
+	gr->func->init_rop_active_fbps(gr);
+
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	nvkm_wr32(device, 0x400100, 0xffffffff);
@@ -246,8 +275,9 @@
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
 
+	gr->func->init_ppc_exceptions(gr);
+
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
@@ -309,9 +339,12 @@
 static const struct gf100_gr_func
 gk104_gr = {
 	.init = gk104_gr_init,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gk104_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 32aa294..f31b171 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -183,9 +183,12 @@
 static const struct gf100_gr_func
 gk110_gr = {
 	.init = gk104_gr_init,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gk110_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 22f88af..d76dd17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -103,9 +103,12 @@
 static const struct gf100_gr_func
 gk110b_gr = {
 	.init = gk104_gr_init,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gk110b_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index ee7554f..14bbe6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -162,9 +162,12 @@
 static const struct gf100_gr_func
 gk208_gr = {
 	.init = gk104_gr_init,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gk208_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 7ffb8a6..4ca8ed1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -239,9 +239,6 @@
 		return ret;
 
 	/* MMU debug buffer */
-	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
-
 	if (gr->func->init_gpc_mmu)
 		gr->func->init_gpc_mmu(gr);
 
@@ -267,7 +264,7 @@
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
 			  gr->tpc_total);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
@@ -275,6 +272,8 @@
 
 	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 
+	gr->func->init_rop_active_fbps(gr);
+
 	/* Enable FIFO access */
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
@@ -312,7 +311,9 @@
 static const struct gf100_gr_func
 gk20a_gr = {
 	.init = gk20a_gr_init,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gk20a_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 56e9602..45f965f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -26,6 +26,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/P0260.h>
+#include <subdev/fb.h>
 
 #include <nvif/class.h>
 
@@ -311,17 +312,18 @@
 gm107_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, ppc, rop;
+	int gpc, tpc, rop;
 	int i;
 
 	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
-	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -347,15 +349,17 @@
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			gr->tpc_total);
+							 gr->tpc_total);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
 	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
+	gr->func->init_rop_active_fbps(gr);
+
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	nvkm_wr32(device, 0x400100, 0xffffffff);
@@ -373,9 +377,9 @@
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
+	gr->func->init_ppc_exceptions(gr);
+
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
-			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
@@ -438,9 +442,12 @@
 static const struct gf100_gr_func
 gm107_gr = {
 	.init = gm107_gr_init,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
+	.rops = gf100_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm107_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 058fc1d..4dfa451 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -33,27 +33,45 @@
  ******************************************************************************/
 
 int
+gm200_gr_rops(struct gf100_gr *gr)
+{
+	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
+}
+
+static void
+gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+
+	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+}
+
+static void
+gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+int
 gm200_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {}, tmp;
+	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, ppc, rop;
+	int gpc, tpc, rop;
 	int i;
 
-	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
-	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
-	nvkm_wr32(device, 0x418890, 0x00000000);
-	nvkm_wr32(device, 0x418894, 0x00000000);
-	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
-	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
-
-	/*XXX: belongs in fb */
-	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
-	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
+	gr->func->init_gpc_mmu(gr);
 
 	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
 
@@ -79,9 +97,9 @@
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			gr->tpc_total);
+							 gr->tpc_total);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
@@ -89,6 +107,8 @@
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
 
+	gr->func->init_rop_active_fbps(gr);
+
 	nvkm_wr32(device, 0x400500, 0x00010001);
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
@@ -106,9 +126,9 @@
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
+	gr->func->init_ppc_exceptions(gr);
+
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
-			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
@@ -189,6 +209,10 @@
 static const struct gf100_gr_func
 gm200_gr = {
 	.init = gm200_gr_init,
+	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 29732bc..69479af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -42,7 +42,7 @@
 	}
 
 	val = nvkm_rd32(device, 0x100c80);
-	val &= 0xf000087f;
+	val &= 0xf000187f;
 	nvkm_wr32(device, 0x418880, val);
 	nvkm_wr32(device, 0x418890, 0);
 	nvkm_wr32(device, 0x418894, 0);
@@ -66,7 +66,9 @@
 gm20b_gr = {
 	.init = gk20a_gr_init,
 	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
+	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
+	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gm20b_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 85c5b7f..9c2e985 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1422,6 +1422,5 @@
 	spin_lock_init(&gr->lock);
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
-			    true, &gr->base);
+	return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 4542867..4ebbfbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -1182,7 +1182,7 @@
 	spin_lock_init(&gr->lock);
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+	return nvkm_gr_ctor(func, device, index, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 5caef65..d1dc929 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -337,7 +337,7 @@
 		return -ENOMEM;
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+	return nvkm_gr_ctor(func, device, index, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 05a8954..5f1ad83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -438,7 +438,7 @@
 	*pgr = &gr->base;
 	INIT_LIST_HEAD(&gr->chan);
 
-	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+	return nvkm_gr_ctor(func, device, index, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index b19b912..fca67de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -768,7 +768,7 @@
 	spin_lock_init(&gr->lock);
 	*pgr = &gr->base;
 
-	return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
+	return nvkm_gr_ctor(func, device, index, true, &gr->base);
 }
 
 static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index a234590..d8adcdf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -7,8 +7,7 @@
 struct nvkm_fifo_chan;
 
 int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
-		 int index, u32 pmc_enable, bool enable,
-		 struct nvkm_gr *);
+		 int index, bool enable, struct nvkm_gr *);
 
 bool nv04_gr_idle(struct nvkm_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 34ff001..c0e11a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -39,6 +39,5 @@
 int
 g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
-				true, pmpeg);
+	return nvkm_engine_new_(&g84_mpeg, device, index, true, pmpeg);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index d4d8942..003ac91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -278,7 +278,7 @@
 	mpeg->func = func;
 	*pmpeg = &mpeg->engine;
 
-	return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
+	return nvkm_engine_ctor(&nv31_mpeg_, device, index,
 				true, &mpeg->engine);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index d433cfa..e536f37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -212,6 +212,5 @@
 	INIT_LIST_HEAD(&mpeg->chan);
 	*pmpeg = &mpeg->engine;
 
-	return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
-				true, &mpeg->engine);
+	return nvkm_engine_ctor(&nv44_mpeg, device, index, true, &mpeg->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index c3a85df..4e52885 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -130,6 +130,5 @@
 int
 nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
-				true, pmpeg);
+	return nvkm_engine_new_(&nv50_mpeg, device, index, true, pmpeg);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 1f1a99e..f30cf1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -35,7 +35,6 @@
 
 static const struct nvkm_falcon_func
 g98_mspdec = {
-	.pmc_enable = 0x01020000,
 	.init = g98_mspdec_init,
 	.sclass = {
 		{ -1, -1, G98_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index 371fd6c..cfe1aa8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -35,7 +35,6 @@
 
 static const struct nvkm_falcon_func
 gf100_mspdec = {
-	.pmc_enable = 0x00020000,
 	.init = gf100_mspdec_init,
 	.sclass = {
 		{ -1, -1, GF100_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index de804a1..24272b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_falcon_func
 gk104_mspdec = {
-	.pmc_enable = 0x00020000,
 	.init = gf100_mspdec_init,
 	.sclass = {
 		{ -1, -1, GK104_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
index 8356317..cf6e59a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_falcon_func
 gt215_mspdec = {
-	.pmc_enable = 0x01020000,
 	.init = g98_mspdec_init,
 	.sclass = {
 		{ -1, -1, GT212_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 73f633a..c45dbf7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -35,7 +35,6 @@
 
 static const struct nvkm_falcon_func
 g98_msppp = {
-	.pmc_enable = 0x00400002,
 	.init = g98_msppp_init,
 	.sclass = {
 		{ -1, -1, G98_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index c42c0c0..803c62a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -35,7 +35,6 @@
 
 static const struct nvkm_falcon_func
 gf100_msppp = {
-	.pmc_enable = 0x00000002,
 	.init = gf100_msppp_init,
 	.sclass = {
 		{ -1, -1, GF100_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
index 00e7795..49cbf72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_falcon_func
 gt215_msppp = {
-	.pmc_enable = 0x00400002,
 	.init = g98_msppp_init,
 	.sclass = {
 		{ -1, -1, GT212_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index 47e2929..4a2a9f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -35,7 +35,6 @@
 
 static const struct nvkm_falcon_func
 g98_msvld = {
-	.pmc_enable = 0x04008000,
 	.init = g98_msvld_init,
 	.sclass = {
 		{ -1, -1, G98_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index 1ac581b..1695e53 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -35,7 +35,6 @@
 
 static const struct nvkm_falcon_func
 gf100_msvld = {
-	.pmc_enable = 0x00008000,
 	.init = gf100_msvld_init,
 	.sclass = {
 		{ -1, -1, GF100_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index 4bba16e..b640cd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_falcon_func
 gk104_msvld = {
-	.pmc_enable = 0x00008000,
 	.init = gf100_msvld_init,
 	.sclass = {
 		{ -1, -1, GK104_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
index e17cb56..201e8ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_falcon_func
 gt215_msvld = {
-	.pmc_enable = 0x04008000,
 	.init = g98_msvld_init,
 	.sclass = {
 		{ -1, -1, GT212_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
index 511800f..a0f540e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_falcon_func
 mcp89_msvld = {
-	.pmc_enable = 0x04008000,
 	.init = g98_msvld_init,
 	.sclass = {
 		{ -1, -1, IGT21A_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index f19fabe..8616636 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -863,5 +863,5 @@
 	pm->func = func;
 	INIT_LIST_HEAD(&pm->domains);
 	INIT_LIST_HEAD(&pm->sources);
-	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
+	return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 995c2c5..6d2a7f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -66,7 +66,6 @@
 	.code.size = sizeof(g98_sec_code),
 	.data.data = g98_sec_data,
 	.data.size = sizeof(g98_sec_data),
-	.pmc_enable = 0x00004000,
 	.intr = g98_sec_intr,
 	.sclass = {
 		{ -1, -1, G98_SEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
index 53c1f7e..7be3198 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -106,5 +106,5 @@
 	INIT_LIST_HEAD(&sw->chan);
 	sw->func = func;
 
-	return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
+	return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 4188c77..7a96178 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -27,7 +27,6 @@
 
 static const struct nvkm_xtensa_func
 g84_vp = {
-	.pmc_enable = 0x01020000,
 	.fifo_val = 0x111,
 	.unkd28 = 0x9c544,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index a3d4f5b..06bdb67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -187,6 +187,6 @@
 	xtensa->addr = addr;
 	*pengine = &xtensa->engine;
 
-	return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
+	return nvkm_engine_ctor(&nvkm_xtensa, device, index,
 				enable, &xtensa->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 642d27d..3f5d38d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -19,4 +19,5 @@
 include $(src)/nvkm/subdev/secboot/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
+include $(src)/nvkm/subdev/top/Kbuild
 include $(src)/nvkm/subdev/volt/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index a9433ad..c561d148 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -77,7 +77,7 @@
 nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
 	      int index, struct nvkm_bar *bar)
 {
-	nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
+	nvkm_subdev_ctor(&nvkm_bar, device, index, &bar->subdev);
 	bar->func = func;
 	spin_lock_init(&bar->lock);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 7953689..e15b962 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -105,7 +105,7 @@
 
 	if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
+	nvkm_subdev_ctor(&nvkm_bios, device, index, &bios->subdev);
 
 	ret = nvbios_shadow(bios);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index a5e9213..9efb1b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -141,7 +141,8 @@
 {
 	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
 	if (data) {
-		info->match     = nvbios_rd16(bios, data + 0x00);
+		info->proto     = nvbios_rd08(bios, data + 0x00);
+		info->flags     = nvbios_rd16(bios, data + 0x01);
 		info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
 		info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
 	}
@@ -149,12 +150,13 @@
 }
 
 u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
 		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
 {
 	u16 data, idx = 0;
 	while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
-		if (info->match == type)
+		if ((info->proto == proto || info->proto == 0xff) &&
+		    (info->flags == flags))
 			break;
 	}
 	return data;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 125ec2e..91a7dc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -81,9 +81,11 @@
 pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
 	struct bit_entry bit_C;
+	u16 data = 0x0000;
 
-	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
-		u16 data = nvbios_rd16(bios, bit_C.offset + 8);
+	if (!bit_entry(bios, 'C', &bit_C)) {
+		if (bit_C.version == 1 && bit_C.length >= 10)
+			data = nvbios_rd16(bios, bit_C.offset + 8);
 		if (data) {
 			*ver = nvbios_rd08(bios, data + 0);
 			*hdr = nvbios_rd08(bios, data + 1);
@@ -94,7 +96,7 @@
 	}
 
 	if (bmp_version(bios) >= 0x0524) {
-		u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
+		data = nvbios_rd16(bios, bios->bmp_offset + 142);
 		if (data) {
 			*ver = nvbios_rd08(bios, data + 0);
 			*hdr = 1;
@@ -105,7 +107,7 @@
 	}
 
 	*ver = 0x00;
-	return 0x0000;
+	return data;
 }
 
 static struct pll_mapping *
@@ -156,7 +158,7 @@
 	}
 
 	map = pll_map(bios);
-	while (map->reg) {
+	while (map && map->reg) {
 		if (map->reg == reg && *ver >= 0x20) {
 			u16 addr = (data += hdr);
 			*type = map->type;
@@ -198,7 +200,7 @@
 	}
 
 	map = pll_map(bios);
-	while (map->reg) {
+	while (map && map->reg) {
 		if (map->type == type && *ver >= 0x20) {
 			u16 addr = (data += hdr);
 			*reg = map->reg;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index dc5a10f..52ad73b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -58,7 +58,7 @@
 	struct nvkm_bus *bus;
 	if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
+	nvkm_subdev_ctor(&nvkm_bus, device, index, &bus->subdev);
 	bus->func = func;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 889cce2..7102c25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -564,7 +564,7 @@
 	int ret, idx, arglen;
 	const char *mode;
 
-	nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+	nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
 	clk->func = func;
 	INIT_LIST_HEAD(&clk->states);
 	clk->domains = func->domains;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index 5f25402..4756019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -83,6 +83,12 @@
 	if (init->func->preinit)
 		init->func->preinit(init);
 
+	/* Override the post flag during the first call if NvForcePost is set */
+	if (init->force_post) {
+		init->post = init->force_post;
+		init->force_post = false;
+	}
+
 	/* unlock the extended vga crtc regs */
 	nvkm_lockvgac(subdev->device, false);
 	return 0;
@@ -124,7 +130,7 @@
 		  struct nvkm_device *device, int index,
 		  struct nvkm_devinit *init)
 {
-	nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
+	nvkm_subdev_ctor(&nvkm_devinit, device, index, &init->subdev);
 	init->func = func;
-	init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
+	init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 2923598..8b1b34c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -97,9 +97,11 @@
 	struct nvkm_subdev *subdev = &init->base.subdev;
 	struct nvkm_device *device = subdev->device;
 
-	/* This bit is set by devinit, and flips back to 0 on suspend */
-	if (!base->post)
-		base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
+	/*
+	 * This bit is set by devinit, and flips back to 0 on suspend. We
+	 * can use it as a reliable way to know whether we should run devinit.
+	 */
+	base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
 }
 
 static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 0810570..842d5de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,7 @@
 nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
+nvkm-y += nvkm/subdev/fb/gm200.o
 
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index a719b9b..ce90242 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -24,6 +24,7 @@
 #include "priv.h"
 #include "ram.h"
 
+#include <core/memory.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0203.h>
 #include <engine/gr.h>
@@ -98,6 +99,7 @@
 nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_fb *fb = nvkm_fb(subdev);
+
 	if (fb->func->ram_new) {
 		int ret = fb->func->ram_new(fb, &fb->ram);
 		if (ret) {
@@ -105,6 +107,13 @@
 			return ret;
 		}
 	}
+
+	if (fb->func->oneinit) {
+		int ret = fb->func->oneinit(fb);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -134,6 +143,9 @@
 	struct nvkm_fb *fb = nvkm_fb(subdev);
 	int i;
 
+	nvkm_memory_del(&fb->mmu_wr);
+	nvkm_memory_del(&fb->mmu_rd);
+
 	for (i = 0; i < fb->tile.regions; i++)
 		fb->func->tile.fini(fb, i, &fb->tile.region[i]);
 
@@ -156,7 +168,7 @@
 nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	     int index, struct nvkm_fb *fb)
 {
-	nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
+	nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
 	fb->func = func;
 	fb->tile.regions = fb->func->tile.regions;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 008bb98..e649ead 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -24,6 +24,9 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <core/memory.h>
+#include <core/option.h>
+
 extern const u8 gf100_pte_storage_type_map[256];
 
 bool
@@ -46,6 +49,28 @@
 		nvkm_debug(subdev, "PBFB intr\n");
 }
 
+int
+gf100_fb_oneinit(struct nvkm_fb *fb)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	int ret, size = 0x1000;
+
+	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
+	size = min(size, 0x1000);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
+			      false, &fb->mmu_rd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
+			      false, &fb->mmu_wr);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 void
 gf100_fb_init(struct nvkm_fb *base)
 {
@@ -98,6 +123,7 @@
 static const struct nvkm_fb_func
 gf100_fb = {
 	.dtor = gf100_fb_dtor,
+	.oneinit = gf100_fb_oneinit,
 	.init = gf100_fb_init,
 	.intr = gf100_fb_intr,
 	.ram_new = gf100_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 0edb3c3..b41f0f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -27,6 +27,7 @@
 static const struct nvkm_fb_func
 gk104_fb = {
 	.dtor = gf100_fb_dtor,
+	.oneinit = gf100_fb_oneinit,
 	.init = gf100_fb_init,
 	.intr = gf100_fb_intr,
 	.ram_new = gk104_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 81447eb..7306f7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -21,15 +21,20 @@
  */
 #include "priv.h"
 
+#include <core/memory.h>
+
 static void
 gk20a_fb_init(struct nvkm_fb *fb)
 {
 	struct nvkm_device *device = fb->subdev.device;
 	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
 
 static const struct nvkm_fb_func
 gk20a_fb = {
+	.oneinit = gf100_fb_oneinit,
 	.init = gk20a_fb_init,
 	.memtype_valid = gf100_fb_memtype_valid,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 2a91df8..4869fdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -27,6 +27,7 @@
 static const struct nvkm_fb_func
 gm107_fb = {
 	.dtor = gf100_fb_dtor,
+	.oneinit = gf100_fb_oneinit,
 	.init = gf100_fb_init,
 	.intr = gf100_fb_intr,
 	.ram_new = gm107_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
new file mode 100644
index 0000000..44f5716
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gm200_fb_init(struct nvkm_fb *base)
+{
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	if (fb->r100c10_page)
+		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+	nvkm_mask(device, 0x100cc4, 0x00060000,
+		  min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
+
+static const struct nvkm_fb_func
+gm200_fb = {
+	.dtor = gf100_fb_dtor,
+	.oneinit = gf100_fb_oneinit,
+	.init = gm200_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gm107_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm200_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gm200_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 62b9feb5..d97d640 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -6,6 +6,7 @@
 
 struct nvkm_fb_func {
 	void *(*dtor)(struct nvkm_fb *);
+	int (*oneinit)(struct nvkm_fb *);
 	void (*init)(struct nvkm_fb *);
 	void (*intr)(struct nvkm_fb *);
 
@@ -58,5 +59,6 @@
 void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 
+int gf100_fb_oneinit(struct nvkm_fb *);
 bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index f414497..1c3c18e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -47,7 +47,7 @@
 	struct nvkm_fuse *fuse;
 	if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
+	nvkm_subdev_ctor(&nvkm_fuse, device, index, &fuse->subdev);
 	fuse->func = func;
 	spin_lock_init(&fuse->lock);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index d45ec99..77c64972 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -216,7 +216,7 @@
 	if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
+	nvkm_subdev_ctor(&nvkm_gpio, device, index, &gpio->subdev);
 	gpio->func = func;
 
 	return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 243a71f..4f197b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -254,7 +254,7 @@
 	if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
+	nvkm_subdev_ctor(&nvkm_i2c, device, index, &i2c->subdev);
 	i2c->func = func;
 	INIT_LIST_HEAD(&i2c->pad);
 	INIT_LIST_HEAD(&i2c->bus);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 72d6330..2c6b374 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -117,6 +117,6 @@
 	struct nvkm_subdev *ibus;
 	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
+	nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
index f69f263..3905a80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
@@ -46,6 +46,6 @@
 	struct nvkm_subdev *ibus;
 	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&gf117_ibus, device, index, 0, ibus);
+	nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index b5cee3f..c673853 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -120,6 +120,6 @@
 	struct nvkm_subdev *ibus;
 	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
+	nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 3484079..b7159b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -84,6 +84,6 @@
 	struct nvkm_subdev *ibus;
 	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
+	nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index ef0b7f3..c633281 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -35,6 +35,6 @@
 	struct nvkm_subdev *ibus;
 	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&gm200_ibus, device, index, 0, ibus);
+	nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index c44a852..323c79a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -30,15 +30,14 @@
 
 static bool
 nvkm_iccsense_validate_device(struct i2c_adapter *i2c, u8 addr,
-			      enum nvbios_extdev_type type, u8 rail)
+			      enum nvbios_extdev_type type)
 {
 	switch (type) {
 	case NVBIOS_EXTDEV_INA209:
 	case NVBIOS_EXTDEV_INA219:
-		return rail == 0 && nv_rd16i2cr(i2c, addr, 0x0) >= 0;
+		return nv_rd16i2cr(i2c, addr, 0x0) >= 0;
 	case NVBIOS_EXTDEV_INA3221:
-		return rail <= 3 &&
-		       nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
+		return nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
 		       nv_rd16i2cr(i2c, addr, 0xfe) == 0x5449;
 	default:
 		return false;
@@ -67,8 +66,9 @@
                           struct nvkm_iccsense_rail *rail,
 			  u8 shunt_reg, u8 bus_reg)
 {
-	return nvkm_iccsense_poll_lane(rail->i2c, rail->addr, shunt_reg, 0,
-				       bus_reg, 3, rail->mohm, 10 * 4);
+	return nvkm_iccsense_poll_lane(rail->sensor->i2c, rail->sensor->addr,
+				       shunt_reg, 0, bus_reg, 3, rail->mohm,
+				       10 * 4);
 }
 
 static int
@@ -89,37 +89,87 @@
 nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
 			   struct nvkm_iccsense_rail *rail)
 {
-	return nvkm_iccsense_poll_lane(rail->i2c, rail->addr,
-				       1 + (rail->rail * 2), 3,
-				       2 + (rail->rail * 2), 3, rail->mohm,
+	return nvkm_iccsense_poll_lane(rail->sensor->i2c, rail->sensor->addr,
+				       1 + (rail->idx * 2), 3,
+				       2 + (rail->idx * 2), 3, rail->mohm,
 				       40 * 8);
 }
 
-int
-nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx)
+static void
+nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
+			    struct nvkm_iccsense_sensor *sensor)
 {
-	struct nvkm_iccsense_rail *rail;
+	struct nvkm_subdev *subdev = &iccsense->subdev;
+	/* configuration:
+	 * 0x0007: 0x0007 shunt and bus continuous
+	 * 0x0078: 0x0078 128 samples shunt
+	 * 0x0780: 0x0780 128 samples bus
+	 * 0x1800: 0x0000 +-40 mV shunt range
+	 * 0x2000: 0x0000 16V FSR
+	 */
+	u16 value = 0x07ff;
+	nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
+	nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
+}
 
-	if (!iccsense || idx >= iccsense->rail_count)
-		return -EINVAL;
+static void
+nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
+			     struct nvkm_iccsense_sensor *sensor)
+{
+	struct nvkm_subdev *subdev = &iccsense->subdev;
+	/* configuration:
+	 * 0x0007: 0x0007 shunt and bus continuous
+	 * 0x0031: 0x0000 140 us conversion time shunt
+	 * 0x01c0: 0x0000 140 us conversion time bus
+	 * 0x0f00: 0x0f00 1024 samples
+	 * 0x7000: 0x?000 channels
+	 */
+	u16 value = 0x0e07;
+	if (sensor->rail_mask & 0x1)
+		value |= 0x1 << 14;
+	if (sensor->rail_mask & 0x2)
+		value |= 0x1 << 13;
+	if (sensor->rail_mask & 0x4)
+		value |= 0x1 << 12;
+	nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
+	nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
+}
 
-	rail = &iccsense->rails[idx];
-	if (!rail->read)
-		return -ENODEV;
-
-	return rail->read(iccsense, rail);
+static void
+nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
+			    struct nvkm_iccsense_sensor *sensor)
+{
+	switch (sensor->type) {
+	case NVBIOS_EXTDEV_INA209:
+	case NVBIOS_EXTDEV_INA219:
+		nvkm_iccsense_ina209_config(iccsense, sensor);
+		break;
+	case NVBIOS_EXTDEV_INA3221:
+		nvkm_iccsense_ina3221_config(iccsense, sensor);
+		break;
+	default:
+		break;
+	}
 }
 
 int
 nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense)
 {
-	int result = 0, i;
-	for (i = 0; i < iccsense->rail_count; ++i) {
-		int res = nvkm_iccsense_read(iccsense, i);
-		if (res >= 0)
-			result += res;
-		else
+	int result = 0;
+	struct nvkm_iccsense_rail *rail;
+
+	if (!iccsense)
+		return -EINVAL;
+
+	list_for_each_entry(rail, &iccsense->rails, head) {
+		int res;
+		if (!rail->read)
+			return -ENODEV;
+
+		res = rail->read(iccsense, rail);
+		if (res < 0)
 			return res;
+		result += res;
 	}
 	return result;
 }
@@ -128,89 +178,158 @@
 nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+	struct nvkm_iccsense_sensor *sensor, *tmps;
+	struct nvkm_iccsense_rail *rail, *tmpr;
 
-	if (iccsense->rails)
-		kfree(iccsense->rails);
+	list_for_each_entry_safe(sensor, tmps, &iccsense->sensors, head) {
+		list_del(&sensor->head);
+		kfree(sensor);
+	}
+	list_for_each_entry_safe(rail, tmpr, &iccsense->rails, head) {
+		list_del(&rail->head);
+		kfree(rail);
+	}
 
 	return iccsense;
 }
 
+static struct nvkm_iccsense_sensor*
+nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
+{
+
+	struct nvkm_subdev *subdev = &iccsense->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
+	struct nvkm_i2c *i2c = subdev->device->i2c;
+	struct nvbios_extdev_func extdev;
+	struct nvkm_i2c_bus *i2c_bus;
+	struct nvkm_iccsense_sensor *sensor;
+	u8 addr;
+
+	if (!i2c || !bios || nvbios_extdev_parse(bios, id, &extdev))
+		return NULL;
+
+	if (extdev.type == 0xff)
+		return NULL;
+
+	if (extdev.type != NVBIOS_EXTDEV_INA209 &&
+	    extdev.type != NVBIOS_EXTDEV_INA219 &&
+	    extdev.type != NVBIOS_EXTDEV_INA3221) {
+		iccsense->data_valid = false;
+		nvkm_error(subdev, "Unknown sensor type %x, power reading "
+			   "disabled\n", extdev.type);
+		return NULL;
+	}
+
+	if (extdev.bus)
+		i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
+	else
+		i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	if (!i2c_bus)
+		return NULL;
+
+	addr = extdev.addr >> 1;
+	if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
+					   extdev.type)) {
+		iccsense->data_valid = false;
+		nvkm_warn(subdev, "found invalid sensor id: %i, power reading "
+			  "might be invalid\n", id);
+		return NULL;
+	}
+
+	sensor = kmalloc(sizeof(*sensor), GFP_KERNEL);
+	if (!sensor)
+		return NULL;
+
+	list_add_tail(&sensor->head, &iccsense->sensors);
+	sensor->id = id;
+	sensor->type = extdev.type;
+	sensor->i2c = &i2c_bus->i2c;
+	sensor->addr = addr;
+	sensor->rail_mask = 0x0;
+	return sensor;
+}
+
+static struct nvkm_iccsense_sensor*
+nvkm_iccsense_get_sensor(struct nvkm_iccsense *iccsense, u8 id)
+{
+	struct nvkm_iccsense_sensor *sensor;
+	list_for_each_entry(sensor, &iccsense->sensors, head) {
+		if (sensor->id == id)
+			return sensor;
+	}
+	return nvkm_iccsense_create_sensor(iccsense, id);
+}
+
 static int
 nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
 	struct nvkm_bios *bios = subdev->device->bios;
-	struct nvkm_i2c *i2c = subdev->device->i2c;
 	struct nvbios_iccsense stbl;
 	int i;
 
-	if (!i2c || !bios || nvbios_iccsense_parse(bios, &stbl)
-	    || !stbl.nr_entry)
+	if (!bios || nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
 		return 0;
 
-	iccsense->rails = kmalloc(sizeof(*iccsense->rails) * stbl.nr_entry,
-	                          GFP_KERNEL);
-	if (!iccsense->rails)
-		return -ENOMEM;
-
 	iccsense->data_valid = true;
 	for (i = 0; i < stbl.nr_entry; ++i) {
 		struct pwr_rail_t *r = &stbl.rail[i];
-		struct nvbios_extdev_func extdev;
 		struct nvkm_iccsense_rail *rail;
-		struct nvkm_i2c_bus *i2c_bus;
-		u8 addr;
+		struct nvkm_iccsense_sensor *sensor;
 
 		if (!r->mode || r->resistor_mohm == 0)
 			continue;
 
-		if (nvbios_extdev_parse(bios, r->extdev_id, &extdev))
+		sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+		if (!sensor)
 			continue;
 
-		if (extdev.type == 0xff)
-			continue;
+		rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+		if (!rail)
+			return -ENOMEM;
 
-		if (extdev.bus)
-			i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
-		else
-			i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
-		if (!i2c_bus)
-			continue;
-
-		addr = extdev.addr >> 1;
-		if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
-						   extdev.type, r->rail)) {
-			iccsense->data_valid = false;
-			nvkm_warn(subdev, "found unknown or invalid rail entry"
-				  " type 0x%x rail %i, power reading might be"
-				  " invalid\n", extdev.type, r->rail);
-			continue;
-		}
-
-		rail = &iccsense->rails[iccsense->rail_count];
-		switch (extdev.type) {
+		switch (sensor->type) {
 		case NVBIOS_EXTDEV_INA209:
+			if (r->rail != 0)
+				continue;
 			rail->read = nvkm_iccsense_ina209_read;
 			break;
 		case NVBIOS_EXTDEV_INA219:
+			if (r->rail != 0)
+				continue;
 			rail->read = nvkm_iccsense_ina219_read;
 			break;
 		case NVBIOS_EXTDEV_INA3221:
+			if (r->rail >= 3)
+				continue;
 			rail->read = nvkm_iccsense_ina3221_read;
 			break;
+		default:
+			continue;
 		}
 
-		rail->addr = addr;
-		rail->rail = r->rail;
+		sensor->rail_mask |= 1 << r->rail;
+		rail->sensor = sensor;
+		rail->idx = r->rail;
 		rail->mohm = r->resistor_mohm;
-		rail->i2c = &i2c_bus->i2c;
-		++iccsense->rail_count;
+		list_add_tail(&rail->head, &iccsense->rails);
 	}
 	return 0;
 }
 
+static int
+nvkm_iccsense_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+	struct nvkm_iccsense_sensor *sensor;
+	list_for_each_entry(sensor, &iccsense->sensors, head)
+		nvkm_iccsense_sensor_config(iccsense, sensor);
+	return 0;
+}
+
 struct nvkm_subdev_func iccsense_func = {
 	.oneinit = nvkm_iccsense_oneinit,
+	.init = nvkm_iccsense_init,
 	.dtor = nvkm_iccsense_dtor,
 };
 
@@ -218,7 +337,7 @@
 nvkm_iccsense_ctor(struct nvkm_device *device, int index,
 		   struct nvkm_iccsense *iccsense)
 {
-	nvkm_subdev_ctor(&iccsense_func, device, index, 0, &iccsense->subdev);
+	nvkm_subdev_ctor(&iccsense_func, device, index, &iccsense->subdev);
 }
 
 int
@@ -227,6 +346,8 @@
 {
 	if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
 		return -ENOMEM;
+	INIT_LIST_HEAD(&(*iccsense)->sensors);
+	INIT_LIST_HEAD(&(*iccsense)->rails);
 	nvkm_iccsense_ctor(device, index, *iccsense);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index ed398b8..b72c31d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -2,12 +2,22 @@
 #define __NVKM_ICCSENSE_PRIV_H__
 #define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
 #include <subdev/iccsense.h>
+#include <subdev/bios/extdev.h>
 
-struct nvkm_iccsense_rail {
-	int (*read)(struct nvkm_iccsense *, struct nvkm_iccsense_rail *);
+struct nvkm_iccsense_sensor {
+	struct list_head head;
+	int id;
+	enum nvbios_extdev_type type;
 	struct i2c_adapter *i2c;
 	u8 addr;
-	u8 rail;
+	u8 rail_mask;
+};
+
+struct nvkm_iccsense_rail {
+	struct list_head head;
+	int (*read)(struct nvkm_iccsense *, struct nvkm_iccsense_rail *);
+	struct nvkm_iccsense_sensor *sensor;
+	u8 idx;
 	u8 mohm;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 1d7dd38..8ed8f65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -311,7 +311,7 @@
 		  struct nvkm_device *device, int index,
 		  struct nvkm_instmem *imem)
 {
-	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
+	nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
 	imem->func = func;
 	spin_lock_init(&imem->lock);
 	INIT_LIST_HEAD(&imem->list);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 85b1464..39c2a38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -138,7 +138,7 @@
 	if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
+	nvkm_subdev_ctor(&nvkm_ltc, device, index, &ltc->subdev);
 	ltc->func = func;
 	ltc->zbc_min = 1; /* reserve 0 for disabled */
 	ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index e292f56..389fb13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -69,11 +69,11 @@
 }
 
 static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
 {
 	struct nvkm_subdev *subdev = &ltc->subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
+	u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
 	u32 stat = nvkm_rd32(device, base + 0x00c);
 
 	if (stat) {
@@ -92,7 +92,7 @@
 	while (mask) {
 		u32 s, c = __ffs(mask);
 		for (s = 0; s < ltc->lts_nr; s++)
-			gm107_ltc_lts_isr(ltc, c, s);
+			gm107_ltc_intr_lts(ltc, c, s);
 		mask &= ~(1 << c);
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index 2a29bfd..e18e0dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -46,7 +46,7 @@
 gm200_ltc = {
 	.oneinit = gm200_ltc_oneinit,
 	.init = gm200_ltc_init,
-	.intr = gm107_ltc_intr, /*XXX: not validated */
+	.intr = gm107_ltc_intr,
 	.cbc_clear = gm107_ltc_cbc_clear,
 	.cbc_wait = gm107_ltc_cbc_wait,
 	.zbc = 16,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index bef325d..49695ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,7 +1,12 @@
 nvkm-y += nvkm/subdev/mc/base.o
 nvkm-y += nvkm/subdev/mc/nv04.o
+nvkm-y += nvkm/subdev/mc/nv11.o
+nvkm-y += nvkm/subdev/mc/nv17.o
 nvkm-y += nvkm/subdev/mc/nv44.o
 nvkm-y += nvkm/subdev/mc/nv50.o
+nvkm-y += nvkm/subdev/mc/g84.o
 nvkm-y += nvkm/subdev/mc/g98.o
+nvkm-y += nvkm/subdev/mc/gt215.o
 nvkm-y += nvkm/subdev/mc/gf100.o
+nvkm-y += nvkm/subdev/mc/gk104.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 954fbbe..350a8ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -24,6 +24,7 @@
 #include "priv.h"
 
 #include <core/option.h>
+#include <subdev/top.h>
 
 void
 nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
@@ -58,10 +59,19 @@
 {
 	struct nvkm_device *device = mc->subdev.device;
 	struct nvkm_subdev *subdev;
-	const struct nvkm_mc_intr *map = mc->func->intr;
-	u32 stat, intr;
+	const struct nvkm_mc_map *map = mc->func->intr;
+	u32 stat, intr = nvkm_mc_intr_mask(mc);
+	u64 subdevs;
 
-	stat = intr = nvkm_mc_intr_mask(mc);
+	stat = nvkm_top_intr(device->top, intr, &subdevs);
+	while (subdevs) {
+		enum nvkm_devidx subidx = __ffs64(subdevs);
+		subdev = nvkm_device_subdev(device, subidx);
+		if (subdev)
+			nvkm_subdev_intr(subdev);
+		subdevs &= ~BIT_ULL(subidx);
+	}
+
 	while (map->stat) {
 		if (intr & map->stat) {
 			subdev = nvkm_device_subdev(device, map->unit);
@@ -77,6 +87,36 @@
 	*handled = intr != 0;
 }
 
+static void
+nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	const struct nvkm_mc_map *map;
+	u64 pmc_enable;
+
+	if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
+		for (map = mc->func->reset; map && map->stat; map++) {
+			if (map->unit == devidx) {
+				pmc_enable = map->stat;
+				break;
+			}
+		}
+	}
+
+	if (pmc_enable) {
+		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+		nvkm_rd32(device, 0x000200);
+	}
+}
+
+void
+nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+{
+	if (likely(mc))
+		nvkm_mc_reset_(mc, devidx);
+}
+
 static int
 nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
 {
@@ -117,7 +157,7 @@
 	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
+	nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
 	mc->func = func;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
new file mode 100644
index 0000000..5c85b47
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static const struct nvkm_mc_map
+g84_mc_reset[] = {
+	{ 0x04008000, NVKM_ENGINE_BSP },
+	{ 0x02004000, NVKM_ENGINE_CIPHER },
+	{ 0x01020000, NVKM_ENGINE_VP },
+	{ 0x00400002, NVKM_ENGINE_MPEG },
+	{ 0x00201000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{}
+};
+
+const struct nvkm_mc_map
+g84_mc_intr[] = {
+	{ 0x04000000, NVKM_ENGINE_DISP },
+	{ 0x00020000, NVKM_ENGINE_VP },
+	{ 0x00008000, NVKM_ENGINE_BSP },
+	{ 0x00004000, NVKM_ENGINE_CIPHER },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000001, NVKM_ENGINE_MPEG },
+	{ 0x0002d101, NVKM_SUBDEV_FB },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{},
+};
+
+static const struct nvkm_mc_func
+g84_mc = {
+	.init = nv50_mc_init,
+	.intr = g84_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+	.reset = g84_mc_reset,
+};
+
+int
+g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&g84_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 7344ad6..0280b43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -23,24 +23,31 @@
  */
 #include "priv.h"
 
-static const struct nvkm_mc_intr
-g98_mc_intr[] = {
-	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
-	{ 0x00000001, NVKM_ENGINE_MSPPP },
+static const struct nvkm_mc_map
+g98_mc_reset[] = {
+	{ 0x04008000, NVKM_ENGINE_MSVLD },
+	{ 0x02004000, NVKM_ENGINE_SEC },
+	{ 0x01020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00400002, NVKM_ENGINE_MSPPP },
+	{ 0x00201000, NVKM_ENGINE_GR },
 	{ 0x00000100, NVKM_ENGINE_FIFO },
-	{ 0x00001000, NVKM_ENGINE_GR },
-	{ 0x00004000, NVKM_ENGINE_SEC },	/* NV84:NVA3 */
-	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{}
+};
+
+static const struct nvkm_mc_map
+g98_mc_intr[] = {
+	{ 0x04000000, NVKM_ENGINE_DISP },
 	{ 0x00020000, NVKM_ENGINE_MSPDEC },
-	{ 0x00040000, NVKM_SUBDEV_PMU },	/* NVA3:NVC0 */
-	{ 0x00080000, NVKM_SUBDEV_THERM },	/* NVA3:NVC0 */
-	{ 0x00100000, NVKM_SUBDEV_TIMER },
-	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVKM_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x00400000, NVKM_ENGINE_CE0 },	/* NVA3-     */
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00004000, NVKM_ENGINE_SEC },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x0002d101, NVKM_SUBDEV_FB },
 	{ 0x10000000, NVKM_SUBDEV_BUS },
-	{ 0x80000000, NVKM_ENGINE_SW },
-	{ 0x0042d101, NVKM_SUBDEV_FB },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
 	{},
 };
 
@@ -51,6 +58,7 @@
 	.intr_unarm = nv04_mc_intr_unarm,
 	.intr_rearm = nv04_mc_intr_rearm,
 	.intr_mask = nv04_mc_intr_mask,
+	.reset = g98_mc_reset,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 122fe69..8397e22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -23,28 +23,38 @@
  */
 #include "priv.h"
 
-const struct nvkm_mc_intr
-gf100_mc_intr[] = {
-	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
-	{ 0x00000001, NVKM_ENGINE_MSPPP },
-	{ 0x00000020, NVKM_ENGINE_CE0 },
-	{ 0x00000040, NVKM_ENGINE_CE1 },
-	{ 0x00000080, NVKM_ENGINE_CE2 },
-	{ 0x00000100, NVKM_ENGINE_FIFO },
-	{ 0x00001000, NVKM_ENGINE_GR },
-	{ 0x00002000, NVKM_SUBDEV_FB },
-	{ 0x00008000, NVKM_ENGINE_MSVLD },
-	{ 0x00040000, NVKM_SUBDEV_THERM },
+static const struct nvkm_mc_map
+gf100_mc_reset[] = {
 	{ 0x00020000, NVKM_ENGINE_MSPDEC },
-	{ 0x00100000, NVKM_SUBDEV_TIMER },
-	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVKM_SUBDEV_I2C },	/* PMGR->I2C/AUX */
-	{ 0x01000000, NVKM_SUBDEV_PMU },
-	{ 0x02000000, NVKM_SUBDEV_LTC },
-	{ 0x08000000, NVKM_SUBDEV_FB },
-	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000080, NVKM_ENGINE_CE1 },
+	{ 0x00000040, NVKM_ENGINE_CE0 },
+	{ 0x00000002, NVKM_ENGINE_MSPPP },
+	{}
+};
+
+static const struct nvkm_mc_map
+gf100_mc_intr[] = {
+	{ 0x04000000, NVKM_ENGINE_DISP },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000040, NVKM_ENGINE_CE1 },
+	{ 0x00000020, NVKM_ENGINE_CE0 },
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
 	{ 0x40000000, NVKM_SUBDEV_IBUS },
-	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x08000000, NVKM_SUBDEV_FB },
+	{ 0x02000000, NVKM_SUBDEV_LTC },
+	{ 0x01000000, NVKM_SUBDEV_PMU },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00040000, NVKM_SUBDEV_THERM },
+	{ 0x00002000, NVKM_SUBDEV_FB },
 	{},
 };
 
@@ -87,6 +97,7 @@
 	.intr_unarm = gf100_mc_intr_unarm,
 	.intr_rearm = gf100_mc_intr_rearm,
 	.intr_mask = gf100_mc_intr_mask,
+	.reset = gf100_mc_reset,
 	.unk260 = gf100_mc_unk260,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
new file mode 100644
index 0000000..3174642
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+const struct nvkm_mc_map
+gk104_mc_reset[] = {
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{}
+};
+
+const struct nvkm_mc_map
+gk104_mc_intr[] = {
+	{ 0x04000000, NVKM_ENGINE_DISP },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x08000000, NVKM_SUBDEV_FB },
+	{ 0x02000000, NVKM_SUBDEV_LTC },
+	{ 0x01000000, NVKM_SUBDEV_PMU },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00040000, NVKM_SUBDEV_THERM },
+	{ 0x00002000, NVKM_SUBDEV_FB },
+	{},
+};
+
+static const struct nvkm_mc_func
+gk104_mc = {
+	.init = nv50_mc_init,
+	.intr = gk104_mc_intr,
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
+	.reset = gk104_mc_reset,
+	.unk260 = gf100_mc_unk260,
+};
+
+int
+gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gk104_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index d92efb3..60b044f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -26,10 +26,11 @@
 static const struct nvkm_mc_func
 gk20a_mc = {
 	.init = nv50_mc_init,
-	.intr = gf100_mc_intr,
+	.intr = gk104_mc_intr,
 	.intr_unarm = gf100_mc_intr_unarm,
 	.intr_rearm = gf100_mc_intr_rearm,
 	.intr_mask = gf100_mc_intr_mask,
+	.reset = gk104_mc_reset,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
new file mode 100644
index 0000000..aad0ba9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static const struct nvkm_mc_map
+gt215_mc_reset[] = {
+	{ 0x04008000, NVKM_ENGINE_MSVLD },
+	{ 0x01020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00802000, NVKM_ENGINE_CE0 },
+	{ 0x00400002, NVKM_ENGINE_MSPPP },
+	{ 0x00201000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{}
+};
+
+static const struct nvkm_mc_map
+gt215_mc_intr[] = {
+	{ 0x04000000, NVKM_ENGINE_DISP },
+	{ 0x00400000, NVKM_ENGINE_CE0 },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00429101, NVKM_SUBDEV_FB },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00080000, NVKM_SUBDEV_THERM },
+	{ 0x00040000, NVKM_SUBDEV_PMU },
+	{},
+};
+
+static const struct nvkm_mc_func
+gt215_mc = {
+	.init = nv50_mc_init,
+	.intr = gt215_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+	.reset = gt215_mc_reset,
+};
+
+int
+gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gt215_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index d282ec1..a062624 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -23,18 +23,20 @@
  */
 #include "priv.h"
 
-const struct nvkm_mc_intr
-nv04_mc_intr[] = {
-	{ 0x00000001, NVKM_ENGINE_MPEG },	/* NV17- MPEG/ME */
-	{ 0x00000100, NVKM_ENGINE_FIFO },
+const struct nvkm_mc_map
+nv04_mc_reset[] = {
 	{ 0x00001000, NVKM_ENGINE_GR },
-	{ 0x00010000, NVKM_ENGINE_DISP },
-	{ 0x00020000, NVKM_ENGINE_VP },	/* NV40- */
-	{ 0x00100000, NVKM_SUBDEV_TIMER },
-	{ 0x01000000, NVKM_ENGINE_DISP },	/* NV04- PCRTC0 */
-	{ 0x02000000, NVKM_ENGINE_DISP },	/* NV11- PCRTC1 */
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{}
+};
+
+static const struct nvkm_mc_map
+nv04_mc_intr[] = {
+	{ 0x01010000, NVKM_ENGINE_DISP },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
 	{ 0x10000000, NVKM_SUBDEV_BUS },
-	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
 	{}
 };
 
@@ -74,6 +76,7 @@
 	.intr_unarm = nv04_mc_intr_unarm,
 	.intr_rearm = nv04_mc_intr_rearm,
 	.intr_mask = nv04_mc_intr_mask,
+	.reset = nv04_mc_reset,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
new file mode 100644
index 0000000..55f0b91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_mc_map
+nv11_mc_intr[] = {
+	{ 0x03010000, NVKM_ENGINE_DISP },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{}
+};
+
+static const struct nvkm_mc_func
+nv11_mc = {
+	.init = nv04_mc_init,
+	.intr = nv11_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+	.reset = nv04_mc_reset,
+};
+
+int
+nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv11_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
new file mode 100644
index 0000000..c40fa67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+const struct nvkm_mc_map
+nv17_mc_reset[] = {
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000002, NVKM_ENGINE_MPEG },
+	{}
+};
+
+const struct nvkm_mc_map
+nv17_mc_intr[] = {
+	{ 0x03010000, NVKM_ENGINE_DISP },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000001, NVKM_ENGINE_MPEG },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{}
+};
+
+static const struct nvkm_mc_func
+nv17_mc = {
+	.init = nv04_mc_init,
+	.intr = nv17_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+	.reset = nv17_mc_reset,
+};
+
+int
+nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv17_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 9a3ac99..cc56271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -40,10 +40,11 @@
 static const struct nvkm_mc_func
 nv44_mc = {
 	.init = nv44_mc_init,
-	.intr = nv04_mc_intr,
+	.intr = nv17_mc_intr,
 	.intr_unarm = nv04_mc_intr_unarm,
 	.intr_rearm = nv04_mc_intr_rearm,
 	.intr_mask = nv04_mc_intr_mask,
+	.reset = nv17_mc_reset,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 5f27d7b..343b607 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -23,21 +23,17 @@
  */
 #include "priv.h"
 
-const struct nvkm_mc_intr
+static const struct nvkm_mc_map
 nv50_mc_intr[] = {
-	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
-	{ 0x00000001, NVKM_ENGINE_MPEG },
-	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x04000000, NVKM_ENGINE_DISP },
 	{ 0x00001000, NVKM_ENGINE_GR },
-	{ 0x00004000, NVKM_ENGINE_CIPHER },	/* NV84- */
-	{ 0x00008000, NVKM_ENGINE_BSP },	/* NV84- */
-	{ 0x00020000, NVKM_ENGINE_VP },	/* NV84- */
-	{ 0x00100000, NVKM_SUBDEV_TIMER },
-	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVKM_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000001, NVKM_ENGINE_MPEG },
+	{ 0x00001101, NVKM_SUBDEV_FB },
 	{ 0x10000000, NVKM_SUBDEV_BUS },
-	{ 0x80000000, NVKM_ENGINE_SW },
-	{ 0x0002d101, NVKM_SUBDEV_FB },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
 	{},
 };
 
@@ -55,6 +51,7 @@
 	.intr_unarm = nv04_mc_intr_unarm,
 	.intr_rearm = nv04_mc_intr_rearm,
 	.intr_mask = nv04_mc_intr_mask,
+	.reset = nv17_mc_reset,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 307f6c6..a120381 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -6,37 +6,42 @@
 int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
 		 int index, struct nvkm_mc **);
 
-struct nvkm_mc_intr {
+struct nvkm_mc_map {
 	u32 stat;
 	u32 unit;
 };
 
 struct nvkm_mc_func {
 	void (*init)(struct nvkm_mc *);
-	const struct nvkm_mc_intr *intr;
+	const struct nvkm_mc_map *intr;
 	/* disable reporting of interrupts to host */
 	void (*intr_unarm)(struct nvkm_mc *);
 	/* enable reporting of interrupts to host */
 	void (*intr_rearm)(struct nvkm_mc *);
 	/* retrieve pending interrupt mask (NV_PMC_INTR) */
 	u32 (*intr_mask)(struct nvkm_mc *);
+	const struct nvkm_mc_map *reset;
 	void (*unk260)(struct nvkm_mc *, u32);
 };
 
 void nv04_mc_init(struct nvkm_mc *);
-extern const struct nvkm_mc_intr nv04_mc_intr[];
 void nv04_mc_intr_unarm(struct nvkm_mc *);
 void nv04_mc_intr_rearm(struct nvkm_mc *);
 u32 nv04_mc_intr_mask(struct nvkm_mc *);
+extern const struct nvkm_mc_map nv04_mc_reset[];
+
+extern const struct nvkm_mc_map nv17_mc_intr[];
+extern const struct nvkm_mc_map nv17_mc_reset[];
 
 void nv44_mc_init(struct nvkm_mc *);
 
 void nv50_mc_init(struct nvkm_mc *);
-extern const struct nvkm_mc_intr nv50_mc_intr[];
 
-extern const struct nvkm_mc_intr gf100_mc_intr[];
 void gf100_mc_intr_unarm(struct nvkm_mc *);
 void gf100_mc_intr_rearm(struct nvkm_mc *);
 u32 gf100_mc_intr_mask(struct nvkm_mc *);
 void gf100_mc_unk260(struct nvkm_mc *, u32);
+
+extern const struct nvkm_mc_map gk104_mc_intr[];
+extern const struct nvkm_mc_map gk104_mc_reset[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index e04a229..5df9669 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -524,7 +524,7 @@
 nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
 	      int index, struct nvkm_mmu *mmu)
 {
-	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
+	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
 	mmu->func = func;
 	mmu->limit = func->limit;
 	mmu->dma_bits = func->dma_bits;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 9700a76..21b65ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -241,7 +241,7 @@
 	if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
+	nvkm_subdev_ctor(&nvkm_mxm, device, index, &mxm->subdev);
 
 	data = mxm_table(bios, &ver, &len);
 	if (!data || !(ver = nvbios_rd08(bios, data))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 65057c8..6b0328b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -168,7 +168,7 @@
 
 	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
+	nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
 	pci->func = func;
 	pci->pdev = device->func->pci(device)->pdev;
 	pci->irq = -1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index d95eb86..8dd164d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -40,21 +40,23 @@
 	struct nvkm_device *device = subdev->device;
 	u32 addr;
 
+	mutex_lock(&subdev->mutex);
 	/* wait for a free slot in the fifo */
 	addr  = nvkm_rd32(device, 0x10a4a0);
 	if (nvkm_msec(device, 2000,
 		u32 tmp = nvkm_rd32(device, 0x10a4b0);
 		if (tmp != (addr ^ 8))
 			break;
-	) < 0)
+	) < 0) {
+		mutex_unlock(&subdev->mutex);
 		return -EBUSY;
+	}
 
 	/* we currently only support a single process at a time waiting
 	 * on a synchronous reply, take the PMU mutex and tell the
 	 * receive handler what we're waiting for
 	 */
 	if (reply) {
-		mutex_lock(&subdev->mutex);
 		pmu->recv.message = message;
 		pmu->recv.process = process;
 	}
@@ -81,9 +83,9 @@
 		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
 		reply[0] = pmu->recv.data[0];
 		reply[1] = pmu->recv.data[1];
-		mutex_unlock(&subdev->mutex);
 	}
 
+	mutex_unlock(&subdev->mutex);
 	return 0;
 }
 
@@ -272,7 +274,7 @@
 	struct nvkm_pmu *pmu;
 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
+	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
 	pmu->func = func;
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 6689d02..f996d90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -220,7 +220,7 @@
 	pmu->base.func = &func;
 	*ppmu = &pmu->base;
 
-	nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
+	nvkm_subdev_ctor(&gk20a_pmu, device, index, &pmu->base.subdev);
 	pmu->data = &gk20a_dvfs_data;
 	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 520facf..213fdba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -264,7 +264,7 @@
 {
 	unsigned long fid;
 
-	nvkm_subdev_ctor(&nvkm_secboot, device, index, 0, &sb->subdev);
+	nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
 	sb->func = func;
 
 	/* setup the performing falcon's base address and masks */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 949dc61..8894fee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -366,7 +366,7 @@
 	if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
+	nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
 	therm->func = func;
 
 	nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d4dae1f..07dc82b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -143,7 +143,7 @@
 	if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
 		return -ENOMEM;
 
-	nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
+	nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
 	tmr->func = func;
 	INIT_LIST_HEAD(&tmr->alarms);
 	spin_lock_init(&tmr->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
new file mode 100644
index 0000000..1078401
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/subdev/top/base.o
+nvkm-y += nvkm/subdev/top/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
new file mode 100644
index 0000000..a1b2646
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+struct nvkm_top_device *
+nvkm_top_device_new(struct nvkm_top *top)
+{
+	struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (info) {
+		info->index = NVKM_SUBDEV_NR;
+		info->addr = 0;
+		info->fault = -1;
+		info->engine = -1;
+		info->runlist = -1;
+		info->reset = -1;
+		info->intr = -1;
+		list_add_tail(&info->head, &top->device);
+	}
+	return info;
+}
+
+u32
+nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+{
+	struct nvkm_top_device *info;
+
+	if (top) {
+		list_for_each_entry(info, &top->device, head) {
+			if (info->index == index && info->reset >= 0)
+				return BIT(info->reset);
+		}
+	}
+
+	return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+{
+	struct nvkm_top_device *info;
+	u64 subdevs = 0;
+	u32 handled = 0;
+
+	if (top) {
+		list_for_each_entry(info, &top->device, head) {
+			if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
+				if (intr & BIT(info->intr)) {
+					subdevs |= BIT_ULL(info->index);
+					handled |= BIT(info->intr);
+				}
+			}
+		}
+	}
+
+	*psubdevs = subdevs;
+	return intr & ~handled;
+}
+
+enum nvkm_devidx
+nvkm_top_fault(struct nvkm_top *top, int fault)
+{
+	struct nvkm_top_device *info;
+
+	list_for_each_entry(info, &top->device, head) {
+		if (info->fault == fault)
+			return info->index;
+	}
+
+	return NVKM_SUBDEV_NR;
+}
+
+enum nvkm_devidx
+nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+{
+	struct nvkm_top_device *info;
+	int n = 0;
+
+	list_for_each_entry(info, &top->device, head) {
+		if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
+			*runl = info->runlist;
+			*engn = info->engine;
+			return info->index;
+		}
+	}
+
+	return -ENODEV;
+}
+
+static int
+nvkm_top_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_top *top = nvkm_top(subdev);
+	return top->func->oneinit(top);
+}
+
+static void *
+nvkm_top_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_top *top = nvkm_top(subdev);
+	struct nvkm_top_device *info, *temp;
+
+	list_for_each_entry_safe(info, temp, &top->device, head) {
+		list_del(&info->head);
+		kfree(info);
+	}
+
+	return top;
+}
+
+static const struct nvkm_subdev_func
+nvkm_top = {
+	.dtor = nvkm_top_dtor,
+	.oneinit = nvkm_top_oneinit,
+};
+
+int
+nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_top **ptop)
+{
+	struct nvkm_top *top;
+	if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
+	top->func = func;
+	INIT_LIST_HEAD(&top->device);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
new file mode 100644
index 0000000..e06acc3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static int
+gk104_top_oneinit(struct nvkm_top *top)
+{
+	struct nvkm_subdev *subdev = &top->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_top_device *info = NULL;
+	u32 data, type;
+	int i;
+
+	for (i = 0; i < 64; i++) {
+		if (!info) {
+			if (!(info = nvkm_top_device_new(top)))
+				return -ENOMEM;
+			type = ~0;
+		}
+
+		data = nvkm_rd32(device, 0x022700 + (i * 0x04));
+		nvkm_trace(subdev, "%02x: %08x\n", i, data);
+		switch (data & 0x00000003) {
+		case 0x00000000: /* NOT_VALID */
+			continue;
+		case 0x00000001: /* DATA */
+			info->addr  = (data & 0x00fff000);
+			info->fault = (data & 0x000000f8) >> 3;
+			break;
+		case 0x00000002: /* ENUM */
+			if (data & 0x00000020)
+				info->engine  = (data & 0x3c000000) >> 26;
+			if (data & 0x00000010)
+				info->runlist = (data & 0x01e00000) >> 21;
+			if (data & 0x00000008)
+				info->intr    = (data & 0x000f8000) >> 15;
+			if (data & 0x00000004)
+				info->reset   = (data & 0x00003e00) >> 9;
+			break;
+		case 0x00000003: /* ENGINE_TYPE */
+			type = (data & 0x7ffffffc) >> 2;
+			break;
+		}
+
+		if (data & 0x80000000)
+			continue;
+
+		/* Translate engine type to NVKM engine identifier. */
+		switch (type) {
+		case 0x00000000: info->index = NVKM_ENGINE_GR; break;
+		case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
+		case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
+		case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
+		case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
+		case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
+		case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
+		case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
+		case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
+		case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
+		case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
+		case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
+		case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+			break;
+		default:
+			break;
+		}
+
+		nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
+				   "runlist %2d intr %2d reset %2d\n", type,
+			   info->index == NVKM_SUBDEV_NR ? NULL :
+					  nvkm_subdev_name[info->index],
+			   info->addr, info->fault, info->engine, info->runlist,
+			   info->intr, info->reset);
+		info = NULL;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_top_func
+gk104_top = {
+	.oneinit = gk104_top_oneinit,
+};
+
+int
+gk104_top_new(struct nvkm_device *device, int index, struct nvkm_top **ptop)
+{
+	return nvkm_top_new_(&gk104_top, device, index, ptop);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
new file mode 100644
index 0000000..adb3ed0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -0,0 +1,25 @@
+#ifndef __NVKM_TOP_PRIV_H__
+#define __NVKM_TOP_PRIV_H__
+#define nvkm_top(p) container_of((p), struct nvkm_top, subdev)
+#include <subdev/top.h>
+
+struct nvkm_top_func {
+	int (*oneinit)(struct nvkm_top *);
+};
+
+int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *,
+		  int, struct nvkm_top **);
+
+struct nvkm_top_device {
+	enum nvkm_devidx index;
+	u32 addr;
+	int fault;
+	int engine;
+	int runlist;
+	int reset;
+	int intr;
+	struct list_head head;
+};
+
+struct nvkm_top_device *nvkm_top_device_new(struct nvkm_top *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 50b5649..6b2d753 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -177,7 +177,7 @@
 	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
+	nvkm_subdev_ctor(&nvkm_volt, device, index, &volt->subdev);
 	volt->func = func;
 
 	/* Assuming the non-bios device should build the voltage table later */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index b735173..420bd84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -56,7 +56,7 @@
 
 	/* the blob uses this crystal frequency, let's use it too. */
 	div = 27648000 / bios->pwm_freq;
-	duty = (uv - bios->base) * div / bios->pwm_range;
+	duty = DIV_ROUND_UP((uv - bios->base) * div, bios->pwm_range);
 
 	nvkm_wr32(device, 0x20340, div);
 	nvkm_wr32(device, 0x20344, 0x80000000 | duty);
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 73241c4..336ad4d 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,6 +2,7 @@
 	tristate "OMAP DRM"
 	depends on DRM
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+	select OMAP2_DSS
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 225fd8d..667ca4a 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  */
 
+#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 8c246c2..9594ff7 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -14,7 +14,7 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 2fd5602..671806c 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -9,7 +9,7 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index e780fd4..7c2331b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -9,7 +9,7 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 36485c2..2b11807 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -14,7 +14,7 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 458f77b..ac680e1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -15,6 +15,7 @@
 #include <linux/spi/spi.h>
 #include <linux/mutex.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <video/omapdss.h>
 #include <video/omap-panel-data.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 780cb26..38d2920 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -15,7 +15,7 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 
 #include <video/omapdss.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 529a017..4363fff 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -10,7 +10,7 @@
  */
 
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 31efcca..deb4167 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -29,7 +29,7 @@
 #include <linux/sched.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 03e2beb..d93175b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -14,7 +14,7 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 8730646..56c43f3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1167,7 +1167,6 @@
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	struct regulator *vdds_dsi;
-	int r;
 
 	if (dsi->vdds_dsi_reg != NULL)
 		return 0;
@@ -1180,15 +1179,6 @@
 		return PTR_ERR(vdds_dsi);
 	}
 
-	if (regulator_can_change_voltage(vdds_dsi)) {
-		r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
-		if (r) {
-			devm_regulator_put(vdds_dsi);
-			DSSERR("can't set the DSI regulator voltage\n");
-			return r;
-		}
-	}
-
 	dsi->vdds_dsi_reg = vdds_dsi;
 
 	return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index f95ff31..3303cfa 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/gfp.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f892ae15..4d46cdf 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -33,6 +33,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
+#include <linux/of.h>
 #include <video/omapdss.h>
 #include <sound/omap-hdmi-audio.h>
 
@@ -100,7 +101,6 @@
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -114,15 +114,6 @@
 		return PTR_ERR(reg);
 	}
 
-	if (regulator_can_change_voltage(reg)) {
-		r = regulator_set_voltage(reg, 1800000, 1800000);
-		if (r) {
-			devm_regulator_put(reg);
-			DSSWARN("can't set the regulator voltage\n");
-			return r;
-		}
-	}
-
 	hdmi.vdda_reg = reg;
 
 	return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index fa72e73..ef3afe9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -211,7 +211,7 @@
 static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
 {
 	DSSDBG("Enter hdmi_core_powerdown_disable\n");
-	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+	REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
 }
 
 static void hdmi_core_swreset_release(struct hdmi_core_data *core)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a43f7b1..9255c0e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -38,6 +38,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
+#include <linux/of.h>
 #include <video/omapdss.h>
 #include <sound/omap-hdmi-audio.h>
 
@@ -119,7 +120,6 @@
 
 static int hdmi_init_regulator(void)
 {
-	int r;
 	struct regulator *reg;
 
 	if (hdmi.vdda_reg != NULL)
@@ -131,15 +131,6 @@
 		return PTR_ERR(reg);
 	}
 
-	if (regulator_can_change_voltage(reg)) {
-		r = regulator_set_voltage(reg, 1800000, 1800000);
-		if (r) {
-			devm_regulator_put(reg);
-			DSSWARN("can't set the regulator voltage\n");
-			return r;
-		}
-	}
-
 	hdmi.vdda_reg = reg;
 
 	return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 6a39752..8ab2093 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -51,8 +51,8 @@
 {
 	void __iomem *base = core->base;
 	const unsigned long long iclk = 266000000;	/* DSS L3 ICLK */
-	const unsigned ss_scl_high = 4000;		/* ns */
-	const unsigned ss_scl_low = 4700;		/* ns */
+	const unsigned ss_scl_high = 4600;		/* ns */
+	const unsigned ss_scl_low = 5400;		/* ns */
 	const unsigned fs_scl_high = 600;		/* ns */
 	const unsigned fs_scl_low = 1300;		/* ns */
 	const unsigned sda_hold = 1000;			/* ns */
@@ -458,7 +458,7 @@
 
 	c = (ptr[1] >> 6) & 0x3;
 	m = (ptr[1] >> 4) & 0x3;
-	r = (ptr[1] >> 0) & 0x3;
+	r = (ptr[1] >> 0) & 0xf;
 
 	itc = (ptr[2] >> 7) & 0x1;
 	ec = (ptr[2] >> 4) & 0x7;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 1f5d19c..f98b750 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/seq_file.h>
 #include <video/omapdss.h>
 
 #include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 06e23a7..f1015e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/seq_file.h>
 
 #include <video/omapdss.h>
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 13442b9..055f62f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/seq_file.h>
 #include <video/omapdss.h>
 
 #include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 6f5fc14..479bf24 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -17,6 +17,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index de275a5..4ceed7a9 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h> /* platform_device() */
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 80398a6..d86f547 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -138,7 +138,7 @@
 }
 
 static int omap_atomic_commit(struct drm_device *dev,
-			      struct drm_atomic_state *state, bool async)
+			      struct drm_atomic_state *state, bool nonblock)
 {
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_atomic_state_commit *commit;
@@ -177,7 +177,7 @@
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(dev, state);
 
-	if (async)
+	if (nonblock)
 		schedule_work(&commit->work);
 	else
 		omap_atomic_complete(commit);
@@ -561,7 +561,7 @@
 
 	VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -584,7 +584,7 @@
 
 	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!obj)
 		return -ENOENT;
 
@@ -608,7 +608,7 @@
 
 	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!obj)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0fbe17d..3f823c3 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -257,14 +257,14 @@
 /* should these be made into common util helpers?
  */
 
-static inline int objects_lookup(struct drm_device *dev,
+static inline int objects_lookup(
 		struct drm_file *filp, uint32_t pixel_format,
 		struct drm_gem_object **bos, const uint32_t *handles)
 {
 	int i, n = drm_format_num_planes(pixel_format);
 
 	for (i = 0; i < n; i++) {
-		bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
+		bos[i] = drm_gem_object_lookup(filp, handles[i]);
 		if (!bos[i])
 			goto fail;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 6109623..f84570d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -17,6 +17,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -378,7 +380,7 @@
 	struct drm_framebuffer *fb;
 	int ret;
 
-	ret = objects_lookup(dev, file, mode_cmd->pixel_format,
+	ret = objects_lookup(file, mode_cmd->pixel_format,
 			bos, mode_cmd->handles);
 	if (ret)
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 3cb16f0..89da41a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -153,7 +153,7 @@
 		/* note: if fb creation failed, we can't rely on fb destroy
 		 * to unref the bo:
 		 */
-		drm_gem_object_unreference(fbdev->bo);
+		drm_gem_object_unreference_unlocked(fbdev->bo);
 		ret = PTR_ERR(fb);
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 907154f..03698b6 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -17,6 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pfn_t.h>
@@ -687,7 +688,7 @@
 	int ret = 0;
 
 	/* GEM does all our handle to object mapping */
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 93ee538..5252ab7 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -245,7 +245,7 @@
 static void omap_plane_atomic_destroy_state(struct drm_plane *plane,
 					    struct drm_plane_state *state)
 {
-	__drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(state);
 	kfree(to_omap_plane_state(state));
 }
 
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ceb2048..3a7bdf1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -72,6 +72,7 @@
 	} delay;
 
 	u32 bus_format;
+	u32 bus_flags;
 };
 
 struct panel_simple {
@@ -116,7 +117,11 @@
 		}
 
 		drm_display_mode_from_videomode(&vm, mode);
-		drm_mode_set_name(mode);
+
+		mode->type |= DRM_MODE_TYPE_DRIVER;
+
+		if (panel->desc->num_modes == 1)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
 
 		drm_mode_probed_add(connector, mode);
 		num++;
@@ -132,6 +137,11 @@
 			continue;
 		}
 
+		mode->type |= DRM_MODE_TYPE_DRIVER;
+
+		if (panel->desc->num_modes == 1)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+
 		drm_mode_set_name(mode);
 
 		drm_mode_probed_add(connector, mode);
@@ -144,6 +154,7 @@
 	if (panel->desc->bus_format)
 		drm_display_info_set_bus_formats(&connector->display_info,
 						 &panel->desc->bus_format, 1);
+	connector->display_info.bus_flags = panel->desc->bus_flags;
 
 	return num;
 }
@@ -813,6 +824,29 @@
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode innolux_at070tn92_mode = {
+	.clock = 33333,
+	.hdisplay = 800,
+	.hsync_start = 800 + 210,
+	.hsync_end = 800 + 210 + 20,
+	.htotal = 800 + 210 + 20 + 46,
+	.vdisplay = 480,
+	.vsync_start = 480 + 22,
+	.vsync_end = 480 + 22 + 10,
+	.vtotal = 480 + 22 + 23 + 10,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc innolux_at070tn92 = {
+	.modes = &innolux_at070tn92_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 154,
+		.height = 86,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct drm_display_mode innolux_g121i1_l01_mode = {
 	.clock = 71000,
 	.hdisplay = 1280,
@@ -1051,7 +1085,8 @@
 		.width = 95,
 		.height = 54,
 	},
-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
 };
 
 static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -1084,6 +1119,63 @@
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode olimex_lcd_olinuxino_43ts_mode = {
+	.clock = 9000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 5,
+	.hsync_end = 480 + 5 + 30,
+	.htotal = 480 + 5 + 30 + 10,
+	.vdisplay = 272,
+	.vsync_start = 272 + 8,
+	.vsync_end = 272 + 8 + 5,
+	.vtotal = 272 + 8 + 5 + 3,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc olimex_lcd_olinuxino_43ts = {
+	.modes = &olimex_lcd_olinuxino_43ts_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 105,
+		.height = 67,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
+/*
+ * 800x480 CVT. The panel appears to be quite accepting, at least as far as
+ * pixel clocks go, but this is the timing that was used in the Adafruit
+ * installation instructions.
+ */
+static const struct drm_display_mode ontat_yx700wv03_mode = {
+	.clock = 29500,
+	.hdisplay = 800,
+	.hsync_start = 824,
+	.hsync_end = 896,
+	.htotal = 992,
+	.vdisplay = 480,
+	.vsync_start = 483,
+	.vsync_end = 493,
+	.vtotal = 500,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+/*
+ * Specification at:
+ * https://www.adafruit.com/images/product-files/2406/c3163.pdf
+ */
+static const struct panel_desc ontat_yx700wv03 = {
+	.modes = &ontat_yx700wv03_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 154,
+		.height = 83,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
 	.clock = 25000,
 	.hdisplay = 480,
@@ -1201,6 +1293,51 @@
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode tpk_f07a_0102_mode = {
+	.clock = 33260,
+	.hdisplay = 800,
+	.hsync_start = 800 + 40,
+	.hsync_end = 800 + 40 + 128,
+	.htotal = 800 + 40 + 128 + 88,
+	.vdisplay = 480,
+	.vsync_start = 480 + 10,
+	.vsync_end = 480 + 10 + 2,
+	.vtotal = 480 + 10 + 2 + 33,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc tpk_f07a_0102 = {
+	.modes = &tpk_f07a_0102_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 152,
+		.height = 91,
+	},
+	.bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct drm_display_mode tpk_f10a_0102_mode = {
+	.clock = 45000,
+	.hdisplay = 1024,
+	.hsync_start = 1024 + 176,
+	.hsync_end = 1024 + 176 + 5,
+	.htotal = 1024 + 176 + 5 + 88,
+	.vdisplay = 600,
+	.vsync_start = 600 + 20,
+	.vsync_end = 600 + 20 + 5,
+	.vtotal = 600 + 20 + 5 + 25,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc tpk_f10a_0102 = {
+	.modes = &tpk_f10a_0102_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 223,
+		.height = 125,
+	},
+};
+
 static const struct display_timing urt_umsh_8596md_timing = {
 	.pixelclock = { 33260000, 33260000, 33260000 },
 	.hactive = { 800, 800, 800 },
@@ -1296,6 +1433,9 @@
 		.compatible = "innolux,at043tn24",
 		.data = &innolux_at043tn24,
 	}, {
+		.compatible = "innolux,at070tn92",
+		.data = &innolux_at070tn92,
+	}, {
 		.compatible ="innolux,g121i1-l01",
 		.data = &innolux_g121i1_l01
 	}, {
@@ -1329,6 +1469,12 @@
 		.compatible = "okaya,rs800480t-7x0gp",
 		.data = &okaya_rs800480t_7x0gp,
 	}, {
+		.compatible = "olimex,lcd-olinuxino-43-ts",
+		.data = &olimex_lcd_olinuxino_43ts,
+	}, {
+		.compatible = "ontat,yx700wv03",
+		.data = &ontat_yx700wv03,
+	}, {
 		.compatible = "ortustech,com43h4m85ulc",
 		.data = &ortustech_com43h4m85ulc,
 	}, {
@@ -1344,6 +1490,12 @@
 		.compatible = "shelly,sca07010-bfn-lnn",
 		.data = &shelly_sca07010_bfn_lnn,
 	}, {
+		.compatible = "tpk,f07a-0102",
+		.data = &tpk_f07a_0102,
+	}, {
+		.compatible = "tpk,f10a-0102",
+		.data = &tpk_f10a_0102,
+	}, {
 		.compatible = "urt,umsh-8596md-t",
 		.data = &urt_umsh_8596md_parallel,
 	}, {
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index fdc1833..b5d4b41 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -624,7 +624,7 @@
 	if (stall)
 		mutex_unlock(&qdev->surf_evict_mutex);
 
-	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+	ret = ttm_bo_wait(&surf->tbo, true, !stall);
 
 	if (stall)
 		mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 030409a..8b5d543 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -318,7 +318,7 @@
 	if (!handle)
 		return qxl_hide_cursor(qdev);
 
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("cannot find cursor object\n");
 		return -ENOENT;
@@ -465,7 +465,7 @@
 	.page_flip = qxl_crtc_page_flip,
 };
 
-static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
 
@@ -527,12 +527,13 @@
 qxl_framebuffer_init(struct drm_device *dev,
 		     struct qxl_framebuffer *qfb,
 		     const struct drm_mode_fb_cmd2 *mode_cmd,
-		     struct drm_gem_object *obj)
+		     struct drm_gem_object *obj,
+		     const struct drm_framebuffer_funcs *funcs)
 {
 	int ret;
 
 	qfb->obj = obj;
-	ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+	ret = drm_framebuffer_init(dev, &qfb->base, funcs);
 	if (ret) {
 		qfb->obj = NULL;
 		return ret;
@@ -993,13 +994,15 @@
 	struct qxl_framebuffer *qxl_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
+	if (!obj)
+		return NULL;
 
 	qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
 	if (qxl_fb == NULL)
 		return NULL;
 
-	ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+	ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
 	if (ret) {
 		kfree(qxl_fb);
 		drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 7307b07..dc9df5f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -272,10 +272,8 @@
 
 static int __init qxl_init(void)
 {
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && qxl_modeset == -1)
 		return -EINVAL;
-#endif
 
 	if (qxl_modeset == 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3f3897e..3ad6604 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -324,8 +324,6 @@
 	struct workqueue_struct *gc_queue;
 	struct work_struct gc_work;
 
-	struct work_struct fb_work;
-
 	struct drm_property *hotplug_mode_update_property;
 	int monitors_config_width;
 	int monitors_config_height;
@@ -389,11 +387,13 @@
 void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
 
 /* qxl_display.c */
+void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb);
 int
 qxl_framebuffer_init(struct drm_device *dev,
 		     struct qxl_framebuffer *rfb,
 		     const struct drm_mode_fb_cmd2 *mode_cmd,
-		     struct drm_gem_object *obj);
+		     struct drm_gem_object *obj,
+		     const struct drm_framebuffer_funcs *funcs);
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
 void qxl_send_monitors_config(struct qxl_device *qdev);
 int qxl_create_monitors_object(struct qxl_device *qdev);
@@ -553,7 +553,6 @@
 irqreturn_t qxl_irq_handler(int irq, void *arg);
 
 /* qxl_fb.c */
-int qxl_fb_init(struct qxl_device *qdev);
 bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
 
 int qxl_debugfs_add_files(struct qxl_device *qdev,
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index d34bb41..5e65d5d 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -76,7 +76,7 @@
 	struct qxl_bo *qobj;
 
 	BUG_ON(!offset_p);
-	gobj = drm_gem_object_lookup(dev, file_priv, handle);
+	gobj = drm_gem_object_lookup(file_priv, handle);
 	if (gobj == NULL)
 		return -ENOENT;
 	qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 7136e52..5ea57f6 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -46,15 +46,6 @@
 	struct list_head delayed_ops;
 	void *shadow;
 	int size;
-
-	/* dirty memory logging */
-	struct {
-		spinlock_t lock;
-		unsigned x1;
-		unsigned y1;
-		unsigned x2;
-		unsigned y2;
-	} dirty;
 };
 
 static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
@@ -82,169 +73,18 @@
 	}
 }
 
-static void qxl_fb_dirty_flush(struct fb_info *info)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-	struct qxl_device *qdev = qfbdev->qdev;
-	struct qxl_fb_image qxl_fb_image;
-	struct fb_image *image = &qxl_fb_image.fb_image;
-	unsigned long flags;
-	u32 x1, x2, y1, y2;
-
-	/* TODO: hard coding 32 bpp */
-	int stride = qfbdev->qfb.base.pitches[0];
-
-	spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
-	x1 = qfbdev->dirty.x1;
-	x2 = qfbdev->dirty.x2;
-	y1 = qfbdev->dirty.y1;
-	y2 = qfbdev->dirty.y2;
-	qfbdev->dirty.x1 = 0;
-	qfbdev->dirty.x2 = 0;
-	qfbdev->dirty.y1 = 0;
-	qfbdev->dirty.y2 = 0;
-
-	spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
-	/*
-	 * we are using a shadow draw buffer, at qdev->surface0_shadow
-	 */
-	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
-	image->dx = x1;
-	image->dy = y1;
-	image->width = x2 - x1 + 1;
-	image->height = y2 - y1 + 1;
-	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
-					 warnings */
-	image->bg_color = 0;
-	image->depth = 32;	     /* TODO: take from somewhere? */
-	image->cmap.start = 0;
-	image->cmap.len = 0;
-	image->cmap.red = NULL;
-	image->cmap.green = NULL;
-	image->cmap.blue = NULL;
-	image->cmap.transp = NULL;
-	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
-
-	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
-	qxl_draw_opaque_fb(&qxl_fb_image, stride);
-}
-
-static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
-			     int x, int y, int width, int height)
-{
-	struct qxl_device *qdev = qfbdev->qdev;
-	unsigned long flags;
-	int x2, y2;
-
-	x2 = x + width - 1;
-	y2 = y + height - 1;
-
-	spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
-	if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
-	    (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
-		if (qfbdev->dirty.y1 < y)
-			y = qfbdev->dirty.y1;
-		if (qfbdev->dirty.y2 > y2)
-			y2 = qfbdev->dirty.y2;
-		if (qfbdev->dirty.x1 < x)
-			x = qfbdev->dirty.x1;
-		if (qfbdev->dirty.x2 > x2)
-			x2 = qfbdev->dirty.x2;
-	}
-
-	qfbdev->dirty.x1 = x;
-	qfbdev->dirty.x2 = x2;
-	qfbdev->dirty.y1 = y;
-	qfbdev->dirty.y2 = y2;
-
-	spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
-	schedule_work(&qdev->fb_work);
-}
-
-static void qxl_deferred_io(struct fb_info *info,
-			    struct list_head *pagelist)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-	unsigned long start, end, min, max;
-	struct page *page;
-	int y1, y2;
-
-	min = ULONG_MAX;
-	max = 0;
-	list_for_each_entry(page, pagelist, lru) {
-		start = page->index << PAGE_SHIFT;
-		end = start + PAGE_SIZE - 1;
-		min = min(min, start);
-		max = max(max, end);
-	}
-
-	if (min < max) {
-		y1 = min / info->fix.line_length;
-		y2 = (max / info->fix.line_length) + 1;
-		qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
-	}
-};
-
 static struct fb_deferred_io qxl_defio = {
 	.delay		= QXL_DIRTY_DELAY,
-	.deferred_io	= qxl_deferred_io,
+	.deferred_io	= drm_fb_helper_deferred_io,
 };
 
-static void qxl_fb_fillrect(struct fb_info *info,
-			    const struct fb_fillrect *rect)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-
-	drm_fb_helper_sys_fillrect(info, rect);
-	qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
-			 rect->height);
-}
-
-static void qxl_fb_copyarea(struct fb_info *info,
-			    const struct fb_copyarea *area)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-
-	drm_fb_helper_sys_copyarea(info, area);
-	qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
-			 area->height);
-}
-
-static void qxl_fb_imageblit(struct fb_info *info,
-			     const struct fb_image *image)
-{
-	struct qxl_fbdev *qfbdev = info->par;
-
-	drm_fb_helper_sys_imageblit(info, image);
-	qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
-			 image->height);
-}
-
-static void qxl_fb_work(struct work_struct *work)
-{
-	struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
-	struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
-
-	qxl_fb_dirty_flush(qfbdev->helper.fbdev);
-}
-
-int qxl_fb_init(struct qxl_device *qdev)
-{
-	INIT_WORK(&qdev->fb_work, qxl_fb_work);
-	return 0;
-}
-
 static struct fb_ops qxlfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
-	.fb_fillrect = qxl_fb_fillrect,
-	.fb_copyarea = qxl_fb_copyarea,
-	.fb_imageblit = qxl_fb_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -338,6 +178,57 @@
 	return ret;
 }
 
+/*
+ * FIXME
+ * It should not be necessary to have a special dirty() callback for fbdev.
+ */
+static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
+				   struct drm_file *file_priv,
+				   unsigned flags, unsigned color,
+				   struct drm_clip_rect *clips,
+				   unsigned num_clips)
+{
+	struct qxl_device *qdev = fb->dev->dev_private;
+	struct fb_info *info = qdev->fbdev_info;
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_fb_image qxl_fb_image;
+	struct fb_image *image = &qxl_fb_image.fb_image;
+
+	/* TODO: hard coding 32 bpp */
+	int stride = qfbdev->qfb.base.pitches[0];
+
+	/*
+	 * we are using a shadow draw buffer, at qdev->surface0_shadow
+	 */
+	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+		   clips->y1, clips->y2);
+	image->dx = clips->x1;
+	image->dy = clips->y1;
+	image->width = clips->x2 - clips->x1;
+	image->height = clips->y2 - clips->y1;
+	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+					 warnings */
+	image->bg_color = 0;
+	image->depth = 32;	     /* TODO: take from somewhere? */
+	image->cmap.start = 0;
+	image->cmap.len = 0;
+	image->cmap.red = NULL;
+	image->cmap.green = NULL;
+	image->cmap.blue = NULL;
+	image->cmap.transp = NULL;
+	image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1);
+
+	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+	qxl_draw_opaque_fb(&qxl_fb_image, stride);
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
+	.destroy = qxl_user_framebuffer_destroy,
+	.dirty = qxlfb_framebuffer_dirty,
+};
+
 static int qxlfb_create(struct qxl_fbdev *qfbdev,
 			struct drm_fb_helper_surface_size *sizes)
 {
@@ -360,6 +251,9 @@
 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 
 	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+	if (ret < 0)
+		return ret;
+
 	qbo = gem_to_qxl_bo(gobj);
 	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
 		 mode_cmd.height, mode_cmd.pitches[0]);
@@ -383,7 +277,8 @@
 
 	info->par = qfbdev;
 
-	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
+			     &qxlfb_fb_funcs);
 
 	fb = &qfbdev->qfb.base;
 
@@ -443,11 +338,11 @@
 		}
 	}
 	if (fb && ret) {
-		drm_gem_object_unreference(gobj);
+		drm_gem_object_unreference_unlocked(gobj);
 		drm_framebuffer_cleanup(fb);
 		kfree(fb);
 	}
-	drm_gem_object_unreference(gobj);
+	drm_gem_object_unreference_unlocked(gobj);
 	return ret;
 }
 
@@ -504,7 +399,6 @@
 	qfbdev->qdev = qdev;
 	qdev->mode_info.qfbdev = qfbdev;
 	spin_lock_init(&qfbdev->delayed_ops_lock);
-	spin_lock_init(&qfbdev->dirty.lock);
 	INIT_LIST_HEAD(&qfbdev->delayed_ops);
 
 	drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 7c2e782..5a4c8c4 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -107,15 +107,14 @@
 }
 
 /* return holding the reference to this object */
-static int qxlhw_handle_to_bo(struct qxl_device *qdev,
-			      struct drm_file *file_priv, uint64_t handle,
+static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
 			      struct qxl_release *release, struct qxl_bo **qbo_p)
 {
 	struct drm_gem_object *gobj;
 	struct qxl_bo *qobj;
 	int ret;
 
-	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+	gobj = drm_gem_object_lookup(file_priv, handle);
 	if (!gobj)
 		return -EINVAL;
 
@@ -221,7 +220,7 @@
 		reloc_info[i].type = reloc.reloc_type;
 
 		if (reloc.dst_handle) {
-			ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.dst_handle, release,
+			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
 						 &reloc_info[i].dst_bo);
 			if (ret)
 				goto out_free_bos;
@@ -234,7 +233,7 @@
 
 		/* reserve and validate the reloc dst bo */
 		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
-			ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.src_handle, release,
+			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
 						 &reloc_info[i].src_bo);
 			if (ret)
 				goto out_free_bos;
@@ -314,7 +313,7 @@
 	    update_area->top >= update_area->bottom)
 		return -EINVAL;
 
-	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+	gobj = drm_gem_object_lookup(file, update_area->handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index b2977a1..2319800 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -261,10 +261,6 @@
 	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
 	INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
-	r = qxl_fb_init(qdev);
-	if (r)
-		return r;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 37af1bc..4d83113 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -79,7 +79,7 @@
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 9534127..0738d74 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -384,6 +384,8 @@
 	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
 	.io_mem_free = &qxl_ttm_io_mem_free,
 	.move_notify = &qxl_bo_move_notify,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int qxl_ttm_init(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 532127c..2e216e2 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1375,6 +1375,11 @@
 		break;
 	}
 
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
+
 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
 	       upper_32_bits(fb_location));
 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
@@ -1427,12 +1432,6 @@
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* pageflip setup */
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
-	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
-	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
-
 	/* set pageflip to happen only at start of vblank interval (front porch) */
 	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
@@ -1466,7 +1465,7 @@
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
 	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
-	u32 tmp, viewport_w, viewport_h;
+	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
 
@@ -1581,6 +1580,11 @@
 	else
 		WREG32(AVIVO_D2VGA_CONTROL, 0);
 
+	/* Make sure surface address is updated at vertical blank rather than
+	 * horizontal blank
+	 */
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
+
 	if (rdev->family >= CHIP_RV770) {
 		if (radeon_crtc->crtc_id) {
 			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
@@ -1627,12 +1631,6 @@
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
-	/* pageflip setup */
-	/* make sure flip is at vb rather than hb */
-	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
-	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
-	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
-
 	/* set pageflip to happen only at start of vblank interval (front porch) */
 	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 8ac82df..ba192a3 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -5261,15 +5261,21 @@
  * cik_asic_reset - soft reset GPU
  *
  * @rdev: radeon_device pointer
+ * @hard: force hard reset
  *
  * Look up which blocks are hung and attempt
  * to reset them.
  * Returns 0 for success.
  */
-int cik_asic_reset(struct radeon_device *rdev)
+int cik_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	u32 reset_mask;
 
+	if (hard) {
+		cik_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
 	reset_mask = cik_gpu_check_soft_reset(rdev);
 
 	if (reset_mask)
@@ -8137,6 +8143,164 @@
 /*
  * startup/shutdown callbacks
  */
+static void cik_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * cik_uvd_start() fail early, so nothing happens there. It is
+		 * therefore pointless to go through that code, hence we
+		 * disable uvd here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void cik_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = uvd_v4_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD 4.2 resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void cik_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static void cik_vce_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+		/*
+		 * At this point rdev->vce.vcpu_bo is NULL, which makes
+		 * cik_vce_start() fail early, so nothing happens there. It is
+		 * therefore pointless to go through that code, hence we
+		 * disable vce here.
+		 */
+		rdev->has_vce = 0;
+		return;
+	}
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void cik_vce_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = vce_v2_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void cik_vce_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
+		return;
+	}
+	r = vce_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+		return;
+	}
+}
+
 /**
  * cik_startup - program the asic to a functional state
  *
@@ -8239,34 +8403,8 @@
 		return r;
 	}
 
-	r = radeon_uvd_resume(rdev);
-	if (!r) {
-		r = uvd_v4_2_resume(rdev);
-		if (!r) {
-			r = radeon_fence_driver_start_ring(rdev,
-							   R600_RING_TYPE_UVD_INDEX);
-			if (r)
-				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-		}
-	}
-	if (r)
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
-	r = radeon_vce_resume(rdev);
-	if (!r) {
-		r = vce_v2_0_resume(rdev);
-		if (!r)
-			r = radeon_fence_driver_start_ring(rdev,
-							   TN_RING_TYPE_VCE1_INDEX);
-		if (!r)
-			r = radeon_fence_driver_start_ring(rdev,
-							   TN_RING_TYPE_VCE2_INDEX);
-	}
-	if (r) {
-		dev_err(rdev->dev, "VCE init error (%d).\n", r);
-		rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
-		rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
-	}
+	cik_uvd_start(rdev);
+	cik_vce_start(rdev);
 
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
@@ -8342,32 +8480,8 @@
 	if (r)
 		return r;
 
-	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     RADEON_CP_PACKET2);
-		if (!r)
-			r = uvd_v1_0_init(rdev);
-		if (r)
-			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
-	}
-
-	r = -ENOENT;
-
-	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     VCE_CMD_NO_OP);
-
-	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     VCE_CMD_NO_OP);
-
-	if (!r)
-		r = vce_v1_0_init(rdev);
-	else if (r != -ENOENT)
-		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	cik_uvd_resume(rdev);
+	cik_vce_resume(rdev);
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -8443,9 +8557,12 @@
 	radeon_vm_manager_fini(rdev);
 	cik_cp_enable(rdev, false);
 	cik_sdma_enable(rdev, false);
-	uvd_v1_0_fini(rdev);
-	radeon_uvd_suspend(rdev);
-	radeon_vce_suspend(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	if (rdev->has_vce)
+		radeon_vce_suspend(rdev);
 	cik_fini_pg(rdev);
 	cik_fini_cg(rdev);
 	cik_irq_suspend(rdev);
@@ -8571,23 +8688,8 @@
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 256 * 1024);
 
-	r = radeon_uvd_init(rdev);
-	if (!r) {
-		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
-	}
-
-	r = radeon_vce_init(rdev);
-	if (!r) {
-		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
-
-		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
-	}
+	cik_uvd_init(rdev);
+	cik_vce_init(rdev);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 391ff9d..cead228 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2071,6 +2071,7 @@
 #define UVD_UDEC_DBW_ADDR_CONFIG	0xef54
 
 #define UVD_LMI_EXT40_ADDR		0xf498
+#define UVD_GP_SCRATCH4			0xf4e0
 #define UVD_LMI_ADDR_EXT		0xf594
 #define UVD_VCPU_CACHE_OFFSET0		0xf608
 #define UVD_VCPU_CACHE_SIZE0		0xf60c
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 34f7a29..db275b7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1407,11 +1407,14 @@
  * Triggers the actual pageflip by updating the primary
  * surface base address (evergreen+).
  */
-void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
+			 bool async)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
 	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+	       async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));
 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1864,7 +1867,8 @@
 			break;
 		}
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
-		enabled |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enabled |= 1 << radeon_connector->hpd.hpd;
 	}
 	radeon_irq_kms_enable_hpd(rdev, enabled);
 }
@@ -1907,7 +1911,8 @@
 		default:
 			break;
 		}
-		disabled |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disabled |= 1 << radeon_connector->hpd.hpd;
 	}
 	radeon_irq_kms_disable_hpd(rdev, disabled);
 }
@@ -4136,10 +4141,15 @@
 	}
 }
 
-int evergreen_asic_reset(struct radeon_device *rdev)
+int evergreen_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	u32 reset_mask;
 
+	if (hard) {
+		evergreen_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
 	if (reset_mask)
@@ -5515,6 +5525,73 @@
 	return IRQ_HANDLED;
 }
 
+static void evergreen_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early, so nothing happens there. It
+		 * is therefore pointless to go through that code, hence we
+		 * disable uvd here.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void evergreen_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void evergreen_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
@@ -5579,16 +5656,7 @@
 		return r;
 	}
 
-	r = uvd_v2_2_resume(rdev);
-	if (!r) {
-		r = radeon_fence_driver_start_ring(rdev,
-						   R600_RING_TYPE_UVD_INDEX);
-		if (r)
-			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-	}
-
-	if (r)
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	evergreen_uvd_start(rdev);
 
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
@@ -5627,16 +5695,7 @@
 	if (r)
 		return r;
 
-	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     RADEON_CP_PACKET2);
-		if (!r)
-			r = uvd_v1_0_init(rdev);
-
-		if (r)
-			DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
-	}
+	evergreen_uvd_resume(rdev);
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -5691,8 +5750,10 @@
 {
 	radeon_pm_suspend(rdev);
 	radeon_audio_fini(rdev);
-	uvd_v1_0_fini(rdev);
-	radeon_uvd_suspend(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
 	evergreen_irq_suspend(rdev);
@@ -5793,12 +5854,7 @@
 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
-	r = radeon_uvd_init(rdev);
-	if (!r) {
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
-		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
-			       4096);
-	}
+	evergreen_uvd_init(rdev);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 9e93205..0d3f744 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2608,6 +2608,51 @@
 			}
 		}
 		break;
+	case PACKET3_SET_APPEND_CNT:
+	{
+		uint32_t areg;
+		uint32_t allowed_reg_base;
+		uint32_t source_sel;
+		if (pkt->count != 2) {
+			DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+			return -EINVAL;
+		}
+
+		allowed_reg_base = GDS_APPEND_COUNT_0;
+		allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
+		allowed_reg_base >>= 2;
+
+		areg = idx_value >> 16;
+		if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
+			dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
+				 areg, idx);
+			return -EINVAL;
+		}
+
+		source_sel = G_PACKET3_SET_APPEND_CNT_SRC_SELECT(idx_value);
+		if (source_sel == PACKET3_SAC_SRC_SEL_MEM) {
+			uint64_t offset;
+			uint32_t swap;
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx + 1);
+			swap = offset & 0x3;
+			offset &= ~0x3;
+
+			offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
+
+			offset += reloc->gpu_offset;
+			ib[idx+1] = (offset & 0xfffffffc) | swap;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
+			return -EINVAL;
+		}
+		break;
+	}
 	case PACKET3_NOP:
 		break;
 	default:
@@ -3438,6 +3483,27 @@
 			}
 		}
 		break;
+	case PACKET3_SET_APPEND_CNT: {
+		uint32_t areg;
+		uint32_t allowed_reg_base;
+
+		if (pkt->count != 2) {
+			DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+			return -EINVAL;
+		}
+
+		allowed_reg_base = GDS_APPEND_COUNT_0;
+		allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
+		allowed_reg_base >>= 2;
+
+		areg = idx_value >> 16;
+		if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
+			DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
+				  areg, idx);
+			return -EINVAL;
+		}
+		break;
+	}
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 13b6029..0b174e1 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1689,6 +1689,36 @@
 #define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
 #define	PACKET3_SET_RESOURCE_INDIRECT			0x74
 #define	PACKET3_SET_APPEND_CNT			        0x75
+/* SET_APPEND_CNT - documentation
+ * 1. header
+ * 2. COMMAND
+ *  1:0 - SOURCE SEL
+ *  15:2 - Reserved
+ *  31:16 - WR_REG_OFFSET - context register to write source data to.
+ *          (one of R_02872C_GDS_APPEND_COUNT_0-11)
+ * 3. CONTROL
+ *  (for source == mem)
+ *  31:2 SRC_ADDRESS_LO
+ *  1:0 SWAP
+ *  (for source == GDS)
+ *  31:0 GDS offset
+ *  (for source == DATA)
+ *  31:0 DATA
+ *  (for source == REG)
+ *  31:0 REG
+ * 4. SRC_ADDRESS_HI[7:0]
+ * kernel driver 2.44 only supports SRC == MEM.
+ */
+#define PACKET3_SET_APPEND_CNT_SRC_SELECT(x) ((x) << 0)
+#define G_PACKET3_SET_APPEND_CNT_SRC_SELECT(x) (((x) & 0x3) >> 0)
+/* source is from the data in CONTROL */
+#define PACKET3_SAC_SRC_SEL_DATA 0x0
+/* source is from register */
+#define PACKET3_SAC_SRC_SEL_REG  0x1
+/* source is from GDS offset in CONTROL */
+#define PACKET3_SAC_SRC_SEL_GDS  0x2
+/* source is from memory address */
+#define PACKET3_SAC_SRC_SEL_MEM  0x3
 
 #define	SQ_RESOURCE_CONSTANT_WORD7_0				0x3001c
 #define		S__SQ_CONSTANT_TYPE(x)			(((x) & 3) << 30)
@@ -2005,6 +2035,19 @@
 
 #define GDS_ADDR_BASE					0x28720
 
+#define GDS_APPEND_COUNT_0				0x2872C
+#define GDS_APPEND_COUNT_1				0x28730
+#define GDS_APPEND_COUNT_2				0x28734
+#define GDS_APPEND_COUNT_3				0x28738
+#define GDS_APPEND_COUNT_4				0x2873C
+#define GDS_APPEND_COUNT_5				0x28740
+#define GDS_APPEND_COUNT_6				0x28744
+#define GDS_APPEND_COUNT_7				0x28748
+#define GDS_APPEND_COUNT_8				0x2874c
+#define GDS_APPEND_COUNT_9				0x28750
+#define GDS_APPEND_COUNT_10				0x28754
+#define GDS_APPEND_COUNT_11				0x28758
+
 #define	CB_IMMED0_BASE					0x28b9c
 #define	CB_IMMED1_BASE					0x28ba0
 #define	CB_IMMED2_BASE					0x28ba4
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index d024074..a7e9786 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2164,7 +2164,7 @@
 	if (pi->caps_stable_p_state) {
 		stable_p_state_sclk = (max_limits->sclk * 75) / 100;
 
-		for (i = table->count - 1; i >= 0; i++) {
+		for (i = table->count - 1; i >= 0; i--) {
 			if (stable_p_state_sclk >= table->entries[i].clk) {
 				stable_p_state_sclk = table->entries[i].clk;
 				break;
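
The kv_dpm hunk above is a plain loop-direction fix: the clock table is sorted
by ascending clock, and the intent is to walk it from the top down and clamp
the stable-p-state clock to the first entry at or below 75% of the maximum;
starting at count - 1 and incrementing instead ran past the end of the array.
A small self-contained illustration of the intended scan:

#include <stdio.h>

int main(void)
{
	unsigned int clk[] = { 200, 300, 450, 600, 800 };	/* ascending */
	unsigned int count = sizeof(clk) / sizeof(clk[0]);
	unsigned int target = (800 * 75) / 100;			/* 600 */
	int i;

	for (i = count - 1; i >= 0; i--) {			/* i--, not i++ */
		if (target >= clk[i]) {
			target = clk[i];
			break;
		}
	}
	printf("clamped sclk: %u\n", target);			/* 600 */
	return 0;
}
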
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b88d63c9..4a3d7ca 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1959,10 +1959,15 @@
 	evergreen_print_gpu_status_regs(rdev);
 }
 
-int cayman_asic_reset(struct radeon_device *rdev)
+int cayman_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	u32 reset_mask;
 
+	if (hard) {
+		evergreen_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
 	reset_mask = cayman_gpu_check_soft_reset(rdev);
 
 	if (reset_mask)
@@ -2002,6 +2007,160 @@
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
+static void cayman_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early and do nothing, so there is
+		 * no point in going through that code path; disable UVD
+		 * here instead.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void cayman_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void cayman_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static void cayman_vce_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Only set for CHIP_ARUBA */
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+		/*
+		 * At this point rdev->vce.vcpu_bo is NULL, which makes
+		 * cayman_vce_start() fail early and do nothing, so there is
+		 * no point in going through that code path; disable VCE
+		 * here instead.
+		 */
+		rdev->has_vce = 0;
+		return;
+	}
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void cayman_vce_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = vce_v1_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void cayman_vce_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
+		return;
+	}
+	r = vce_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+		return;
+	}
+}
+
 static int cayman_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2056,34 +2215,8 @@
 		return r;
 	}
 
-	r = uvd_v2_2_resume(rdev);
-	if (!r) {
-		r = radeon_fence_driver_start_ring(rdev,
-						   R600_RING_TYPE_UVD_INDEX);
-		if (r)
-			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-	}
-	if (r)
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
-	if (rdev->family == CHIP_ARUBA) {
-		r = radeon_vce_resume(rdev);
-		if (!r)
-			r = vce_v1_0_resume(rdev);
-
-		if (!r)
-			r = radeon_fence_driver_start_ring(rdev,
-							   TN_RING_TYPE_VCE1_INDEX);
-		if (!r)
-			r = radeon_fence_driver_start_ring(rdev,
-							   TN_RING_TYPE_VCE2_INDEX);
-
-		if (r) {
-			dev_err(rdev->dev, "VCE init error (%d).\n", r);
-			rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
-			rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
-		}
-	}
+	cayman_uvd_start(rdev);
+	cayman_vce_start(rdev);
 
 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
 	if (r) {
@@ -2152,30 +2285,8 @@
 	if (r)
 		return r;
 
-	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     RADEON_CP_PACKET2);
-		if (!r)
-			r = uvd_v1_0_init(rdev);
-		if (r)
-			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
-	}
-
-	if (rdev->family == CHIP_ARUBA) {
-		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-		if (ring->ring_size)
-			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
-
-		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-		if (ring->ring_size)
-			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
-
-		if (!r)
-			r = vce_v1_0_init(rdev);
-		if (r)
-			DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
-	}
+	cayman_uvd_resume(rdev);
+	cayman_vce_resume(rdev);
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -2230,8 +2341,10 @@
 	radeon_vm_manager_fini(rdev);
 	cayman_cp_enable(rdev, false);
 	cayman_dma_stop(rdev);
-	uvd_v1_0_fini(rdev);
-	radeon_uvd_suspend(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -2325,25 +2438,8 @@
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 64 * 1024);
 
-	r = radeon_uvd_init(rdev);
-	if (!r) {
-		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
-	}
-
-	if (rdev->family == CHIP_ARUBA) {
-		r = radeon_vce_init(rdev);
-		if (!r) {
-			ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-			ring->ring_obj = NULL;
-			r600_ring_init(rdev, ring, 4096);
-
-			ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-			ring->ring_obj = NULL;
-			r600_ring_init(rdev, ring, 4096);
-		}
-	}
+	cayman_uvd_init(rdev);
+	cayman_vce_init(rdev);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -2398,7 +2494,7 @@
 	radeon_irq_kms_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
-	if (rdev->family == CHIP_ARUBA)
+	if (rdev->has_vce)
 		radeon_vce_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
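
The ni.c changes fold the UVD/VCE bring-up into per-stage helpers with a
consistent failure convention: if *_init() fails, has_uvd/has_vce is cleared
so every later stage becomes a no-op; if *_start() fails, the ring size is
zeroed so *_resume() skips ring and block initialization. This keeps
cayman_startup()/cayman_resume() free of error plumbing. A simplified model
of that staging (not kernel code, just the control flow):

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool has_block;		/* cleared if init fails */
	unsigned int ring_size;	/* zeroed if start fails */
};

static void block_init(struct dev *d, bool fail)
{
	if (!d->has_block)
		return;
	if (fail) {
		d->has_block = false;	/* later stages become no-ops */
		return;
	}
	d->ring_size = 4096;
}

static void block_start(struct dev *d, bool fail)
{
	if (!d->has_block)
		return;
	if (fail)
		d->ring_size = 0;	/* resume will skip the block */
}

static void block_resume(struct dev *d)
{
	if (!d->has_block || !d->ring_size)
		return;
	printf("block fully initialized\n");
}

int main(void)
{
	struct dev d = { .has_block = true };

	block_init(&d, false);
	block_start(&d, false);
	block_resume(&d);
	return 0;
}
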
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 6e478a2..f25994b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -153,7 +153,7 @@
  * bit to go high, when it does, we release the lock, and allow the
  * double buffered update to take place.
  */
-void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
@@ -592,7 +592,8 @@
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-		enable |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enable |= 1 << radeon_connector->hpd.hpd;
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	radeon_irq_kms_enable_hpd(rdev, enable);
@@ -614,7 +615,8 @@
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-		disable |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disable |= 1 << radeon_connector->hpd.hpd;
 	}
 	radeon_irq_kms_disable_hpd(rdev, disable);
 }
@@ -2555,7 +2557,7 @@
 	mdelay(1);
 }
 
-int r100_asic_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	struct r100_mc_save save;
 	u32 status, tmp;
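
The r100 HPD hunks guard against connectors that have no hot-plug pin: such
connectors report RADEON_HPD_NONE, a sentinel well outside the valid pin
range, and shifting 1 by it would at best set a bogus bit in the enable mask
and at worst be undefined behavior. A short sketch of the guarded mask build
(the RADEON_HPD_NONE value of 0xff is assumed from radeon_mode.h):

#include <stdint.h>

#define RADEON_HPD_NONE 0xff	/* assumed sentinel for "no HPD pin" */

static uint32_t hpd_mask(const uint8_t *pins, unsigned int n)
{
	uint32_t enable = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (pins[i] == RADEON_HPD_NONE)
			continue;	/* no pin: nothing to enable */
		enable |= 1u << pins[i];
	}
	return enable;
}
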
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 718b12b..7e417d8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -410,7 +410,7 @@
 		 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-int r300_asic_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	struct r100_mc_save save;
 	u32 status, tmp;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f86ab69..9247e7d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1002,7 +1002,8 @@
 				break;
 			}
 		}
-		enable |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enable |= 1 << radeon_connector->hpd.hpd;
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	radeon_irq_kms_enable_hpd(rdev, enable);
@@ -1055,7 +1056,8 @@
 				break;
 			}
 		}
-		disable |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disable |= 1 << radeon_connector->hpd.hpd;
 	}
 	radeon_irq_kms_disable_hpd(rdev, disable);
 }
@@ -1871,10 +1873,15 @@
 	}
 }
 
-int r600_asic_reset(struct radeon_device *rdev)
+int r600_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	u32 reset_mask;
 
+	if (hard) {
+		r600_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
 	reset_mask = r600_gpu_check_soft_reset(rdev);
 
 	if (reset_mask)
@@ -3035,6 +3042,73 @@
 	/* FIXME: implement */
 }
 
+static void r600_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v1_0_resume() fail early and do nothing, so there is
+		 * no point in going through that code path; disable UVD
+		 * here instead.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void r600_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v1_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void r600_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
 static int r600_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
@@ -3070,17 +3144,7 @@
 		return r;
 	}
 
-	if (rdev->has_uvd) {
-		r = uvd_v1_0_resume(rdev);
-		if (!r) {
-			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
-			if (r) {
-				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
-			}
-		}
-		if (r)
-			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-	}
+	r600_uvd_start(rdev);
 
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
@@ -3110,17 +3174,7 @@
 	if (r)
 		return r;
 
-	if (rdev->has_uvd) {
-		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-		if (ring->ring_size) {
-			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-					     RADEON_CP_PACKET2);
-			if (!r)
-				r = uvd_v1_0_init(rdev);
-			if (r)
-				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
-		}
-	}
+	r600_uvd_resume(rdev);
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -3264,13 +3318,7 @@
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
-	if (rdev->has_uvd) {
-		r = radeon_uvd_init(rdev);
-		if (!r) {
-			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
-			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
-		}
-	}
+	r600_uvd_init(rdev);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 007be29..80b24a4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -113,6 +113,8 @@
 extern int radeon_backlight;
 extern int radeon_auxch;
 extern int radeon_mst;
+extern int radeon_uvd;
+extern int radeon_vce;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -744,6 +746,7 @@
 	struct drm_pending_vblank_event *event;
 	struct radeon_bo		*old_rbo;
 	struct fence			*fence;
+	bool				async;
 };
 
 struct r500_irq_stat_regs {
@@ -1671,14 +1674,18 @@
 /*
  * UVD
  */
-#define RADEON_MAX_UVD_HANDLES	10
-#define RADEON_UVD_STACK_SIZE	(1024*1024)
-#define RADEON_UVD_HEAP_SIZE	(1024*1024)
+#define RADEON_DEFAULT_UVD_HANDLES	10
+#define RADEON_MAX_UVD_HANDLES		30
+#define RADEON_UVD_STACK_SIZE		(200*1024)
+#define RADEON_UVD_HEAP_SIZE		(256*1024)
+#define RADEON_UVD_SESSION_SIZE		(50*1024)
 
 struct radeon_uvd {
+	bool			fw_header_present;
 	struct radeon_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		max_handles;
 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
 	unsigned		img_size[RADEON_MAX_UVD_HANDLES];
@@ -1852,7 +1859,7 @@
 	int (*resume)(struct radeon_device *rdev);
 	int (*suspend)(struct radeon_device *rdev);
 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
-	int (*asic_reset)(struct radeon_device *rdev);
+	int (*asic_reset)(struct radeon_device *rdev, bool hard);
 	/* Flush the HDP cache via MMIO */
 	void (*mmio_hdp_flush)(struct radeon_device *rdev);
 	/* check if 3D engine is idle */
@@ -1998,7 +2005,7 @@
 	} dpm;
 	/* pageflipping */
 	struct {
-		void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+		void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool async);
 		bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
 	} pflip;
 };
@@ -2394,7 +2401,6 @@
 	struct radeon_wb		wb;
 	struct radeon_dummy_page	dummy_page;
 	bool				shutdown;
-	bool				suspend;
 	bool				need_dma32;
 	bool				accel_working;
 	bool				fastfb_working; /* IGP feature*/
@@ -2423,6 +2429,7 @@
 	int num_crtc; /* number of crtcs */
 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
 	bool has_uvd;
+	bool has_vce;
 	struct r600_audio audio; /* audio stuff */
 	struct notifier_block acpi_nb;
 	/* only one userspace can use Hyperz features or CMASK at a time */
@@ -2717,7 +2724,7 @@
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev), false)
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
 #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
 #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
@@ -2775,7 +2782,7 @@
 #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_page_flip(rdev, crtc, base, async) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
 #define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
 #define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
 #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
@@ -2832,7 +2839,8 @@
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+			      bool fbcon, bool freeze);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 extern void radeon_program_register_sequence(struct radeon_device *rdev,
 					     const u32 *registers,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 7d5a36d..bc5121d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2324,6 +2324,7 @@
 		rdev->num_crtc = 2;
 
 	rdev->has_uvd = false;
+	rdev->has_vce = false;
 
 	switch (rdev->family) {
 	case CHIP_R100:
@@ -2454,6 +2455,7 @@
 		/* set num crtcs */
 		rdev->num_crtc = 4;
 		rdev->has_uvd = true;
+		rdev->has_vce = true;
 		rdev->cg_flags =
 			RADEON_CG_SUPPORT_VCE_MGCG;
 		break;
@@ -2470,10 +2472,13 @@
 			rdev->num_crtc = 2;
 		else
 			rdev->num_crtc = 6;
-		if (rdev->family == CHIP_HAINAN)
+		if (rdev->family == CHIP_HAINAN) {
 			rdev->has_uvd = false;
-		else
+			rdev->has_vce = false;
+		} else {
 			rdev->has_uvd = true;
+			rdev->has_vce = true;
+		}
 		switch (rdev->family) {
 		case CHIP_TAHITI:
 			rdev->cg_flags =
@@ -2578,6 +2583,7 @@
 		rdev->asic = &ci_asic;
 		rdev->num_crtc = 6;
 		rdev->has_uvd = true;
+		rdev->has_vce = true;
 		if (rdev->family == CHIP_BONAIRE) {
 			rdev->cg_flags =
 				RADEON_CG_SUPPORT_GFX_MGCG |
@@ -2678,6 +2684,7 @@
 				RADEON_PG_SUPPORT_SAMU;*/
 		}
 		rdev->has_uvd = true;
+		rdev->has_vce = true;
 		break;
 	default:
 		/* FIXME: not supported yet */
@@ -2689,6 +2696,11 @@
 		rdev->asic->pm.set_memory_clock = NULL;
 	}
 
+	if (!radeon_uvd)
+		rdev->has_uvd = false;
+	if (!radeon_vce)
+		rdev->has_vce = false;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e0aa332..e3f036c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -64,7 +64,7 @@
 int r100_resume(struct radeon_device *rdev);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int r100_asic_reset(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev, bool hard);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
@@ -138,7 +138,7 @@
 extern void r100_pm_init_profile(struct radeon_device *rdev);
 extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
 extern void r100_page_flip(struct radeon_device *rdev, int crtc,
-			   u64 crtc_base);
+			   u64 crtc_base, bool async);
 extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
 extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
 extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
@@ -167,7 +167,7 @@
 extern void r300_fini(struct radeon_device *rdev);
 extern int r300_suspend(struct radeon_device *rdev);
 extern int r300_resume(struct radeon_device *rdev);
-extern int r300_asic_reset(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
 extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				struct radeon_fence *fence);
@@ -225,7 +225,7 @@
 /*
  * rs600.
  */
-extern int rs600_asic_reset(struct radeon_device *rdev);
+extern int rs600_asic_reset(struct radeon_device *rdev, bool hard);
 extern int rs600_init(struct radeon_device *rdev);
 extern void rs600_fini(struct radeon_device *rdev);
 extern int rs600_suspend(struct radeon_device *rdev);
@@ -250,7 +250,7 @@
 extern void rs600_pm_prepare(struct radeon_device *rdev);
 extern void rs600_pm_finish(struct radeon_device *rdev);
 extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
-			    u64 crtc_base);
+			    u64 crtc_base, bool async);
 extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
 void rs600_set_safe_registers(struct radeon_device *rdev);
 extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
@@ -334,7 +334,7 @@
 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_asic_reset(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev, bool hard);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
@@ -464,7 +464,8 @@
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
 void rv770_pm_misc(struct radeon_device *rdev);
-void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base,
+		     bool async);
 bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 void r700_cp_stop(struct radeon_device *rdev);
@@ -513,7 +514,7 @@
 int evergreen_resume(struct radeon_device *rdev);
 bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int evergreen_asic_reset(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev, bool hard);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 void evergreen_hpd_init(struct radeon_device *rdev);
@@ -534,7 +535,7 @@
 int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
-				u64 crtc_base);
+				u64 crtc_base, bool async);
 extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
 extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
 void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -606,7 +607,7 @@
 void cayman_fini(struct radeon_device *rdev);
 int cayman_suspend(struct radeon_device *rdev);
 int cayman_resume(struct radeon_device *rdev);
-int cayman_asic_reset(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev, bool hard);
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
@@ -712,7 +713,7 @@
 int si_resume(struct radeon_device *rdev);
 bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int si_asic_reset(struct radeon_device *rdev);
+int si_asic_reset(struct radeon_device *rdev, bool hard);
 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
@@ -817,7 +818,7 @@
 int cik_suspend(struct radeon_device *rdev);
 int cik_resume(struct radeon_device *rdev);
 bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int cik_asic_reset(struct radeon_device *rdev);
+int cik_asic_reset(struct radeon_device *rdev, bool hard);
 void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ab39b85..510ea37 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -74,7 +74,6 @@
 
 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 {
-	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
 	unsigned i;
@@ -101,7 +100,7 @@
 		unsigned priority;
 
 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+		gobj = drm_gem_object_lookup(p->filp, r->handle);
 		if (gobj == NULL) {
 			DRM_ERROR("gem object lookup failed 0x%x\n",
 				  r->handle);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index afaf346..2a10e24 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -274,7 +274,7 @@
 		return -EINVAL;
 	}
 
-	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
 		return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d0826fb..e721e6b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1230,7 +1230,7 @@
 		printk(KERN_INFO "radeon: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		radeon_suspend_kms(dev, true, true);
+		radeon_suspend_kms(dev, true, true, false);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1555,7 +1555,8 @@
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+		       bool fbcon, bool freeze)
 {
 	struct radeon_device *rdev;
 	struct drm_crtc *crtc;
@@ -1630,7 +1631,10 @@
 	radeon_agp_suspend(rdev);
 
 	pci_save_state(dev->pdev);
-	if (suspend) {
+	if (freeze && rdev->family >= CHIP_R600) {
+		rdev->asic->asic_reset(rdev, true);
+		pci_restore_state(dev->pdev);
+	} else if (suspend) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
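
radeon_suspend_kms() now takes a freeze argument: the hibernation freeze path
passes true, and on R600 and newer parts the device is quiesced with the new
hard (PCI config space) reset and its PCI state restored, rather than being
powered down as on a regular suspend. A simplified model of the reset
selection (every pre-existing radeon_asic_reset() caller keeps the soft path):

#include <stdbool.h>
#include <stdio.h>

static int asic_reset(bool hard)
{
	if (hard) {
		printf("PCI config reset\n");	/* used by the freeze path */
		return 0;
	}
	printf("soft reset of the engines flagged as hung\n");
	return 0;
}

int main(void)
{
	asic_reset(false);	/* lockup recovery, normal GPU reset */
	asic_reset(true);	/* radeon_suspend_kms(..., freeze = true), >= R600 */
	return 0;
}
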
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index fcc7483..6a41b49 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -377,7 +377,7 @@
 
 	/* wakeup userspace */
 	if (work->event)
-		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+		drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
 
 	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
@@ -490,7 +490,7 @@
 				 vblank->linedur_ns / 1000, stat, vpos, hpos);
 
 	/* do the flip (mmio) */
-	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
 
 	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -525,6 +525,7 @@
 	work->rdev = rdev;
 	work->crtc_id = radeon_crtc->crtc_id;
 	work->event = event;
+	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 
 	/* schedule unpin of the old buffer */
 	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
@@ -1367,7 +1368,7 @@
 	struct radeon_framebuffer *radeon_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (obj ==  NULL) {
 		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
 			"can't create framebuffer\n", mode_cmd->handles[0]);
@@ -1630,6 +1631,9 @@
 
 	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
 
+	if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
+		rdev->ddev->mode_config.async_page_flip = true;
+
 	if (ASIC_IS_DCE5(rdev)) {
 		rdev->ddev->mode_config.max_width = 16384;
 		rdev->ddev->mode_config.max_height = 16384;
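
With these radeon_display.c changes the driver exposes asynchronous page
flips: the DRM_MODE_PAGE_FLIP_ASYNC flag from userspace is recorded in the
flip work and handed to the per-ASIC page_flip hook, and
mode_config.async_page_flip advertises support when radeon_use_pflipirq is
set to 2 on R600 and newer parts. A hedged userspace-side sketch using
libdrm (crtc_id and fb_id stand in for objects the caller already set up):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int request_async_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
			      void *user_data)
{
	/* The core rejects the async flag with -EINVAL unless the driver
	 * set mode_config.async_page_flip, as done above. */
	return drmModePageFlip(fd, crtc_id, fb_id,
			       DRM_MODE_PAGE_FLIP_EVENT |
			       DRM_MODE_PAGE_FLIP_ASYNC,
			       user_data);
}
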
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ccd4ad4..b55aa74 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -93,9 +93,11 @@
  *   2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
  *   2.42.0 - Add VCE/VUI (Video Usability Information) support
  *   2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
+ *   2.44.0 - SET_APPEND_CNT packet3 support
+ *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	43
+#define KMS_DRIVER_MINOR	45
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -105,7 +107,8 @@
 				 struct drm_file *file_priv);
 void radeon_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+		       bool fbcon, bool freeze);
 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -196,6 +199,8 @@
 int radeon_backlight = -1;
 int radeon_auxch = -1;
 int radeon_mst = 0;
+int radeon_uvd = 1;
+int radeon_vce = 1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -287,6 +292,12 @@
 MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
 module_param_named(mst, radeon_mst, int, 0444);
 
+MODULE_PARM_DESC(uvd, "enable/disable UVD support (1 = enable, 0 = disable)");
+module_param_named(uvd, radeon_uvd, int, 0444);
+
+MODULE_PARM_DESC(vce, "enable/disable VCE support (1 = enable, 0 = disable)");
+module_param_named(vce, radeon_vce, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -358,7 +369,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return radeon_suspend_kms(drm_dev, true, true);
+	return radeon_suspend_kms(drm_dev, true, true, false);
 }
 
 static int radeon_pmops_resume(struct device *dev)
@@ -372,7 +383,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	return radeon_suspend_kms(drm_dev, false, true);
+	return radeon_suspend_kms(drm_dev, false, true, true);
 }
 
 static int radeon_pmops_thaw(struct device *dev)
@@ -397,7 +408,7 @@
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 
-	ret = radeon_suspend_kms(drm_dev, false, false);
+	ret = radeon_suspend_kms(drm_dev, false, false, false);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_ignore_hotplug(pdev);
@@ -525,7 +536,7 @@
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
 	.irq_handler = radeon_driver_irq_handler_kms,
 	.ioctls = radeon_ioctls_kms,
-	.gem_free_object = radeon_gem_object_free,
+	.gem_free_object_unlocked = radeon_gem_object_free,
 	.gem_open_object = radeon_gem_object_open,
 	.gem_close_object = radeon_gem_object_close,
 	.dumb_create = radeon_mode_dumb_create,
@@ -566,12 +577,10 @@
 
 static int __init radeon_init(void)
 {
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && radeon_modeset == -1) {
 		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
 		radeon_modeset = 0;
 	}
-#endif
 	/* set to modesetting by default if not nomodeset */
 	if (radeon_modeset == -1)
 		radeon_modeset = 1;
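
radeon_drv.c adds two module parameters that gate the has_uvd/has_vce flags
set up in radeon_asic.c (so the blocks can be kept off with radeon.uvd=0 or
radeon.vce=0 on the kernel command line), routes the hibernation freeze
callback through the new hard-reset path, and bumps the KMS minor version
from 43 to 45 in one step to advertise both SET_APPEND_CNT (2.44) and the SI
shader-register exception (2.45). A hedged userspace sketch of gating use of
the new packet on the advertised version:

#include <stdbool.h>
#include <xf86drm.h>

static bool has_set_append_cnt(int fd)
{
	drmVersionPtr ver = drmGetVersion(fd);
	bool ok;

	if (!ver)
		return false;
	ok = ver->version_major > 2 ||
	     (ver->version_major == 2 && ver->version_minor >= 44);
	drmFreeVersion(ver);
	return ok;
}
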
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index e26c963..deb9511 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -382,7 +382,7 @@
 	down_read(&rdev->exclusive_lock);
 
 	/* just do a BO wait for now */
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL) {
 		up_read(&rdev->exclusive_lock);
 		return -ENOENT;
@@ -404,7 +404,7 @@
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
 
-	gobj = drm_gem_object_lookup(dev, filp, handle);
+	gobj = drm_gem_object_lookup(filp, handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
@@ -435,7 +435,7 @@
 	int r;
 	uint32_t cur_placement = 0;
 
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
@@ -464,7 +464,7 @@
 	uint32_t cur_placement = 0;
 	long ret;
 
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
@@ -495,7 +495,7 @@
 	int r = 0;
 
 	DRM_DEBUG("%d \n", args->handle);
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
 	robj = gem_to_radeon_bo(gobj);
@@ -513,7 +513,7 @@
 	int r = 0;
 
 	DRM_DEBUG("\n");
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
 	rbo = gem_to_radeon_bo(gobj);
@@ -648,7 +648,7 @@
 		return -EINVAL;
 	}
 
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL) {
 		args->operation = RADEON_VA_RESULT_ERROR;
 		return -ENOENT;
@@ -703,7 +703,7 @@
 	struct radeon_bo *robj;
 	int r;
 
-	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1e9304d..c084cad 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,7 +291,6 @@
 	if (r) {
 		return r;
 	}
-	rdev->ddev->vblank_disable_allowed = true;
 
 	/* enable msi */
 	rdev->msi_enabled = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index eef006c..896f2cf 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -186,7 +186,9 @@
 	struct radeon_mn *rmn;
 	int r;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return ERR_PTR(-EINTR);
+
 	mutex_lock(&rdev->mn_lock);
 
 	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2d901bf..be30861 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -832,13 +832,13 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0))
 		return r;
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d8d295e..a10bb3d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -65,7 +65,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL);
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 90f7394..590b037 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -865,6 +865,8 @@
 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
 	.io_mem_free = &radeon_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6fe9e4e..73dfe01 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -34,6 +34,7 @@
 #include <drm/drm.h>
 
 #include "radeon.h"
+#include "radeon_ucode.h"
 #include "r600d.h"
 
 /* 1 second timeout */
@@ -47,7 +48,8 @@
 #define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
 #define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
 #define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
-#define FIRMWARE_BONAIRE	"radeon/BONAIRE_uvd.bin"
+#define FIRMWARE_BONAIRE_LEGACY	"radeon/BONAIRE_uvd.bin"
+#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
 
 MODULE_FIRMWARE(FIRMWARE_R600);
 MODULE_FIRMWARE(FIRMWARE_RS780);
@@ -56,6 +58,7 @@
 MODULE_FIRMWARE(FIRMWARE_CYPRESS);
 MODULE_FIRMWARE(FIRMWARE_SUMO);
 MODULE_FIRMWARE(FIRMWARE_TAHITI);
+MODULE_FIRMWARE(FIRMWARE_BONAIRE_LEGACY);
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
 
 static void radeon_uvd_idle_work_handler(struct work_struct *work);
@@ -63,7 +66,7 @@
 int radeon_uvd_init(struct radeon_device *rdev)
 {
 	unsigned long bo_size;
-	const char *fw_name;
+	const char *fw_name = NULL, *legacy_fw_name = NULL;
 	int i, r;
 
 	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
@@ -74,22 +77,22 @@
 	case CHIP_RV670:
 	case CHIP_RV620:
 	case CHIP_RV635:
-		fw_name = FIRMWARE_R600;
+		legacy_fw_name = FIRMWARE_R600;
 		break;
 
 	case CHIP_RS780:
 	case CHIP_RS880:
-		fw_name = FIRMWARE_RS780;
+		legacy_fw_name = FIRMWARE_RS780;
 		break;
 
 	case CHIP_RV770:
-		fw_name = FIRMWARE_RV770;
+		legacy_fw_name = FIRMWARE_RV770;
 		break;
 
 	case CHIP_RV710:
 	case CHIP_RV730:
 	case CHIP_RV740:
-		fw_name = FIRMWARE_RV710;
+		legacy_fw_name = FIRMWARE_RV710;
 		break;
 
 	case CHIP_CYPRESS:
@@ -97,7 +100,7 @@
 	case CHIP_JUNIPER:
 	case CHIP_REDWOOD:
 	case CHIP_CEDAR:
-		fw_name = FIRMWARE_CYPRESS;
+		legacy_fw_name = FIRMWARE_CYPRESS;
 		break;
 
 	case CHIP_SUMO:
@@ -107,7 +110,7 @@
 	case CHIP_BARTS:
 	case CHIP_TURKS:
 	case CHIP_CAICOS:
-		fw_name = FIRMWARE_SUMO;
+		legacy_fw_name = FIRMWARE_SUMO;
 		break;
 
 	case CHIP_TAHITI:
@@ -115,7 +118,7 @@
 	case CHIP_PITCAIRN:
 	case CHIP_ARUBA:
 	case CHIP_OLAND:
-		fw_name = FIRMWARE_TAHITI;
+		legacy_fw_name = FIRMWARE_TAHITI;
 		break;
 
 	case CHIP_BONAIRE:
@@ -123,6 +126,7 @@
 	case CHIP_KAVERI:
 	case CHIP_HAWAII:
 	case CHIP_MULLINS:
+		legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
 		fw_name = FIRMWARE_BONAIRE;
 		break;
 
@@ -130,16 +134,56 @@
 		return -EINVAL;
 	}
 
-	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
-	if (r) {
-		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
-			fw_name);
-		return r;
+	rdev->uvd.fw_header_present = false;
+	rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
+	if (fw_name) {
+		/* Let's try to load the newer firmware first */
+		r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
+		if (r) {
+			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+				fw_name);
+		} else {
+			struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
+			unsigned version_major, version_minor, family_id;
+
+			r = radeon_ucode_validate(rdev->uvd_fw);
+			if (r)
+				return r;
+
+			rdev->uvd.fw_header_present = true;
+
+			family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+			version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+			version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+			DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+				 version_major, version_minor, family_id);
+
+			/*
+			 * Limit the number of UVD handles depending on
+			 * microcode major and minor versions.
+			 */
+			if ((version_major >= 0x01) && (version_minor >= 0x37))
+				rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
+		}
+	}
+
+	/*
+	 * In case there is only legacy firmware, or we encounter an error
+	 * while loading the new firmware, we fall back to loading the legacy
+	 * firmware now.
+	 */
+	if (!fw_name || r) {
+		r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
+		if (r) {
+			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+				legacy_fw_name);
+			return r;
+		}
 	}
 
 	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
 		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
-		  RADEON_GPU_PAGE_SIZE;
+		  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
 			     NULL, &rdev->uvd.vcpu_bo);
@@ -172,7 +216,7 @@
 
 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 
-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		atomic_set(&rdev->uvd.handles[i], 0);
 		rdev->uvd.filp[i] = NULL;
 		rdev->uvd.img_size[i] = 0;
@@ -209,7 +253,7 @@
 	if (rdev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
 		if (handle != 0) {
 			struct radeon_fence *fence;
@@ -284,7 +328,7 @@
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
 {
 	int i, r;
-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
 		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;
@@ -469,7 +513,7 @@
 			return r;
 
 		/* try to alloc a new handle */
-		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
 			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
 				DRM_ERROR("Handle 0x%x already in use!\n", handle);
 				return -EINVAL;
@@ -495,7 +539,7 @@
 			return r;
 
 		/* validate the handle */
-		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
 			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
 				if (p->rdev->uvd.filp[i] != p->filp) {
 					DRM_ERROR("UVD handle collision detected!\n");
@@ -510,7 +554,7 @@
 
 	case 2:
 		/* it's a destroy msg, free the handle */
-		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i)
 			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
 		radeon_bo_kunmap(bo);
 		return 0;
@@ -809,7 +853,7 @@
 	*sd = 0;
 	*hd = 0;
 
-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		if (!atomic_read(&rdev->uvd.handles[i]))
 			continue;
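
In radeon_uvd.c the firmware handling now prefers the new-format image and
falls back to the legacy one; when the new header validates, the microcode
version decides how many decode handles are allowed (30 instead of 10 for
1.37 and newer), and the VCPU buffer is sized with a 50 KiB per-session area
instead of one fixed heap/stack. A standalone sketch of that sizing, using
the constants added to radeon.h (firmware size and ucode version are
placeholders):

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_UVD_HANDLES	10
#define MAX_UVD_HANDLES		30
#define UVD_STACK_SIZE		(200 * 1024)
#define UVD_HEAP_SIZE		(256 * 1024)
#define UVD_SESSION_SIZE	(50 * 1024)
#define GPU_PAGE_ALIGN(x)	(((x) + 4095) & ~4095ul)

int main(void)
{
	/* hypothetical 1.37 firmware, family id 0x0d */
	uint32_t ucode_version = (0x01u << 24) | (0x37u << 8) | 0x0d;
	unsigned int major = (ucode_version >> 24) & 0xff;
	unsigned int minor = (ucode_version >> 8) & 0xff;
	unsigned int handles = (major >= 0x01 && minor >= 0x37) ?
			       MAX_UVD_HANDLES : DEFAULT_UVD_HANDLES;
	unsigned long fw_size = 250 * 1024;		/* placeholder */
	unsigned long bo_size = GPU_PAGE_ALIGN(fw_size + 8) +
				UVD_STACK_SIZE + UVD_HEAP_SIZE +
				UVD_SESSION_SIZE * handles;

	printf("%u handles, %lu byte VCPU BO\n", handles, bo_size);
	return 0;
}
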
 
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6244f4e..f16af11 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -110,7 +110,7 @@
 	}
 }
 
-void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -121,6 +121,8 @@
 	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
 
 	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+	       async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
 	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
 	       (u32)crtc_base);
 	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -413,7 +415,8 @@
 		default:
 			break;
 		}
-		enable |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			enable |= 1 << radeon_connector->hpd.hpd;
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	radeon_irq_kms_enable_hpd(rdev, enable);
@@ -439,12 +442,13 @@
 		default:
 			break;
 		}
-		disable |= 1 << radeon_connector->hpd.hpd;
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			disable |= 1 << radeon_connector->hpd.hpd;
 	}
 	radeon_irq_kms_disable_hpd(rdev, disable);
 }
 
-int rs600_asic_reset(struct radeon_device *rdev)
+int rs600_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	struct rv515_mc_save save;
 	u32 status, tmp;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 01ee96a..1c120a4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,7 +801,7 @@
 	return reference_clock;
 }
 
-void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -812,6 +812,8 @@
 	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
 
 	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+	       async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
 	if (radeon_crtc->crtc_id) {
 		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
 		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
@@ -1681,6 +1683,73 @@
 	return 0;
 }
 
+static void rv770_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early and do nothing, so there is
+		 * no point in going through that code path; disable UVD
+		 * here instead.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void rv770_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void rv770_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
 static int rv770_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
@@ -1723,16 +1792,7 @@
 		return r;
 	}
 
-	r = uvd_v2_2_resume(rdev);
-	if (!r) {
-		r = radeon_fence_driver_start_ring(rdev,
-						   R600_RING_TYPE_UVD_INDEX);
-		if (r)
-			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-	}
-
-	if (r)
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	rv770_uvd_start(rdev);
 
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
@@ -1772,16 +1832,7 @@
 	if (r)
 		return r;
 
-	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     RADEON_CP_PACKET2);
-		if (!r)
-			r = uvd_v1_0_init(rdev);
-
-		if (r)
-			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
-	}
+	rv770_uvd_resume(rdev);
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -1831,8 +1882,10 @@
 {
 	radeon_pm_suspend(rdev);
 	radeon_audio_fini(rdev);
-	uvd_v1_0_fini(rdev);
-	radeon_uvd_suspend(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
 	r600_irq_suspend(rdev);
@@ -1917,12 +1970,7 @@
 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
-	r = radeon_uvd_init(rdev);
-	if (!r) {
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
-		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
-			       4096);
-	}
+	rv770_uvd_init(rdev);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ae21550..b30e719 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4034,10 +4034,15 @@
 	}
 }
 
-int si_asic_reset(struct radeon_device *rdev)
+int si_asic_reset(struct radeon_device *rdev, bool hard)
 {
 	u32 reset_mask;
 
+	if (hard) {
+		si_gpu_pci_config_reset(rdev);
+		return 0;
+	}
+
 	reset_mask = si_gpu_check_soft_reset(rdev);
 
 	if (reset_mask)
@@ -4359,6 +4364,10 @@
 	if (reg >= 0x28000)
 		return true;
 
+	/* shader regs are also fine */
+	if (reg >= 0xB000 && reg < 0xC000)
+		return true;
+
 	/* check config regs */
 	switch (reg) {
 	case GRBM_GFX_INDEX:
@@ -6821,6 +6830,159 @@
 /*
  * startup/shutdown callbacks
  */
+static void si_uvd_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = radeon_uvd_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+		/*
+		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+		 * uvd_v2_2_resume() fail early and do nothing, so there is
+		 * no point in going through that code path; disable UVD
+		 * here instead.
+		 */
+		rdev->has_uvd = 0;
+		return;
+	}
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void si_uvd_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_uvd)
+		return;
+
+	r = uvd_v2_2_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void si_uvd_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+		return;
+	}
+	r = uvd_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+		return;
+	}
+}
+
+static void si_vce_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+		/*
+		 * At this point rdev->vce.vcpu_bo is NULL, which makes
+		 * si_vce_start() fail early and do nothing, so there is
+		 * no point in going through that code path; disable VCE
+		 * here instead.
+		 */
+		rdev->has_vce = 0;
+		return;
+	}
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void si_vce_start(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->has_vce)
+		return;
+
+	r = radeon_vce_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = vce_v1_0_resume(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+		goto error;
+	}
+	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+		goto error;
+	}
+	return;
+
+error:
+	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void si_vce_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+		return;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+		return;
+	}
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
+		return;
+	}
+	r = vce_v1_0_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+		return;
+	}
+}
+
 static int si_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
@@ -6899,33 +7061,8 @@
 		return r;
 	}
 
-	if (rdev->has_uvd) {
-		r = uvd_v2_2_resume(rdev);
-		if (!r) {
-			r = radeon_fence_driver_start_ring(rdev,
-							   R600_RING_TYPE_UVD_INDEX);
-			if (r)
-				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
-		}
-		if (r)
-			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-	}
-
-	r = radeon_vce_resume(rdev);
-	if (!r) {
-		r = vce_v1_0_resume(rdev);
-		if (!r)
-			r = radeon_fence_driver_start_ring(rdev,
-							   TN_RING_TYPE_VCE1_INDEX);
-		if (!r)
-			r = radeon_fence_driver_start_ring(rdev,
-							   TN_RING_TYPE_VCE2_INDEX);
-	}
-	if (r) {
-		dev_err(rdev->dev, "VCE init error (%d).\n", r);
-		rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
-		rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
-	}
+	si_uvd_start(rdev);
+	si_vce_start(rdev);
 
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
@@ -6983,34 +7120,8 @@
 	if (r)
 		return r;
 
-	if (rdev->has_uvd) {
-		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-		if (ring->ring_size) {
-			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-					     RADEON_CP_PACKET2);
-			if (!r)
-				r = uvd_v1_0_init(rdev);
-			if (r)
-				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
-		}
-	}
-
-	r = -ENOENT;
-
-	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     VCE_CMD_NO_OP);
-
-	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     VCE_CMD_NO_OP);
-
-	if (!r)
-		r = vce_v1_0_init(rdev);
-	else if (r != -ENOENT)
-		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	si_uvd_resume(rdev);
+	si_vce_resume(rdev);
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -7070,8 +7181,9 @@
 	if (rdev->has_uvd) {
 		uvd_v1_0_fini(rdev);
 		radeon_uvd_suspend(rdev);
-		radeon_vce_suspend(rdev);
 	}
+	if (rdev->has_vce)
+		radeon_vce_suspend(rdev);
 	si_fini_pg(rdev);
 	si_fini_cg(rdev);
 	si_irq_suspend(rdev);
@@ -7169,25 +7281,8 @@
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 64 * 1024);
 
-	if (rdev->has_uvd) {
-		r = radeon_uvd_init(rdev);
-		if (!r) {
-			ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-			ring->ring_obj = NULL;
-			r600_ring_init(rdev, ring, 4096);
-		}
-	}
-
-	r = radeon_vce_init(rdev);
-	if (!r) {
-		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
-
-		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
-	}
+	si_uvd_init(rdev);
+	si_vce_init(rdev);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -7240,8 +7335,9 @@
 	if (rdev->has_uvd) {
 		uvd_v1_0_fini(rdev);
 		radeon_uvd_fini(rdev);
-		radeon_vce_fini(rdev);
 	}
+	if (rdev->has_vce)
+		radeon_vce_fini(rdev);
 	si_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 12ddcfa..0dbeb50 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -124,12 +124,13 @@
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);
 
 	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE1, size);
 
 	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE2, size);
 
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 7ed778c..9071e65 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -116,12 +116,13 @@
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);
 
 	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE1, size);
 
 	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE2, size);
 
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
index d04d507..91613b8 100644
--- a/drivers/gpu/drm/radeon/uvd_v4_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -41,18 +41,25 @@
 	uint32_t size;
 
 	/* programm the VCPU memory controller bits 0-27 */
-	addr = rdev->uvd.gpu_addr >> 3;
+
+	/* skip over the header of the new firmware format */
+	if (rdev->uvd.fw_header_present)
+		addr = (rdev->uvd.gpu_addr + 0x200) >> 3;
+	else
+		addr = rdev->uvd.gpu_addr >> 3;
+
 	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);
 
 	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE1, size);
 
 	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE2, size);
 
@@ -64,5 +71,8 @@
 	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
 	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
 
+	if (rdev->uvd.fw_header_present)
+		WREG32(UVD_GP_SCRATCH4, rdev->uvd.max_handles);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 1f10fa0..7fc3ca5 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -2,7 +2,7 @@
 	tristate "DRM Support for R-Car Display Unit"
 	depends on DRM && OF
 	depends on ARM || ARM64
-	depends on ARCH_SHMOBILE || COMPILE_TEST
+	depends on ARCH_RENESAS || COMPILE_TEST
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
@@ -27,6 +27,6 @@
 config DRM_RCAR_VSP
 	bool "R-Car DU VSP Compositor Support"
 	depends on DRM_RCAR_DU
-	depends on VIDEO_RENESAS_VSP1
+	depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
 	help
 	  Enable support to expose the R-Car VSP Compositor as KMS planes.
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index d9f06cc..0d8bdda 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -314,7 +314,7 @@
 		return;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	drm_send_vblank_event(dev, rcrtc->index, event);
+	drm_crtc_send_vblank_event(&rcrtc->crtc, event);
 	wake_up(&rcrtc->flip_wait);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index ed6006b..fb9242d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -278,10 +278,7 @@
 	struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
 	struct drm_device *ddev = rcdu->ddev;
 
-	mutex_lock(&ddev->mode_config.mutex);
-	drm_connector_unplug_all(ddev);
-	mutex_unlock(&ddev->mode_config.mutex);
-
+	drm_connector_unregister_all(ddev);
 	drm_dev_unregister(ddev);
 
 	if (rcdu->fbdev)
@@ -300,7 +297,6 @@
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct rcar_du_device *rcdu;
-	struct drm_connector *connector;
 	struct drm_device *ddev;
 	struct resource *mem;
 	int ret;
@@ -364,14 +360,7 @@
 	if (ret)
 		goto error;
 
-	mutex_lock(&ddev->mode_config.mutex);
-	drm_for_each_connector(connector, ddev) {
-		ret = drm_connector_register(connector);
-		if (ret < 0)
-			break;
-	}
-	mutex_unlock(&ddev->mode_config.mutex);
-
+	ret = drm_connector_register_all(ddev);
 	if (ret < 0)
 		goto error;
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 24725bf..e70a4f3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -283,7 +283,8 @@
 }
 
 static int rcar_du_atomic_commit(struct drm_device *dev,
-				 struct drm_atomic_state *state, bool async)
+				 struct drm_atomic_state *state,
+				 bool nonblock)
 {
 	struct rcar_du_device *rcdu = dev->dev_private;
 	struct rcar_du_commit *commit;
@@ -328,7 +329,7 @@
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(dev, state);
 
-	if (async)
+	if (nonblock)
 		schedule_work(&commit->work);
 	else
 		rcar_du_atomic_complete(commit);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 8460ae1..d445e67 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -635,7 +635,7 @@
 static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
 					       struct drm_plane_state *state)
 {
-	__drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(state);
 	kfree(to_rcar_plane_state(state));
 }
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index de7ef04..e671a7c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -251,7 +251,7 @@
 static void rcar_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
 						   struct drm_plane_state *state)
 {
-	__drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(state);
 	kfree(to_rcar_vsp_plane_state(state));
 }
 
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 76b3362..d30bdc3 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -16,6 +16,15 @@
 	  2D or 3D acceleration; acceleration is performed by other
 	  IP found on the SoC.
 
+config ROCKCHIP_ANALOGIX_DP
+	tristate "Rockchip specific extensions for Analogix DP driver"
+	depends on DRM_ROCKCHIP
+	select DRM_ANALOGIX_DP
+	help
+	  This selects support for Rockchip SoC specific extensions
+	  for the Analogix Core DP driver. If you want to enable DP
+	  on an RK3288-based SoC, you should select this option.
+
 config ROCKCHIP_DW_HDMI
         tristate "Rockchip specific extensions for Synopsys DW HDMI"
         depends on DRM_ROCKCHIP
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index df8fbef..05d0713 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -6,6 +6,7 @@
 		rockchip_drm_gem.o rockchip_drm_vop.o
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
+obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
new file mode 100644
index 0000000..7f6a55c
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -0,0 +1,390 @@
+/*
+ * Rockchip SoC DP (Display Port) interface driver.
+ *
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ *         Yakir Yang <ykk@rock-chips.com>
+ *         Jeff Chen <jeff.chen@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define to_dp(nm)	container_of(nm, struct rockchip_dp_device, nm)
+
+/* dp grf register offset */
+#define GRF_SOC_CON6                            0x025c
+#define GRF_EDP_LCD_SEL_MASK                    BIT(5)
+#define GRF_EDP_SEL_VOP_LIT                     BIT(5)
+#define GRF_EDP_SEL_VOP_BIG                     0
+
+struct rockchip_dp_device {
+	struct drm_device        *drm_dev;
+	struct device            *dev;
+	struct drm_encoder       encoder;
+	struct drm_display_mode  mode;
+
+	struct clk               *pclk;
+	struct regmap            *grf;
+	struct reset_control     *rst;
+
+	struct analogix_dp_plat_data plat_data;
+};
+
+static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
+{
+	reset_control_assert(dp->rst);
+	usleep_range(10, 20);
+	reset_control_deassert(dp->rst);
+
+	return 0;
+}
+
+static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
+{
+	struct rockchip_dp_device *dp = to_dp(plat_data);
+	int ret;
+
+	ret = clk_prepare_enable(dp->pclk);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+		return ret;
+	}
+
+	ret = rockchip_dp_pre_init(dp);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed dp pre init %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
+{
+	struct rockchip_dp_device *dp = to_dp(plat_data);
+
+	clk_disable_unprepare(dp->pclk);
+
+	return 0;
+}
+
+static bool
+rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	/* do nothing */
+	return true;
+}
+
+static void rockchip_dp_drm_encoder_mode_set(struct drm_encoder *encoder,
+					     struct drm_display_mode *mode,
+					     struct drm_display_mode *adjusted)
+{
+	/* do nothing */
+}
+
+static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
+{
+	struct rockchip_dp_device *dp = to_dp(encoder);
+	int ret;
+	u32 val;
+
+	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+	if (ret < 0)
+		return;
+
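+	/*
+	 * The upper 16 bits of GRF_SOC_CON6 act as a write-enable mask for
+	 * the corresponding lower bits, so the field mask is shifted up
+	 * along with the VOP selection value.
+	 */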
+	if (ret)
+		val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+	else
+		val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+
+	dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+
+	ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
+	if (ret != 0) {
+		dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+		return;
+	}
+}
+
+static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
+{
+	/* do nothing */
+}
+
+static int
+rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
+				      struct drm_crtc_state *crtc_state,
+				      struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+	/*
+	 * FIXME(Yakir): the driver should configure the CRTC output video
+	 * mode based on the colorimetry that the display information says
+	 * the monitor supports.
+	 *
+	 * It is unclear why the CRTC driver only outputs RGBaaa correctly.
+	 * For example, with the "innolux,n116bge" eDP screen connected, the
+	 * EDID indicates that the screen only accepts 6bpc mode, yet
+	 * configuring the CRTC for RGB666 output makes the eDP screen show
+	 * a blue picture (RGB888 shows a green picture). If the CRTC is
+	 * configured for RGBaaa while the eDP driver keeps the RGB666 input
+	 * video mode, the screen works perfectly.
+	 */
+	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+	s->output_type = DRM_MODE_CONNECTOR_eDP;
+
+	return 0;
+}
+
+static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
+	.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
+	.mode_set = rockchip_dp_drm_encoder_mode_set,
+	.enable = rockchip_dp_drm_encoder_enable,
+	.disable = rockchip_dp_drm_encoder_nop,
+	.atomic_check = rockchip_dp_drm_encoder_atomic_check,
+};
+
+static void rockchip_dp_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
+	.destroy = rockchip_dp_drm_encoder_destroy,
+};
+
+static int rockchip_dp_init(struct rockchip_dp_device *dp)
+{
+	struct device *dev = dp->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	if (IS_ERR(dp->grf)) {
+		dev_err(dev, "failed to get rockchip,grf property\n");
+		return PTR_ERR(dp->grf);
+	}
+
+	dp->pclk = devm_clk_get(dev, "pclk");
+	if (IS_ERR(dp->pclk)) {
+		dev_err(dev, "failed to get pclk property\n");
+		return PTR_ERR(dp->pclk);
+	}
+
+	dp->rst = devm_reset_control_get(dev, "dp");
+	if (IS_ERR(dp->rst)) {
+		dev_err(dev, "failed to get dp reset control\n");
+		return PTR_ERR(dp->rst);
+	}
+
+	ret = clk_prepare_enable(dp->pclk);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+		return ret;
+	}
+
+	ret = rockchip_dp_pre_init(dp);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to pre init %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
+{
+	struct drm_encoder *encoder = &dp->encoder;
+	struct drm_device *drm_dev = dp->drm_dev;
+	struct device *dev = dp->dev;
+	int ret;
+
+	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+							     dev->of_node);
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS, NULL);
+	if (ret) {
+		DRM_ERROR("failed to initialize encoder with drm\n");
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &rockchip_dp_encoder_helper_funcs);
+
+	return 0;
+}
+
+static int rockchip_dp_bind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	/*
+	 * As noted in the probe function, we no longer need the device
+	 * drvdata ourselves; clear it here so that the analogix dp
+	 * driver can take charge of it.
+	 */
+	dev_set_drvdata(dev, NULL);
+
+	ret = rockchip_dp_init(dp);
+	if (ret < 0)
+		return ret;
+
+	dp->drm_dev = drm_dev;
+
+	ret = rockchip_dp_drm_create_encoder(dp);
+	if (ret) {
+		DRM_ERROR("failed to create drm encoder\n");
+		return ret;
+	}
+
+	dp->plat_data.encoder = &dp->encoder;
+
+	dp->plat_data.dev_type = RK3288_DP;
+	dp->plat_data.power_on = rockchip_dp_poweron;
+	dp->plat_data.power_off = rockchip_dp_powerdown;
+
+	return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+}
+
+static void rockchip_dp_unbind(struct device *dev, struct device *master,
+			       void *data)
+{
+	return analogix_dp_unbind(dev, master, data);
+}
+
+static const struct component_ops rockchip_dp_component_ops = {
+	.bind = rockchip_dp_bind,
+	.unbind = rockchip_dp_unbind,
+};
+
+static int rockchip_dp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *panel_node, *port, *endpoint;
+	struct rockchip_dp_device *dp;
+	struct drm_panel *panel;
+
+	port = of_graph_get_port_by_id(dev->of_node, 1);
+	if (!port) {
+		dev_err(dev, "can't find output port\n");
+		return -EINVAL;
+	}
+
+	endpoint = of_get_child_by_name(port, "endpoint");
+	of_node_put(port);
+	if (!endpoint) {
+		dev_err(dev, "no output endpoint found\n");
+		return -EINVAL;
+	}
+
+	panel_node = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
+	if (!panel_node) {
+		dev_err(dev, "no output node found\n");
+		return -EINVAL;
+	}
+
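+	/*
+	 * If the panel has not been registered yet, defer probing so that
+	 * this driver is retried once the panel driver has bound.
+	 */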
+	panel = of_drm_find_panel(panel_node);
+	if (!panel) {
+		DRM_ERROR("failed to find panel\n");
+		of_node_put(panel_node);
+		return -EPROBE_DEFER;
+	}
+
+	of_node_put(panel_node);
+
+	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	dp->dev = dev;
+
+	dp->plat_data.panel = panel;
+
+	/*
+	 * We only use the drvdata until the component bind callback
+	 * runs; at that point we set it to NULL so that the analogix
+	 * dp driver can take charge of the drvdata.
+	 */
+	platform_set_drvdata(pdev, dp);
+
+	return component_add(dev, &rockchip_dp_component_ops);
+}
+
+static int rockchip_dp_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &rockchip_dp_component_ops);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_dp_suspend(struct device *dev)
+{
+	return analogix_dp_suspend(dev);
+}
+
+static int rockchip_dp_resume(struct device *dev)
+{
+	return analogix_dp_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
+};
+
+static const struct of_device_id rockchip_dp_dt_ids[] = {
+	{.compatible = "rockchip,rk3288-dp",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
+
+static struct platform_driver rockchip_dp_driver = {
+	.probe = rockchip_dp_probe,
+	.remove = rockchip_dp_remove,
+	.driver = {
+		   .name = "rockchip-dp",
+		   .owner = THIS_MODULE,
+		   .pm = &rockchip_dp_pm_ops,
+		   .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
+	},
+};
+
+module_platform_driver(rockchip_dp_driver);
+
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Jeff chen <jeff.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific Analogix-DP Driver Extension");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 7975158..dedc65b 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -879,7 +879,6 @@
 {
 	struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
 	int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
-	u32 interface_pix_fmt;
 	u32 val;
 
 	if (clk_prepare_enable(dsi->pclk)) {
@@ -895,24 +894,6 @@
 
 	clk_disable_unprepare(dsi->pclk);
 
-	switch (dsi->format) {
-	case MIPI_DSI_FMT_RGB888:
-		interface_pix_fmt = ROCKCHIP_OUT_MODE_P888;
-		break;
-	case MIPI_DSI_FMT_RGB666:
-		interface_pix_fmt = ROCKCHIP_OUT_MODE_P666;
-		break;
-	case MIPI_DSI_FMT_RGB565:
-		interface_pix_fmt = ROCKCHIP_OUT_MODE_P565;
-		break;
-	default:
-		WARN_ON(1);
-		return;
-	}
-
-	rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_DSI,
-				      interface_pix_fmt);
-
 	if (mux)
 		val = DSI0_SEL_VOP_LIT | (DSI0_SEL_VOP_LIT << 16);
 	else
@@ -922,11 +903,40 @@
 	dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
 }
 
+static int
+dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+				 struct drm_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+	struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
+
+	switch (dsi->format) {
+	case MIPI_DSI_FMT_RGB888:
+		s->output_mode = ROCKCHIP_OUT_MODE_P888;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		s->output_mode = ROCKCHIP_OUT_MODE_P666;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		s->output_mode = ROCKCHIP_OUT_MODE_P565;
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	s->output_type = DRM_MODE_CONNECTOR_DSI;
+
+	return 0;
+}
+
 static struct drm_encoder_helper_funcs
 dw_mipi_dsi_encoder_helper_funcs = {
 	.commit = dw_mipi_dsi_encoder_commit,
 	.mode_set = dw_mipi_dsi_encoder_mode_set,
 	.disable = dw_mipi_dsi_encoder_disable,
+	.atomic_check = dw_mipi_dsi_encoder_atomic_check,
 };
 
 static struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index d5cfef7..801110f 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -201,9 +201,6 @@
 	u32 val;
 	int mux;
 
-	rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
-				      ROCKCHIP_OUT_MODE_AAAA);
-
 	mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
 	if (mux)
 		val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
@@ -215,11 +212,25 @@
 		(mux) ? "LIT" : "BIG");
 }
 
+static int
+dw_hdmi_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
+				      struct drm_crtc_state *crtc_state,
+				      struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+	s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+	return 0;
+}
+
 static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
 	.mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
 	.mode_set   = dw_hdmi_rockchip_encoder_mode_set,
 	.enable     = dw_hdmi_rockchip_encoder_enable,
 	.disable    = dw_hdmi_rockchip_encoder_disable,
+	.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
 };
 
 static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 10d62ff..f8b4feb 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -500,9 +500,6 @@
 {
 	struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
 
-	rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
-				      ROCKCHIP_OUT_MODE_P888);
-
 	inno_hdmi_set_pwr_mode(hdmi, NORMAL);
 }
 
@@ -520,11 +517,25 @@
 	return true;
 }
 
+static int
+inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+			       struct drm_crtc_state *crtc_state,
+			       struct drm_connector_state *conn_state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+	s->output_mode = ROCKCHIP_OUT_MODE_P888;
+	s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+	return 0;
+}
+
 static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
 	.enable     = inno_hdmi_encoder_enable,
 	.disable    = inno_hdmi_encoder_disable,
 	.mode_fixup = inno_hdmi_encoder_mode_fixup,
 	.mode_set   = inno_hdmi_encoder_mode_set,
+	.atomic_check = inno_hdmi_encoder_atomic_check,
 };
 
 static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
@@ -855,8 +866,9 @@
 
 	hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
 	if (IS_ERR(hdmi->ddc)) {
+		ret = PTR_ERR(hdmi->ddc);
 		hdmi->ddc = NULL;
-		return PTR_ERR(hdmi->ddc);
+		return ret;
 	}
 
 	/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f556a8f..a409d1f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -36,6 +36,8 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
+static bool is_support_iommu = true;
+
 /*
  * Attach a (component) device to the shared drm dma mapping from master drm
  * device.  This is used by the VOPs to map GEM buffers to a common DMA
@@ -47,6 +49,9 @@
 	struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
 	int ret;
 
+	if (!is_support_iommu)
+		return 0;
+
 	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
@@ -59,6 +64,9 @@
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
 				    struct device *dev)
 {
+	if (!is_support_iommu)
+		return;
+
 	arm_iommu_detach_device(dev);
 }
 
@@ -127,7 +135,7 @@
 static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
 {
 	struct rockchip_drm_private *private;
-	struct dma_iommu_mapping *mapping;
+	struct dma_iommu_mapping *mapping = NULL;
 	struct device *dev = drm_dev->dev;
 	struct drm_connector *connector;
 	int ret;
@@ -152,24 +160,27 @@
 		goto err_config_cleanup;
 	}
 
-	/* TODO(djkurtz): fetch the mapping start/size from somewhere */
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
-					   SZ_2G);
-	if (IS_ERR(mapping)) {
-		ret = PTR_ERR(mapping);
-		goto err_config_cleanup;
+	if (is_support_iommu) {
+		/* TODO(djkurtz): fetch the mapping start/size from somewhere */
+		mapping = arm_iommu_create_mapping(&platform_bus_type,
+						   0x00000000,
+						   SZ_2G);
+		if (IS_ERR(mapping)) {
+			ret = PTR_ERR(mapping);
+			goto err_config_cleanup;
+		}
+
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+		if (ret)
+			goto err_release_mapping;
+
+		dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+		ret = arm_iommu_attach_device(dev, mapping);
+		if (ret)
+			goto err_release_mapping;
 	}
 
-	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-	if (ret)
-		goto err_release_mapping;
-
-	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-
-	ret = arm_iommu_attach_device(dev, mapping);
-	if (ret)
-		goto err_release_mapping;
-
 	/* Try to bind all sub drivers. */
 	ret = component_bind_all(dev, drm_dev);
 	if (ret)
@@ -205,19 +216,14 @@
 	if (ret)
 		goto err_kms_helper_poll_fini;
 
-	/*
-	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
-	 * by drm timer once a current process gives up ownership of
-	 * vblank event.(after drm_vblank_put function is called)
-	 */
-	drm_dev->vblank_disable_allowed = true;
-
 	drm_mode_config_reset(drm_dev);
 
 	ret = rockchip_drm_fbdev_init(drm_dev);
 	if (ret)
 		goto err_vblank_cleanup;
 
+	if (is_support_iommu)
+		arm_iommu_release_mapping(mapping);
 	return 0;
 err_vblank_cleanup:
 	drm_vblank_cleanup(drm_dev);
@@ -226,9 +232,11 @@
 err_unbind:
 	component_unbind_all(dev, drm_dev);
 err_detach_device:
-	arm_iommu_detach_device(dev);
+	if (is_support_iommu)
+		arm_iommu_detach_device(dev);
 err_release_mapping:
-	arm_iommu_release_mapping(dev->archdata.mapping);
+	if (is_support_iommu)
+		arm_iommu_release_mapping(mapping);
 err_config_cleanup:
 	drm_mode_config_cleanup(drm_dev);
 	drm_dev->dev_private = NULL;
@@ -243,8 +251,8 @@
 	drm_vblank_cleanup(drm_dev);
 	drm_kms_helper_poll_fini(drm_dev);
 	component_unbind_all(dev, drm_dev);
-	arm_iommu_detach_device(dev);
-	arm_iommu_release_mapping(dev->archdata.mapping);
+	if (is_support_iommu)
+		arm_iommu_detach_device(dev);
 	drm_mode_config_cleanup(drm_dev);
 	drm_dev->dev_private = NULL;
 
@@ -488,6 +496,8 @@
 	 * works as expected.
 	 */
 	for (i = 0;; i++) {
+		struct device_node *iommu;
+
 		port = of_parse_phandle(np, "ports", i);
 		if (!port)
 			break;
@@ -497,6 +507,17 @@
 			continue;
 		}
 
+		iommu = of_parse_phandle(port->parent, "iommus", 0);
+		if (!iommu || !of_device_is_available(iommu->parent)) {
+			dev_dbg(dev, "no iommu attached for %s, using non-iommu buffers\n",
+				port->parent->full_name);
+			/*
+			 * If any CRTC does not support an IOMMU, force all
+			 * CRTCs to use non-IOMMU buffers.
+			 */
+			is_support_iommu = false;
+		}
+
 		component_match_add(dev, &match, compare_of, port->parent);
 		of_node_put(port);
 	}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 00d17d7..56f43a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -50,6 +50,14 @@
 	struct mutex lock;
 };
 
+struct rockchip_crtc_state {
+	struct drm_crtc_state base;
+	int output_type;
+	int output_mode;
+};
+#define to_rockchip_crtc_state(s) \
+		container_of(s, struct rockchip_crtc_state, base)
+
 /*
  * Rockchip drm private structure.
  *
@@ -68,8 +76,6 @@
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 				 const struct rockchip_crtc_funcs *crtc_funcs);
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
-int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
-				  int out_mode);
 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
 				   struct device *dev);
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 3b8f652..755cfdb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -123,8 +123,7 @@
 		unsigned int height = mode_cmd->height / (i ? vsub : 1);
 		unsigned int min_size;
 
-		obj = drm_gem_object_lookup(dev, file_priv,
-					    mode_cmd->handles[i]);
+		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			dev_err(dev->dev, "Failed to lookup GEM object\n");
 			ret = -ENXIO;
@@ -276,7 +275,7 @@
 
 int rockchip_drm_atomic_commit(struct drm_device *dev,
 			       struct drm_atomic_state *state,
-			       bool async)
+			       bool nonblock)
 {
 	struct rockchip_drm_private *private = dev->dev_private;
 	struct rockchip_atomic_commit *commit = &private->commit;
@@ -286,7 +285,7 @@
 	if (ret)
 		return ret;
 
-	/* serialize outstanding asynchronous commits */
+	/* serialize outstanding nonblocking commits */
 	mutex_lock(&commit->lock);
 	flush_work(&commit->work);
 
@@ -295,7 +294,7 @@
 	commit->dev = dev;
 	commit->state = state;
 
-	if (async)
+	if (nonblock)
 		schedule_work(&commit->work);
 	else
 		rockchip_atomic_commit_complete(commit);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 18e0733..9c2d8a8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -198,7 +198,7 @@
 	struct drm_gem_object *obj;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	obj = drm_gem_object_lookup(file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a619f12..1c4d5b5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -310,7 +310,7 @@
 	uint16_t vsu_mode;
 	uint16_t lb_mode;
 	uint32_t val;
-	int vskiplines;
+	int vskiplines = 0;
 
 	if (dst_w > 3840) {
 		DRM_ERROR("Maximum destination width (3840) exceeded\n");
@@ -560,6 +560,22 @@
 	drm_plane_cleanup(plane);
 }
 
+static int vop_plane_prepare_fb(struct drm_plane *plane,
+				const struct drm_plane_state *new_state)
+{
+	if (plane->state->fb)
+		drm_framebuffer_reference(plane->state->fb);
+
+	return 0;
+}
+
+static void vop_plane_cleanup_fb(struct drm_plane *plane,
+				 const struct drm_plane_state *old_state)
+{
+	if (old_state->fb)
+		drm_framebuffer_unreference(old_state->fb);
+}
+
 static int vop_plane_atomic_check(struct drm_plane *plane,
 			   struct drm_plane_state *state)
 {
@@ -756,6 +772,8 @@
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
+	.prepare_fb = vop_plane_prepare_fb,
+	.cleanup_fb = vop_plane_cleanup_fb,
 	.atomic_check = vop_plane_atomic_check,
 	.atomic_update = vop_plane_atomic_update,
 	.atomic_disable = vop_plane_atomic_disable,
@@ -804,7 +822,7 @@
 {
 	struct vop_plane_state *vop_state = to_vop_plane_state(state);
 
-	__drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(state);
 
 	kfree(vop_state);
 }
@@ -818,38 +836,6 @@
 	.atomic_destroy_state = vop_atomic_plane_destroy_state,
 };
 
-int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
-				  int connector_type,
-				  int out_mode)
-{
-	struct vop *vop = to_vop(crtc);
-
-	if (WARN_ON(!vop->is_enabled))
-		return -EINVAL;
-
-	switch (connector_type) {
-	case DRM_MODE_CONNECTOR_LVDS:
-		VOP_CTRL_SET(vop, rgb_en, 1);
-		break;
-	case DRM_MODE_CONNECTOR_eDP:
-		VOP_CTRL_SET(vop, edp_en, 1);
-		break;
-	case DRM_MODE_CONNECTOR_HDMIA:
-		VOP_CTRL_SET(vop, hdmi_en, 1);
-		break;
-	case DRM_MODE_CONNECTOR_DSI:
-		VOP_CTRL_SET(vop, mipi_en, 1);
-		break;
-	default:
-		DRM_ERROR("unsupport connector_type[%d]\n", connector_type);
-		return -EINVAL;
-	};
-	VOP_CTRL_SET(vop, out_mode, out_mode);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);
-
 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
@@ -931,6 +917,7 @@
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
 	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
 	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
 	u16 hdisplay = adjusted_mode->hdisplay;
@@ -985,6 +972,23 @@
 	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
 	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
 	VOP_CTRL_SET(vop, pin_pol, val);
+	switch (s->output_type) {
+	case DRM_MODE_CONNECTOR_LVDS:
+		VOP_CTRL_SET(vop, rgb_en, 1);
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+		VOP_CTRL_SET(vop, edp_en, 1);
+		break;
+	case DRM_MODE_CONNECTOR_HDMIA:
+		VOP_CTRL_SET(vop, hdmi_en, 1);
+		break;
+	case DRM_MODE_CONNECTOR_DSI:
+		VOP_CTRL_SET(vop, mipi_en, 1);
+		break;
+	default:
+		DRM_ERROR("unsupported connector_type[%d]\n", s->output_type);
+	}
+	VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
 	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
 	val = hact_st << 16;
@@ -1044,13 +1048,34 @@
 	drm_crtc_cleanup(crtc);
 }
 
+static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct rockchip_crtc_state *rockchip_state;
+
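+	/*
+	 * Allocate the driver-specific CRTC state that embeds the base
+	 * drm_crtc_state; the helper below duplicates the current base
+	 * state into it.
+	 */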
+	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
+	if (!rockchip_state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
+	return &rockchip_state->base;
+}
+
+static void vop_crtc_destroy_state(struct drm_crtc *crtc,
+				   struct drm_crtc_state *state)
+{
+	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
+
+	__drm_atomic_helper_crtc_destroy_state(&s->base);
+	kfree(s);
+}
+
 static const struct drm_crtc_funcs vop_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
 	.destroy = vop_crtc_destroy,
 	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.atomic_duplicate_state = vop_crtc_duplicate_state,
+	.atomic_destroy_state = vop_crtc_destroy_state,
 };
 
 static bool vop_win_pending_is_complete(struct vop_win *vop_win)
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 88643ab..1e154fc 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -440,7 +440,7 @@
 	event = scrtc->event;
 	scrtc->event = NULL;
 	if (event) {
-		drm_send_vblank_event(dev, 0, event);
+		drm_crtc_send_vblank_event(&scrtc->crtc, event);
 		drm_vblank_put(dev, 0);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 505620c..e04deed 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -51,15 +51,6 @@
 	mixer->status = STI_MIXER_DISABLING;
 }
 
-static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	/* accept the provided drm_display_mode, do not fix it up */
-	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-	return true;
-}
-
 static int
 sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
@@ -230,7 +221,6 @@
 static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
 	.enable = sti_crtc_enable,
 	.disable = sti_crtc_disabling,
-	.mode_fixup = sti_crtc_mode_fixup,
 	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = sti_crtc_mode_set_nofb,
 	.mode_set_base = drm_helper_crtc_mode_set_base,
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 3abb400..4e99029 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -6,6 +6,8 @@
  * License terms:  GNU General Public License (GPL), version 2
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6bd6aba..872495e 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -202,7 +202,7 @@
 }
 
 static int sti_atomic_commit(struct drm_device *drm,
-			     struct drm_atomic_state *state, bool async)
+			     struct drm_atomic_state *state, bool nonblock)
 {
 	struct sti_private *private = drm->dev_private;
 	int err;
@@ -211,7 +211,7 @@
 	if (err)
 		return err;
 
-	/* serialize outstanding asynchronous commits */
+	/* serialize outstanding nonblocking commits */
 	mutex_lock(&private->commit.lock);
 	flush_work(&private->commit.work);
 
@@ -223,7 +223,7 @@
 
 	drm_atomic_helper_swap_state(drm, state);
 
-	if (async)
+	if (nonblock)
 		sti_atomic_schedule(private, state);
 	else
 		sti_atomic_complete(private, state);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff3d3e7..ff33c38 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -5,6 +5,7 @@
  *          for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  */
+#include <linux/seq_file.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index ec0d017..f7d3464 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -8,6 +8,7 @@
 #include <linux/component.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/seq_file.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index e05b0dc..1edec29 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -7,6 +7,7 @@
 #include <linux/component.h>
 #include <linux/firmware.h>
 #include <linux/reset.h>
+#include <linux/seq_file.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index e7425c3..aed7801 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -5,6 +5,7 @@
  *          for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  */
+#include <linux/seq_file.h>
 
 #include "sti_compositor.h"
 #include "sti_mixer.h"
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 2c99016..f983db5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -12,6 +12,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
+#include <linux/seq_file.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 5a2c5dc..523ed19 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -3,6 +3,7 @@
  * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  */
+#include <linux/seq_file.h>
 
 #include <drm/drmP.h>
 
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 32c7986..6bf4ce4 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -437,7 +437,7 @@
 			return -EPROBE_DEFER;
 	} else {
 		vtg->irq = platform_get_irq(pdev, 0);
-		if (IS_ERR_VALUE(vtg->irq)) {
+		if (vtg->irq < 0) {
 			DRM_ERROR("Failed to get VTG interrupt\n");
 			return vtg->irq;
 		}
@@ -447,7 +447,7 @@
 		ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
 				vtg_irq_thread, IRQF_ONESHOT,
 				dev_name(dev), vtg);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			DRM_ERROR("Failed to register VTG interrupt\n");
 			return ret;
 		}
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
new file mode 100644
index 0000000..99510e6
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -0,0 +1,14 @@
+config DRM_SUN4I
+	tristate "DRM Support for Allwinner A10 Display Engine"
+	depends on DRM && ARM
+	depends on ARCH_SUNXI || COMPILE_TEST
+	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_PANEL
+	select REGMAP_MMIO
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you have an Allwinner SoC with a
+	  Display Engine. If M is selected the module will be called
+	  sun4i-drm.
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
new file mode 100644
index 0000000..58cd551
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -0,0 +1,13 @@
+sun4i-drm-y += sun4i_crtc.o
+sun4i-drm-y += sun4i_drv.o
+sun4i-drm-y += sun4i_framebuffer.o
+sun4i-drm-y += sun4i_layer.o
+
+sun4i-tcon-y += sun4i_tcon.o
+sun4i-tcon-y += sun4i_rgb.o
+sun4i-tcon-y += sun4i_dotclock.o
+
+obj-$(CONFIG_DRM_SUN4I)		+= sun4i-drm.o sun4i-tcon.o
+obj-$(CONFIG_DRM_SUN4I)		+= sun4i_backend.o
+
+obj-$(CONFIG_DRM_SUN4I)		+= sun4i_tv.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
new file mode 100644
index 0000000..f7a15c1
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/component.h>
+#include <linux/reset.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_drv.h"
+
+static u32 sunxi_rgb2yuv_coef[12] = {
+	0x00000107, 0x00000204, 0x00000064, 0x00000108,
+	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
+	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
+};
+
+void sun4i_backend_apply_color_correction(struct sun4i_backend *backend)
+{
+	int i;
+
+	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");
+
+	/* Set color correction */
+	regmap_write(backend->regs, SUN4I_BACKEND_OCCTL_REG,
+		     SUN4I_BACKEND_OCCTL_ENABLE);
+
+	for (i = 0; i < 12; i++)
+		regmap_write(backend->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
+			     sunxi_rgb2yuv_coef[i]);
+}
+EXPORT_SYMBOL(sun4i_backend_apply_color_correction);
+
+void sun4i_backend_disable_color_correction(struct sun4i_backend *backend)
+{
+	DRM_DEBUG_DRIVER("Disabling color correction\n");
+
+	/* Disable color correction */
+	regmap_update_bits(backend->regs, SUN4I_BACKEND_OCCTL_REG,
+			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
+}
+EXPORT_SYMBOL(sun4i_backend_disable_color_correction);
+
+void sun4i_backend_commit(struct sun4i_backend *backend)
+{
+	DRM_DEBUG_DRIVER("Committing changes\n");
+
+	regmap_write(backend->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
+		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
+		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
+}
+EXPORT_SYMBOL(sun4i_backend_commit);
+
+void sun4i_backend_layer_enable(struct sun4i_backend *backend,
+				int layer, bool enable)
+{
+	u32 val;
+
+	DRM_DEBUG_DRIVER("Enabling layer %d\n", layer);
+
+	if (enable)
+		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
+	else
+		val = 0;
+
+	regmap_update_bits(backend->regs, SUN4I_BACKEND_MODCTL_REG,
+			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
+}
+EXPORT_SYMBOL(sun4i_backend_layer_enable);
+
+static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
+{
+	switch (format) {
+	case DRM_FORMAT_ARGB8888:
+		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
+		break;
+
+	case DRM_FORMAT_XRGB8888:
+		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
+		break;
+
+	case DRM_FORMAT_RGB888:
+		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
+				     int layer, struct drm_plane *plane)
+{
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = state->fb;
+
+	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
+				 state->crtc_w, state->crtc_h);
+		regmap_write(backend->regs, SUN4I_BACKEND_DISSIZE_REG,
+			     SUN4I_BACKEND_DISSIZE(state->crtc_w,
+						   state->crtc_h));
+	}
+
+	/* Set the line width */
+	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
+	regmap_write(backend->regs, SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
+		     fb->pitches[0] * 8);
+
+	/* Set height and width */
+	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
+			 state->crtc_w, state->crtc_h);
+	regmap_write(backend->regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
+		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
+					   state->crtc_h));
+
+	/* Set base coordinates */
+	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
+			 state->crtc_x, state->crtc_y);
+	regmap_write(backend->regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
+		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
+					   state->crtc_y));
+
+	return 0;
+}
+EXPORT_SYMBOL(sun4i_backend_update_layer_coord);
+
+int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
+				       int layer, struct drm_plane *plane)
+{
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = state->fb;
+	bool interlaced = false;
+	u32 val;
+	int ret;
+
+	if (plane->state->crtc)
+		interlaced = plane->state->crtc->state->adjusted_mode.flags
+			& DRM_MODE_FLAG_INTERLACE;
+
+	regmap_update_bits(backend->regs, SUN4I_BACKEND_MODCTL_REG,
+			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
+			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);
+
+	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
+			 interlaced ? "on" : "off");
+
+	ret = sun4i_backend_drm_format_to_layer(fb->pixel_format, &val);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Invalid format\n");
+		return ret;
+	}
+
+	regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
+			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(sun4i_backend_update_layer_formats);
+
+int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
+				      int layer, struct drm_plane *plane)
+{
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_gem_cma_object *gem;
+	u32 lo_paddr, hi_paddr;
+	dma_addr_t paddr;
+	int bpp;
+
+	/* Get the physical address of the buffer in memory */
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+	DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr);
+
+	/* Compute the start of the displayed memory */
+	bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	paddr = gem->paddr + fb->offsets[0];
+	paddr += (state->src_x >> 16) * bpp;
+	paddr += (state->src_y >> 16) * fb->pitches[0];
+
+	DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr);
+
+	/* Write the 32 lower bits of the address (in bits) */
+	lo_paddr = paddr << 3;
+	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
+	regmap_write(backend->regs, SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
+		     lo_paddr);
+
+	/* And the upper bits */
+	hi_paddr = paddr >> 29;
+	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
+	regmap_update_bits(backend->regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
+			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
+			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));
+
+	return 0;
+}
+EXPORT_SYMBOL(sun4i_backend_update_layer_buffer);
+
+static struct regmap_config sun4i_backend_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.max_register	= 0x5800,
+};
+
+static int sun4i_backend_bind(struct device *dev, struct device *master,
+			      void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = data;
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_backend *backend;
+	struct resource *res;
+	void __iomem *regs;
+	int i, ret;
+
+	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
+	if (!backend)
+		return -ENOMEM;
+	dev_set_drvdata(dev, backend);
+	drv->backend = backend;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(regs)) {
+		dev_err(dev, "Couldn't map the backend registers\n");
+		return PTR_ERR(regs);
+	}
+
+	backend->regs = devm_regmap_init_mmio(dev, regs,
+					      &sun4i_backend_regmap_config);
+	if (IS_ERR(backend->regs)) {
+		dev_err(dev, "Couldn't create the backend0 regmap\n");
+		return PTR_ERR(backend->regs);
+	}
+
+	backend->reset = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(backend->reset)) {
+		dev_err(dev, "Couldn't get our reset line\n");
+		return PTR_ERR(backend->reset);
+	}
+
+	ret = reset_control_deassert(backend->reset);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert our reset line\n");
+		return ret;
+	}
+
+	backend->bus_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(backend->bus_clk)) {
+		dev_err(dev, "Couldn't get the backend bus clock\n");
+		ret = PTR_ERR(backend->bus_clk);
+		goto err_assert_reset;
+	}
+	clk_prepare_enable(backend->bus_clk);
+
+	backend->mod_clk = devm_clk_get(dev, "mod");
+	if (IS_ERR(backend->mod_clk)) {
+		dev_err(dev, "Couldn't get the backend module clock\n");
+		ret = PTR_ERR(backend->mod_clk);
+		goto err_disable_bus_clk;
+	}
+	clk_prepare_enable(backend->mod_clk);
+
+	backend->ram_clk = devm_clk_get(dev, "ram");
+	if (IS_ERR(backend->ram_clk)) {
+		dev_err(dev, "Couldn't get the backend RAM clock\n");
+		ret = PTR_ERR(backend->ram_clk);
+		goto err_disable_mod_clk;
+	}
+	clk_prepare_enable(backend->ram_clk);
+
+	/* Reset the registers */
+	for (i = 0x800; i < 0x1000; i += 4)
+		regmap_write(backend->regs, i, 0);
+
+	/* Disable registers autoloading */
+	regmap_write(backend->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
+		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);
+
+	/* Enable the backend */
+	regmap_write(backend->regs, SUN4I_BACKEND_MODCTL_REG,
+		     SUN4I_BACKEND_MODCTL_DEBE_EN |
+		     SUN4I_BACKEND_MODCTL_START_CTL);
+
+	return 0;
+
+err_disable_mod_clk:
+	clk_disable_unprepare(backend->mod_clk);
+err_disable_bus_clk:
+	clk_disable_unprepare(backend->bus_clk);
+err_assert_reset:
+	reset_control_assert(backend->reset);
+	return ret;
+}
+
+static void sun4i_backend_unbind(struct device *dev, struct device *master,
+				 void *data)
+{
+	struct sun4i_backend *backend = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(backend->ram_clk);
+	clk_disable_unprepare(backend->mod_clk);
+	clk_disable_unprepare(backend->bus_clk);
+	reset_control_assert(backend->reset);
+}
+
+static struct component_ops sun4i_backend_ops = {
+	.bind	= sun4i_backend_bind,
+	.unbind	= sun4i_backend_unbind,
+};
+
+static int sun4i_backend_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &sun4i_backend_ops);
+}
+
+static int sun4i_backend_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &sun4i_backend_ops);
+
+	return 0;
+}
+
+static const struct of_device_id sun4i_backend_of_table[] = {
+	{ .compatible = "allwinner,sun5i-a13-display-backend" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
+
+static struct platform_driver sun4i_backend_platform_driver = {
+	.probe		= sun4i_backend_probe,
+	.remove		= sun4i_backend_remove,
+	.driver		= {
+		.name		= "sun4i-backend",
+		.of_match_table	= sun4i_backend_of_table,
+	},
+};
+module_platform_driver(sun4i_backend_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
new file mode 100644
index 0000000..7070bb3
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_BACKEND_H_
+#define _SUN4I_BACKEND_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define SUN4I_BACKEND_MODCTL_REG		0x800
+#define SUN4I_BACKEND_MODCTL_LINE_SEL			BIT(29)
+#define SUN4I_BACKEND_MODCTL_ITLMOD_EN			BIT(28)
+#define SUN4I_BACKEND_MODCTL_OUT_SEL			GENMASK(22, 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD				(0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_FE0				(6 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_FE1				(7 << 20)
+#define SUN4I_BACKEND_MODCTL_HWC_EN			BIT(16)
+#define SUN4I_BACKEND_MODCTL_LAY_EN(l)			BIT(8 + l)
+#define SUN4I_BACKEND_MODCTL_OCSC_EN			BIT(5)
+#define SUN4I_BACKEND_MODCTL_DFLK_EN			BIT(4)
+#define SUN4I_BACKEND_MODCTL_DLP_START_CTL		BIT(2)
+#define SUN4I_BACKEND_MODCTL_START_CTL			BIT(1)
+#define SUN4I_BACKEND_MODCTL_DEBE_EN			BIT(0)
+
+#define SUN4I_BACKEND_BACKCOLOR_REG		0x804
+#define SUN4I_BACKEND_BACKCOLOR(r, g, b)		(((r) << 16) | ((g) << 8) | (b))
+
+#define SUN4I_BACKEND_DISSIZE_REG		0x808
+#define SUN4I_BACKEND_DISSIZE(w, h)			(((((h) - 1) & 0xffff) << 16) | \
+							 (((w) - 1) & 0xffff))
+
+#define SUN4I_BACKEND_LAYSIZE_REG(l)		(0x810 + (0x4 * (l)))
+#define SUN4I_BACKEND_LAYSIZE(w, h)			(((((h) - 1) & 0x1fff) << 16) | \
+							 (((w) - 1) & 0x1fff))
+
+#define SUN4I_BACKEND_LAYCOOR_REG(l)		(0x820 + (0x4 * (l)))
+#define SUN4I_BACKEND_LAYCOOR(x, y)			((((u32)(y) & 0xffff) << 16) | \
+							 ((u32)(x) & 0xffff))
+
+#define SUN4I_BACKEND_LAYLINEWIDTH_REG(l)	(0x840 + (0x4 * (l)))
+
+#define SUN4I_BACKEND_LAYFB_L32ADD_REG(l)	(0x850 + (0x4 * (l)))
+
+#define SUN4I_BACKEND_LAYFB_H4ADD_REG		0x860
+#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l)		GENMASK(3 + ((l) * 8), 0)
+#define SUN4I_BACKEND_LAYFB_H4ADD(l, val)			((val) << ((l) * 8))
+
+#define SUN4I_BACKEND_REGBUFFCTL_REG		0x870
+#define SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS		BIT(1)
+#define SUN4I_BACKEND_REGBUFFCTL_LOADCTL		BIT(0)
+
+#define SUN4I_BACKEND_CKMAX_REG			0x880
+#define SUN4I_BACKEND_CKMIN_REG			0x884
+#define SUN4I_BACKEND_CKCFG_REG			0x888
+#define SUN4I_BACKEND_ATTCTL_REG0(l)		(0x890 + (0x4 * (l)))
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK	BIT(15)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x)		((x) << 15)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK	GENMASK(11, 10)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x)			((x) << 10)
+
+#define SUN4I_BACKEND_ATTCTL_REG1(l)		(0x8a0 + (0x4 * (l)))
+#define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT		GENMASK(15, 14)
+#define SUN4I_BACKEND_ATTCTL_REG1_LAY_WSCAFCT		GENMASK(13, 12)
+#define SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT		GENMASK(11, 8)
+#define SUN4I_BACKEND_LAY_FBFMT_1BPP				(0 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_2BPP				(1 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_4BPP				(2 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_8BPP				(3 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB655				(4 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB565				(5 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB556				(6 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_ARGB1555			(7 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGBA5551			(8 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_XRGB8888			(9 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_ARGB8888			(10 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB888				(11 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_ARGB4444			(12 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGBA4444			(13 << 8)
+
+#define SUN4I_BACKEND_DLCDPCTL_REG		0x8b0
+#define SUN4I_BACKEND_DLCDPFRMBUF_ADDRCTL_REG	0x8b4
+#define SUN4I_BACKEND_DLCDPCOOR_REG0		0x8b8
+#define SUN4I_BACKEND_DLCDPCOOR_REG1		0x8bc
+
+#define SUN4I_BACKEND_INT_EN_REG		0x8c0
+#define SUN4I_BACKEND_INT_FLAG_REG		0x8c4
+#define SUN4I_BACKEND_REG_LOAD_FINISHED			BIT(1)
+
+#define SUN4I_BACKEND_HWCCTL_REG		0x8d8
+#define SUN4I_BACKEND_HWCFBCTL_REG		0x8e0
+#define SUN4I_BACKEND_WBCTL_REG			0x8f0
+#define SUN4I_BACKEND_WBADD_REG			0x8f4
+#define SUN4I_BACKEND_WBLINEWIDTH_REG		0x8f8
+#define SUN4I_BACKEND_SPREN_REG			0x900
+#define SUN4I_BACKEND_SPRFMTCTL_REG		0x908
+#define SUN4I_BACKEND_SPRALPHACTL_REG		0x90c
+#define SUN4I_BACKEND_IYUVCTL_REG		0x920
+#define SUN4I_BACKEND_IYUVADD_REG(c)		(0x930 + (0x4 * (c)))
+#define SUN4I_BACKEND_IYUVLINEWITDTH_REG(c)	(0x940 + (0x4 * (c)))
+#define SUN4I_BACKEND_YGCOEF_REG(c)		(0x950 + (0x4 * (c)))
+#define SUN4I_BACKEND_YGCONS_REG		0x95c
+#define SUN4I_BACKEND_URCOEF_REG(c)		(0x960 + (0x4 * (c)))
+#define SUN4I_BACKEND_URCONS_REG		0x96c
+#define SUN4I_BACKEND_VBCOEF_REG(c)		(0x970 + (0x4 * (c)))
+#define SUN4I_BACKEND_VBCONS_REG		0x97c
+#define SUN4I_BACKEND_KSCTL_REG			0x980
+#define SUN4I_BACKEND_KSBKCOLOR_REG		0x984
+#define SUN4I_BACKEND_KSFSTLINEWIDTH_REG	0x988
+#define SUN4I_BACKEND_KSVSCAFCT_REG		0x98c
+#define SUN4I_BACKEND_KSHSCACOEF_REG(x)		(0x9a0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCCTL_REG			0x9c0
+#define SUN4I_BACKEND_OCCTL_ENABLE			BIT(0)
+
+#define SUN4I_BACKEND_OCRCOEF_REG(x)		(0x9d0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCRCONS_REG		0x9dc
+#define SUN4I_BACKEND_OCGCOEF_REG(x)		(0x9e0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCGCONS_REG		0x9ec
+#define SUN4I_BACKEND_OCBCOEF_REG(x)		(0x9f0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCBCONS_REG		0x9fc
+#define SUN4I_BACKEND_SPRCOORCTL_REG(s)		(0xa00 + (0x4 * (s)))
+#define SUN4I_BACKEND_SPRATTCTL_REG(s)		(0xb00 + (0x4 * (s)))
+#define SUN4I_BACKEND_SPRADD_REG(s)		(0xc00 + (0x4 * (s)))
+#define SUN4I_BACKEND_SPRLINEWIDTH_REG(s)	(0xd00 + (0x4 * (s)))
+
+#define SUN4I_BACKEND_SPRPALTAB_OFF		0x4000
+#define SUN4I_BACKEND_GAMMATAB_OFF		0x4400
+#define SUN4I_BACKEND_HWCPATTERN_OFF		0x4800
+#define SUN4I_BACKEND_HWCCOLORTAB_OFF		0x4c00
+#define SUN4I_BACKEND_PIPE_OFF(p)		(0x5000 + (0x400 * (p)))
+
+struct sun4i_backend {
+	struct regmap		*regs;
+
+	struct reset_control	*reset;
+
+	struct clk		*bus_clk;
+	struct clk		*mod_clk;
+	struct clk		*ram_clk;
+};
+
+void sun4i_backend_apply_color_correction(struct sun4i_backend *backend);
+void sun4i_backend_disable_color_correction(struct sun4i_backend *backend);
+
+void sun4i_backend_commit(struct sun4i_backend *backend);
+
+void sun4i_backend_layer_enable(struct sun4i_backend *backend,
+				int layer, bool enable);
+int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
+				     int layer, struct drm_plane *plane);
+int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
+				       int layer, struct drm_plane *plane);
+int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
+				      int layer, struct drm_plane *plane);
+
+#endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
new file mode 100644
index 0000000..4182a21
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modes.h>
+
+#include <linux/clk-provider.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include <video/videomode.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_crtc.h"
+#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
+
+static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_state)
+{
+	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+
+	if (crtc->state->event) {
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		scrtc->event = crtc->state->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		crtc->state->event = NULL;
+	}
+}
+
+static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_state)
+{
+	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+	struct sun4i_drv *drv = scrtc->drv;
+
+	DRM_DEBUG_DRIVER("Committing plane changes\n");
+
+	sun4i_backend_commit(drv->backend);
+}
+
+static void sun4i_crtc_disable(struct drm_crtc *crtc)
+{
+	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+	struct sun4i_drv *drv = scrtc->drv;
+
+	DRM_DEBUG_DRIVER("Disabling the CRTC\n");
+
+	sun4i_tcon_disable(drv->tcon);
+}
+
+static void sun4i_crtc_enable(struct drm_crtc *crtc)
+{
+	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+	struct sun4i_drv *drv = scrtc->drv;
+
+	DRM_DEBUG_DRIVER("Enabling the CRTC\n");
+
+	sun4i_tcon_enable(drv->tcon);
+}
+
+static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
+	.atomic_begin	= sun4i_crtc_atomic_begin,
+	.atomic_flush	= sun4i_crtc_atomic_flush,
+	.disable	= sun4i_crtc_disable,
+	.enable		= sun4i_crtc_enable,
+};
+
+static const struct drm_crtc_funcs sun4i_crtc_funcs = {
+	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
+	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
+	.destroy		= drm_crtc_cleanup,
+	.page_flip		= drm_atomic_helper_page_flip,
+	.reset			= drm_atomic_helper_crtc_reset,
+	.set_config		= drm_atomic_helper_set_config,
+};
+
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_crtc *scrtc;
+	int ret;
+
+	scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL);
+	if (!scrtc)
+		return NULL;
+	scrtc->drv = drv;
+
+	ret = drm_crtc_init_with_planes(drm, &scrtc->crtc,
+					drv->primary,
+					NULL,
+					&sun4i_crtc_funcs,
+					NULL);
+	if (ret) {
+		dev_err(drm->dev, "Couldn't init DRM CRTC\n");
+		return NULL;
+	}
+
+	drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
+
+	return scrtc;
+}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.h b/drivers/gpu/drm/sun4i/sun4i_crtc.h
new file mode 100644
index 0000000..dec8ce4
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_CRTC_H_
+#define _SUN4I_CRTC_H_
+
+struct sun4i_crtc {
+	struct drm_crtc			crtc;
+	struct drm_pending_vblank_event	*event;
+
+	struct sun4i_drv		*drv;
+};
+
+static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct sun4i_crtc, crtc);
+}
+
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm);
+
+#endif /* _SUN4I_CRTC_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
new file mode 100644
index 0000000..3ff668c
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2016 Free Electrons
+ * Copyright (C) 2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "sun4i_tcon.h"
+
+struct sun4i_dclk {
+	struct clk_hw	hw;
+	struct regmap	*regmap;
+};
+
+static inline struct sun4i_dclk *hw_to_dclk(struct clk_hw *hw)
+{
+	return container_of(hw, struct sun4i_dclk, hw);
+}
+
+static void sun4i_dclk_disable(struct clk_hw *hw)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+
+	regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
+			   BIT(SUN4I_TCON0_DCLK_GATE_BIT), 0);
+}
+
+static int sun4i_dclk_enable(struct clk_hw *hw)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+
+	return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
+				  BIT(SUN4I_TCON0_DCLK_GATE_BIT),
+				  BIT(SUN4I_TCON0_DCLK_GATE_BIT));
+}
+
+static int sun4i_dclk_is_enabled(struct clk_hw *hw)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val;
+
+	regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
+
+	return val & BIT(SUN4I_TCON0_DCLK_GATE_BIT);
+}
+
+static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
+					    unsigned long parent_rate)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val;
+
+	regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
+
+	val >>= SUN4I_TCON0_DCLK_DIV_SHIFT;
+	val &= SUN4I_TCON0_DCLK_DIV_WIDTH;
+
+	if (!val)
+		val = 1;
+
+	return parent_rate / val;
+}
+
+static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
+				  unsigned long *parent_rate)
+{
+	return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate);
+}
+
+static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
+			       unsigned long parent_rate)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	int div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+	return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
+				  GENMASK(6, 0), div);
+}
+
+static int sun4i_dclk_get_phase(struct clk_hw *hw)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val;
+
+	regmap_read(dclk->regmap, SUN4I_TCON0_IO_POL_REG, &val);
+
+	val >>= 28;
+	val &= 3;
+
+	return val * 120;
+}
+
+static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
+{
+	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+
+	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
+			   GENMASK(29, 28),
+			   (degrees / 120) << 28);
+
+	return 0;
+}
+
+static const struct clk_ops sun4i_dclk_ops = {
+	.disable	= sun4i_dclk_disable,
+	.enable		= sun4i_dclk_enable,
+	.is_enabled	= sun4i_dclk_is_enabled,
+
+	.recalc_rate	= sun4i_dclk_recalc_rate,
+	.round_rate	= sun4i_dclk_round_rate,
+	.set_rate	= sun4i_dclk_set_rate,
+
+	.get_phase	= sun4i_dclk_get_phase,
+	.set_phase	= sun4i_dclk_set_phase,
+};
+
+int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
+{
+	const char *clk_name, *parent_name;
+	struct clk_init_data init;
+	struct sun4i_dclk *dclk;
+
+	parent_name = __clk_get_name(tcon->sclk0);
+	of_property_read_string_index(dev->of_node, "clock-output-names", 0,
+				      &clk_name);
+
+	dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
+	if (!dclk)
+		return -ENOMEM;
+
+	init.name = clk_name;
+	init.ops = &sun4i_dclk_ops;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	dclk->regmap = tcon->regs;
+	dclk->hw.init = &init;
+
+	tcon->dclk = clk_register(dev, &dclk->hw);
+	if (IS_ERR(tcon->dclk))
+		return PTR_ERR(tcon->dclk);
+
+	return 0;
+}
+EXPORT_SYMBOL(sun4i_dclk_create);
+
+int sun4i_dclk_free(struct sun4i_tcon *tcon)
+{
+	clk_unregister(tcon->dclk);
+	return 0;
+}
+EXPORT_SYMBOL(sun4i_dclk_free);
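
For readers skimming the patch, here is a small, self-contained user-space sketch of the divider and phase arithmetic performed by the dot clock callbacks above; the parent and target rates are assumed example values, not figures taken from this patch.

/*
 * Sketch only: mirrors the sun4i_dclk_set_rate()/sun4i_dclk_set_phase() math.
 */
#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_CLOSEST() macro. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long parent_rate = 297000000UL;	/* assumed TCON ch0 rate */
	unsigned long rate = 33000000UL;		/* assumed panel pixel clock */
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, rate);
	int degrees = 120;				/* assumed board phase */

	/* set_rate() programs the 7-bit divider, so the achieved rate is: */
	printf("div = %lu, dotclock = %lu Hz\n", div, parent_rate / div);

	/* set_phase() can only encode multiples of 120 degrees (field 0-2) */
	printf("phase field = %d\n", degrees / 120);
	return 0;
}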
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.h b/drivers/gpu/drm/sun4i/sun4i_dotclock.h
new file mode 100644
index 0000000..d5e25fa
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_DOTCLOCK_H_
+#define _SUN4I_DOTCLOCK_H_
+
+struct sun4i_tcon;
+
+int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon);
+int sun4i_dclk_free(struct sun4i_tcon *tcon);
+
+#endif /* _SUN4I_DOTCLOCK_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
new file mode 100644
index 0000000..76e922b
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sun4i_crtc.h"
+#include "sun4i_drv.h"
+#include "sun4i_framebuffer.h"
+#include "sun4i_layer.h"
+#include "sun4i_tcon.h"
+
+static int sun4i_drv_connector_plug_all(struct drm_device *drm)
+{
+	struct drm_connector *connector, *failed;
+	int ret;
+
+	mutex_lock(&drm->mode_config.mutex);
+	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+		ret = drm_connector_register(connector);
+		if (ret) {
+			failed = connector;
+			goto err;
+		}
+	}
+	mutex_unlock(&drm->mode_config.mutex);
+	return 0;
+
+err:
+	list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+		if (failed == connector)
+			break;
+
+		drm_connector_unregister(connector);
+	}
+	mutex_unlock(&drm->mode_config.mutex);
+
+	return ret;
+}
+
+static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	DRM_DEBUG_DRIVER("Enabling VBLANK on pipe %d\n", pipe);
+
+	sun4i_tcon_enable_vblank(tcon, true);
+
+	return 0;
+}
+
+static void sun4i_drv_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	DRM_DEBUG_DRIVER("Disabling VBLANK on pipe %d\n", pipe);
+
+	sun4i_tcon_enable_vblank(tcon, false);
+}
+
+static const struct file_operations sun4i_drv_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver sun4i_drv_driver = {
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+
+	/* Generic Operations */
+	.fops			= &sun4i_drv_fops,
+	.name			= "sun4i-drm",
+	.desc			= "Allwinner sun4i Display Engine",
+	.date			= "20150629",
+	.major			= 1,
+	.minor			= 0,
+
+	/* GEM Operations */
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_destroy		= drm_gem_dumb_destroy,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+
+	/* PRIME Operations */
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
+
+	/* Frame Buffer Operations */
+
+	/* VBlank Operations */
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= sun4i_drv_enable_vblank,
+	.disable_vblank		= sun4i_drv_disable_vblank,
+};
+
+static int sun4i_drv_bind(struct device *dev)
+{
+	struct drm_device *drm;
+	struct sun4i_drv *drv;
+	int ret;
+
+	drm = drm_dev_alloc(&sun4i_drv_driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	ret = drm_dev_set_unique(drm, dev_name(drm->dev));
+	if (ret)
+		goto free_drm;
+
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv) {
+		ret = -ENOMEM;
+		goto free_drm;
+	}
+	drm->dev_private = drv;
+
+	drm_vblank_init(drm, 1);
+	drm_mode_config_init(drm);
+
+	ret = component_bind_all(drm->dev, drm);
+	if (ret) {
+		dev_err(drm->dev, "Couldn't bind all pipeline components\n");
+		goto free_drm;
+	}
+
+	/* Create our layers */
+	drv->layers = sun4i_layers_init(drm);
+	if (!drv->layers) {
+		dev_err(drm->dev, "Couldn't create the planes\n");
+		ret = -EINVAL;
+		goto free_drm;
+	}
+
+	/* Create our CRTC */
+	drv->crtc = sun4i_crtc_init(drm);
+	if (!drv->crtc) {
+		dev_err(drm->dev, "Couldn't create the CRTC\n");
+		ret = -EINVAL;
+		goto free_drm;
+	}
+	drm->irq_enabled = true;
+
+	/* Create our framebuffer */
+	drv->fbdev = sun4i_framebuffer_init(drm);
+	if (IS_ERR(drv->fbdev)) {
+		dev_err(drm->dev, "Couldn't create our framebuffer\n");
+		ret = PTR_ERR(drv->fbdev);
+		goto free_drm;
+	}
+
+	/* Enable connectors polling */
+	drm_kms_helper_poll_init(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto free_drm;
+
+	ret = sun4i_drv_connector_plug_all(drm);
+	if (ret)
+		goto unregister_drm;
+
+	return 0;
+
+unregister_drm:
+	drm_dev_unregister(drm);
+free_drm:
+	drm_dev_unref(drm);
+	return ret;
+}
+
+static void sun4i_drv_unbind(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+
+	drm_dev_unregister(drm);
+	drm_kms_helper_poll_fini(drm);
+	sun4i_framebuffer_free(drm);
+	drm_vblank_cleanup(drm);
+	drm_dev_unref(drm);
+}
+
+static const struct component_master_ops sun4i_drv_master_ops = {
+	.bind	= sun4i_drv_bind,
+	.unbind	= sun4i_drv_unbind,
+};
+
+static bool sun4i_drv_node_is_frontend(struct device_node *node)
+{
+	return of_device_is_compatible(node,
+				       "allwinner,sun5i-a13-display-frontend");
+}
+
+static bool sun4i_drv_node_is_tcon(struct device_node *node)
+{
+	return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon");
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+	DRM_DEBUG_DRIVER("Comparing of node %s with %s\n",
+			 of_node_full_name(dev->of_node),
+			 of_node_full_name(data));
+
+	return dev->of_node == data;
+}
+
+static int sun4i_drv_add_endpoints(struct device *dev,
+				   struct component_match **match,
+				   struct device_node *node)
+{
+	struct device_node *port, *ep, *remote;
+	int count = 0;
+
+	/*
+	 * We don't support the frontend for now, so we will never
+	 * have a device bound. Just skip over it, but we still want
+	 * the rest of our pipeline to be added.
+	 */
+	if (!sun4i_drv_node_is_frontend(node) &&
+	    !of_device_is_available(node))
+		return 0;
+
+	if (!sun4i_drv_node_is_frontend(node)) {
+		/* Add current component */
+		DRM_DEBUG_DRIVER("Adding component %s\n",
+				 of_node_full_name(node));
+		component_match_add(dev, match, compare_of, node);
+		count++;
+	}
+
+	/* Inputs are listed first, then outputs */
+	port = of_graph_get_port_by_id(node, 1);
+	if (!port) {
+		DRM_DEBUG_DRIVER("No output to bind\n");
+		return count;
+	}
+
+	for_each_available_child_of_node(port, ep) {
+		remote = of_graph_get_remote_port_parent(ep);
+		if (!remote) {
+			DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+			of_node_put(remote);
+			continue;
+		}
+
+		/*
+		 * If the node is our TCON, the first port is used for our
+		 * panel, and will not be part of the
+		 * component framework.
+		 */
+		if (sun4i_drv_node_is_tcon(node)) {
+			struct of_endpoint endpoint;
+
+			if (of_graph_parse_endpoint(ep, &endpoint)) {
+				DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+				continue;
+			}
+
+			if (!endpoint.id) {
+				DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+				continue;
+			}
+		}
+
+		/* Walk down our tree */
+		count += sun4i_drv_add_endpoints(dev, match, remote);
+
+		of_node_put(remote);
+	}
+
+	return count;
+}
+
+static int sun4i_drv_probe(struct platform_device *pdev)
+{
+	struct component_match *match = NULL;
+	struct device_node *np = pdev->dev.of_node;
+	int i, count = 0;
+
+	for (i = 0;; i++) {
+		struct device_node *pipeline = of_parse_phandle(np,
+								"allwinner,pipelines",
+								i);
+		if (!pipeline)
+			break;
+
+		count += sun4i_drv_add_endpoints(&pdev->dev, &match,
+						pipeline);
+
+		DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
+				 count, i);
+	}
+
+	if (count)
+		return component_master_add_with_match(&pdev->dev,
+						       &sun4i_drv_master_ops,
+						       match);
+	else
+		return 0;
+}
+
+static int sun4i_drv_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id sun4i_drv_of_table[] = {
+	{ .compatible = "allwinner,sun5i-a13-display-engine" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
+
+static struct platform_driver sun4i_drv_platform_driver = {
+	.probe		= sun4i_drv_probe,
+	.remove		= sun4i_drv_remove,
+	.driver		= {
+		.name		= "sun4i-drm",
+		.of_match_table	= sun4i_drv_of_table,
+	},
+};
+module_platform_driver(sun4i_drv_platform_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Display Engine DRM/KMS Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h
new file mode 100644
index 0000000..597353e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_DRV_H_
+#define _SUN4I_DRV_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+struct sun4i_drv {
+	struct sun4i_backend	*backend;
+	struct sun4i_crtc	*crtc;
+	struct sun4i_tcon	*tcon;
+
+	struct drm_plane	*primary;
+	struct drm_fbdev_cma	*fbdev;
+
+	struct sun4i_layer	**layers;
+};
+
+#endif /* _SUN4I_DRV_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
new file mode 100644
index 0000000..a0b30c2
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drmP.h>
+
+#include "sun4i_drv.h"
+
+static void sun4i_de_output_poll_changed(struct drm_device *drm)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+
+	if (drv->fbdev)
+		drm_fbdev_cma_hotplug_event(drv->fbdev);
+}
+
+static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
+	.output_poll_changed	= sun4i_de_output_poll_changed,
+	.atomic_check		= drm_atomic_helper_check,
+	.atomic_commit		= drm_atomic_helper_commit,
+	.fb_create		= drm_fb_cma_create,
+};
+
+struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
+{
+	drm_mode_config_reset(drm);
+
+	drm->mode_config.max_width = 8192;
+	drm->mode_config.max_height = 8192;
+
+	drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
+
+	return drm_fbdev_cma_init(drm, 32,
+				  drm->mode_config.num_crtc,
+				  drm->mode_config.num_connector);
+}
+
+void sun4i_framebuffer_free(struct drm_device *drm)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+
+	drm_fbdev_cma_fini(drv->fbdev);
+	drm_mode_config_cleanup(drm);
+}
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
new file mode 100644
index 0000000..3afd652
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_FRAMEBUFFER_H_
+#define _SUN4I_FRAMEBUFFER_H_
+
+struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm);
+void sun4i_framebuffer_free(struct drm_device *drm);
+
+#endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
new file mode 100644
index 0000000..068ab80
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_drv.h"
+#include "sun4i_layer.h"
+
+#define SUN4I_NUM_LAYERS	2
+
+static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
+					    struct drm_plane_state *state)
+{
+	return 0;
+}
+
+static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
+					       struct drm_plane_state *old_state)
+{
+	struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
+	struct sun4i_drv *drv = layer->drv;
+	struct sun4i_backend *backend = drv->backend;
+
+	sun4i_backend_layer_enable(backend, layer->id, false);
+}
+
+static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
+					      struct drm_plane_state *old_state)
+{
+	struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
+	struct sun4i_drv *drv = layer->drv;
+	struct sun4i_backend *backend = drv->backend;
+
+	sun4i_backend_update_layer_coord(backend, layer->id, plane);
+	sun4i_backend_update_layer_formats(backend, layer->id, plane);
+	sun4i_backend_update_layer_buffer(backend, layer->id, plane);
+	sun4i_backend_layer_enable(backend, layer->id, true);
+}
+
+static struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
+	.atomic_check	= sun4i_backend_layer_atomic_check,
+	.atomic_disable	= sun4i_backend_layer_atomic_disable,
+	.atomic_update	= sun4i_backend_layer_atomic_update,
+};
+
+static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
+	.destroy		= drm_plane_cleanup,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.reset			= drm_atomic_helper_plane_reset,
+	.update_plane		= drm_atomic_helper_update_plane,
+};
+
+static const uint32_t sun4i_backend_layer_formats[] = {
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_RGB888,
+};
+
+static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
+						enum drm_plane_type type)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_layer *layer;
+	int ret;
+
+	layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
+	if (!layer)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
+				       &sun4i_backend_layer_funcs,
+				       sun4i_backend_layer_formats,
+				       ARRAY_SIZE(sun4i_backend_layer_formats),
+				       type,
+				       NULL);
+	if (ret) {
+		dev_err(drm->dev, "Couldn't initialize layer\n");
+		return ERR_PTR(ret);
+	}
+
+	drm_plane_helper_add(&layer->plane,
+			     &sun4i_backend_layer_helper_funcs);
+	layer->drv = drv;
+
+	if (type == DRM_PLANE_TYPE_PRIMARY)
+		drv->primary = &layer->plane;
+
+	return layer;
+}
+
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_layer **layers;
+	int i;
+
+	layers = devm_kcalloc(drm->dev, SUN4I_NUM_LAYERS, sizeof(*layers),
+			      GFP_KERNEL);
+	if (!layers)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * The hardware is a bit unusual here.
+	 *
+	 * Even though it supports 4 layers, it does the composition
+	 * in two separate steps.
+	 *
+	 * The first one is assigning a layer to one of its two
+	 * pipes. If more than one layer is assigned to the same pipe,
+	 * and if their pixels overlap, the pipe will take the pixel from
+	 * the layer with the highest priority.
+	 *
+	 * The second step is the actual alpha blending, which takes
+	 * the two pipes as input and uses the alpha component, if any,
+	 * to blend the two together.
+	 *
+	 * This two-step scenario makes it impossible to guarantee
+	 * robust alpha blending between the 4 layers in all
+	 * situations. So we just expose two layers, one per pipe. On
+	 * SoCs that support it, sprites could fill the need for more
+	 * layers.
+	 */
+	for (i = 0; i < SUN4I_NUM_LAYERS; i++) {
+		enum drm_plane_type type = (i == 0)
+					 ? DRM_PLANE_TYPE_PRIMARY
+					 : DRM_PLANE_TYPE_OVERLAY;
+		struct sun4i_layer *layer;
+
+		layer = sun4i_layer_init_one(drm, type);
+		if (IS_ERR(layer)) {
+			dev_err(drm->dev, "Couldn't initialize %s plane\n",
+				i ? "overlay" : "primary");
+			return ERR_CAST(layer);
+		}
+
+		DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
+				 i ? "overlay" : "primary", i);
+		regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
+				   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
+				   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(i));
+
+		layer->id = i;
+		layers[i] = layer;
+	}
+
+	return layers;
+}
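
As a side note, here is a minimal, stand-alone sketch (not derived from the Allwinner documentation) of the per-pixel "alpha over" step that the pipe comment in sun4i_layers_init() refers to; the example pixel values are assumed.

/*
 * Sketch only: pipe 1 is blended over the (opaque) output of pipe 0.
 */
#include <stdint.h>
#include <stdio.h>

/* Blend one non-premultiplied ARGB8888 pixel from pipe 1 over pipe 0. */
static uint32_t blend_over(uint32_t bg, uint32_t fg)
{
	unsigned int a = fg >> 24;
	uint32_t out = 0xff000000;	/* result stays opaque */
	int shift;

	for (shift = 0; shift < 24; shift += 8) {
		unsigned int b = (bg >> shift) & 0xff;
		unsigned int f = (fg >> shift) & 0xff;

		out |= ((f * a + b * (255 - a)) / 255) << shift;
	}
	return out;
}

int main(void)
{
	/* pipe 0: opaque red background, pipe 1: half-transparent blue */
	printf("result = 0x%08x\n",
	       (unsigned int)blend_over(0xffff0000, 0x800000ff));
	return 0;
}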
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.h b/drivers/gpu/drm/sun4i/sun4i_layer.h
new file mode 100644
index 0000000..a2f65d7
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_LAYER_H_
+#define _SUN4I_LAYER_H_
+
+struct sun4i_layer {
+	struct drm_plane	plane;
+	struct sun4i_drv	*drv;
+	int			id;
+};
+
+static inline struct sun4i_layer *
+plane_to_sun4i_layer(struct drm_plane *plane)
+{
+	return container_of(plane, struct sun4i_layer, plane);
+}
+
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm);
+
+#endif /* _SUN4I_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
new file mode 100644
index 0000000..ab64948
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
+
+struct sun4i_rgb {
+	struct drm_connector	connector;
+	struct drm_encoder	encoder;
+
+	struct sun4i_drv	*drv;
+};
+
+static inline struct sun4i_rgb *
+drm_connector_to_sun4i_rgb(struct drm_connector *connector)
+{
+	return container_of(connector, struct sun4i_rgb,
+			    connector);
+}
+
+static inline struct sun4i_rgb *
+drm_encoder_to_sun4i_rgb(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct sun4i_rgb,
+			    encoder);
+}
+
+static int sun4i_rgb_get_modes(struct drm_connector *connector)
+{
+	struct sun4i_rgb *rgb =
+		drm_connector_to_sun4i_rgb(connector);
+	struct sun4i_drv *drv = rgb->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	return drm_panel_get_modes(tcon->panel);
+}
+
+static int sun4i_rgb_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	u32 hsync = mode->hsync_end - mode->hsync_start;
+	u32 vsync = mode->vsync_end - mode->vsync_start;
+
+	DRM_DEBUG_DRIVER("Validating modes...\n");
+
+	if (hsync < 1)
+		return MODE_HSYNC_NARROW;
+
+	if (hsync > 0x3ff)
+		return MODE_HSYNC_WIDE;
+
+	if ((mode->hdisplay < 1) || (mode->htotal < 1))
+		return MODE_H_ILLEGAL;
+
+	if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
+		return MODE_BAD_HVALUE;
+
+	DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
+
+	if (vsync < 1)
+		return MODE_VSYNC_NARROW;
+
+	if (vsync > 0x3ff)
+		return MODE_VSYNC_WIDE;
+
+	if ((mode->vdisplay < 1) || (mode->vtotal < 1))
+		return MODE_V_ILLEGAL;
+
+	if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
+		return MODE_BAD_VVALUE;
+
+	DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+sun4i_rgb_best_encoder(struct drm_connector *connector)
+{
+	struct sun4i_rgb *rgb =
+		drm_connector_to_sun4i_rgb(connector);
+
+	return &rgb->encoder;
+}
+
+static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
+	.get_modes	= sun4i_rgb_get_modes,
+	.mode_valid	= sun4i_rgb_mode_valid,
+	.best_encoder	= sun4i_rgb_best_encoder,
+};
+
+static enum drm_connector_status
+sun4i_rgb_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static void
+sun4i_rgb_connector_destroy(struct drm_connector *connector)
+{
+	struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+	struct sun4i_drv *drv = rgb->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	drm_panel_detach(tcon->panel);
+	drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs sun4i_rgb_con_funcs = {
+	.dpms			= drm_atomic_helper_connector_dpms,
+	.detect			= sun4i_rgb_connector_detect,
+	.fill_modes		= drm_helper_probe_single_connector_modes,
+	.destroy		= sun4i_rgb_connector_destroy,
+	.reset			= drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+};
+
+static int sun4i_rgb_atomic_check(struct drm_encoder *encoder,
+				  struct drm_crtc_state *crtc_state,
+				  struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
+{
+	struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
+	struct sun4i_drv *drv = rgb->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	DRM_DEBUG_DRIVER("Enabling RGB output\n");
+
+	drm_panel_enable(tcon->panel);
+	sun4i_tcon_channel_enable(tcon, 0);
+}
+
+static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
+{
+	struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
+	struct sun4i_drv *drv = rgb->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	DRM_DEBUG_DRIVER("Disabling RGB output\n");
+
+	sun4i_tcon_channel_disable(tcon, 0);
+	drm_panel_disable(tcon->panel);
+}
+
+static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
+	struct sun4i_drv *drv = rgb->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	sun4i_tcon0_mode_set(tcon, mode);
+
+	clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+
+	/* FIXME: This seems to be board specific */
+	clk_set_phase(tcon->dclk, 120);
+}
+
+static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
+	.atomic_check	= sun4i_rgb_atomic_check,
+	.mode_set	= sun4i_rgb_encoder_mode_set,
+	.disable	= sun4i_rgb_encoder_disable,
+	.enable		= sun4i_rgb_encoder_enable,
+};
+
+static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
+	.destroy	= sun4i_rgb_enc_destroy,
+};
+
+int sun4i_rgb_init(struct drm_device *drm)
+{
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_tcon *tcon = drv->tcon;
+	struct sun4i_rgb *rgb;
+	int ret;
+
+	/* If we don't have a panel, there's no point in going on */
+	if (!tcon->panel)
+		return -ENODEV;
+
+	rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
+	if (!rgb)
+		return -ENOMEM;
+	rgb->drv = drv;
+
+	drm_encoder_helper_add(&rgb->encoder,
+			       &sun4i_rgb_enc_helper_funcs);
+	ret = drm_encoder_init(drm,
+			       &rgb->encoder,
+			       &sun4i_rgb_enc_funcs,
+			       DRM_MODE_ENCODER_NONE,
+			       NULL);
+	if (ret) {
+		dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
+		goto err_out;
+	}
+
+	/* The RGB encoder can only work with the TCON channel 0 */
+	rgb->encoder.possible_crtcs = BIT(0);
+
+	drm_connector_helper_add(&rgb->connector,
+				 &sun4i_rgb_con_helper_funcs);
+	ret = drm_connector_init(drm, &rgb->connector,
+				 &sun4i_rgb_con_funcs,
+				 DRM_MODE_CONNECTOR_Unknown);
+	if (ret) {
+		dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
+		goto err_cleanup_connector;
+	}
+
+	drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+
+	drm_panel_attach(tcon->panel, &rgb->connector);
+
+	return 0;
+
+err_cleanup_connector:
+	drm_encoder_cleanup(&rgb->encoder);
+err_out:
+	return ret;
+}
+EXPORT_SYMBOL(sun4i_rgb_init);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.h b/drivers/gpu/drm/sun4i/sun4i_rgb.h
new file mode 100644
index 0000000..7c4da4c
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_RGB_H_
+#define _SUN4I_RGB_H_
+
+int sun4i_rgb_init(struct drm_device *drm);
+
+#endif /* _SUN4I_RGB_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
new file mode 100644
index 0000000..9f19b0e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "sun4i_crtc.h"
+#include "sun4i_dotclock.h"
+#include "sun4i_drv.h"
+#include "sun4i_rgb.h"
+#include "sun4i_tcon.h"
+
+void sun4i_tcon_disable(struct sun4i_tcon *tcon)
+{
+	DRM_DEBUG_DRIVER("Disabling TCON\n");
+
+	/* Disable the TCON */
+	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+			   SUN4I_TCON_GCTL_TCON_ENABLE, 0);
+}
+EXPORT_SYMBOL(sun4i_tcon_disable);
+
+void sun4i_tcon_enable(struct sun4i_tcon *tcon)
+{
+	DRM_DEBUG_DRIVER("Enabling TCON\n");
+
+	/* Enable the TCON */
+	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+			   SUN4I_TCON_GCTL_TCON_ENABLE,
+			   SUN4I_TCON_GCTL_TCON_ENABLE);
+}
+EXPORT_SYMBOL(sun4i_tcon_enable);
+
+void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
+{
+	/* Disable the TCON's channel */
+	if (channel == 0) {
+		regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+				   SUN4I_TCON0_CTL_TCON_ENABLE, 0);
+		clk_disable_unprepare(tcon->dclk);
+	} else if (channel == 1) {
+		regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+				   SUN4I_TCON1_CTL_TCON_ENABLE, 0);
+		clk_disable_unprepare(tcon->sclk1);
+	}
+}
+EXPORT_SYMBOL(sun4i_tcon_channel_disable);
+
+void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
+{
+	/* Enable the TCON's channel */
+	if (channel == 0) {
+		regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+				   SUN4I_TCON0_CTL_TCON_ENABLE,
+				   SUN4I_TCON0_CTL_TCON_ENABLE);
+		clk_prepare_enable(tcon->dclk);
+	} else if (channel == 1) {
+		regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+				   SUN4I_TCON1_CTL_TCON_ENABLE,
+				   SUN4I_TCON1_CTL_TCON_ENABLE);
+		clk_prepare_enable(tcon->sclk1);
+	}
+}
+EXPORT_SYMBOL(sun4i_tcon_channel_enable);
+
+void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
+{
+	u32 mask, val = 0;
+
+	DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis");
+
+	mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) |
+	       SUN4I_TCON_GINT0_VBLANK_ENABLE(1);
+
+	if (enable)
+		val = mask;
+
+	regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG, mask, val);
+}
+EXPORT_SYMBOL(sun4i_tcon_enable_vblank);
+
+static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
+				    int channel)
+{
+	int delay = mode->vtotal - mode->vdisplay;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		delay /= 2;
+
+	if (channel == 1)
+		delay -= 2;
+
+	delay = min(delay, 30);
+
+	DRM_DEBUG_DRIVER("TCON %d clock delay %u\n", channel, delay);
+
+	return delay;
+}
+
+void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
+			  struct drm_display_mode *mode)
+{
+	unsigned int bp, hsync, vsync;
+	u8 clk_delay;
+	u32 val = 0;
+
+	/* Adjust clock delay */
+	clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+			   SUN4I_TCON0_CTL_CLK_DELAY_MASK,
+			   SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
+
+	/* Set the resolution */
+	regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
+		     SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
+		     SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
+
+	/*
+	 * This is called a backporch in the register documentation,
+	 * but it really is the back porch + hsync
+	 */
+	bp = mode->crtc_htotal - mode->crtc_hsync_start;
+	DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
+			 mode->crtc_htotal, bp);
+
+	/* Set horizontal display timings */
+	regmap_write(tcon->regs, SUN4I_TCON0_BASIC1_REG,
+		     SUN4I_TCON0_BASIC1_H_TOTAL(mode->crtc_htotal) |
+		     SUN4I_TCON0_BASIC1_H_BACKPORCH(bp));
+
+	/*
+	 * This is called a backporch in the register documentation,
+	 * but it really is the back porch + vsync
+	 */
+	bp = mode->crtc_vtotal - mode->crtc_vsync_start;
+	DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
+			 mode->crtc_vtotal, bp);
+
+	/* Set vertical display timings */
+	regmap_write(tcon->regs, SUN4I_TCON0_BASIC2_REG,
+		     SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal) |
+		     SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
+
+	/* Set Hsync and Vsync length */
+	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	DRM_DEBUG_DRIVER("Setting HSYNC %d, VSYNC %d\n", hsync, vsync);
+	regmap_write(tcon->regs, SUN4I_TCON0_BASIC3_REG,
+		     SUN4I_TCON0_BASIC3_V_SYNC(vsync) |
+		     SUN4I_TCON0_BASIC3_H_SYNC(hsync));
+
+	/* Setup the polarity of the various signals */
+	if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+		val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+	if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+		val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
+			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+			   val);
+
+	/* Map output pins to channel 0 */
+	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+			   SUN4I_TCON_GCTL_IOMAP_MASK,
+			   SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+	/* Enable the output on the pins */
+	regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0);
+}
+EXPORT_SYMBOL(sun4i_tcon0_mode_set);
+
+void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+			  struct drm_display_mode *mode)
+{
+	unsigned int bp, hsync, vsync;
+	u8 clk_delay;
+	u32 val;
+
+	/* Adjust clock delay */
+	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
+	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+			   SUN4I_TCON1_CTL_CLK_DELAY_MASK,
+			   SUN4I_TCON1_CTL_CLK_DELAY(clk_delay));
+
+	/* Set interlaced mode */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val = SUN4I_TCON1_CTL_INTERLACE_ENABLE;
+	else
+		val = 0;
+	regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+			   SUN4I_TCON1_CTL_INTERLACE_ENABLE,
+			   val);
+
+	/* Set the input resolution */
+	regmap_write(tcon->regs, SUN4I_TCON1_BASIC0_REG,
+		     SUN4I_TCON1_BASIC0_X(mode->crtc_hdisplay) |
+		     SUN4I_TCON1_BASIC0_Y(mode->crtc_vdisplay));
+
+	/* Set the upscaling resolution */
+	regmap_write(tcon->regs, SUN4I_TCON1_BASIC1_REG,
+		     SUN4I_TCON1_BASIC1_X(mode->crtc_hdisplay) |
+		     SUN4I_TCON1_BASIC1_Y(mode->crtc_vdisplay));
+
+	/* Set the output resolution */
+	regmap_write(tcon->regs, SUN4I_TCON1_BASIC2_REG,
+		     SUN4I_TCON1_BASIC2_X(mode->crtc_hdisplay) |
+		     SUN4I_TCON1_BASIC2_Y(mode->crtc_vdisplay));
+
+	/* Set horizontal display timings */
+	bp = mode->crtc_htotal - mode->crtc_hsync_end;
+	DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
+			 mode->htotal, bp);
+	regmap_write(tcon->regs, SUN4I_TCON1_BASIC3_REG,
+		     SUN4I_TCON1_BASIC3_H_TOTAL(mode->crtc_htotal) |
+		     SUN4I_TCON1_BASIC3_H_BACKPORCH(bp));
+
+	/* Set vertical display timings */
+	bp = mode->crtc_vtotal - mode->crtc_vsync_end;
+	DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
+			 mode->vtotal, bp);
+	regmap_write(tcon->regs, SUN4I_TCON1_BASIC4_REG,
+		     SUN4I_TCON1_BASIC4_V_TOTAL(mode->vtotal) |
+		     SUN4I_TCON1_BASIC4_V_BACKPORCH(bp));
+
+	/* Set Hsync and Vsync length */
+	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	DRM_DEBUG_DRIVER("Setting HSYNC %d, VSYNC %d\n", hsync, vsync);
+	regmap_write(tcon->regs, SUN4I_TCON1_BASIC5_REG,
+		     SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
+		     SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+
+	/* Map output pins to channel 1 */
+	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+			   SUN4I_TCON_GCTL_IOMAP_MASK,
+			   SUN4I_TCON_GCTL_IOMAP_TCON1);
+
+	/*
+	 * FIXME: Undocumented bits
+	 */
+	if (tcon->has_mux)
+		regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
+}
+EXPORT_SYMBOL(sun4i_tcon1_mode_set);
+
+static void sun4i_tcon_finish_page_flip(struct drm_device *dev,
+					struct sun4i_crtc *scrtc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (scrtc->event) {
+		drm_crtc_send_vblank_event(&scrtc->crtc, scrtc->event);
+		drm_crtc_vblank_put(&scrtc->crtc);
+		scrtc->event = NULL;
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static irqreturn_t sun4i_tcon_handler(int irq, void *private)
+{
+	struct sun4i_tcon *tcon = private;
+	struct drm_device *drm = tcon->drm;
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_crtc *scrtc = drv->crtc;
+	unsigned int status;
+
+	regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
+
+	if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) |
+			SUN4I_TCON_GINT0_VBLANK_INT(1))))
+		return IRQ_NONE;
+
+	drm_crtc_handle_vblank(&scrtc->crtc);
+	sun4i_tcon_finish_page_flip(drm, scrtc);
+
+	/* Acknowledge the interrupt */
+	regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG,
+			   SUN4I_TCON_GINT0_VBLANK_INT(0) |
+			   SUN4I_TCON_GINT0_VBLANK_INT(1),
+			   0);
+
+	return IRQ_HANDLED;
+}
+
+static int sun4i_tcon_init_clocks(struct device *dev,
+				  struct sun4i_tcon *tcon)
+{
+	tcon->clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(tcon->clk)) {
+		dev_err(dev, "Couldn't get the TCON bus clock\n");
+		return PTR_ERR(tcon->clk);
+	}
+	clk_prepare_enable(tcon->clk);
+
+	tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
+	if (IS_ERR(tcon->sclk0)) {
+		dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
+		return PTR_ERR(tcon->sclk0);
+	}
+
+	tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+	if (IS_ERR(tcon->sclk1)) {
+		dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
+		return PTR_ERR(tcon->sclk1);
+	}
+
+	return sun4i_dclk_create(dev, tcon);
+}
+
+static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
+{
+	sun4i_dclk_free(tcon);
+	clk_disable_unprepare(tcon->clk);
+}
+
+static int sun4i_tcon_init_irq(struct device *dev,
+			       struct sun4i_tcon *tcon)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	int irq, ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+		return irq;
+	}
+
+	ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
+			       dev_name(dev), tcon);
+	if (ret) {
+		dev_err(dev, "Couldn't request the IRQ\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct regmap_config sun4i_tcon_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.max_register	= 0x800,
+};
+
+static int sun4i_tcon_init_regmap(struct device *dev,
+				  struct sun4i_tcon *tcon)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res;
+	void __iomem *regs;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(regs)) {
+		dev_err(dev, "Couldn't map the TCON registers\n");
+		return PTR_ERR(regs);
+	}
+
+	tcon->regs = devm_regmap_init_mmio(dev, regs,
+					   &sun4i_tcon_regmap_config);
+	if (IS_ERR(tcon->regs)) {
+		dev_err(dev, "Couldn't create the TCON regmap\n");
+		return PTR_ERR(tcon->regs);
+	}
+
+	/* Make sure the TCON is disabled and all IRQs are off */
+	regmap_write(tcon->regs, SUN4I_TCON_GCTL_REG, 0);
+	regmap_write(tcon->regs, SUN4I_TCON_GINT0_REG, 0);
+	regmap_write(tcon->regs, SUN4I_TCON_GINT1_REG, 0);
+
+	/* Disable IO lines and set them to tristate */
+	regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, ~0);
+	regmap_write(tcon->regs, SUN4I_TCON1_IO_TRI_REG, ~0);
+
+	return 0;
+}
+
+static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
+{
+	struct device_node *port, *remote, *child;
+	struct device_node *end_node = NULL;
+
+	/* Inputs are listed first, then outputs */
+	port = of_graph_get_port_by_id(node, 1);
+
+	/*
+	 * Our first output is the RGB interface where the panel will
+	 * be connected.
+	 */
+	for_each_child_of_node(port, child) {
+		u32 reg;
+
+		of_property_read_u32(child, "reg", &reg);
+		if (reg == 0)
+			end_node = child;
+	}
+
+	if (!end_node) {
+		DRM_DEBUG_DRIVER("Missing panel endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	remote = of_graph_get_remote_port_parent(end_node);
+	if (!remote) {
+		DRM_DEBUG_DRIVER("Unable to parse remote node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return of_drm_find_panel(remote);
+}
+
+static int sun4i_tcon_bind(struct device *dev, struct device *master,
+			   void *data)
+{
+	struct drm_device *drm = data;
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_tcon *tcon;
+	int ret;
+
+	tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
+	if (!tcon)
+		return -ENOMEM;
+	dev_set_drvdata(dev, tcon);
+	drv->tcon = tcon;
+	tcon->drm = drm;
+
+	if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon"))
+		tcon->has_mux = true;
+
+	tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
+	if (IS_ERR(tcon->lcd_rst)) {
+		dev_err(dev, "Couldn't get our reset line\n");
+		return PTR_ERR(tcon->lcd_rst);
+	}
+
+	/* Make sure our TCON is reset */
+	if (!reset_control_status(tcon->lcd_rst))
+		reset_control_assert(tcon->lcd_rst);
+
+	ret = reset_control_deassert(tcon->lcd_rst);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert our reset line\n");
+		return ret;
+	}
+
+	ret = sun4i_tcon_init_regmap(dev, tcon);
+	if (ret) {
+		dev_err(dev, "Couldn't init our TCON regmap\n");
+		goto err_assert_reset;
+	}
+
+	ret = sun4i_tcon_init_clocks(dev, tcon);
+	if (ret) {
+		dev_err(dev, "Couldn't init our TCON clocks\n");
+		goto err_assert_reset;
+	}
+
+	ret = sun4i_tcon_init_irq(dev, tcon);
+	if (ret) {
+		dev_err(dev, "Couldn't init our TCON interrupts\n");
+		goto err_free_clocks;
+	}
+
+	tcon->panel = sun4i_tcon_find_panel(dev->of_node);
+	if (IS_ERR(tcon->panel)) {
+		dev_info(dev, "No panel found... RGB output disabled\n");
+		return 0;
+	}
+
+	return sun4i_rgb_init(drm);
+
+err_free_clocks:
+	sun4i_tcon_free_clocks(tcon);
+err_assert_reset:
+	reset_control_assert(tcon->lcd_rst);
+	return ret;
+}
+
+static void sun4i_tcon_unbind(struct device *dev, struct device *master,
+			      void *data)
+{
+	struct sun4i_tcon *tcon = dev_get_drvdata(dev);
+
+	sun4i_tcon_free_clocks(tcon);
+}
+
+static struct component_ops sun4i_tcon_ops = {
+	.bind	= sun4i_tcon_bind,
+	.unbind	= sun4i_tcon_unbind,
+};
+
+static int sun4i_tcon_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct drm_panel *panel;
+
+	/*
+	 * If the panel is not ready yet, defer the probe.
+	 */
+	panel = sun4i_tcon_find_panel(node);
+	if (IS_ERR(panel)) {
+		/*
+		 * If we don't have a panel endpoint, just go on
+		 */
+		if (PTR_ERR(panel) != -ENODEV)
+			return -EPROBE_DEFER;
+	}
+
+	return component_add(&pdev->dev, &sun4i_tcon_ops);
+}
+
+static int sun4i_tcon_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &sun4i_tcon_ops);
+
+	return 0;
+}
+
+static const struct of_device_id sun4i_tcon_of_table[] = {
+	{ .compatible = "allwinner,sun5i-a13-tcon" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
+
+static struct platform_driver sun4i_tcon_platform_driver = {
+	.probe		= sun4i_tcon_probe,
+	.remove		= sun4i_tcon_remove,
+	.driver		= {
+		.name		= "sun4i-tcon",
+		.of_match_table	= sun4i_tcon_of_table,
+	},
+};
+module_platform_driver(sun4i_tcon_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Timing Controller Driver");
+MODULE_LICENSE("GPL");
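
To make the timing conversions in sun4i_tcon0_mode_set() above easier to follow, here is a small stand-alone sketch of the same arithmetic; the 800x480 mode below is an assumed example, not a real panel timing.

/*
 * Sketch only: derives the TCON "backporch" and sync widths from a mode.
 */
#include <stdio.h>

struct mode {	/* subset of struct drm_display_mode */
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	struct mode m = {
		.hdisplay = 800, .hsync_start = 840, .hsync_end = 888, .htotal = 928,
		.vdisplay = 480, .vsync_start = 493, .vsync_end = 496, .vtotal = 525,
	};

	/* "backporch" as the TCON expects it: sync pulse + back porch */
	printf("hbp = %d, hsync = %d\n",
	       m.htotal - m.hsync_start, m.hsync_end - m.hsync_start);
	printf("vbp = %d, vsync = %d\n",
	       m.vtotal - m.vsync_start, m.vsync_end - m.vsync_start);
	return 0;
}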
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
new file mode 100644
index 0000000..0e0b11d
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef __SUN4I_TCON_H__
+#define __SUN4I_TCON_H__
+
+#include <drm/drm_crtc.h>
+
+#include <linux/kernel.h>
+#include <linux/reset.h>
+
+#define SUN4I_TCON_GCTL_REG			0x0
+#define SUN4I_TCON_GCTL_TCON_ENABLE			BIT(31)
+#define SUN4I_TCON_GCTL_IOMAP_MASK			BIT(0)
+#define SUN4I_TCON_GCTL_IOMAP_TCON1			(1 << 0)
+#define SUN4I_TCON_GCTL_IOMAP_TCON0			(0 << 0)
+
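+/*
+ * The per-pipe vblank enable bits live in the top half of GINT0 and the
+ * matching status bits in the bottom half, counting down from bits 31
+ * and 15 respectively.
+ */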
+#define SUN4I_TCON_GINT0_REG			0x4
+#define SUN4I_TCON_GINT0_VBLANK_ENABLE(pipe)		BIT(31 - (pipe))
+#define SUN4I_TCON_GINT0_VBLANK_INT(pipe)		BIT(15 - (pipe))
+
+#define SUN4I_TCON_GINT1_REG			0x8
+#define SUN4I_TCON_FRM_CTL_REG			0x10
+
+#define SUN4I_TCON0_CTL_REG			0x40
+#define SUN4I_TCON0_CTL_TCON_ENABLE			BIT(31)
+#define SUN4I_TCON0_CTL_CLK_DELAY_MASK			GENMASK(8, 4)
+#define SUN4I_TCON0_CTL_CLK_DELAY(delay)		((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
+
+#define SUN4I_TCON0_DCLK_REG			0x44
+#define SUN4I_TCON0_DCLK_GATE_BIT			(31)
+#define SUN4I_TCON0_DCLK_DIV_SHIFT			(0)
+#define SUN4I_TCON0_DCLK_DIV_WIDTH			(7)
+
+#define SUN4I_TCON0_BASIC0_REG			0x48
+#define SUN4I_TCON0_BASIC0_X(width)			((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON0_BASIC0_Y(height)			(((height) - 1) & 0xfff)
+
+#define SUN4I_TCON0_BASIC1_REG			0x4c
+#define SUN4I_TCON0_BASIC1_H_TOTAL(total)		((((total) - 1) & 0x1fff) << 16)
+#define SUN4I_TCON0_BASIC1_H_BACKPORCH(bp)		(((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON0_BASIC2_REG			0x50
+#define SUN4I_TCON0_BASIC2_V_TOTAL(total)		((((total) * 2) & 0x1fff) << 16)
+#define SUN4I_TCON0_BASIC2_V_BACKPORCH(bp)		(((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON0_BASIC3_REG			0x54
+#define SUN4I_TCON0_BASIC3_H_SYNC(width)		((((width) - 1) & 0x7ff) << 16)
+#define SUN4I_TCON0_BASIC3_V_SYNC(height)		(((height) - 1) & 0x7ff)
+
+#define SUN4I_TCON0_HV_IF_REG			0x58
+#define SUN4I_TCON0_CPU_IF_REG			0x60
+#define SUN4I_TCON0_CPU_WR_REG			0x64
+#define SUN4I_TCON0_CPU_RD0_REG			0x68
+#define SUN4I_TCON0_CPU_RDA_REG			0x6c
+#define SUN4I_TCON0_TTL0_REG			0x70
+#define SUN4I_TCON0_TTL1_REG			0x74
+#define SUN4I_TCON0_TTL2_REG			0x78
+#define SUN4I_TCON0_TTL3_REG			0x7c
+#define SUN4I_TCON0_TTL4_REG			0x80
+#define SUN4I_TCON0_LVDS_IF_REG			0x84
+#define SUN4I_TCON0_IO_POL_REG			0x88
+#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase)		((phase & 3) << 28)
+#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE		BIT(25)
+#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE		BIT(24)
+
+#define SUN4I_TCON0_IO_TRI_REG			0x8c
+#define SUN4I_TCON0_IO_TRI_HSYNC_DISABLE		BIT(25)
+#define SUN4I_TCON0_IO_TRI_VSYNC_DISABLE		BIT(24)
+#define SUN4I_TCON0_IO_TRI_DATA_PINS_DISABLE(pins)	GENMASK(pins, 0)
+
+#define SUN4I_TCON1_CTL_REG			0x90
+#define SUN4I_TCON1_CTL_TCON_ENABLE			BIT(31)
+#define SUN4I_TCON1_CTL_INTERLACE_ENABLE		BIT(20)
+#define SUN4I_TCON1_CTL_CLK_DELAY_MASK			GENMASK(8, 4)
+#define SUN4I_TCON1_CTL_CLK_DELAY(delay)		((delay << 4) & SUN4I_TCON1_CTL_CLK_DELAY_MASK)
+
+#define SUN4I_TCON1_BASIC0_REG			0x94
+#define SUN4I_TCON1_BASIC0_X(width)			((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON1_BASIC0_Y(height)			(((height) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC1_REG			0x98
+#define SUN4I_TCON1_BASIC1_X(width)			((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON1_BASIC1_Y(height)			(((height) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC2_REG			0x9c
+#define SUN4I_TCON1_BASIC2_X(width)			((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON1_BASIC2_Y(height)			(((height) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC3_REG			0xa0
+#define SUN4I_TCON1_BASIC3_H_TOTAL(total)		((((total) - 1) & 0x1fff) << 16)
+#define SUN4I_TCON1_BASIC3_H_BACKPORCH(bp)		(((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC4_REG			0xa4
+#define SUN4I_TCON1_BASIC4_V_TOTAL(total)		(((total) & 0x1fff) << 16)
+#define SUN4I_TCON1_BASIC4_V_BACKPORCH(bp)		(((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC5_REG			0xa8
+#define SUN4I_TCON1_BASIC5_H_SYNC(width)		((((width) - 1) & 0x3ff) << 16)
+#define SUN4I_TCON1_BASIC5_V_SYNC(height)		(((height) - 1) & 0x3ff)
+
+#define SUN4I_TCON1_IO_POL_REG			0xf0
+#define SUN4I_TCON1_IO_TRI_REG			0xf4
+#define SUN4I_TCON_CEU_CTL_REG			0x100
+#define SUN4I_TCON_CEU_MUL_RR_REG		0x110
+#define SUN4I_TCON_CEU_MUL_RG_REG		0x114
+#define SUN4I_TCON_CEU_MUL_RB_REG		0x118
+#define SUN4I_TCON_CEU_ADD_RC_REG		0x11c
+#define SUN4I_TCON_CEU_MUL_GR_REG		0x120
+#define SUN4I_TCON_CEU_MUL_GG_REG		0x124
+#define SUN4I_TCON_CEU_MUL_GB_REG		0x128
+#define SUN4I_TCON_CEU_ADD_GC_REG		0x12c
+#define SUN4I_TCON_CEU_MUL_BR_REG		0x130
+#define SUN4I_TCON_CEU_MUL_BG_REG		0x134
+#define SUN4I_TCON_CEU_MUL_BB_REG		0x138
+#define SUN4I_TCON_CEU_ADD_BC_REG		0x13c
+#define SUN4I_TCON_CEU_RANGE_R_REG		0x140
+#define SUN4I_TCON_CEU_RANGE_G_REG		0x144
+#define SUN4I_TCON_CEU_RANGE_B_REG		0x148
+#define SUN4I_TCON_MUX_CTRL_REG			0x200
+#define SUN4I_TCON1_FILL_CTL_REG		0x300
+#define SUN4I_TCON1_FILL_BEG0_REG		0x304
+#define SUN4I_TCON1_FILL_END0_REG		0x308
+#define SUN4I_TCON1_FILL_DATA0_REG		0x30c
+#define SUN4I_TCON1_FILL_BEG1_REG		0x310
+#define SUN4I_TCON1_FILL_END1_REG		0x314
+#define SUN4I_TCON1_FILL_DATA1_REG		0x318
+#define SUN4I_TCON1_FILL_BEG2_REG		0x31c
+#define SUN4I_TCON1_FILL_END2_REG		0x320
+#define SUN4I_TCON1_FILL_DATA2_REG		0x324
+#define SUN4I_TCON1_GAMMA_TABLE_REG		0x400
+
+#define SUN4I_TCON_MAX_CHANNELS		2
+
+struct sun4i_tcon {
+	struct drm_device		*drm;
+	struct regmap			*regs;
+
+	/* Main bus clock */
+	struct clk			*clk;
+
+	/* Clocks for the TCON channels */
+	struct clk			*sclk0;
+	struct clk			*sclk1;
+
+	/* Pixel clock */
+	struct clk			*dclk;
+
+	/* Reset control */
+	struct reset_control		*lcd_rst;
+
+	/* Platform adjustments */
+	bool				has_mux;
+
+	struct drm_panel		*panel;
+};
+
+/* Global Control */
+void sun4i_tcon_disable(struct sun4i_tcon *tcon);
+void sun4i_tcon_enable(struct sun4i_tcon *tcon);
+
+/* Channel Control */
+void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel);
+void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
+
+void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
+
+/* Mode Related Controls */
+void sun4i_tcon_switch_interlace(struct sun4i_tcon *tcon,
+				 bool enable);
+void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
+			  struct drm_display_mode *mode);
+void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+			  struct drm_display_mode *mode);
+
+#endif /* __SUN4I_TCON_H__ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
new file mode 100644
index 0000000..bc047f9
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
+
+#define SUN4I_TVE_EN_REG		0x000
+#define SUN4I_TVE_EN_DAC_MAP_MASK		GENMASK(19, 4)
+#define SUN4I_TVE_EN_DAC_MAP(dac, out)		(((out) & 0xf) << (dac + 1) * 4)
+#define SUN4I_TVE_EN_ENABLE			BIT(0)
+
+#define SUN4I_TVE_CFG0_REG		0x004
+#define SUN4I_TVE_CFG0_DAC_CONTROL_54M		BIT(26)
+#define SUN4I_TVE_CFG0_CORE_DATAPATH_54M	BIT(25)
+#define SUN4I_TVE_CFG0_CORE_CONTROL_54M		BIT(24)
+#define SUN4I_TVE_CFG0_YC_EN			BIT(17)
+#define SUN4I_TVE_CFG0_COMP_EN			BIT(16)
+#define SUN4I_TVE_CFG0_RES(x)			((x) & 0xf)
+#define SUN4I_TVE_CFG0_RES_480i			SUN4I_TVE_CFG0_RES(0)
+#define SUN4I_TVE_CFG0_RES_576i			SUN4I_TVE_CFG0_RES(1)
+
+#define SUN4I_TVE_DAC0_REG		0x008
+#define SUN4I_TVE_DAC0_CLOCK_INVERT		BIT(24)
+#define SUN4I_TVE_DAC0_LUMA(x)			(((x) & 3) << 20)
+#define SUN4I_TVE_DAC0_LUMA_0_4			SUN4I_TVE_DAC0_LUMA(3)
+#define SUN4I_TVE_DAC0_CHROMA(x)		(((x) & 3) << 18)
+#define SUN4I_TVE_DAC0_CHROMA_0_75		SUN4I_TVE_DAC0_CHROMA(3)
+#define SUN4I_TVE_DAC0_INTERNAL_DAC(x)		(((x) & 3) << 16)
+#define SUN4I_TVE_DAC0_INTERNAL_DAC_37_5_OHMS	SUN4I_TVE_DAC0_INTERNAL_DAC(3)
+#define SUN4I_TVE_DAC0_DAC_EN(dac)		BIT(dac)
+
+#define SUN4I_TVE_NOTCH_REG		0x00c
+#define SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(dac, x)	((4 - (x)) << (dac * 3))
+
+#define SUN4I_TVE_CHROMA_FREQ_REG	0x010
+
+#define SUN4I_TVE_PORCH_REG		0x014
+#define SUN4I_TVE_PORCH_BACK(x)			((x) << 16)
+#define SUN4I_TVE_PORCH_FRONT(x)		(x)
+
+#define SUN4I_TVE_LINE_REG		0x01c
+#define SUN4I_TVE_LINE_FIRST(x)			((x) << 16)
+#define SUN4I_TVE_LINE_NUMBER(x)		(x)
+
+#define SUN4I_TVE_LEVEL_REG		0x020
+#define SUN4I_TVE_LEVEL_BLANK(x)		((x) << 16)
+#define SUN4I_TVE_LEVEL_BLACK(x)		(x)
+
+#define SUN4I_TVE_DAC1_REG		0x024
+#define SUN4I_TVE_DAC1_AMPLITUDE(dac, x)	((x) << (dac * 8))
+
+#define SUN4I_TVE_DETECT_STA_REG	0x038
+#define SUN4I_TVE_DETECT_STA_DAC(dac)		BIT((dac * 8))
+#define SUN4I_TVE_DETECT_STA_UNCONNECTED		0
+#define SUN4I_TVE_DETECT_STA_CONNECTED			1
+#define SUN4I_TVE_DETECT_STA_GROUND			2
+
+#define SUN4I_TVE_CB_CR_LVL_REG		0x10c
+#define SUN4I_TVE_CB_CR_LVL_CR_BURST(x)		((x) << 8)
+#define SUN4I_TVE_CB_CR_LVL_CB_BURST(x)		(x)
+
+#define SUN4I_TVE_TINT_BURST_PHASE_REG	0x110
+#define SUN4I_TVE_TINT_BURST_PHASE_CHROMA(x)	(x)
+
+#define SUN4I_TVE_BURST_WIDTH_REG	0x114
+#define SUN4I_TVE_BURST_WIDTH_BREEZEWAY(x)	((x) << 16)
+#define SUN4I_TVE_BURST_WIDTH_BURST_WIDTH(x)	((x) << 8)
+#define SUN4I_TVE_BURST_WIDTH_HSYNC_WIDTH(x)	(x)
+
+#define SUN4I_TVE_CB_CR_GAIN_REG	0x118
+#define SUN4I_TVE_CB_CR_GAIN_CR(x)		((x) << 8)
+#define SUN4I_TVE_CB_CR_GAIN_CB(x)		(x)
+
+#define SUN4I_TVE_SYNC_VBI_REG		0x11c
+#define SUN4I_TVE_SYNC_VBI_SYNC(x)		((x) << 16)
+#define SUN4I_TVE_SYNC_VBI_VBLANK(x)		(x)
+
+#define SUN4I_TVE_ACTIVE_LINE_REG	0x124
+#define SUN4I_TVE_ACTIVE_LINE(x)		(x)
+
+#define SUN4I_TVE_CHROMA_REG		0x128
+#define SUN4I_TVE_CHROMA_COMP_GAIN(x)		((x) & 3)
+#define SUN4I_TVE_CHROMA_COMP_GAIN_50		SUN4I_TVE_CHROMA_COMP_GAIN(2)
+
+#define SUN4I_TVE_12C_REG		0x12c
+#define SUN4I_TVE_12C_NOTCH_WIDTH_WIDE		BIT(8)
+#define SUN4I_TVE_12C_COMP_YUV_EN		BIT(0)
+
+#define SUN4I_TVE_RESYNC_REG		0x130
+#define SUN4I_TVE_RESYNC_FIELD			BIT(31)
+#define SUN4I_TVE_RESYNC_LINE(x)		((x) << 16)
+#define SUN4I_TVE_RESYNC_PIXEL(x)		(x)
+
+#define SUN4I_TVE_SLAVE_REG		0x134
+
+#define SUN4I_TVE_WSS_DATA2_REG		0x244
+
+struct color_gains {
+	u16	cb;
+	u16	cr;
+};
+
+struct burst_levels {
+	u16	cb;
+	u16	cr;
+};
+
+struct video_levels {
+	u16	black;
+	u16	blank;
+};
+
+struct resync_parameters {
+	bool	field;
+	u16	line;
+	u16	pixel;
+};
+
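+/*
+ * Timings, blanking levels and color parameters describing one analog
+ * TV standard (see the NTSC and PAL entries of tv_modes below).
+ */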
+struct tv_mode {
+	char		*name;
+
+	u32		mode;
+	u32		chroma_freq;
+	u16		back_porch;
+	u16		front_porch;
+	u16		line_number;
+	u16		vblank_level;
+
+	u32		hdisplay;
+	u16		hfront_porch;
+	u16		hsync_len;
+	u16		hback_porch;
+
+	u32		vdisplay;
+	u16		vfront_porch;
+	u16		vsync_len;
+	u16		vback_porch;
+
+	bool		yc_en;
+	bool		dac3_en;
+	bool		dac_bit25_en;
+
+	struct color_gains		*color_gains;
+	struct burst_levels		*burst_levels;
+	struct video_levels		*video_levels;
+	struct resync_parameters	*resync_params;
+};
+
+struct sun4i_tv {
+	struct drm_connector	connector;
+	struct drm_encoder	encoder;
+
+	struct clk		*clk;
+	struct regmap		*regs;
+	struct reset_control	*reset;
+
+	struct sun4i_drv	*drv;
+};
+
+struct video_levels ntsc_video_levels = {
+	.black = 282,	.blank = 240,
+};
+
+struct video_levels pal_video_levels = {
+	.black = 252,	.blank = 252,
+};
+
+struct burst_levels ntsc_burst_levels = {
+	.cb = 79,	.cr = 0,
+};
+
+struct burst_levels pal_burst_levels = {
+	.cb = 40,	.cr = 40,
+};
+
+struct color_gains ntsc_color_gains = {
+	.cb = 160,	.cr = 160,
+};
+
+struct color_gains pal_color_gains = {
+	.cb = 224,	.cr = 224,
+};
+
+struct resync_parameters ntsc_resync_parameters = {
+	.field = false,	.line = 14,	.pixel = 12,
+};
+
+struct resync_parameters pal_resync_parameters = {
+	.field = true,	.line = 13,	.pixel = 12,
+};
+
+struct tv_mode tv_modes[] = {
+	{
+		.name		= "NTSC",
+		.mode		= SUN4I_TVE_CFG0_RES_480i,
+		.chroma_freq	= 0x21f07c1f,
+		.yc_en		= true,
+		.dac3_en	= true,
+		.dac_bit25_en	= true,
+
+		.back_porch	= 118,
+		.front_porch	= 32,
+		.line_number	= 525,
+
+		.hdisplay	= 720,
+		.hfront_porch	= 18,
+		.hsync_len	= 2,
+		.hback_porch	= 118,
+
+		.vdisplay	= 480,
+		.vfront_porch	= 26,
+		.vsync_len	= 2,
+		.vback_porch	= 17,
+
+		.vblank_level	= 240,
+
+		.color_gains	= &ntsc_color_gains,
+		.burst_levels	= &ntsc_burst_levels,
+		.video_levels	= &ntsc_video_levels,
+		.resync_params	= &ntsc_resync_parameters,
+	},
+	{
+		.name		= "PAL",
+		.mode		= SUN4I_TVE_CFG0_RES_576i,
+		.chroma_freq	= 0x2a098acb,
+
+		.back_porch	= 138,
+		.front_porch	= 24,
+		.line_number	= 625,
+
+		.hdisplay	= 720,
+		.hfront_porch	= 3,
+		.hsync_len	= 2,
+		.hback_porch	= 139,
+
+		.vdisplay	= 576,
+		.vfront_porch	= 28,
+		.vsync_len	= 2,
+		.vback_porch	= 19,
+
+		.vblank_level	= 252,
+
+		.color_gains	= &pal_color_gains,
+		.burst_levels	= &pal_burst_levels,
+		.video_levels	= &pal_video_levels,
+		.resync_params	= &pal_resync_parameters,
+	},
+};
+
+static inline struct sun4i_tv *
+drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct sun4i_tv,
+			    encoder);
+}
+
+static inline struct sun4i_tv *
+drm_connector_to_sun4i_tv(struct drm_connector *connector)
+{
+	return container_of(connector, struct sun4i_tv,
+			    connector);
+}
+
+/*
+ * FIXME: If only the drm_display_mode private field were usable, this
+ * could go away...
+ *
+ * So far, it doesn't seem to be preserved when the mode is passed
+ * to mode_set for some reason.
+ */
+static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
+{
+	int i;
+
+	/* First try to identify the mode by name */
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		struct tv_mode *tv_mode = &tv_modes[i];
+
+		DRM_DEBUG_DRIVER("Comparing mode %s vs %s",
+				 mode->name, tv_mode->name);
+
+		if (!strcmp(mode->name, tv_mode->name))
+			return tv_mode;
+	}
+
+	/* Then by number of lines */
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		struct tv_mode *tv_mode = &tv_modes[i];
+
+		DRM_DEBUG_DRIVER("Comparing mode %s vs %s (X: %d vs %d)",
+				 mode->name, tv_mode->name,
+				 mode->vdisplay, tv_mode->vdisplay);
+
+		if (mode->vdisplay == tv_mode->vdisplay)
+			return tv_mode;
+	}
+
+	return NULL;
+}
+
+static void sun4i_tv_mode_to_drm_mode(struct tv_mode *tv_mode,
+				      struct drm_display_mode *mode)
+{
+	DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
+
+	mode->type = DRM_MODE_TYPE_DRIVER;
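+	/* mode->clock is in kHz: 13500 is the standard 13.5 MHz SD pixel clock */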
+	mode->clock = 13500;
+	mode->flags = DRM_MODE_FLAG_INTERLACE;
+
+	mode->hdisplay = tv_mode->hdisplay;
+	mode->hsync_start = mode->hdisplay + tv_mode->hfront_porch;
+	mode->hsync_end = mode->hsync_start + tv_mode->hsync_len;
+	mode->htotal = mode->hsync_end  + tv_mode->hback_porch;
+
+	mode->vdisplay = tv_mode->vdisplay;
+	mode->vsync_start = mode->vdisplay + tv_mode->vfront_porch;
+	mode->vsync_end = mode->vsync_start + tv_mode->vsync_len;
+	mode->vtotal = mode->vsync_end  + tv_mode->vback_porch;
+}
+
+static int sun4i_tv_atomic_check(struct drm_encoder *encoder,
+				 struct drm_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+static void sun4i_tv_disable(struct drm_encoder *encoder)
+{
+	struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
+	struct sun4i_drv *drv = tv->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	DRM_DEBUG_DRIVER("Disabling the TV Output\n");
+
+	sun4i_tcon_channel_disable(tcon, 1);
+
+	regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+			   SUN4I_TVE_EN_ENABLE,
+			   0);
+	sun4i_backend_disable_color_correction(drv->backend);
+}
+
+static void sun4i_tv_enable(struct drm_encoder *encoder)
+{
+	struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
+	struct sun4i_drv *drv = tv->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+
+	DRM_DEBUG_DRIVER("Enabling the TV Output\n");
+
+	sun4i_backend_apply_color_correction(drv->backend);
+
+	regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+			   SUN4I_TVE_EN_ENABLE,
+			   SUN4I_TVE_EN_ENABLE);
+
+	sun4i_tcon_channel_enable(tcon, 1);
+}
+
+static void sun4i_tv_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
+	struct sun4i_drv *drv = tv->drv;
+	struct sun4i_tcon *tcon = drv->tcon;
+	struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+
+	sun4i_tcon1_mode_set(tcon, mode);
+
+	/* Enable and map the DAC to the output */
+	regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+			   SUN4I_TVE_EN_DAC_MAP_MASK,
+			   SUN4I_TVE_EN_DAC_MAP(0, 1) |
+			   SUN4I_TVE_EN_DAC_MAP(1, 2) |
+			   SUN4I_TVE_EN_DAC_MAP(2, 3) |
+			   SUN4I_TVE_EN_DAC_MAP(3, 4));
+
+	/* Set PAL settings */
+	regmap_write(tv->regs, SUN4I_TVE_CFG0_REG,
+		     tv_mode->mode |
+		     (tv_mode->yc_en ? SUN4I_TVE_CFG0_YC_EN : 0) |
+		     SUN4I_TVE_CFG0_COMP_EN |
+		     SUN4I_TVE_CFG0_DAC_CONTROL_54M |
+		     SUN4I_TVE_CFG0_CORE_DATAPATH_54M |
+		     SUN4I_TVE_CFG0_CORE_CONTROL_54M);
+
+	/* Configure the DAC for a composite output */
+	regmap_write(tv->regs, SUN4I_TVE_DAC0_REG,
+		     SUN4I_TVE_DAC0_DAC_EN(0) |
+		     (tv_mode->dac3_en ? SUN4I_TVE_DAC0_DAC_EN(3) : 0) |
+		     SUN4I_TVE_DAC0_INTERNAL_DAC_37_5_OHMS |
+		     SUN4I_TVE_DAC0_CHROMA_0_75 |
+		     SUN4I_TVE_DAC0_LUMA_0_4 |
+		     SUN4I_TVE_DAC0_CLOCK_INVERT |
+		     (tv_mode->dac_bit25_en ? BIT(25) : 0) |
+		     BIT(30));
+
+	/* Configure the sample delay between DAC0 and the other DACs */
+	regmap_write(tv->regs, SUN4I_TVE_NOTCH_REG,
+		     SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(1, 0) |
+		     SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(2, 0));
+
+	regmap_write(tv->regs, SUN4I_TVE_CHROMA_FREQ_REG,
+		     tv_mode->chroma_freq);
+
+	/* Set the front and back porch */
+	regmap_write(tv->regs, SUN4I_TVE_PORCH_REG,
+		     SUN4I_TVE_PORCH_BACK(tv_mode->back_porch) |
+		     SUN4I_TVE_PORCH_FRONT(tv_mode->front_porch));
+
+	/* Set the lines setup */
+	regmap_write(tv->regs, SUN4I_TVE_LINE_REG,
+		     SUN4I_TVE_LINE_FIRST(22) |
+		     SUN4I_TVE_LINE_NUMBER(tv_mode->line_number));
+
+	regmap_write(tv->regs, SUN4I_TVE_LEVEL_REG,
+		     SUN4I_TVE_LEVEL_BLANK(tv_mode->video_levels->blank) |
+		     SUN4I_TVE_LEVEL_BLACK(tv_mode->video_levels->black));
+
+	regmap_write(tv->regs, SUN4I_TVE_DAC1_REG,
+		     SUN4I_TVE_DAC1_AMPLITUDE(0, 0x18) |
+		     SUN4I_TVE_DAC1_AMPLITUDE(1, 0x18) |
+		     SUN4I_TVE_DAC1_AMPLITUDE(2, 0x18) |
+		     SUN4I_TVE_DAC1_AMPLITUDE(3, 0x18));
+
+	regmap_write(tv->regs, SUN4I_TVE_CB_CR_LVL_REG,
+		     SUN4I_TVE_CB_CR_LVL_CB_BURST(tv_mode->burst_levels->cb) |
+		     SUN4I_TVE_CB_CR_LVL_CR_BURST(tv_mode->burst_levels->cr));
+
+	/* Set burst width for a composite output */
+	regmap_write(tv->regs, SUN4I_TVE_BURST_WIDTH_REG,
+		     SUN4I_TVE_BURST_WIDTH_HSYNC_WIDTH(126) |
+		     SUN4I_TVE_BURST_WIDTH_BURST_WIDTH(68) |
+		     SUN4I_TVE_BURST_WIDTH_BREEZEWAY(22));
+
+	regmap_write(tv->regs, SUN4I_TVE_CB_CR_GAIN_REG,
+		     SUN4I_TVE_CB_CR_GAIN_CB(tv_mode->color_gains->cb) |
+		     SUN4I_TVE_CB_CR_GAIN_CR(tv_mode->color_gains->cr));
+
+	regmap_write(tv->regs, SUN4I_TVE_SYNC_VBI_REG,
+		     SUN4I_TVE_SYNC_VBI_SYNC(0x10) |
+		     SUN4I_TVE_SYNC_VBI_VBLANK(tv_mode->vblank_level));
+
+	regmap_write(tv->regs, SUN4I_TVE_ACTIVE_LINE_REG,
+		     SUN4I_TVE_ACTIVE_LINE(1440));
+
+	/* Set composite chroma gain to 50 % */
+	regmap_write(tv->regs, SUN4I_TVE_CHROMA_REG,
+		     SUN4I_TVE_CHROMA_COMP_GAIN_50);
+
+	regmap_write(tv->regs, SUN4I_TVE_12C_REG,
+		     SUN4I_TVE_12C_COMP_YUV_EN |
+		     SUN4I_TVE_12C_NOTCH_WIDTH_WIDE);
+
+	regmap_write(tv->regs, SUN4I_TVE_RESYNC_REG,
+		     SUN4I_TVE_RESYNC_PIXEL(tv_mode->resync_params->pixel) |
+		     SUN4I_TVE_RESYNC_LINE(tv_mode->resync_params->line) |
+		     (tv_mode->resync_params->field ?
+		      SUN4I_TVE_RESYNC_FIELD : 0));
+
+	regmap_write(tv->regs, SUN4I_TVE_SLAVE_REG, 0);
+
+	clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+}
+
+static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
+	.atomic_check	= sun4i_tv_atomic_check,
+	.disable	= sun4i_tv_disable,
+	.enable		= sun4i_tv_enable,
+	.mode_set	= sun4i_tv_mode_set,
+};
+
+static void sun4i_tv_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs sun4i_tv_funcs = {
+	.destroy	= sun4i_tv_destroy,
+};
+
+static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
+{
+	int i;
+
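+	/* Register one DRM mode per supported TV standard (NTSC and PAL) */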
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		struct drm_display_mode *mode = drm_mode_create(connector->dev);
+		struct tv_mode *tv_mode = &tv_modes[i];
+
+		strcpy(mode->name, tv_mode->name);
+
+		sun4i_tv_mode_to_drm_mode(tv_mode, mode);
+		drm_mode_probed_add(connector, mode);
+	}
+
+	return i;
+}
+
+static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
+				    struct drm_display_mode *mode)
+{
+	/* TODO */
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+sun4i_tv_comp_best_encoder(struct drm_connector *connector)
+{
+	struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
+
+	return &tv->encoder;
+}
+
+static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
+	.get_modes	= sun4i_tv_comp_get_modes,
+	.mode_valid	= sun4i_tv_comp_mode_valid,
+	.best_encoder	= sun4i_tv_comp_best_encoder,
+};
+
+static enum drm_connector_status
+sun4i_tv_comp_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static void
+sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
+	.dpms			= drm_atomic_helper_connector_dpms,
+	.detect			= sun4i_tv_comp_connector_detect,
+	.fill_modes		= drm_helper_probe_single_connector_modes,
+	.destroy		= sun4i_tv_comp_connector_destroy,
+	.reset			= drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+};
+
+static struct regmap_config sun4i_tv_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.max_register	= SUN4I_TVE_WSS_DATA2_REG,
+	.name		= "tv-encoder",
+};
+
+static int sun4i_tv_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = data;
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun4i_tv *tv;
+	struct resource *res;
+	void __iomem *regs;
+	int ret;
+
+	tv = devm_kzalloc(dev, sizeof(*tv), GFP_KERNEL);
+	if (!tv)
+		return -ENOMEM;
+	tv->drv = drv;
+	dev_set_drvdata(dev, tv);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(regs)) {
+		dev_err(dev, "Couldn't map the TV encoder registers\n");
+		return PTR_ERR(regs);
+	}
+
+	tv->regs = devm_regmap_init_mmio(dev, regs,
+					 &sun4i_tv_regmap_config);
+	if (IS_ERR(tv->regs)) {
+		dev_err(dev, "Couldn't create the TV encoder regmap\n");
+		return PTR_ERR(tv->regs);
+	}
+
+	tv->reset = devm_reset_control_get(dev, NULL);
+	if (IS_ERR(tv->reset)) {
+		dev_err(dev, "Couldn't get our reset line\n");
+		return PTR_ERR(tv->reset);
+	}
+
+	ret = reset_control_deassert(tv->reset);
+	if (ret) {
+		dev_err(dev, "Couldn't deassert our reset line\n");
+		return ret;
+	}
+
+	tv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(tv->clk)) {
+		dev_err(dev, "Couldn't get the TV encoder clock\n");
+		ret = PTR_ERR(tv->clk);
+		goto err_assert_reset;
+	}
+	clk_prepare_enable(tv->clk);
+
+	drm_encoder_helper_add(&tv->encoder,
+			       &sun4i_tv_helper_funcs);
+	ret = drm_encoder_init(drm,
+			       &tv->encoder,
+			       &sun4i_tv_funcs,
+			       DRM_MODE_ENCODER_TVDAC,
+			       NULL);
+	if (ret) {
+		dev_err(dev, "Couldn't initialise the TV encoder\n");
+		goto err_disable_clk;
+	}
+
+	tv->encoder.possible_crtcs = BIT(0);
+
+	drm_connector_helper_add(&tv->connector,
+				 &sun4i_tv_comp_connector_helper_funcs);
+	ret = drm_connector_init(drm, &tv->connector,
+				 &sun4i_tv_comp_connector_funcs,
+				 DRM_MODE_CONNECTOR_Composite);
+	if (ret) {
+		dev_err(dev,
+			"Couldn't initialise the Composite connector\n");
+		goto err_cleanup_connector;
+	}
+	tv->connector.interlace_allowed = true;
+
+	drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+
+	return 0;
+
+err_cleanup_connector:
+	drm_encoder_cleanup(&tv->encoder);
+err_disable_clk:
+	clk_disable_unprepare(tv->clk);
+err_assert_reset:
+	reset_control_assert(tv->reset);
+	return ret;
+}
+
+static void sun4i_tv_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct sun4i_tv *tv = dev_get_drvdata(dev);
+
+	drm_connector_cleanup(&tv->connector);
+	drm_encoder_cleanup(&tv->encoder);
+	clk_disable_unprepare(tv->clk);
+}
+
+static struct component_ops sun4i_tv_ops = {
+	.bind	= sun4i_tv_bind,
+	.unbind	= sun4i_tv_unbind,
+};
+
+static int sun4i_tv_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &sun4i_tv_ops);
+}
+
+static int sun4i_tv_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &sun4i_tv_ops);
+
+	return 0;
+}
+
+static const struct of_device_id sun4i_tv_of_table[] = {
+	{ .compatible = "allwinner,sun4i-a10-tv-encoder" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_tv_of_table);
+
+static struct platform_driver sun4i_tv_platform_driver = {
+	.probe		= sun4i_tv_probe,
+	.remove		= sun4i_tv_remove,
+	.driver		= {
+		.name		= "sun4i-tve",
+		.of_match_table	= sun4i_tv_of_table,
+	},
+};
+module_platform_driver(sun4i_tv_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 TV Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fb2b4b0..39940f5 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -434,7 +434,7 @@
 	struct tegra_plane_state *state;
 
 	if (plane->state)
-		__drm_atomic_helper_plane_destroy_state(plane, plane->state);
+		__drm_atomic_helper_plane_destroy_state(plane->state);
 
 	kfree(plane->state);
 	plane->state = NULL;
@@ -466,7 +466,7 @@
 static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
 					     struct drm_plane_state *state)
 {
-	__drm_atomic_helper_plane_destroy_state(plane, state);
+	__drm_atomic_helper_plane_destroy_state(state);
 	kfree(state);
 }
 
@@ -998,7 +998,7 @@
 	struct tegra_dc_state *state;
 
 	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc, crtc->state);
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
 
 	kfree(crtc->state);
 	crtc->state = NULL;
@@ -1034,7 +1034,7 @@
 static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
 					    struct drm_crtc_state *state)
 {
-	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+	__drm_atomic_helper_crtc_destroy_state(state);
 	kfree(state);
 }
 
@@ -1722,7 +1722,6 @@
 	if (err < 0)
 		goto cleanup;
 
-	drm_mode_crtc_set_gamma_size(&dc->base, 256);
 	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
 	/*
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8e6b18c..b59c3bf0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -74,7 +74,7 @@
 }
 
 static int tegra_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool async)
+			       struct drm_atomic_state *state, bool nonblock)
 {
 	struct tegra_drm *tegra = drm->dev_private;
 	int err;
@@ -83,7 +83,7 @@
 	if (err)
 		return err;
 
-	/* serialize outstanding asynchronous commits */
+	/* serialize outstanding nonblocking commits */
 	mutex_lock(&tegra->commit.lock);
 	flush_work(&tegra->commit.work);
 
@@ -95,7 +95,7 @@
 
 	drm_atomic_helper_swap_state(drm, state);
 
-	if (async)
+	if (nonblock)
 		tegra_atomic_schedule(tegra, state);
 	else
 		tegra_atomic_complete(tegra, state);
@@ -180,7 +180,6 @@
 
 	/* syncpoints are used for full 32-bit hardware VBLANK counters */
 	drm->max_vblank_count = 0xffffffff;
-	drm->vblank_disable_allowed = true;
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
@@ -268,12 +267,12 @@
 }
 
 static struct host1x_bo *
-host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+host1x_bo_lookup(struct drm_file *file, u32 handle)
 {
 	struct drm_gem_object *gem;
 	struct tegra_bo *bo;
 
-	gem = drm_gem_object_lookup(drm, file, handle);
+	gem = drm_gem_object_lookup(file, handle);
 	if (!gem)
 		return NULL;
 
@@ -311,11 +310,11 @@
 	if (err < 0)
 		return err;
 
-	dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
+	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
 	if (!dest->cmdbuf.bo)
 		return -ENOENT;
 
-	dest->target.bo = host1x_bo_lookup(drm, file, target);
+	dest->target.bo = host1x_bo_lookup(file, target);
 	if (!dest->target.bo)
 		return -ENOENT;
 
@@ -363,7 +362,7 @@
 			goto fail;
 		}
 
-		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+		bo = host1x_bo_lookup(file, cmdbuf.handle);
 		if (!bo) {
 			err = -ENOENT;
 			goto fail;
@@ -463,7 +462,7 @@
 	struct drm_gem_object *gem;
 	struct tegra_bo *bo;
 
-	gem = drm_gem_object_lookup(drm, file, args->handle);
+	gem = drm_gem_object_lookup(file, args->handle);
 	if (!gem)
 		return -EINVAL;
 
@@ -672,7 +671,7 @@
 		return -EINVAL;
 	}
 
-	gem = drm_gem_object_lookup(drm, file, args->handle);
+	gem = drm_gem_object_lookup(file, args->handle);
 	if (!gem)
 		return -ENOENT;
 
@@ -694,7 +693,7 @@
 	struct tegra_bo *bo;
 	int err = 0;
 
-	gem = drm_gem_object_lookup(drm, file, args->handle);
+	gem = drm_gem_object_lookup(file, args->handle);
 	if (!gem)
 		return -ENOENT;
 
@@ -736,7 +735,7 @@
 	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
 		return -EINVAL;
 
-	gem = drm_gem_object_lookup(drm, file, args->handle);
+	gem = drm_gem_object_lookup(file, args->handle);
 	if (!gem)
 		return -ENOENT;
 
@@ -758,7 +757,7 @@
 	struct drm_gem_object *gem;
 	struct tegra_bo *bo;
 
-	gem = drm_gem_object_lookup(drm, file, args->handle);
+	gem = drm_gem_object_lookup(file, args->handle);
 	if (!gem)
 		return -ENOENT;
 
@@ -878,7 +877,7 @@
 		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
 			   fb->base.id, fb->width, fb->height, fb->depth,
 			   fb->bits_per_pixel,
-			   atomic_read(&fb->refcount.refcount));
+			   drm_framebuffer_read_refcount(fb));
 	}
 
 	mutex_unlock(&drm->mode_config.fb_lock);
@@ -932,7 +931,7 @@
 	.debugfs_cleanup = tegra_debugfs_cleanup,
 #endif
 
-	.gem_free_object = tegra_bo_free_object,
+	.gem_free_object_unlocked = tegra_bo_free_object,
 	.gem_vm_ops = &tegra_bo_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 44e1027..d1239eb 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -745,13 +745,17 @@
 
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
 {
-	struct tegra_dsi_state *state =
-		kzalloc(sizeof(*state), GFP_KERNEL);
+	struct tegra_dsi_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 
-	if (state) {
+	if (!state)
+		return;
+
+	if (connector->state) {
+		__drm_atomic_helper_connector_destroy_state(connector->state);
 		kfree(connector->state);
-		__drm_atomic_helper_connector_reset(connector, &state->base);
 	}
+
+	__drm_atomic_helper_connector_reset(connector, &state->base);
 }
 
 static struct drm_connector_state *
@@ -764,6 +768,9 @@
 	if (!copy)
 		return NULL;
 
+	__drm_atomic_helper_connector_duplicate_state(connector,
+						      &copy->base);
+
 	return &copy->base;
 }
 
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ca84de9..1b12aa7 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -149,7 +149,7 @@
 		unsigned int height = cmd->height / (i ? vsub : 1);
 		unsigned int size, bpp;
 
-		gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
+		gem = drm_gem_object_lookup(file, cmd->handles[i]);
 		if (!gem) {
 			err = -ENXIO;
 			goto unreference;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 3b0d8c3..aa60d99 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -401,7 +401,7 @@
 	struct drm_gem_object *gem;
 	struct tegra_bo *bo;
 
-	gem = drm_gem_object_lookup(drm, file, handle);
+	gem = drm_gem_object_lookup(file, handle);
 	if (!gem) {
 		dev_err(drm->dev, "failed to lookup GEM object\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 051e5e1..79027b1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -707,7 +707,7 @@
 			event = tilcdc_crtc->event;
 			tilcdc_crtc->event = NULL;
 			if (event)
-				drm_send_vblank_event(dev, 0, event);
+				drm_crtc_send_vblank_event(crtc, event);
 
 			spin_unlock_irqrestore(&dev->event_lock, flags);
 		}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 7716f42..6b8c5b3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -342,7 +342,7 @@
 
 	tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
 			0, NULL);
-	if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+	if (tfp410_mod->gpio < 0) {
 		dev_warn(&pdev->dev, "No power down GPIO\n");
 	} else {
 		ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b433b9f..f923258 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -2,9 +2,10 @@
 # Makefile for the drm device driver.  This driver provides support for the
 
 ccflags-y := -Iinclude/drm
-ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
 	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
 	ttm_bo_manager.o ttm_page_alloc_dma.o
+ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 764be36..028ab60 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -34,7 +34,6 @@
 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
 #include <drm/ttm/ttm_placement.h>
 #include <linux/agp_backend.h>
 #include <linux/module.h>
@@ -148,5 +147,3 @@
 	ttm_pool_unpopulate(ttm);
 }
 EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
-
-#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e3daafa..39386f5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -164,7 +164,6 @@
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
@@ -172,12 +171,11 @@
 
 		BUG_ON(!list_empty(&bo->lru));
 
-		man = &bdev->man[bo->mem.mem_type];
-		list_add_tail(&bo->lru, &man->lru);
+		list_add(&bo->lru, bdev->driver->lru_tail(bo));
 		kref_get(&bo->list_kref);
 
 		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-			list_add_tail(&bo->swap, &bo->glob->swap_lru);
+			list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
 			kref_get(&bo->list_kref);
 		}
 	}
@@ -186,8 +184,12 @@
 
 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int put_count = 0;
 
+	if (bdev->driver->lru_removal)
+		bdev->driver->lru_removal(bo);
+
 	if (!list_empty(&bo->swap)) {
 		list_del_init(&bo->swap);
 		++put_count;
@@ -197,11 +199,6 @@
 		++put_count;
 	}
 
-	/*
-	 * TODO: Add a driver hook to delete from
-	 * driver-specific LRU's here.
-	 */
-
 	return put_count;
 }
 
@@ -230,16 +227,32 @@
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int put_count = 0;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
+	if (bdev->driver->lru_removal)
+		bdev->driver->lru_removal(bo);
+
 	put_count = ttm_bo_del_from_lru(bo);
 	ttm_bo_list_ref_sub(bo, put_count, true);
 	ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
+struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
+{
+	return bo->bdev->man[bo->mem.mem_type].lru.prev;
+}
+EXPORT_SYMBOL(ttm_bo_default_lru_tail);
+
+struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
+{
+	return bo->glob->swap_lru.prev;
+}
+EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
+
 /*
  * Call bo->mutex locked.
  */
@@ -443,10 +456,10 @@
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = __ttm_bo_reserve(bo, false, true, NULL);
 
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, false, true)) {
+		if (!ttm_bo_wait(bo, false, true)) {
 			put_count = ttm_bo_del_from_lru(bo);
 
 			spin_unlock(&glob->lru_lock);
@@ -499,7 +512,7 @@
 	int put_count;
 	int ret;
 
-	ret = ttm_bo_wait(bo, false, false, true);
+	ret = ttm_bo_wait(bo, false, true);
 
 	if (ret && !no_wait_gpu) {
 		long lret;
@@ -517,7 +530,7 @@
 			return -EBUSY;
 
 		spin_lock(&glob->lru_lock);
-		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = __ttm_bo_reserve(bo, false, true, NULL);
 
 		/*
 		 * We raced, and lost, someone else holds the reservation now,
@@ -536,7 +549,7 @@
 		 * remove sync_obj with ttm_bo_wait, the wait should be
 		 * finished, and no new wait object should have been added.
 		 */
-		ret = ttm_bo_wait(bo, false, false, true);
+		ret = ttm_bo_wait(bo, false, true);
 		WARN_ON(ret);
 	}
 
@@ -586,11 +599,10 @@
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
+		ret = __ttm_bo_reserve(entry, false, true, NULL);
 		if (remove_all && ret) {
 			spin_unlock(&glob->lru_lock);
-			ret = __ttm_bo_reserve(entry, false, false,
-					       false, NULL);
+			ret = __ttm_bo_reserve(entry, false, false, NULL);
 			spin_lock(&glob->lru_lock);
 		}
 
@@ -676,7 +688,7 @@
 	struct ttm_placement placement;
 	int ret = 0;
 
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -732,7 +744,7 @@
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = __ttm_bo_reserve(bo, false, true, NULL);
 		if (!ret) {
 			if (place && (place->fpfn || place->lpfn)) {
 				/* Don't evict this BO if it's outside of the
@@ -989,13 +1001,19 @@
 	lockdep_assert_held(&bo->resv->lock.base);
 
 	/*
-	 * FIXME: It's possible to pipeline buffer moves.
-	 * Have the driver move function wait for idle when necessary,
-	 * instead of doing it here.
+	 * Don't wait for the BO on initial allocation. This is important when
+	 * the BO has an imported reservation object.
 	 */
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	if (ret)
-		return ret;
+	if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
+		/*
+		 * FIXME: It's possible to pipeline buffer moves.
+		 * Have the driver move function wait for idle when necessary,
+		 * instead of doing it here.
+		 */
+		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+		if (ret)
+			return ret;
+	}
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
@@ -1206,7 +1224,7 @@
 	size_t size = 0;
 
 	size += ttm_round_pot(struct_size);
-	size += PAGE_ALIGN(npages * sizeof(void *));
+	size += ttm_round_pot(npages * sizeof(void *));
 	size += ttm_round_pot(sizeof(struct ttm_tt));
 	return size;
 }
@@ -1220,8 +1238,7 @@
 	size_t size = 0;
 
 	size += ttm_round_pot(struct_size);
-	size += PAGE_ALIGN(npages * sizeof(void *));
-	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
 	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
 	return size;
 }
@@ -1500,7 +1517,6 @@
 	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-	bdev->val_seq = 0;
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1554,7 +1570,7 @@
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
-		bool lazy, bool interruptible, bool no_wait)
+		bool interruptible, bool no_wait)
 {
 	struct reservation_object_list *fobj;
 	struct reservation_object *resv;
@@ -1609,10 +1625,10 @@
 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
 	 */
 
-	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
+	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
 	if (unlikely(ret != 0))
 		return ret;
-	ret = ttm_bo_wait(bo, false, true, no_wait);
+	ret = ttm_bo_wait(bo, true, no_wait);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1642,7 +1658,7 @@
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = __ttm_bo_reserve(bo, false, true, NULL);
 		if (!ret)
 			break;
 	}
@@ -1669,7 +1685,7 @@
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	ret = ttm_bo_wait(bo, false, false, false);
+	ret = ttm_bo_wait(bo, false, false);
 
 	if (unlikely(ret != 0))
 		goto out;
@@ -1741,7 +1757,7 @@
 		return -ERESTARTSYS;
 	if (!ww_mutex_is_locked(&bo->resv->lock))
 		goto out_unlock;
-	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
+	ret = __ttm_bo_reserve(bo, true, false, NULL);
 	if (unlikely(ret != 0))
 		goto out_unlock;
 	__ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ac6fe40..d983155 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -645,7 +645,7 @@
 
 	reservation_object_add_excl_fence(bo->resv, fence);
 	if (evict) {
-		ret = ttm_bo_wait(bo, false, false, false);
+		ret = ttm_bo_wait(bo, false, false);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 06d26dc..3216878 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	ret = ttm_bo_wait(bo, false, false, true);
+	ret = ttm_bo_wait(bo, false, true);
 	if (likely(ret == 0))
 		goto out_unlock;
 
@@ -68,14 +68,14 @@
 			goto out_unlock;
 
 		up_read(&vma->vm_mm->mmap_sem);
-		(void) ttm_bo_wait(bo, false, true, false);
+		(void) ttm_bo_wait(bo, true, false);
 		goto out_unlock;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	ret = ttm_bo_wait(bo, false, true, false);
+	ret = ttm_bo_wait(bo, true, false);
 	if (unlikely(ret != 0))
 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
@@ -108,7 +108,7 @@
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	ret = ttm_bo_reserve(bo, true, true, false, NULL);
+	ret = ttm_bo_reserve(bo, true, true, NULL);
 	if (unlikely(ret != 0)) {
 		if (ret != -EBUSY)
 			return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3820ae9..a80717b 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -112,8 +112,7 @@
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
-				       ticket);
+		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
 		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
 			__ttm_bo_unreserve(bo);
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429..a37de5d 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -48,7 +48,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
 
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 #include <asm/agp.h>
 #endif
 
@@ -219,7 +219,7 @@
 #ifndef CONFIG_X86
 static int set_pages_array_wb(struct page **pages, int addrinarray)
 {
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	int i;
 
 	for (i = 0; i < addrinarray; i++)
@@ -230,7 +230,7 @@
 
 static int set_pages_array_wc(struct page **pages, int addrinarray)
 {
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	int i;
 
 	for (i = 0; i < addrinarray; i++)
@@ -241,7 +241,7 @@
 
 static int set_pages_array_uc(struct page **pages, int addrinarray)
 {
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	int i;
 
 	for (i = 0; i < addrinarray; i++)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 624d941..bef9f6f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,7 +50,7 @@
 #include <linux/kthread.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 #include <asm/agp.h>
 #endif
 
@@ -271,7 +271,7 @@
 #ifndef CONFIG_X86
 static int set_pages_array_wb(struct page **pages, int addrinarray)
 {
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	int i;
 
 	for (i = 0; i < addrinarray; i++)
@@ -282,7 +282,7 @@
 
 static int set_pages_array_wc(struct page **pages, int addrinarray)
 {
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	int i;
 
 	for (i = 0; i < addrinarray; i++)
@@ -293,7 +293,7 @@
 
 static int set_pages_array_uc(struct page **pages, int addrinarray)
 {
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	int i;
 
 	for (i = 0; i < addrinarray; i++)
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 772ec9e..c204089 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -94,7 +94,7 @@
 	struct drm_device *dev = usb_get_intfdata(interface);
 
 	drm_kms_helper_poll_disable(dev);
-	drm_connector_unplug_all(dev);
+	drm_connector_unregister_all(dev);
 	udl_fbdev_unplug(dev);
 	udl_drop_usb(dev);
 	drm_unplug_dev(dev);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 4a064ef..0b03d34 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -81,8 +81,6 @@
 	struct drm_framebuffer base;
 	struct udl_gem_object *obj;
 	bool active_16; /* active on the 16-bit channel */
-	int x1, y1, x2, y2; /* dirty rect */
-	spinlock_t dirty_lock;
 };
 
 #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index fd1eb9d..d5df555 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -77,68 +77,6 @@
 }
 #endif
 
-/*
- * NOTE: fb_defio.c is holding info->fbdefio.mutex
- *   Touching ANY framebuffer memory that triggers a page fault
- *   in fb_defio will cause a deadlock, when it also tries to
- *   grab the same mutex.
- */
-static void udlfb_dpy_deferred_io(struct fb_info *info,
-				  struct list_head *pagelist)
-{
-	struct page *cur;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct udl_fbdev *ufbdev = info->par;
-	struct drm_device *dev = ufbdev->ufb.base.dev;
-	struct udl_device *udl = dev->dev_private;
-	struct urb *urb;
-	char *cmd;
-	cycles_t start_cycles, end_cycles;
-	int bytes_sent = 0;
-	int bytes_identical = 0;
-	int bytes_rendered = 0;
-
-	if (!fb_defio)
-		return;
-
-	start_cycles = get_cycles();
-
-	urb = udl_get_urb(dev);
-	if (!urb)
-		return;
-
-	cmd = urb->transfer_buffer;
-
-	/* walk the written page list and render each to device */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-
-		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
-				     &urb, (char *) info->fix.smem_start,
-				     &cmd, cur->index << PAGE_SHIFT,
-				     cur->index << PAGE_SHIFT,
-				     PAGE_SIZE, &bytes_identical, &bytes_sent))
-			goto error;
-		bytes_rendered += PAGE_SIZE;
-	}
-
-	if (cmd > (char *) urb->transfer_buffer) {
-		/* Send partial buffer remaining before exiting */
-		int len = cmd - (char *) urb->transfer_buffer;
-		udl_submit_urb(dev, urb, len);
-		bytes_sent += len;
-	} else
-		udl_urb_completion(urb);
-
-error:
-	atomic_add(bytes_sent, &udl->bytes_sent);
-	atomic_add(bytes_identical, &udl->bytes_identical);
-	atomic_add(bytes_rendered, &udl->bytes_rendered);
-	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
-		    >> 10)), /* Kcycles */
-		   &udl->cpu_kcycles_used);
-}
-
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height)
 {
@@ -152,9 +90,6 @@
 	struct urb *urb;
 	int aligned_x;
 	int bpp = (fb->base.bits_per_pixel / 8);
-	int x2, y2;
-	bool store_for_later = false;
-	unsigned long flags;
 
 	if (!fb->active_16)
 		return 0;
@@ -180,38 +115,6 @@
 	    (y + height > fb->base.height))
 		return -EINVAL;
 
-	/* if we are in atomic just store the info
-	   can't test inside spin lock */
-	if (in_atomic())
-		store_for_later = true;
-
-	x2 = x + width - 1;
-	y2 = y + height - 1;
-
-	spin_lock_irqsave(&fb->dirty_lock, flags);
-
-	if (fb->y1 < y)
-		y = fb->y1;
-	if (fb->y2 > y2)
-		y2 = fb->y2;
-	if (fb->x1 < x)
-		x = fb->x1;
-	if (fb->x2 > x2)
-		x2 = fb->x2;
-
-	if (store_for_later) {
-		fb->x1 = x;
-		fb->x2 = x2;
-		fb->y1 = y;
-		fb->y2 = y2;
-		spin_unlock_irqrestore(&fb->dirty_lock, flags);
-		return 0;
-	}
-
-	fb->x1 = fb->y1 = INT_MAX;
-	fb->x2 = fb->y2 = 0;
-
-	spin_unlock_irqrestore(&fb->dirty_lock, flags);
 	start_cycles = get_cycles();
 
 	urb = udl_get_urb(dev);
@@ -219,14 +122,14 @@
 		return 0;
 	cmd = urb->transfer_buffer;
 
-	for (i = y; i <= y2 ; i++) {
+	for (i = y; i < height ; i++) {
 		const int line_offset = fb->base.pitches[0] * i;
 		const int byte_offset = line_offset + (x * bpp);
 		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
 		if (udl_render_hline(dev, bpp, &urb,
 				     (char *) fb->obj->vmapping,
 				     &cmd, byte_offset, dev_byte_offset,
-				     (x2 - x + 1) * bpp,
+				     width * bpp,
 				     &bytes_identical, &bytes_sent))
 			goto error;
 	}
@@ -283,36 +186,6 @@
 	return 0;
 }
 
-static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	drm_fb_helper_sys_fillrect(info, rect);
-
-	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
-			  rect->height);
-}
-
-static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	drm_fb_helper_sys_copyarea(info, region);
-
-	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
-			  region->height);
-}
-
-static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	drm_fb_helper_sys_imageblit(info, image);
-
-	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
-			  image->height);
-}
-
 /*
  * It's common for several clients to have framebuffer open simultaneously.
  * e.g. both fbcon and X. Makes things interesting.
@@ -339,7 +212,7 @@
 
 		if (fbdefio) {
 			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-			fbdefio->deferred_io = udlfb_dpy_deferred_io;
+			fbdefio->deferred_io = drm_fb_helper_deferred_io;
 		}
 
 		info->fbdefio = fbdefio;
@@ -379,9 +252,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = udl_fb_fillrect,
-	.fb_copyarea = udl_fb_copyarea,
-	.fb_imageblit = udl_fb_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -458,7 +331,6 @@
 {
 	int ret;
 
-	spin_lock_init(&ufb->dirty_lock);
 	ufb->obj = obj;
 	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
@@ -628,7 +500,7 @@
 	int ret;
 	uint32_t size;
 
-	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d7528e0..818e707 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -217,7 +217,7 @@
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	obj = drm_gem_object_lookup(dev, file, handle);
+	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 5848104..e53df59 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -5,6 +5,7 @@
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
+	select DRM_PANEL
 	help
 	  Choose this option if you have a system that has a Broadcom
 	  VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835.
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 4c6a99f..fb77db7 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -7,6 +7,7 @@
 	vc4_bo.o \
 	vc4_crtc.o \
 	vc4_drv.o \
+	vc4_dpi.o \
 	vc4_kms.o \
 	vc4_gem.o \
 	vc4_hdmi.o \
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 9807bc9..e5a9d3a 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -457,7 +457,7 @@
 	struct drm_vc4_mmap_bo *args = data;
 	struct drm_gem_object *gem_obj;
 
-	gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
 		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 355ee4b..0f18b76 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -49,6 +49,10 @@
 	/* Which HVS channel we're using for our CRTC. */
 	int channel;
 
+	u8 lut_r[256];
+	u8 lut_g[256];
+	u8 lut_b[256];
+
 	struct drm_pending_vblank_event *event;
 };
 
@@ -147,6 +151,46 @@
 	drm_crtc_cleanup(crtc);
 }
 
+static void
+vc4_crtc_lut_load(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	u32 i;
+
+	/* The LUT memory is laid out with each HVS channel in order,
+	 * each of which takes 256 writes for R, 256 for G, then 256
+	 * for B.
+	 */
+	HVS_WRITE(SCALER_GAMADDR,
+		  SCALER_GAMADDR_AUTOINC |
+		  (vc4_crtc->channel * 3 * crtc->gamma_size));
+
+	for (i = 0; i < crtc->gamma_size; i++)
+		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
+	for (i = 0; i < crtc->gamma_size; i++)
+		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
+	for (i = 0; i < crtc->gamma_size; i++)
+		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+}
+
+static void
+vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+		   uint32_t start, uint32_t size)
+{
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	u32 i;
+
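+	/* The HVS gamma SRAM stores 8-bit entries, so keep only the top
+	 * byte of the 16-bit values handed over by the DRM core.
+	 */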
+	for (i = start; i < start + size; i++) {
+		vc4_crtc->lut_r[i] = r[i] >> 8;
+		vc4_crtc->lut_g[i] = g[i] >> 8;
+		vc4_crtc->lut_b[i] = b[i] >> 8;
+	}
+
+	vc4_crtc_lut_load(crtc);
+}
+
 static u32 vc4_get_fifo_full_level(u32 format)
 {
 	static const u32 fifo_len_bytes = 64;
@@ -260,8 +304,14 @@
 
 	HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
 		  SCALER_DISPBKGND_AUTOHS |
+		  SCALER_DISPBKGND_GAMMA |
 		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
 
+	/* Reload the LUT, since the SRAMs would have been disabled if
+	 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
+	 */
+	vc4_crtc_lut_load(crtc);
+
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
 		vc4_crtc_dump_regs(vc4_crtc);
@@ -406,14 +456,6 @@
 
 	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-		  vc4_state->mm.start);
-
-	if (debug_dump_regs) {
-		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
-		vc4_hvs_dump_state(dev);
-	}
-
 	if (crtc->state->event) {
 		unsigned long flags;
 
@@ -423,8 +465,20 @@
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		vc4_crtc->event = crtc->state->event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
 		crtc->state->event = NULL;
+
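+		/*
+		 * Arm the event before writing the new display list address,
+		 * and do the write under event_lock so the vblank handler
+		 * cannot complete the flip before the event is in place.
+		 */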
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	} else {
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+	}
+
+	if (debug_dump_regs) {
+		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+		vc4_hvs_dump_state(dev);
 	}
 }
 
@@ -450,12 +504,17 @@
 {
 	struct drm_crtc *crtc = &vc4_crtc->base;
 	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+	u32 chan = vc4_crtc->channel;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (vc4_crtc->event) {
+	if (vc4_crtc->event &&
+	    (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
 		drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
 		vc4_crtc->event = NULL;
+		drm_crtc_vblank_put(crtc);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -506,6 +565,7 @@
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
+	drm_crtc_vblank_put(crtc);
 	drm_framebuffer_unreference(flip_state->fb);
 	kfree(flip_state);
 
@@ -548,6 +608,8 @@
 		return ret;
 	}
 
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
 	/* Immediately update the plane's legacy fb pointer, so that later
 	 * modeset prep sees the state that will be present when the semaphore
 	 * is released.
@@ -600,7 +662,7 @@
 
 	}
 
-	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+	__drm_atomic_helper_crtc_destroy_state(state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
@@ -613,6 +675,7 @@
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = vc4_crtc_duplicate_state,
 	.atomic_destroy_state = vc4_crtc_destroy_state,
+	.gamma_set = vc4_crtc_gamma_set,
 };
 
 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -711,6 +774,7 @@
 	primary_plane->crtc = crtc;
 	vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
 	vc4_crtc->channel = vc4_crtc->data->hvs_channel;
+	drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
 
 	/* Set up some arbitrary number of planes.  We're not limited
 	 * by a set number of physical registers, just the space in
@@ -751,6 +815,12 @@
 
 	vc4_set_crtc_possible_masks(drm, crtc);
 
+	for (i = 0; i < crtc->gamma_size; i++) {
+		vc4_crtc->lut_r[i] = i;
+		vc4_crtc->lut_g[i] = i;
+		vc4_crtc->lut_b[i] = i;
+	}
+
 	platform_set_drvdata(pdev, vc4_crtc);
 
 	return 0;
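The .gamma_set hook wired up above is reached through the legacy gamma ioctl. A minimal userspace sketch of programming the new 256-entry LUT via libdrm is shown below; it assumes libdrm's drmModeCrtcSetGamma() and a placeholder crtc_id, and is illustrative only, not part of this series.

    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Minimal sketch: load an identity ramp into the vc4 gamma LUT.
     * The driver keeps only the top 8 bits of each 16-bit entry
     * (r[i] >> 8), matching vc4_crtc_gamma_set() above.
     */
    static int set_identity_gamma(int fd, uint32_t crtc_id)
    {
    	uint16_t r[256], g[256], b[256];
    	int i;

    	for (i = 0; i < 256; i++)
    		r[i] = g[i] = b[i] = i << 8;

    	return drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);
    }
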
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index d76ad10..245115d 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -17,6 +17,7 @@
 
 static const struct drm_info_list vc4_debugfs_list[] = {
 	{"bo_stats", vc4_bo_stats_debugfs, 0},
+	{"dpi_regs", vc4_dpi_debugfs_regs, 0},
 	{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
 	{"hvs_regs", vc4_hvs_debugfs_regs, 0},
 	{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
new file mode 100644
index 0000000..9817dbf
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 DPI module
+ *
+ * The VC4 DPI hardware supports MIPI DPI type 4 and Nokia ViSSI
+ * signals, which are routed out to GPIO0-27 with the ALT2 function.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "drm_panel.h"
+#include "linux/clk.h"
+#include "linux/component.h"
+#include "linux/of_graph.h"
+#include "linux/of_platform.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define DPI_C			0x00
+# define DPI_OUTPUT_ENABLE_MODE		BIT(16)
+
+/* The order field takes the incoming 24 bit RGB from the pixel valve
+ * and shuffles the 3 channels.
+ */
+# define DPI_ORDER_MASK			VC4_MASK(15, 14)
+# define DPI_ORDER_SHIFT		14
+# define DPI_ORDER_RGB			0
+# define DPI_ORDER_BGR			1
+# define DPI_ORDER_GRB			2
+# define DPI_ORDER_BRG			3
+
+/* The format field takes the ORDER-shuffled pixel valve data and
+ * formats it onto the output lines.
+ */
+# define DPI_FORMAT_MASK		VC4_MASK(13, 11)
+# define DPI_FORMAT_SHIFT		11
+/* This define is named in the hardware, but actually just outputs 0. */
+# define DPI_FORMAT_9BIT_666_RGB	0
+/* Outputs 00000000rrrrrggggggbbbbb */
+# define DPI_FORMAT_16BIT_565_RGB_1	1
+/* Outputs 000rrrrr00gggggg000bbbbb */
+# define DPI_FORMAT_16BIT_565_RGB_2	2
+/* Outputs 00rrrrr000gggggg00bbbbb0 */
+# define DPI_FORMAT_16BIT_565_RGB_3	3
+/* Outputs 000000rrrrrrggggggbbbbbb */
+# define DPI_FORMAT_18BIT_666_RGB_1	4
+/* Outputs 00rrrrrr00gggggg00bbbbbb */
+# define DPI_FORMAT_18BIT_666_RGB_2	5
+/* Outputs rrrrrrrrggggggggbbbbbbbb */
+# define DPI_FORMAT_24BIT_888_RGB	6
+
+/* Reverses the polarity of the corresponding signal */
+# define DPI_PIXEL_CLK_INVERT		BIT(10)
+# define DPI_HSYNC_INVERT		BIT(9)
+# define DPI_VSYNC_INVERT		BIT(8)
+# define DPI_OUTPUT_ENABLE_INVERT	BIT(7)
+
+/* Outputs the signal on the falling clock edge instead of the rising edge. */
+# define DPI_HSYNC_NEGATE		BIT(6)
+# define DPI_VSYNC_NEGATE		BIT(5)
+# define DPI_OUTPUT_ENABLE_NEGATE	BIT(4)
+
+/* Disables the signal */
+# define DPI_HSYNC_DISABLE		BIT(3)
+# define DPI_VSYNC_DISABLE		BIT(2)
+# define DPI_OUTPUT_ENABLE_DISABLE	BIT(1)
+
+/* Power gate to the device, full reset at 0 -> 1 transition */
+# define DPI_ENABLE			BIT(0)
+
+/* All other registers besides DPI_C return the ID */
+#define DPI_ID			0x04
+# define DPI_ID_VALUE		0x00647069
+
+/* General DPI hardware state. */
+struct vc4_dpi {
+	struct platform_device *pdev;
+
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_panel *panel;
+
+	void __iomem *regs;
+
+	struct clk *pixel_clock;
+	struct clk *core_clock;
+};
+
+#define DPI_READ(offset) readl(dpi->regs + (offset))
+#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+
+/* VC4 DPI encoder KMS struct */
+struct vc4_dpi_encoder {
+	struct vc4_encoder base;
+	struct vc4_dpi *dpi;
+};
+
+static inline struct vc4_dpi_encoder *
+to_vc4_dpi_encoder(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct vc4_dpi_encoder, base.base);
+}
+
+/* VC4 DPI connector KMS struct */
+struct vc4_dpi_connector {
+	struct drm_connector base;
+	struct vc4_dpi *dpi;
+
+	/* Since the connector is attached to just the one encoder,
+	 * this is the reference to it so we can do the best_encoder()
+	 * hook.
+	 */
+	struct drm_encoder *encoder;
+};
+
+static inline struct vc4_dpi_connector *
+to_vc4_dpi_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct vc4_dpi_connector, base);
+}
+
+#define DPI_REG(reg) { reg, #reg }
+static const struct {
+	u32 reg;
+	const char *name;
+} dpi_regs[] = {
+	DPI_REG(DPI_C),
+	DPI_REG(DPI_ID),
+};
+
+static void vc4_dpi_dump_regs(struct vc4_dpi *dpi)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
+		DRM_INFO("0x%04x (%s): 0x%08x\n",
+			 dpi_regs[i].reg, dpi_regs[i].name,
+			 DPI_READ(dpi_regs[i].reg));
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_dpi *dpi = vc4->dpi;
+	int i;
+
+	if (!dpi)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
+		seq_printf(m, "%s (0x%04x): 0x%08x\n",
+			   dpi_regs[i].name, dpi_regs[i].reg,
+			   DPI_READ(dpi_regs[i].reg));
+	}
+
+	return 0;
+}
+#endif
+
+static enum drm_connector_status
+vc4_dpi_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct vc4_dpi_connector *vc4_connector =
+		to_vc4_dpi_connector(connector);
+	struct vc4_dpi *dpi = vc4_connector->dpi;
+
+	if (dpi->panel)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static void vc4_dpi_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
+{
+	struct vc4_dpi_connector *vc4_connector =
+		to_vc4_dpi_connector(connector);
+	struct vc4_dpi *dpi = vc4_connector->dpi;
+
+	if (dpi->panel)
+		return drm_panel_get_modes(dpi->panel);
+
+	return 0;
+}
+
+static struct drm_encoder *
+vc4_dpi_connector_best_encoder(struct drm_connector *connector)
+{
+	struct vc4_dpi_connector *dpi_connector =
+		to_vc4_dpi_connector(connector);
+	return dpi_connector->encoder;
+}
+
+static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = vc4_dpi_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = vc4_dpi_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
+	.get_modes = vc4_dpi_connector_get_modes,
+	.best_encoder = vc4_dpi_connector_best_encoder,
+};
+
+static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
+						    struct vc4_dpi *dpi)
+{
+	struct drm_connector *connector = NULL;
+	struct vc4_dpi_connector *dpi_connector;
+	int ret = 0;
+
+	dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
+				     GFP_KERNEL);
+	if (!dpi_connector) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	connector = &dpi_connector->base;
+
+	dpi_connector->encoder = dpi->encoder;
+	dpi_connector->dpi = dpi;
+
+	drm_connector_init(dev, connector, &vc4_dpi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DPI);
+	drm_connector_helper_add(connector, &vc4_dpi_connector_helper_funcs);
+
+	connector->polled = 0;
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_mode_connector_attach_encoder(connector, dpi->encoder);
+
+	return connector;
+
+ fail:
+	if (connector)
+		vc4_dpi_connector_destroy(connector);
+
+	return ERR_PTR(ret);
+}
+
+static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+	struct vc4_dpi *dpi = vc4_encoder->dpi;
+
+	drm_panel_disable(dpi->panel);
+
+	clk_disable_unprepare(dpi->pixel_clock);
+
+	drm_panel_unprepare(dpi->panel);
+}
+
+static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct drm_display_mode *mode = &encoder->crtc->mode;
+	struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+	struct vc4_dpi *dpi = vc4_encoder->dpi;
+	u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
+	int ret;
+
+	ret = drm_panel_prepare(dpi->panel);
+	if (ret) {
+		DRM_ERROR("Panel failed to prepare\n");
+		return;
+	}
+
+	if (dpi->connector->display_info.num_bus_formats) {
+		u32 bus_format = dpi->connector->display_info.bus_formats[0];
+
+		switch (bus_format) {
+		case MEDIA_BUS_FMT_RGB888_1X24:
+			dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+					       DPI_FORMAT);
+			break;
+		case MEDIA_BUS_FMT_BGR888_1X24:
+			dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+					       DPI_FORMAT);
+			dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+			break;
+		case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+			dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
+					       DPI_FORMAT);
+			break;
+		case MEDIA_BUS_FMT_RGB666_1X18:
+			dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
+					       DPI_FORMAT);
+			break;
+		case MEDIA_BUS_FMT_RGB565_1X16:
+			dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
+					       DPI_FORMAT);
+			break;
+		default:
+			DRM_ERROR("Unknown media bus format %d\n", bus_format);
+			break;
+		}
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		dpi_c |= DPI_HSYNC_INVERT;
+	else if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+		dpi_c |= DPI_HSYNC_DISABLE;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		dpi_c |= DPI_VSYNC_INVERT;
+	else if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+		dpi_c |= DPI_VSYNC_DISABLE;
+
+	DPI_WRITE(DPI_C, dpi_c);
+
+	ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
+	if (ret)
+		DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+	ret = clk_prepare_enable(dpi->pixel_clock);
+	if (ret)
+		DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+	ret = drm_panel_enable(dpi->panel);
+	if (ret) {
+		DRM_ERROR("Panel failed to enable\n");
+		drm_panel_unprepare(dpi->panel);
+		return;
+	}
+}
+
+static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
+	.disable = vc4_dpi_encoder_disable,
+	.enable = vc4_dpi_encoder_enable,
+};
+
+static const struct of_device_id vc4_dpi_dt_match[] = {
+	{ .compatible = "brcm,bcm2835-dpi", .data = NULL },
+	{}
+};
+
+/* Walks the OF graph to find the panel node and then asks DRM to look
+ * up the panel.
+ */
+static struct drm_panel *vc4_dpi_get_panel(struct device *dev)
+{
+	struct device_node *endpoint, *panel_node;
+	struct device_node *np = dev->of_node;
+	struct drm_panel *panel;
+
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_err(dev, "no endpoint to fetch DPI panel\n");
+		return NULL;
+	}
+
+	/* don't proceed if we have an endpoint but no panel_node tied to it */
+	panel_node = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
+	if (!panel_node) {
+		dev_err(dev, "no valid panel node\n");
+		return NULL;
+	}
+
+	panel = of_drm_find_panel(panel_node);
+	of_node_put(panel_node);
+
+	return panel;
+}
+
+static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
+	struct vc4_dpi *dpi;
+	struct vc4_dpi_encoder *vc4_dpi_encoder;
+	int ret;
+
+	dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+	if (!dpi)
+		return -ENOMEM;
+
+	vc4_dpi_encoder = devm_kzalloc(dev, sizeof(*vc4_dpi_encoder),
+				       GFP_KERNEL);
+	if (!vc4_dpi_encoder)
+		return -ENOMEM;
+	vc4_dpi_encoder->base.type = VC4_ENCODER_TYPE_DPI;
+	vc4_dpi_encoder->dpi = dpi;
+	dpi->encoder = &vc4_dpi_encoder->base.base;
+
+	dpi->pdev = pdev;
+	dpi->regs = vc4_ioremap_regs(pdev, 0);
+	if (IS_ERR(dpi->regs))
+		return PTR_ERR(dpi->regs);
+
+	vc4_dpi_dump_regs(dpi);
+
+	if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
+		dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
+			DPI_READ(DPI_ID), DPI_ID_VALUE);
+		return -ENODEV;
+	}
+
+	dpi->core_clock = devm_clk_get(dev, "core");
+	if (IS_ERR(dpi->core_clock)) {
+		ret = PTR_ERR(dpi->core_clock);
+		if (ret != -EPROBE_DEFER)
+			DRM_ERROR("Failed to get core clock: %d\n", ret);
+		return ret;
+	}
+	dpi->pixel_clock = devm_clk_get(dev, "pixel");
+	if (IS_ERR(dpi->pixel_clock)) {
+		ret = PTR_ERR(dpi->pixel_clock);
+		if (ret != -EPROBE_DEFER)
+			DRM_ERROR("Failed to get pixel clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(dpi->core_clock);
+	if (ret)
+		DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+
+	dpi->panel = vc4_dpi_get_panel(dev);
+
+	drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
+			 DRM_MODE_ENCODER_DPI, NULL);
+	drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
+
+	dpi->connector = vc4_dpi_connector_init(drm, dpi);
+	if (IS_ERR(dpi->connector)) {
+		ret = PTR_ERR(dpi->connector);
+		goto err_destroy_encoder;
+	}
+
+	if (dpi->panel)
+		drm_panel_attach(dpi->panel, dpi->connector);
+
+	dev_set_drvdata(dev, dpi);
+
+	vc4->dpi = dpi;
+
+	return 0;
+
+err_destroy_encoder:
+	drm_encoder_cleanup(dpi->encoder);
+	clk_disable_unprepare(dpi->core_clock);
+	return ret;
+}
+
+static void vc4_dpi_unbind(struct device *dev, struct device *master,
+			   void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
+	struct vc4_dpi *dpi = dev_get_drvdata(dev);
+
+	if (dpi->panel)
+		drm_panel_detach(dpi->panel);
+
+	vc4_dpi_connector_destroy(dpi->connector);
+	drm_encoder_cleanup(dpi->encoder);
+
+	clk_disable_unprepare(dpi->core_clock);
+
+	vc4->dpi = NULL;
+}
+
+static const struct component_ops vc4_dpi_ops = {
+	.bind   = vc4_dpi_bind,
+	.unbind = vc4_dpi_unbind,
+};
+
+static int vc4_dpi_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &vc4_dpi_ops);
+}
+
+static int vc4_dpi_dev_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &vc4_dpi_ops);
+	return 0;
+}
+
+struct platform_driver vc4_dpi_driver = {
+	.probe = vc4_dpi_dev_probe,
+	.remove = vc4_dpi_dev_remove,
+	.driver = {
+		.name = "vc4_dpi",
+		.of_match_table = vc4_dpi_dt_match,
+	},
+};
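The DPI_C setup in vc4_dpi_encoder_enable() packs values such as DPI_FORMAT_24BIT_888_RGB into the register with VC4_SET_FIELD() and the *_MASK/*_SHIFT pairs defined at the top of the file. As a hedged illustration of that convention (not the driver's actual macro definition), the packing amounts to:

    /* Illustration only, assuming the usual mask-and-shift convention
     * implied by DPI_FORMAT_MASK (bits 13:11) and DPI_FORMAT_SHIFT (11).
     */
    #define FIELD_PACK(value, field) (((value) << field##_SHIFT) & field##_MASK)

    /* e.g. FIELD_PACK(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT) sets
     * bits 13:11 of DPI_C to 6. */
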
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 109b106..250ed7e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -66,12 +66,12 @@
 };
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
-	DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
 			  DRM_ROOT_ONLY),
 };
@@ -81,6 +81,7 @@
 			    DRIVER_ATOMIC |
 			    DRIVER_GEM |
 			    DRIVER_HAVE_IRQ |
+			    DRIVER_RENDER |
 			    DRIVER_PRIME),
 	.lastclose = vc4_lastclose,
 	.irq_handler = vc4_irq,
@@ -90,7 +91,7 @@
 
 	.enable_vblank = vc4_enable_vblank,
 	.disable_vblank = vc4_disable_vblank,
-	.get_vblank_counter = drm_vblank_count,
+	.get_vblank_counter = drm_vblank_no_hw_counter,
 
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = vc4_debugfs_init,
@@ -257,6 +258,7 @@
 
 static struct platform_driver *const component_drivers[] = {
 	&vc4_hdmi_driver,
+	&vc4_dpi_driver,
 	&vc4_crtc_driver,
 	&vc4_hvs_driver,
 	&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fa2ad15..37cac59 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -16,6 +16,7 @@
 	struct vc4_hvs *hvs;
 	struct vc4_crtc *crtc[3];
 	struct vc4_v3d *v3d;
+	struct vc4_dpi *dpi;
 
 	struct drm_fbdev_cma *fbdev;
 
@@ -422,6 +423,10 @@
 /* vc4_drv.c */
 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
 
+/* vc4_dpi.c */
+extern struct platform_driver vc4_dpi_driver;
+int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
+
 /* vc4_gem.c */
 void vc4_gem_init(struct drm_device *dev);
 void vc4_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 8d4384f..46899d6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -822,7 +822,7 @@
 	if (args->pad != 0)
 		return -EINVAL;
 
-	gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
 		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d8b8649..fd2644d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -573,7 +573,7 @@
 err_unprepare_pix:
 	clk_disable_unprepare(hdmi->pixel_clock);
 err_put_i2c:
-	put_device(&vc4->hdmi->ddc->dev);
+	put_device(&hdmi->ddc->dev);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4718ae5..861a623 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -93,7 +93,7 @@
  * vc4_atomic_commit - commit validated state object
  * @dev: DRM device
  * @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
  *
  * This function commits a state object pre-validated with drm_atomic_helper_check().
  * This can still fail when e.g. the framebuffer reservation fails. For
@@ -104,7 +104,7 @@
  */
 static int vc4_atomic_commit(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async)
+			     bool nonblock)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
@@ -117,10 +117,18 @@
 		return -ENOMEM;
 
 	/* Make sure that any outstanding modesets have finished. */
-	ret = down_interruptible(&vc4->async_modeset);
-	if (ret) {
-		kfree(c);
-		return ret;
+	if (nonblock) {
+		ret = down_trylock(&vc4->async_modeset);
+		if (ret) {
+			kfree(c);
+			return -EBUSY;
+		}
+	} else {
+		ret = down_interruptible(&vc4->async_modeset);
+		if (ret) {
+			kfree(c);
+			return ret;
+		}
 	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -170,7 +178,7 @@
 	 * current layout.
 	 */
 
-	if (async) {
+	if (nonblock) {
 		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
 				   vc4_atomic_complete_commit_seqno_cb);
 	} else {
@@ -207,8 +215,6 @@
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.async_page_flip = true;
 
-	dev->vblank_disable_allowed = true;
-
 	drm_mode_config_reset(dev);
 
 	vc4->fbdev = drm_fbdev_cma_init(dev, 32,
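The async -> nonblock rename and the down_trylock() path above follow the DRM core's nonblocking atomic commit support: a nonblocking commit that races an outstanding vc4 modeset now returns -EBUSY instead of sleeping on the async_modeset semaphore. A hedged userspace sketch of requesting such a commit through libdrm (req is assumed to be an already populated atomic request):

    #include <xf86drmMode.h>

    /* Illustrative only: request a nonblocking commit; on vc4 this maps
     * to vc4_atomic_commit() being called with nonblock == true.
     */
    static int commit_nonblocking(int fd, drmModeAtomicReq *req)
    {
    	return drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
    }
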
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 7b0c72a..4037b52 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -208,7 +208,7 @@
 	}
 
 	kfree(vc4_state->dlist);
-	__drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
+	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
 	kfree(state);
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index bf42a8e..f99eece 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -341,6 +341,10 @@
 #define SCALER_DISPLACT0                        0x00000030
 #define SCALER_DISPLACT1                        0x00000034
 #define SCALER_DISPLACT2                        0x00000038
+#define SCALER_DISPLACTX(x)			(SCALER_DISPLACT0 +	\
+						 (x) * (SCALER_DISPLACT1 - \
+							SCALER_DISPLACT0))
+
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE		BIT(31)
 # define SCALER_DISPCTRLX_RESET			BIT(30)
@@ -390,6 +394,12 @@
 #define SCALER_DISPBASE2                        0x0000006c
 #define SCALER_DISPALPHA2                       0x00000070
 #define SCALER_GAMADDR                          0x00000078
+# define SCALER_GAMADDR_AUTOINC			BIT(31)
+/* Enables all gamma ramp SRAMs, not just those of CRTCs with gamma
+ * enabled.
+ */
+# define SCALER_GAMADDR_SRAMENB			BIT(30)
+
 #define SCALER_GAMDATA                          0x000000e0
 #define SCALER_DLIST_START                      0x00002000
 #define SCALER_DLIST_SIZE                       0x00004000
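SCALER_DISPLACTX() computes the per-channel DISPLACT register address from the stride between the first two entries, so the existing DISPLACT0/1/2 offsets remain the single source of truth:

    /* Worked example from the offsets above:
     * stride = SCALER_DISPLACT1 - SCALER_DISPLACT0 = 0x34 - 0x30 = 4
     * SCALER_DISPLACTX(0) = 0x30, SCALER_DISPLACTX(1) = 0x34,
     * SCALER_DISPLACTX(2) = 0x38 (= SCALER_DISPLACT2)
     */
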
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c503a84..341f9be 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -89,7 +89,6 @@
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->base.dev;
 	loff_t num_pages;
 	pgoff_t page_offset;
 	int ret;
@@ -103,12 +102,8 @@
 	if (page_offset > num_pages)
 		return VM_FAULT_SIGBUS;
 
-	mutex_lock(&dev->struct_mutex);
-
 	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
 			     obj->pages[page_offset]);
-
-	mutex_unlock(&dev->struct_mutex);
 	switch (ret) {
 	case 0:
 		return VM_FAULT_NOPAGE;
@@ -154,6 +149,10 @@
 	if (err)
 		goto out;
 
+	err = vgem_gem_get_pages(obj);
+	if (err)
+		goto out;
+
 	err = drm_gem_handle_create(file, gem_object, handle);
 	if (err)
 		goto handle_out;
@@ -201,37 +200,23 @@
 	int ret = 0;
 	struct drm_gem_object *obj;
 
-	mutex_lock(&dev->struct_mutex);
-	obj = drm_gem_object_lookup(dev, file, handle);
-	if (!obj) {
-		ret = -ENOENT;
-		goto unlock;
-	}
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
 
-	if (!drm_vma_node_has_offset(&obj->vma_node)) {
-		ret = drm_gem_create_mmap_offset(obj);
-		if (ret)
-			goto unref;
-	}
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto unref;
 
 	BUG_ON(!obj->filp);
 
 	obj->filp->private_data = obj;
 
-	ret = vgem_gem_get_pages(to_vgem_bo(obj));
-	if (ret)
-		goto fail_get_pages;
-
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 
-	goto unref;
-
-fail_get_pages:
-	drm_gem_free_mmap_offset(obj);
 unref:
-	drm_gem_object_unreference(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 5fd1fd0..d4305da 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -38,13 +38,6 @@
 #define XRES_MAX  8192
 #define YRES_MAX  8192
 
-static void virtio_gpu_crtc_gamma_set(struct drm_crtc *crtc,
-				      u16 *red, u16 *green, u16 *blue,
-				      uint32_t start, uint32_t size)
-{
-	/* TODO */
-}
-
 static void
 virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
 		       struct virtio_gpu_output *output)
@@ -75,7 +68,7 @@
 	}
 
 	/* lookup the cursor */
-	gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	gobj = drm_gem_object_lookup(file_priv, handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
@@ -173,7 +166,6 @@
 static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
 	.cursor_set2            = virtio_gpu_crtc_cursor_set,
 	.cursor_move            = virtio_gpu_crtc_cursor_move,
-	.gamma_set              = virtio_gpu_crtc_gamma_set,
 	.set_config             = drm_atomic_helper_set_config,
 	.destroy                = drm_crtc_cleanup,
 
@@ -428,7 +420,6 @@
 		return PTR_ERR(plane);
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL,
 				  &virtio_gpu_crtc_funcs, NULL);
-	drm_mode_crtc_set_gamma_size(crtc, 256);
 	drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
 	plane->crtc = crtc;
 
@@ -456,7 +447,7 @@
 	int ret;
 
 	/* lookup object associated with res handle */
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (!obj)
 		return ERR_PTR(-EINVAL);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 7f898cf..3cc7afa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -42,10 +42,8 @@
 
 static int virtio_gpu_probe(struct virtio_device *vdev)
 {
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && virtio_gpu_modeset == -1)
 		return -EINVAL;
-#endif
 
 	if (virtio_gpu_modeset == 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 8f486f4..0a54f43 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -400,7 +400,7 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
 			struct virtio_gpu_device *qdev =
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1feb7ce..336a57f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -130,7 +130,7 @@
 	struct drm_gem_object *gobj;
 	struct virtio_gpu_object *obj;
 	BUG_ON(!offset_p);
-	gobj = drm_gem_object_lookup(dev, file_priv, handle);
+	gobj = drm_gem_object_lookup(file_priv, handle);
 	if (gobj == NULL)
 		return -ENOENT;
 	obj = gem_to_virtio_gpu_obj(gobj);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b4de18e..c046903 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -133,8 +133,7 @@
 		}
 
 		for (i = 0; i < exbuf->num_bo_handles; i++) {
-			gobj = drm_gem_object_lookup(dev,
-						     drm_file, bo_handles[i]);
+			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
 			if (!gobj) {
 				drm_free_large(bo_handles);
 				drm_free_large(buflist);
@@ -345,7 +344,7 @@
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 
-	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
+	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
@@ -374,7 +373,7 @@
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
 
-	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+	gobj = drm_gem_object_lookup(file, args->bo_handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
@@ -418,7 +417,7 @@
 	int ret;
 	u32 offset = args->offset;
 
-	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+	gobj = drm_gem_object_lookup(file, args->bo_handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
@@ -464,7 +463,7 @@
 	int ret;
 	bool nowait = false;
 
-	gobj = drm_gem_object_lookup(dev, file, args->handle);
+	gobj = drm_gem_object_lookup(file, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f300eba..1483dae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -155,10 +155,10 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
 	if (unlikely(r != 0))
 		return r;
-	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	r = ttm_bo_wait(&bo->tbo, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
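The repetitive one-line edits in this and the following vmwgfx hunks track a TTM API change rather than any behavioural fix. Read from the diff itself (the parameter names are my reading, not quoted from ttm_bo_api.h), the calls change roughly as follows:

    /* old: ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket)
     * new: ttm_bo_reserve(bo, interruptible, no_wait, ticket)
     *
     * old: ttm_bo_wait(bo, lazy, interruptible, no_wait)
     * new: ttm_bo_wait(bo, interruptible, no_wait)
     */
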
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 9fd924c..a058081 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -426,6 +426,8 @@
 	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
 	.move_notify = &virtio_gpu_bo_move_notify,
 	.swap_notify = &virtio_gpu_bo_swap_notify,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index d281575..473d004 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,6 +8,6 @@
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
-	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 3329f62..78b75ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -839,7 +839,7 @@
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-	ttm_bo_wait(bo, false, false, false);
+	ttm_bo_wait(bo, false, false);
 }
 
 
@@ -857,4 +857,6 @@
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 092ea81..265c81e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -421,9 +421,9 @@
 	}
 
 	bo = &buf->base;
-	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
 
-	ret = ttm_bo_wait(old_bo, false, false, false);
+	ret = ttm_bo_wait(old_bo, false, false);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed waiting for cotable unbind.\n");
 		goto out_wait;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 299925a..9b078a4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -56,7 +56,7 @@
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -98,7 +98,7 @@
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
@@ -174,7 +174,7 @@
 		return ret;
 
 	vmw_execbuf_release_pinned_bo(dev_priv);
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
@@ -225,7 +225,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6cbb7d4..9fcd820 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -44,6 +44,12 @@
 #define VMW_MIN_INITIAL_WIDTH 800
 #define VMW_MIN_INITIAL_HEIGHT 600
 
+#ifndef VMWGFX_GIT_VERSION
+#define VMWGFX_GIT_VERSION "Unknown"
+#endif
+
+#define VMWGFX_REPO "In Tree"
+
 
 /**
  * Fully encoded drm commands. Might move to vmw_drm.h
@@ -326,7 +332,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
 	BUG_ON(ret != 0);
 	vmw_bo_pin_reserved(vbo, true);
 
@@ -613,6 +619,7 @@
 	uint32_t svga_id;
 	enum vmw_res_type i;
 	bool refuse_dma = false;
+	char host_log[100] = {0};
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -628,6 +635,7 @@
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
+	mutex_init(&dev_priv->global_kms_state_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
 	spin_lock_init(&dev_priv->hw_lock);
@@ -873,6 +881,16 @@
 
 	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 
+	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
+		VMWGFX_REPO, VMWGFX_GIT_VERSION);
+	vmw_host_log(host_log);
+
+	memset(host_log, 0, sizeof(host_log));
+	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
+		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
+		VMWGFX_DRIVER_PATCHLEVEL);
+	vmw_host_log(host_log);
+
 	if (dev_priv->enable_fb) {
 		vmw_fifo_resource_inc(dev_priv);
 		vmw_svga_enable(dev_priv);
@@ -1530,10 +1548,8 @@
 {
 	int ret;
 
-#ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force())
 		return -EINVAL;
-#endif
 
 	ret = drm_pci_init(&driver, &vmw_pci_driver);
 	if (ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 019a6ca..1980e2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -412,6 +412,7 @@
 	struct drm_property *implicit_placement_property;
 	unsigned num_implicit;
 	struct vmw_framebuffer *implicit_fb;
+	struct mutex global_kms_state_mutex;
 
 	/*
 	 * Context and surface management.
@@ -1234,4 +1235,10 @@
 {
 	WRITE_ONCE(*addr, value);
 }
+
+/**
+ * Add vmw_msg module function
+ */
+extern int vmw_host_log(const char *log);
+
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 4742ec4..55231cc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -98,7 +98,7 @@
 	kmap_offset = 0;
 	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
+	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return -EINVAL;
@@ -318,7 +318,7 @@
 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 	kmap_num = (64*64*4) >> PAGE_SHIFT;
 
-	ret = ttm_bo_reserve(bo, true, false, false, NULL);
+	ret = ttm_bo_reserve(bo, true, false, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("reserve failed\n");
 		return;
@@ -1859,7 +1859,7 @@
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 
-	ttm_bo_reserve(bo, false, false, interruptible, NULL);
+	ttm_bo_reserve(bo, false, false, NULL);
 	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
 					 validate_as_mob);
 	if (ret)
@@ -2143,13 +2143,13 @@
 void vmw_kms_del_active(struct vmw_private *dev_priv,
 			struct vmw_display_unit *du)
 {
-	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
-
+	mutex_lock(&dev_priv->global_kms_state_mutex);
 	if (du->active_implicit) {
 		if (--(dev_priv->num_implicit) == 0)
 			dev_priv->implicit_fb = NULL;
 		du->active_implicit = false;
 	}
+	mutex_unlock(&dev_priv->global_kms_state_mutex);
 }
 
 /**
@@ -2165,8 +2165,7 @@
 			struct vmw_display_unit *du,
 			struct vmw_framebuffer *vfb)
 {
-	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
-
+	mutex_lock(&dev_priv->global_kms_state_mutex);
 	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
 
 	if (!du->active_implicit && du->is_implicit) {
@@ -2174,6 +2173,7 @@
 		du->active_implicit = true;
 		dev_priv->num_implicit++;
 	}
+	mutex_unlock(&dev_priv->global_kms_state_mutex);
 }
 
 /**
@@ -2190,16 +2190,13 @@
 			    struct drm_crtc *crtc)
 {
 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+	bool ret;
 
-	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+	mutex_lock(&dev_priv->global_kms_state_mutex);
+	ret = !du->is_implicit || dev_priv->num_implicit == 1;
+	mutex_unlock(&dev_priv->global_kms_state_mutex);
 
-	if (!du->is_implicit)
-		return true;
-
-	if (dev_priv->num_implicit != 1)
-		return false;
-
-	return true;
+	return ret;
 }
 
 /**
@@ -2214,16 +2211,18 @@
 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 	struct vmw_framebuffer *vfb;
 
-	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+	mutex_lock(&dev_priv->global_kms_state_mutex);
 
 	if (!du->is_implicit)
-		return;
+		goto out_unlock;
 
 	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
 	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
 		     dev_priv->implicit_fb != vfb);
 
 	dev_priv->implicit_fb = vfb;
+out_unlock:
+	mutex_unlock(&dev_priv->global_kms_state_mutex);
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 23db160..b6126a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -222,7 +222,7 @@
 	if (bo) {
 		int ret;
 
-		ret = ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = ttm_bo_reserve(bo, false, true, NULL);
 		BUG_ON(ret != 0);
 
 		vmw_fence_single_bo(bo, NULL);
@@ -262,7 +262,7 @@
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
 	if (unlikely(ret != 0))
@@ -357,7 +357,7 @@
 			vmw_takedown_otable_base(dev_priv, i,
 						 &batch->otables[i]);
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
 
 	vmw_fence_single_bo(bo, NULL);
@@ -440,7 +440,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
 
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
@@ -545,7 +545,7 @@
 	const struct vmw_sg_table *vsgt;
 	int ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
 
 	vsgt = vmw_bo_sg_table(bo);
@@ -595,7 +595,7 @@
 	struct ttm_buffer_object *bo = mob->pt_bo;
 
 	if (bo) {
-		ret = ttm_bo_reserve(bo, false, true, false, NULL);
+		ret = ttm_bo_reserve(bo, false, true, NULL);
 		/*
 		 * No one else should be using this buffer.
 		 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
new file mode 100644
index 0000000..6de283c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <asm/hypervisor.h>
+#include "drmP.h"
+#include "vmwgfx_msg.h"
+
+
+#define MESSAGE_STATUS_SUCCESS  0x0001
+#define MESSAGE_STATUS_DORECV   0x0002
+#define MESSAGE_STATUS_CPT      0x0010
+#define MESSAGE_STATUS_HB       0x0080
+
+#define RPCI_PROTOCOL_NUM       0x49435052
+#define GUESTMSG_FLAG_COOKIE    0x80000000
+
+#define RETRIES                 3
+
+#define VMW_HYPERVISOR_MAGIC    0x564D5868
+#define VMW_HYPERVISOR_PORT     0x5658
+#define VMW_HYPERVISOR_HB_PORT  0x5659
+
+#define VMW_PORT_CMD_MSG        30
+#define VMW_PORT_CMD_HB_MSG     0
+#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
+
+#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
+
+static u32 vmw_msg_enabled = 1;
+
+enum rpc_msg_type {
+	MSG_TYPE_OPEN,
+	MSG_TYPE_SENDSIZE,
+	MSG_TYPE_SENDPAYLOAD,
+	MSG_TYPE_RECVSIZE,
+	MSG_TYPE_RECVPAYLOAD,
+	MSG_TYPE_RECVSTATUS,
+	MSG_TYPE_CLOSE,
+};
+
+struct rpc_channel {
+	u16 channel_id;
+	u32 cookie_high;
+	u32 cookie_low;
+};
+
+
+
+/**
+ * vmw_open_channel
+ *
+ * @channel: RPC channel
+ * @protocol: protocol to open the channel with (e.g. RPCI_PROTOCOL_NUM)
+ *
+ * Returns: 0 on success
+ */
+static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
+{
+	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
+
+	VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
+		(protocol | GUESTMSG_FLAG_COOKIE), si, di,
+		VMW_HYPERVISOR_PORT,
+		VMW_HYPERVISOR_MAGIC,
+		eax, ebx, ecx, edx, si, di);
+
+	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+		return -EINVAL;
+
+	channel->channel_id  = HIGH_WORD(edx);
+	channel->cookie_high = si;
+	channel->cookie_low  = di;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_close_channel
+ *
+ * @channel: RPC channel
+ *
+ * Returns: 0 on success
+ */
+static int vmw_close_channel(struct rpc_channel *channel)
+{
+	unsigned long eax, ebx, ecx, edx, si, di;
+
+	/* Set up additional parameters */
+	si  = channel->cookie_high;
+	di  = channel->cookie_low;
+
+	VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
+		0, si, di,
+		(VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+		VMW_HYPERVISOR_MAGIC,
+		eax, ebx, ecx, edx, si, di);
+
+	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_send_msg: Sends a message to the host
+ *
+ * @channel: RPC channel
+ * @logmsg: NULL terminated string
+ *
+ * Returns: 0 on success
+ */
+static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
+{
+	unsigned long eax, ebx, ecx, edx, si, di, bp;
+	size_t msg_len = strlen(msg);
+	int retries = 0;
+
+
+	while (retries < RETRIES) {
+		retries++;
+
+		/* Set up additional parameters */
+		si  = channel->cookie_high;
+		di  = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_SENDSIZE,
+			msg_len, si, di,
+			VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
+		    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+			/* Expected success + high-bandwidth. Give up. */
+			return -EINVAL;
+		}
+
+		/* Send msg */
+		si  = (uintptr_t) msg;
+		di  = channel->cookie_low;
+		bp  = channel->cookie_high;
+
+		VMW_PORT_HB_OUT(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			msg_len, si, di,
+			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
+			return 0;
+		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+			/* A checkpoint occurred. Retry. */
+			continue;
+		} else {
+			break;
+		}
+	}
+
+	return -EINVAL;
+}
+
+
+
+/**
+ * vmw_recv_msg: Receives a message from the host
+ *
+ * Note:  It is the caller's responsibility to call kfree() on msg.
+ *
+ * @channel:  channel opened by vmw_open_channel
+ * @msg:  [OUT] message received from the host
+ * @msg_len: [OUT] length of the message received from the host
+ */
+static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+			size_t *msg_len)
+{
+	unsigned long eax, ebx, ecx, edx, si, di, bp;
+	char *reply;
+	size_t reply_len;
+	int retries = 0;
+
+
+	*msg_len = 0;
+	*msg = NULL;
+
+	while (retries < RETRIES) {
+		retries++;
+
+		/* Set up additional parameters */
+		si  = channel->cookie_high;
+		di  = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_RECVSIZE,
+			0, si, di,
+			(VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+			VMW_HYPERVISOR_MAGIC,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
+		    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+			DRM_ERROR("Failed to get reply size\n");
+			return -EINVAL;
+		}
+
+		/* No reply available.  This is okay. */
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
+			return 0;
+
+		reply_len = ebx;
+		reply     = kzalloc(reply_len + 1, GFP_KERNEL);
+		if (reply == NULL) {
+			DRM_ERROR("Cannot allocate memory for reply\n");
+			return -ENOMEM;
+		}
+
+
+		/* Receive buffer */
+		si  = channel->cookie_high;
+		di  = (uintptr_t) reply;
+		bp  = channel->cookie_low;
+
+		VMW_PORT_HB_IN(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			reply_len, si, di,
+			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			kfree(reply);
+
+			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+				/* A checkpoint occurred. Retry. */
+				continue;
+			}
+
+			return -EINVAL;
+		}
+
+		reply[reply_len] = '\0';
+
+
+		/* Ack buffer */
+		si  = channel->cookie_high;
+		di  = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
+			MESSAGE_STATUS_SUCCESS, si, di,
+			(VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+			VMW_HYPERVISOR_MAGIC,
+			eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			kfree(reply);
+
+			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
+				/* A checkpoint occurred. Retry. */
+				continue;
+			}
+
+			return -EINVAL;
+		}
+
+		break;
+	}
+
+	*msg_len = reply_len;
+	*msg     = reply;
+
+	return 0;
+}
+
+
+/**
+ * vmw_host_get_guestinfo: Gets a GuestInfo parameter
+ *
+ * Gets the value of a  GuestInfo.* parameter.  The value returned will be in
+ * a string, and it is up to the caller to post-process.
+ *
+ * @guest_info_param:  Parameter to get, e.g. GuestInfo.svga.gl3
+ * @buffer: if NULL, *length will contain the reply size.
+ * @length: size of @buffer.  Set to size of reply upon return
+ *
+ * Returns: 0 on success
+ */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+			   char *buffer, size_t *length)
+{
+	struct rpc_channel channel;
+	char *msg, *reply = NULL;
+	size_t msg_len, reply_len = 0;
+	int ret = 0;
+
+
+	if (!vmw_msg_enabled)
+		return -ENODEV;
+
+	if (!guest_info_param || !length)
+		return -EINVAL;
+
+	msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
+	msg = kzalloc(msg_len, GFP_KERNEL);
+	if (msg == NULL) {
+		DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+		return -ENOMEM;
+	}
+
+	sprintf(msg, "info-get %s", guest_info_param);
+
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
+	    vmw_send_msg(&channel, msg) ||
+	    vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
+	    vmw_close_channel(&channel)) {
+		DRM_ERROR("Failed to get %s", guest_info_param);
+
+		ret = -EINVAL;
+	}
+
+	if (buffer && reply && reply_len > 0) {
+		/* Remove the reply code, which occupies the first 2
+		 * characters of the reply.
+		 */
+		reply_len = max(reply_len - 2, (size_t) 0);
+		reply_len = min(reply_len, *length);
+
+		if (reply_len > 0)
+			memcpy(buffer, reply + 2, reply_len);
+	}
+
+	*length = reply_len;
+
+	kfree(reply);
+	kfree(msg);
+
+	return ret;
+}
+
+
+
+/**
+ * vmw_host_log: Sends a log message to the host
+ *
+ * @log: NULL terminated string
+ *
+ * Returns: 0 on success
+ */
+int vmw_host_log(const char *log)
+{
+	struct rpc_channel channel;
+	char *msg;
+	int msg_len;
+	int ret = 0;
+
+
+	if (!vmw_msg_enabled)
+		return -ENODEV;
+
+	if (!log)
+		return ret;
+
+	msg_len = strlen(log) + strlen("log ") + 1;
+	msg = kzalloc(msg_len, GFP_KERNEL);
+	if (msg == NULL) {
+		DRM_ERROR("Cannot allocate memory for log message\n");
+		return -ENOMEM;
+	}
+
+	sprintf(msg, "log %s", log);
+
+	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
+	    vmw_send_msg(&channel, msg) ||
+	    vmw_close_channel(&channel)) {
+		DRM_ERROR("Failed to send log\n");
+
+		ret = -EINVAL;
+	}
+
+	kfree(msg);
+
+	return ret;
+}
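Beyond the two vmw_host_log() calls added to vmwgfx_drv.c, the new helpers can also query host-side GuestInfo values. A hedged usage sketch, written against the kernel-doc above (the parameter name is the doc's own example, and the snippet assumes driver context where DRM_INFO and the helper's declaration are available):

    /* Illustration only: query a GuestInfo parameter from the host. */
    static void example_query(void)
    {
    	char value[64];
    	size_t len = sizeof(value);

    	if (vmw_host_get_guestinfo("GuestInfo.svga.gl3", value, &len) == 0 &&
    	    len > 0)
    		DRM_INFO("host reports %.*s\n", (int)len, value);
    }
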
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
new file mode 100644
index 0000000..557a033
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2016, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * Based on code from vmware.c and vmmouse.c.
+ * Author:
+ *   Sinclair Yeh <syeh@vmware.com>
+ */
+#ifndef _VMWGFX_MSG_H
+#define _VMWGFX_MSG_H
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel.  Should never
+ * execute on bare metal hardware.  The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last two parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ebx: [IN] Message Len, through EBX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @port_num: [IN] port number + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si:  [OUT]
+ * @di:  [OUT]
+ */
+#define VMW_PORT(cmd, in_ebx, in_si, in_di,	\
+		 port_num, magic,		\
+		 eax, ebx, ecx, edx, si, di)	\
+({						\
+	asm volatile ("inl %%dx, %%eax;" :	\
+		"=a"(eax),			\
+		"=b"(ebx),			\
+		"=c"(ecx),			\
+		"=d"(edx),			\
+		"=S"(si),			\
+		"=D"(di) :			\
+		"a"(magic),			\
+		"b"(in_ebx),			\
+		"c"(cmd),			\
+		"d"(port_num),			\
+		"S"(in_si),			\
+		"D"(in_di) :			\
+		"memory");			\
+})
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel.  Should never
+ * execute on bare metal hardware.  The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last 3 parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ecx: [IN] Message Len, through ECX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @port_num: [IN] port number + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @bp:  [IN]
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si:  [OUT]
+ * @di:  [OUT]
+ */
+#ifdef __x86_64__
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
+			port_num, magic, bp,		\
+			eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %%rbp;"			\
+		"mov %12, %%rbp;"			\
+		"rep outsb;"				\
+		"pop %%rbp;" :				\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(port_num),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"r"(bp) :				\
+		"memory", "cc");			\
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di,	\
+		       port_num, magic, bp,		\
+		       eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %%rbp;"			\
+		"mov %12, %%rbp;"			\
+		"rep insb;"				\
+		"pop %%rbp" :				\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(port_num),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"r"(bp) :				\
+		"memory", "cc");			\
+})
+
+#else
+
+/* In the 32-bit version of this macro, we use "m" because there is no
+ * more register left for bp
+ */
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
+			port_num, magic, bp,		\
+			eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %%ebp;"			\
+		"mov %12, %%ebp;"			\
+		"rep outsb;"				\
+		"pop %%ebp;" :				\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(port_num),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"m"(bp) :				\
+		"memory", "cc");			\
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di,	\
+		       port_num, magic, bp,		\
+		       eax, ebx, ecx, edx, si, di)	\
+({							\
+	asm volatile ("push %%ebp;"			\
+		"mov %12, %%ebp;"			\
+		"rep insb;"				\
+		"pop %%ebp" :				\
+		"=a"(eax),				\
+		"=b"(ebx),				\
+		"=c"(ecx),				\
+		"=d"(edx),				\
+		"=S"(si),				\
+		"=D"(di) :				\
+		"a"(magic),				\
+		"b"(cmd),				\
+		"c"(in_ecx),				\
+		"d"(port_num),				\
+		"S"(in_si),				\
+		"D"(in_di),				\
+		"m"(bp) :				\
+		"memory", "cc");			\
+})
+#endif /* #if __x86_64__ */
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e57667c..6a328d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -129,7 +129,7 @@
 	if (res->backup) {
 		struct ttm_buffer_object *bo = &res->backup->base;
 
-		ttm_bo_reserve(bo, false, false, false, NULL);
+		ttm_bo_reserve(bo, false, false, NULL);
 		if (!list_empty(&res->mob_head) &&
 		    res->func->unbind != NULL) {
 			struct ttm_validate_buffer val_buf;
@@ -1512,7 +1512,7 @@
 			list_del_init(&res->mob_head);
 		}
 
-		(void) ttm_bo_wait(bo, false, false, false);
+		(void) ttm_bo_wait(bo, false, false);
 	}
 }
 
@@ -1605,7 +1605,7 @@
 		if (fence != NULL)
 			vmw_fence_obj_unreference(&fence);
 
-		(void) ttm_bo_wait(bo, false, false, false);
+		(void) ttm_bo_wait(bo, false, false);
 	} else
 		mutex_unlock(&dev_priv->binding_mutex);
 
@@ -1717,8 +1717,7 @@
 		if (res->backup) {
 			vbo = res->backup;
 
-			ttm_bo_reserve(&vbo->base, interruptible, false, false,
-				       NULL);
+			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
 			if (!vbo->pin_count) {
 				ret = ttm_bo_validate
 					(&vbo->base,
@@ -1773,7 +1772,7 @@
 	if (--res->pin_count == 0 && res->backup) {
 		struct vmw_dma_buffer *vbo = res->backup;
 
-		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		ttm_bo_reserve(&vbo->base, false, false, NULL);
 		vmw_bo_pin_reserved(vbo, false);
 		ttm_bo_unreserve(&vbo->base);
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 0ea22fd..b74eae2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -285,14 +285,17 @@
 	}
 
 	/* Only one active implicit frame-buffer at a time. */
+	mutex_lock(&dev_priv->global_kms_state_mutex);
 	if (sou->base.is_implicit &&
 	    dev_priv->implicit_fb && vfb &&
 	    !(dev_priv->num_implicit == 1 &&
 	      sou->base.active_implicit) &&
 	    dev_priv->implicit_fb != vfb) {
+		mutex_unlock(&dev_priv->global_kms_state_mutex);
 		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
 		return -EINVAL;
 	}
+	mutex_unlock(&dev_priv->global_kms_state_mutex);
 
 	/* since they always map one to one these are safe */
 	connector = &sou->base.connector;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index fd47547..92f8b1d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -988,7 +988,7 @@
 	if (unlikely(ret != 0))
 		goto out;
 
-	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	ret = ttm_bo_reserve(&buf->base, false, true, NULL);
 	if (unlikely(ret != 0))
 		goto no_reserve;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b949102..9ca818f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -553,12 +553,15 @@
 	}
 
 	/* Only one active implicit frame-buffer at a time. */
+	mutex_lock(&dev_priv->global_kms_state_mutex);
 	if (!turning_off && stdu->base.is_implicit && dev_priv->implicit_fb &&
 	    !(dev_priv->num_implicit == 1 && stdu->base.active_implicit)
 	    && dev_priv->implicit_fb != vfb) {
+		mutex_unlock(&dev_priv->global_kms_state_mutex);
 		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
 		return -EINVAL;
 	}
+	mutex_unlock(&dev_priv->global_kms_state_mutex);
 
 	/* Since they always map one to one these are safe */
 	connector = &stdu->base.connector;
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 498b37e..e1e31e9 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -85,7 +85,7 @@
 	err = devm_request_irq(host->dev, host->intr_syncpt_irq,
 			       syncpt_thresh_isr, IRQF_SHARED,
 			       "host1x_syncpt", host);
-	if (IS_ERR_VALUE(err)) {
+	if (err < 0) {
 		WARN_ON(1);
 		return err;
 	}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index abb98c7..99dcacf 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -997,7 +997,7 @@
 };
 
 /* These must be in the order of the corresponding device tree port nodes */
-static const struct ipu_platform_reg client_reg[] = {
+static struct ipu_platform_reg client_reg[] = {
 	{
 		.pdata = {
 			.csi = 0,
@@ -1048,7 +1048,7 @@
 	mutex_unlock(&ipu_client_id_mutex);
 
 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
-		const struct ipu_platform_reg *reg = &client_reg[i];
+		struct ipu_platform_reg *reg = &client_reg[i];
 		struct platform_device *pdev;
 		struct device_node *of_node;
 
@@ -1070,6 +1070,7 @@
 
 		pdev->dev.parent = dev;
 
+		reg->pdata.of_node = of_node;
 		ret = platform_device_add_data(pdev, &reg->pdata,
 					       sizeof(reg->pdata));
 		if (!ret)
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 952fe69..24e395c 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -58,7 +58,7 @@
  */
 static int apd = -1;
 module_param(apd, bint, 0);
-MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
+MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
 
 struct temperature {
 	s8	degrees;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 0addc84..69166ab 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -77,7 +77,6 @@
 struct lm75_data {
 	struct i2c_client	*client;
 	struct device		*hwmon_dev;
-	struct thermal_zone_device	*tz;
 	struct mutex		update_lock;
 	u8			orig_conf;
 	u8			resolution;	/* In bits, between 9 and 12 */
@@ -306,11 +305,9 @@
 	if (IS_ERR(data->hwmon_dev))
 		return PTR_ERR(data->hwmon_dev);
 
-	data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0,
-						   data->hwmon_dev,
-						   &lm75_of_thermal_ops);
-	if (IS_ERR(data->tz))
-		data->tz = NULL;
+	devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0,
+					     data->hwmon_dev,
+					     &lm75_of_thermal_ops);
 
 	dev_info(dev, "%s: sensor '%s'\n",
 		 dev_name(data->hwmon_dev), client->name);
@@ -322,7 +319,6 @@
 {
 	struct lm75_data *data = i2c_get_clientdata(client);
 
-	thermal_zone_of_sensor_unregister(data->hwmon_dev, data->tz);
 	hwmon_device_unregister(data->hwmon_dev);
 	lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
 	return 0;
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index faa6e8d..8ef7b71 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -259,7 +259,6 @@
 	struct device *dev;
 	int n_comp;
 	char name[PLATFORM_NAME_SIZE];
-	struct thermal_zone_device *tz;
 };
 
 #if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
@@ -579,6 +578,7 @@
 
 static int ntc_thermistor_probe(struct platform_device *pdev)
 {
+	struct thermal_zone_device *tz;
 	const struct of_device_id *of_id =
 			of_match_device(of_match_ptr(ntc_match), &pdev->dev);
 	const struct platform_device_id *pdev_id;
@@ -677,12 +677,10 @@
 	dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
 								pdev_id->name);
 
-	data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
-						   &ntc_of_thermal_ops);
-	if (IS_ERR(data->tz)) {
+	tz = devm_thermal_zone_of_sensor_register(data->dev, 0, data->dev,
+						  &ntc_of_thermal_ops);
+	if (IS_ERR(tz))
 		dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
-		data->tz = NULL;
-	}
 
 	return 0;
 err_after_sysfs:
@@ -700,8 +698,6 @@
 	sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
 	ntc_iio_channel_release(pdata);
 
-	thermal_zone_of_sensor_unregister(data->dev, data->tz);
-
 	return 0;
 }
 
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 3e23003..f9af393 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -40,15 +40,18 @@
 
 static int  __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
 {
+	struct pwm_args pargs;
 	unsigned long duty;
 	int ret = 0;
 
+	pwm_get_args(ctx->pwm, &pargs);
+
 	mutex_lock(&ctx->lock);
 	if (ctx->pwm_value == pwm)
 		goto exit_set_pwm_err;
 
-	duty = DIV_ROUND_UP(pwm * (ctx->pwm->period - 1), MAX_PWM);
-	ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+	duty = DIV_ROUND_UP(pwm * (pargs.period - 1), MAX_PWM);
+	ret = pwm_config(ctx->pwm, duty, pargs.period);
 	if (ret)
 		goto exit_set_pwm_err;
 
@@ -215,6 +218,7 @@
 {
 	struct thermal_cooling_device *cdev;
 	struct pwm_fan_ctx *ctx;
+	struct pwm_args pargs;
 	struct device *hwmon;
 	int duty_cycle;
 	int ret;
@@ -233,11 +237,19 @@
 
 	platform_set_drvdata(pdev, ctx);
 
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to the
+	 * atomic PWM API.
+	 */
+	pwm_apply_args(ctx->pwm);
+
 	/* Set duty cycle to maximum allowed */
-	duty_cycle = ctx->pwm->period - 1;
+	pwm_get_args(ctx->pwm, &pargs);
+
+	duty_cycle = pargs.period - 1;
 	ctx->pwm_value = MAX_PWM;
 
-	ret = pwm_config(ctx->pwm, duty_cycle, ctx->pwm->period);
+	ret = pwm_config(ctx->pwm, duty_cycle, pargs.period);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to configure PWM\n");
 		return ret;
@@ -303,14 +315,16 @@
 static int pwm_fan_resume(struct device *dev)
 {
 	struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+	struct pwm_args pargs;
 	unsigned long duty;
 	int ret;
 
 	if (ctx->pwm_value == 0)
 		return 0;
 
-	duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
-	ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+	pwm_get_args(ctx->pwm, &pargs);
+	duty = DIV_ROUND_UP(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
+	ret = pwm_config(ctx->pwm, duty, pargs.period);
 	if (ret)
 		return ret;
 	return pwm_enable(ctx->pwm);
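
The three pwm-fan hunks above repeat one conversion: the reference period is read through pwm_get_args() instead of dereferencing ctx->pwm->period. A hedged standalone sketch of that pattern (function and parameter names are illustrative, not from the patch):

/* Illustrative sketch of the pwm_get_args() pattern applied above. */
static int example_set_duty_percent(struct pwm_device *pwm, unsigned int percent)
{
	struct pwm_args pargs;
	unsigned long duty;

	pwm_get_args(pwm, &pargs);	/* reference period supplied by the args */
	duty = DIV_ROUND_UP((unsigned long)percent * (pargs.period - 1), 100);
	return pwm_config(pwm, duty, pargs.period);	/* legacy, non-atomic API */
}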
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 912b449..25b44e6 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -31,10 +31,8 @@
 };
 
 struct scpi_thermal_zone {
-	struct list_head list;
 	int sensor_id;
 	struct scpi_sensors *scpi_sensors;
-	struct thermal_zone_device *tzd;
 };
 
 struct scpi_sensors {
@@ -92,20 +90,6 @@
 	return sprintf(buf, "%s\n", sensor->info.name);
 }
 
-static void
-unregister_thermal_zones(struct platform_device *pdev,
-			 struct scpi_sensors *scpi_sensors)
-{
-	struct list_head *pos;
-
-	list_for_each(pos, &scpi_sensors->thermal_zones) {
-		struct scpi_thermal_zone *zone;
-
-		zone = list_entry(pos, struct scpi_thermal_zone, list);
-		thermal_zone_of_sensor_unregister(&pdev->dev, zone->tzd);
-	}
-}
-
 static struct thermal_zone_of_device_ops scpi_sensor_ops = {
 	.get_temp = scpi_read_temp,
 };
@@ -118,7 +102,7 @@
 	struct scpi_ops *scpi_ops;
 	struct device *hwdev, *dev = &pdev->dev;
 	struct scpi_sensors *scpi_sensors;
-	int ret, idx;
+	int idx, ret;
 
 	scpi_ops = get_scpi_ops();
 	if (!scpi_ops)
@@ -232,48 +216,35 @@
 	INIT_LIST_HEAD(&scpi_sensors->thermal_zones);
 	for (i = 0; i < nr_sensors; i++) {
 		struct sensor_data *sensor = &scpi_sensors->data[i];
+		struct thermal_zone_device *z;
 		struct scpi_thermal_zone *zone;
 
 		if (sensor->info.class != TEMPERATURE)
 			continue;
 
 		zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL);
-		if (!zone) {
-			ret = -ENOMEM;
-			goto unregister_tzd;
-		}
+		if (!zone)
+			return -ENOMEM;
 
 		zone->sensor_id = i;
 		zone->scpi_sensors = scpi_sensors;
-		zone->tzd = thermal_zone_of_sensor_register(dev,
-				sensor->info.sensor_id, zone, &scpi_sensor_ops);
+		z = devm_thermal_zone_of_sensor_register(dev,
+							 sensor->info.sensor_id,
+							 zone,
+							 &scpi_sensor_ops);
 		/*
 		 * The call to thermal_zone_of_sensor_register returns
 		 * an error for sensors that are not associated with
 		 * any thermal zones or if the thermal subsystem is
 		 * not configured.
 		 */
-		if (IS_ERR(zone->tzd)) {
+		if (IS_ERR(z)) {
 			devm_kfree(dev, zone);
 			continue;
 		}
-		list_add(&zone->list, &scpi_sensors->thermal_zones);
 	}
 
 	return 0;
-
-unregister_tzd:
-	unregister_thermal_zones(pdev, scpi_sensors);
-	return ret;
-}
-
-static int scpi_hwmon_remove(struct platform_device *pdev)
-{
-	struct scpi_sensors *scpi_sensors = platform_get_drvdata(pdev);
-
-	unregister_thermal_zones(pdev, scpi_sensors);
-
-	return 0;
 }
 
 static const struct of_device_id scpi_of_match[] = {
@@ -288,7 +259,6 @@
 		.of_match_table = scpi_of_match,
 	},
 	.probe		= scpi_hwmon_probe,
-	.remove		= scpi_hwmon_remove,
 };
 module_platform_driver(scpi_hwmon_platdrv);
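
The lm75, ntc_thermistor and scpi-hwmon hunks above (and the tmp102 hunk below) all make the same switch to the device-managed registration helper, which lets the unregister calls, per-zone bookkeeping and remove() paths go away. A minimal sketch of the resulting pattern, with made-up driver names:

/* Illustrative only: the devm_ variant is unregistered automatically on detach. */
static int example_get_temp(void *data, int *temp)
{
	*temp = 25000;	/* placeholder reading, in millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_of_device_ops example_ops = {
	.get_temp = example_get_temp,
};

static int example_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tz;

	tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, NULL,
						  &example_ops);
	if (IS_ERR(tz))
		dev_dbg(&pdev->dev, "no thermal zone bound to this sensor\n");

	/* No matching unregister call and no remove() handler are needed. */
	return 0;
}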
 
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5289aa0..f1e96fd 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -53,7 +53,6 @@
 struct tmp102 {
 	struct i2c_client *client;
 	struct device *hwmon_dev;
-	struct thermal_zone_device *tz;
 	struct mutex lock;
 	u16 config_orig;
 	unsigned long last_update;
@@ -232,10 +231,8 @@
 		goto fail_restore_config;
 	}
 	tmp102->hwmon_dev = hwmon_dev;
-	tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
-						     &tmp102_of_thermal_ops);
-	if (IS_ERR(tmp102->tz))
-		tmp102->tz = NULL;
+	devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
+					     &tmp102_of_thermal_ops);
 
 	dev_info(dev, "initialized\n");
 
@@ -251,7 +248,6 @@
 {
 	struct tmp102 *tmp102 = i2c_get_clientdata(client);
 
-	thermal_zone_of_sensor_unregister(tmp102->hwmon_dev, tmp102->tz);
 	hwmon_device_unregister(tmp102->hwmon_dev);
 
 	/* Stop monitoring if device was stopped originally */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2dd40dd..f167021 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -965,7 +965,7 @@
 
 config I2C_XLR
 	tristate "Netlogic XLR and Sigma Designs I2C support"
-	depends on CPU_XLR || ARCH_TANGOX
+	depends on CPU_XLR || ARCH_TANGO
 	help
 	  This driver enables support for the on-chip I2C interface of
 	  the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -985,6 +985,7 @@
 
 config I2C_RCAR
 	tristate "Renesas R-Car I2C Controller"
+	depends on HAS_DMA
 	depends on ARCH_RENESAS || COMPILE_TEST
 	select I2C_SLAVE
 	help
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 921d32b..f233726 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1013,7 +1013,7 @@
 
 error:
 	if (ret != -EPROBE_DEFER)
-		dev_info(dev->dev, "can't use DMA, error %d\n", ret);
+		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
 	if (dma->chan_rx)
 		dma_release_channel(dma->chan_rx);
 	if (dma->chan_tx)
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 9aca1b4..52407f3 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -623,7 +623,7 @@
 	char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
 	int ret;
 
-	chan = dma_request_slave_channel_reason(dev, chan_name);
+	chan = dma_request_chan(dev, chan_name);
 	if (IS_ERR(chan)) {
 		ret = PTR_ERR(chan);
 		dev_dbg(dev, "request_channel failed for %s (%d)\n",
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 0b1108d..6ecfd76 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,6 +22,7 @@
 
 /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
 
+#include <linux/cdev.h>
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/i2c-dev.h>
@@ -47,9 +48,10 @@
 	struct list_head list;
 	struct i2c_adapter *adap;
 	struct device *dev;
+	struct cdev cdev;
 };
 
-#define I2C_MINORS	256
+#define I2C_MINORS	MINORMASK
 static LIST_HEAD(i2c_dev_list);
 static DEFINE_SPINLOCK(i2c_dev_list_lock);
 
@@ -89,7 +91,7 @@
 	return i2c_dev;
 }
 
-static void return_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev)
 {
 	spin_lock(&i2c_dev_list_lock);
 	list_del(&i2c_dev->list);
@@ -552,6 +554,12 @@
 	if (IS_ERR(i2c_dev))
 		return PTR_ERR(i2c_dev);
 
+	cdev_init(&i2c_dev->cdev, &i2cdev_fops);
+	i2c_dev->cdev.owner = THIS_MODULE;
+	res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
+	if (res)
+		goto error_cdev;
+
 	/* register this i2c device with the driver core */
 	i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
 				     MKDEV(I2C_MAJOR, adap->nr), NULL,
@@ -565,7 +573,9 @@
 		 adap->name, adap->nr);
 	return 0;
 error:
-	return_i2c_dev(i2c_dev);
+	cdev_del(&i2c_dev->cdev);
+error_cdev:
+	put_i2c_dev(i2c_dev);
 	return res;
 }
 
@@ -582,7 +592,8 @@
 	if (!i2c_dev) /* attach_adapter must have failed */
 		return 0;
 
-	return_i2c_dev(i2c_dev);
+	cdev_del(&i2c_dev->cdev);
+	put_i2c_dev(i2c_dev);
 	device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 
 	pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
@@ -620,7 +631,7 @@
 
 	printk(KERN_INFO "i2c /dev entries driver\n");
 
-	res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
+	res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
 	if (res)
 		goto out;
 
@@ -644,7 +655,7 @@
 out_unreg_class:
 	class_destroy(i2c_dev_class);
 out_unreg_chrdev:
-	unregister_chrdev(I2C_MAJOR, "i2c");
+	unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 out:
 	printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
 	return res;
@@ -655,7 +666,7 @@
 	bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
 	i2c_for_each_dev(NULL, i2cdev_detach_adapter);
 	class_destroy(i2c_dev_class);
-	unregister_chrdev(I2C_MAJOR, "i2c");
+	unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 }
 
 MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
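
The i2c-dev hunk above stops claiming the whole major with register_chrdev() and instead reserves a minor region and adds one struct cdev per adapter. A minimal sketch of that registration pattern, with hypothetical names (EXAMPLE_MAJOR, EXAMPLE_MINORS and example_fops are placeholders, not from the patch):

/* Illustrative only: hypothetical major/minor values and an empty fops. */
#define EXAMPLE_MAJOR	240
#define EXAMPLE_MINORS	16

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct cdev example_cdev;

static int __init example_init(void)
{
	int ret;

	ret = register_chrdev_region(MKDEV(EXAMPLE_MAJOR, 0),
				     EXAMPLE_MINORS, "example");
	if (ret)
		return ret;

	cdev_init(&example_cdev, &example_fops);
	example_cdev.owner = THIS_MODULE;

	ret = cdev_add(&example_cdev, MKDEV(EXAMPLE_MAJOR, 0), 1);
	if (ret)
		unregister_chrdev_region(MKDEV(EXAMPLE_MAJOR, 0), EXAMPLE_MINORS);

	return ret;
}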
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6425c0e..2137adf 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -85,4 +85,6 @@
 
 source "drivers/infiniband/sw/rdmavt/Kconfig"
 
+source "drivers/infiniband/hw/hfi1/Kconfig"
+
 endif # INFINIBAND
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 26987d9..edaae9f 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,8 +1,7 @@
 infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= rdma_cm.o
 user_access-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= rdma_ucm.o
 
-obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o \
-					ib_cm.o iw_cm.o ib_addr.o \
+obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_cm.o iw_cm.o \
 					$(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) +=	ib_umad.o
 obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	ib_uverbs.o ib_ucm.o \
@@ -10,14 +9,11 @@
 
 ib_core-y :=			packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
 				device.o fmr_pool.o cache.o netlink.o \
-				roce_gid_mgmt.o mr_pool.o
+				roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
+				multicast.o mad.o smi.o agent.o mad_rmpp.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
 
-ib_mad-y :=			mad.o smi.o agent.o mad_rmpp.o
-
-ib_sa-y :=			sa_query.o multicast.o
-
 ib_cm-y :=			cm.o
 
 iw_cm-y :=			iwcm.o iwpm_util.o iwpm_msg.o
@@ -28,8 +24,6 @@
 
 rdma_ucm-y :=			ucma.o
 
-ib_addr-y :=			addr.o
-
 ib_umad-y :=			user_mad.o
 
 ib_ucm-y :=			ucm.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 337353d..1374541 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -46,10 +46,10 @@
 #include <net/ip6_route.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib.h>
+#include <rdma/rdma_netlink.h>
+#include <net/netlink.h>
 
-MODULE_AUTHOR("Sean Hefty");
-MODULE_DESCRIPTION("IB Address Translation");
-MODULE_LICENSE("Dual BSD/GPL");
+#include "core_priv.h"
 
 struct addr_req {
 	struct list_head list;
@@ -62,8 +62,11 @@
 			 struct rdma_dev_addr *addr, void *context);
 	unsigned long timeout;
 	int status;
+	u32 seq;
 };
 
+static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
+
 static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
@@ -71,6 +74,126 @@
 static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
+static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
+	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
+		.len = sizeof(struct rdma_nla_ls_gid)},
+};
+
+static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
+{
+	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
+	int ret;
+
+	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+		return false;
+
+	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+			nlmsg_len(nlh), ib_nl_addr_policy);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+static void ib_nl_process_good_ip_rsp(const struct nlmsghdr *nlh)
+{
+	const struct nlattr *head, *curr;
+	union ib_gid gid;
+	struct addr_req *req;
+	int len, rem;
+	int found = 0;
+
+	head = (const struct nlattr *)nlmsg_data(nlh);
+	len = nlmsg_len(nlh);
+
+	nla_for_each_attr(curr, head, len, rem) {
+		if (curr->nla_type == LS_NLA_TYPE_DGID)
+			memcpy(&gid, nla_data(curr), nla_len(curr));
+	}
+
+	mutex_lock(&lock);
+	list_for_each_entry(req, &req_list, list) {
+		if (nlh->nlmsg_seq != req->seq)
+			continue;
+		/* We set the DGID part, the rest was set earlier */
+		rdma_addr_set_dgid(req->addr, &gid);
+		req->status = 0;
+		found = 1;
+		break;
+	}
+	mutex_unlock(&lock);
+
+	if (!found)
+		pr_info("Couldn't find request waiting for DGID: %pI6\n",
+			&gid);
+}
+
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+			     struct netlink_callback *cb)
+{
+	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
+
+	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
+	    !(NETLINK_CB(skb).sk) ||
+	    !netlink_capable(skb, CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (ib_nl_is_good_ip_resp(nlh))
+		ib_nl_process_good_ip_rsp(nlh);
+
+	return skb->len;
+}
+
+static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
+			     const void *daddr,
+			     u32 seq, u16 family)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	struct rdma_ls_ip_resolve_header *header;
+	void *data;
+	size_t size;
+	int attrtype;
+	int len;
+
+	if (family == AF_INET) {
+		size = sizeof(struct in_addr);
+		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
+	} else {
+		size = sizeof(struct in6_addr);
+		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
+	}
+
+	len = nla_total_size(sizeof(size));
+	len += NLMSG_ALIGN(sizeof(*header));
+
+	skb = nlmsg_new(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
+			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
+	if (!data) {
+		nlmsg_free(skb);
+		return -ENODATA;
+	}
+
+	/* Construct the family header first */
+	header = (struct rdma_ls_ip_resolve_header *)
+		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+	header->ifindex = dev_addr->bound_dev_if;
+	nla_put(skb, attrtype, size, daddr);
+
+	/* Repair the nlmsg header length */
+	nlmsg_end(skb, nlh);
+	ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+
+	/* Return -ENODATA so the request stays pending and is retried; the
+	 * response from userspace will complete it when it arrives.
+	 */
+	return -ENODATA;
+}
+
 int rdma_addr_size(struct sockaddr *addr)
 {
 	switch (addr->sa_family) {
@@ -199,6 +322,17 @@
 	mutex_unlock(&lock);
 }
 
+static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+			  const void *daddr, u32 seq, u16 family)
+{
+	if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
+		return -EADDRNOTAVAIL;
+
+	/* We fill in what we can, the response will fill the rest */
+	rdma_copy_addr(dev_addr, dst->dev, NULL);
+	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
+}
+
 static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
 			const void *daddr)
 {
@@ -223,6 +357,39 @@
 	return ret;
 }
 
+static bool has_gateway(struct dst_entry *dst, sa_family_t family)
+{
+	struct rtable *rt;
+	struct rt6_info *rt6;
+
+	if (family == AF_INET) {
+		rt = container_of(dst, struct rtable, dst);
+		return rt->rt_uses_gateway;
+	}
+
+	rt6 = container_of(dst, struct rt6_info, dst);
+	return rt6->rt6i_flags & RTF_GATEWAY;
+}
+
+static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+		    const struct sockaddr *dst_in, u32 seq)
+{
+	const struct sockaddr_in *dst_in4 =
+		(const struct sockaddr_in *)dst_in;
+	const struct sockaddr_in6 *dst_in6 =
+		(const struct sockaddr_in6 *)dst_in;
+	const void *daddr = (dst_in->sa_family == AF_INET) ?
+		(const void *)&dst_in4->sin_addr.s_addr :
+		(const void *)&dst_in6->sin6_addr;
+	sa_family_t family = dst_in->sa_family;
+
+	/* Gateway + ARPHRD_INFINIBAND -> IB router */
+	if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
+		return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
+	else
+		return dst_fetch_ha(dst, dev_addr, daddr);
+}
+
 static int addr4_resolve(struct sockaddr_in *src_in,
 			 const struct sockaddr_in *dst_in,
 			 struct rdma_dev_addr *addr,
@@ -246,10 +413,11 @@
 	src_in->sin_family = AF_INET;
 	src_in->sin_addr.s_addr = fl4.saddr;
 
-	/* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't
-	 * routable) and we could set the network type accordingly.
+	/* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+	 * network type accordingly.
+	 */
-	if (rt->rt_uses_gateway)
+	if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
 		addr->network = RDMA_NETWORK_IPV4;
 
 	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
@@ -291,10 +459,12 @@
 		src_in->sin6_addr = fl6.saddr;
 	}
 
-	/* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't
-	 * routable) and we could set the network type accordingly.
+	/* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+	 * network type accordingly.
+	 */
-	if (rt->rt6i_flags & RTF_GATEWAY)
+	if (rt->rt6i_flags & RTF_GATEWAY &&
+	    ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
 		addr->network = RDMA_NETWORK_IPV6;
 
 	addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -317,7 +487,8 @@
 
 static int addr_resolve_neigh(struct dst_entry *dst,
 			      const struct sockaddr *dst_in,
-			      struct rdma_dev_addr *addr)
+			      struct rdma_dev_addr *addr,
+			      u32 seq)
 {
 	if (dst->dev->flags & IFF_LOOPBACK) {
 		int ret;
@@ -331,17 +502,8 @@
 	}
 
 	/* If the device doesn't do ARP internally */
-	if (!(dst->dev->flags & IFF_NOARP)) {
-		const struct sockaddr_in *dst_in4 =
-			(const struct sockaddr_in *)dst_in;
-		const struct sockaddr_in6 *dst_in6 =
-			(const struct sockaddr_in6 *)dst_in;
-
-		return dst_fetch_ha(dst, addr,
-				    dst_in->sa_family == AF_INET ?
-				    (const void *)&dst_in4->sin_addr.s_addr :
-				    (const void *)&dst_in6->sin6_addr);
-	}
+	if (!(dst->dev->flags & IFF_NOARP))
+		return fetch_ha(dst, addr, dst_in, seq);
 
 	return rdma_copy_addr(addr, dst->dev, NULL);
 }
@@ -349,7 +511,8 @@
 static int addr_resolve(struct sockaddr *src_in,
 			const struct sockaddr *dst_in,
 			struct rdma_dev_addr *addr,
-			bool resolve_neigh)
+			bool resolve_neigh,
+			u32 seq)
 {
 	struct net_device *ndev;
 	struct dst_entry *dst;
@@ -366,7 +529,7 @@
 			return ret;
 
 		if (resolve_neigh)
-			ret = addr_resolve_neigh(&rt->dst, dst_in, addr);
+			ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
 
 		ndev = rt->dst.dev;
 		dev_hold(ndev);
@@ -383,7 +546,7 @@
 			return ret;
 
 		if (resolve_neigh)
-			ret = addr_resolve_neigh(dst, dst_in, addr);
+			ret = addr_resolve_neigh(dst, dst_in, addr, seq);
 
 		ndev = dst->dev;
 		dev_hold(ndev);
@@ -412,7 +575,7 @@
 			src_in = (struct sockaddr *) &req->src_addr;
 			dst_in = (struct sockaddr *) &req->dst_addr;
 			req->status = addr_resolve(src_in, dst_in, req->addr,
-						   true);
+						   true, req->seq);
 			if (req->status && time_after_eq(jiffies, req->timeout))
 				req->status = -ETIMEDOUT;
 			else if (req->status == -ENODATA)
@@ -471,8 +634,9 @@
 	req->context = context;
 	req->client = client;
 	atomic_inc(&client->refcount);
+	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
 
-	req->status = addr_resolve(src_in, dst_in, addr, true);
+	req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
 	switch (req->status) {
 	case 0:
 		req->timeout = jiffies;
@@ -510,7 +674,7 @@
 		src_in->sa_family = dst_addr->sa_family;
 	}
 
-	return addr_resolve(src_in, dst_addr, addr, false);
+	return addr_resolve(src_in, dst_addr, addr, false, 0);
 }
 EXPORT_SYMBOL(rdma_resolve_ip_route);
 
@@ -634,7 +798,7 @@
 	.notifier_call = netevent_callback
 };
 
-static int __init addr_init(void)
+int addr_init(void)
 {
 	addr_wq = create_singlethread_workqueue("ib_addr");
 	if (!addr_wq)
@@ -642,15 +806,13 @@
 
 	register_netevent_notifier(&nb);
 	rdma_addr_register_client(&self);
+
 	return 0;
 }
 
-static void __exit addr_cleanup(void)
+void addr_cleanup(void)
 {
 	rdma_addr_unregister_client(&self);
 	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
 }
-
-module_init(addr_init);
-module_exit(addr_cleanup);
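
The new netlink path above pairs requests and responses by sequence number: each addr_req gets a private seq from ib_nl_addr_request_seq, ib_nl_ip_send_msg() stores it in nlmsg_seq, and the response handler walks the pending request list for a matching seq before filling in the DGID. An illustrative restatement of that matching step, stripped of the locking and GID handling of the real code:

/* Illustrative sketch of the seq-based matching used by the response handler. */
struct example_req {
	struct list_head list;
	u32 seq;	/* mirrors addr_req->seq, assigned at submit time */
};

static struct example_req *example_match(struct list_head *pending,
					 const struct nlmsghdr *nlh)
{
	struct example_req *req;

	list_for_each_entry(req, pending, list)
		if (req->seq == nlh->nlmsg_seq)
			return req;	/* this response completes this request */

	return NULL;	/* stale or unknown response */
}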
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index c2e257d..0409667 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -178,6 +178,7 @@
 {
 	int ret = 0;
 	struct net_device *old_net_dev;
+	enum ib_gid_type old_gid_type;
 
 	/* in rdma_cap_roce_gid_table, this function should be protected by a
 	 * sleep-able lock.
@@ -199,6 +200,7 @@
 	}
 
 	old_net_dev = table->data_vec[ix].attr.ndev;
+	old_gid_type = table->data_vec[ix].attr.gid_type;
 	if (old_net_dev && old_net_dev != attr->ndev)
 		dev_put(old_net_dev);
 	/* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@
 		attr = &zattr;
 		table->data_vec[ix].context = NULL;
 	}
-	if (default_gid)
-		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
 	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
 	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+	if (default_gid) {
+		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+		if (action == GID_TABLE_WRITE_ACTION_DEL)
+			table->data_vec[ix].attr.gid_type = old_gid_type;
+	}
 	if (table->data_vec[ix].attr.ndev &&
 	    table->data_vec[ix].attr.ndev != old_net_dev)
 		dev_hold(table->data_vec[ix].attr.ndev);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1d92e09..c995255 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3452,14 +3452,14 @@
 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
 
 	/* Check if the device started its remove_one */
-	spin_lock_irq(&cm.lock);
+	spin_lock_irqsave(&cm.lock, flags);
 	if (!cm_dev->going_down) {
 		queue_delayed_work(cm.wq, &work->work, 0);
 	} else {
 		kfree(work);
 		ret = -ENODEV;
 	}
-	spin_unlock_irq(&cm.lock);
+	spin_unlock_irqrestore(&cm.lock, flags);
 
 out:
 	return ret;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index eab3221..19d499d 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -137,4 +137,20 @@
 	return _upper == upper;
 }
 
+int addr_init(void);
+void addr_cleanup(void);
+
+int ib_mad_init(void);
+void ib_mad_cleanup(void);
+
+int ib_sa_init(void);
+void ib_sa_cleanup(void);
+
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+			      struct netlink_callback *cb);
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+			     struct netlink_callback *cb);
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+			     struct netlink_callback *cb);
+
 #endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 1097984..5c155fa 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -661,6 +661,9 @@
 	if (err || port_attr->subnet_prefix)
 		return err;
 
+	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+		return 0;
+
 	err = ib_query_gid(device, port_num, 0, &gid, NULL);
 	if (err)
 		return err;
@@ -955,6 +958,29 @@
 }
 EXPORT_SYMBOL(ib_get_net_dev_by_params);
 
+static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
+	[RDMA_NL_LS_OP_RESOLVE] = {
+		.dump = ib_nl_handle_resolve_resp,
+		.module = THIS_MODULE },
+	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
+		.dump = ib_nl_handle_set_timeout,
+		.module = THIS_MODULE },
+	[RDMA_NL_LS_OP_IP_RESOLVE] = {
+		.dump = ib_nl_handle_ip_res_resp,
+		.module = THIS_MODULE },
+};
+
+static int ib_add_ibnl_clients(void)
+{
+	return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
+			       ibnl_ls_cb_table);
+}
+
+static void ib_remove_ibnl_clients(void)
+{
+	ibnl_remove_client(RDMA_NL_LS);
+}
+
 static int __init ib_core_init(void)
 {
 	int ret;
@@ -983,10 +1009,42 @@
 		goto err_sysfs;
 	}
 
+	ret = addr_init();
+	if (ret) {
+		pr_warn("Couldn't init IB address resolution\n");
+		goto err_ibnl;
+	}
+
+	ret = ib_mad_init();
+	if (ret) {
+		pr_warn("Couldn't init IB MAD\n");
+		goto err_addr;
+	}
+
+	ret = ib_sa_init();
+	if (ret) {
+		pr_warn("Couldn't init SA\n");
+		goto err_mad;
+	}
+
+	ret = ib_add_ibnl_clients();
+	if (ret) {
+		pr_warn("Couldn't register ibnl clients\n");
+		goto err_sa;
+	}
+
 	ib_cache_setup();
 
 	return 0;
 
+err_sa:
+	ib_sa_cleanup();
+err_mad:
+	ib_mad_cleanup();
+err_addr:
+	addr_cleanup();
+err_ibnl:
+	ibnl_cleanup();
 err_sysfs:
 	class_unregister(&ib_class);
 err_comp:
@@ -999,6 +1057,10 @@
 static void __exit ib_core_cleanup(void)
 {
 	ib_cache_cleanup();
+	ib_remove_ibnl_clients();
+	ib_sa_cleanup();
+	ib_mad_cleanup();
+	addr_cleanup();
 	ibnl_cleanup();
 	class_unregister(&ib_class);
 	destroy_workqueue(ib_comp_wq);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 43e3fa2..1c41b95 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -506,7 +506,7 @@
 	if (!nlmsg_request) {
 		pr_info("%s: Could not find a matching request (seq = %u)\n",
 				 __func__, msg_seq);
-			return -EINVAL;
+		return -EINVAL;
 	}
 	pm_msg = nlmsg_request->req_buffer;
 	local_sockaddr = (struct sockaddr_storage *)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9fa5bf3..2d49228 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,11 +47,7 @@
 #include "smi.h"
 #include "opa_smi.h"
 #include "agent.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("kernel IB MAD API");
-MODULE_AUTHOR("Hal Rosenstock");
-MODULE_AUTHOR("Sean Hefty");
+#include "core_priv.h"
 
 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
@@ -1642,9 +1638,9 @@
 		/* Now, check to see if there are any methods still in use */
 		if (!check_method_table(method)) {
 			/* If not, release management method table */
-			 kfree(method);
-			 class->method_table[mgmt_class] = NULL;
-			 /* Any management classes left ? */
+			kfree(method);
+			class->method_table[mgmt_class] = NULL;
+			/* Any management classes left ? */
 			if (!check_class_table(class)) {
 				/* If not, release management class table */
 				kfree(class);
@@ -3316,7 +3312,7 @@
 	.remove = ib_mad_remove_device
 };
 
-static int __init ib_mad_init_module(void)
+int ib_mad_init(void)
 {
 	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
 	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
@@ -3334,10 +3330,7 @@
 	return 0;
 }
 
-static void __exit ib_mad_cleanup_module(void)
+void ib_mad_cleanup(void)
 {
 	ib_unregister_client(&mad_client);
 }
-
-module_init(ib_mad_init_module);
-module_exit(ib_mad_cleanup_module);
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 250937c..a83ec28 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,6 +93,18 @@
 
 struct mcast_member;
 
+/*
+ * There are 4 types of join states:
+ * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
+ */
+enum {
+	FULLMEMBER_JOIN,
+	NONMEMBER_JOIN,
+	SENDONLY_NONMEBER_JOIN,
+	SENDONLY_FULLMEMBER_JOIN,
+	NUM_JOIN_MEMBERSHIP_TYPES,
+};
+
 struct mcast_group {
 	struct ib_sa_mcmember_rec rec;
 	struct rb_node		node;
@@ -102,7 +114,7 @@
 	struct list_head	pending_list;
 	struct list_head	active_list;
 	struct mcast_member	*last_join;
-	int			members[3];
+	int			members[NUM_JOIN_MEMBERSHIP_TYPES];
 	atomic_t		refcount;
 	enum mcast_group_state	state;
 	struct ib_sa_query	*query;
@@ -220,8 +232,9 @@
 }
 
 /*
- * A multicast group has three types of members: full member, non member, and
- * send only member.  We need to keep track of the number of members of each
+ * A multicast group has four types of members: full member, non member,
+ * sendonly non member and sendonly full member.
+ * We need to keep track of the number of members of each
  * type based on their join state.  Adjust the number of members that belong to
  * the specified join states.
  */
@@ -229,7 +242,7 @@
 {
 	int i;
 
-	for (i = 0; i < 3; i++, join_state >>= 1)
+	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
 		if (join_state & 0x1)
 			group->members[i] += inc;
 }
@@ -245,7 +258,7 @@
 	u8 leave_state = 0;
 	int i;
 
-	for (i = 0; i < 3; i++)
+	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
 		if (!group->members[i])
 			leave_state |= (0x1 << i);
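
The hunks above widen the membership accounting from three to four join-state types; the enum index doubles as the bit position in the join_state mask. An illustrative restatement of the bookkeeping done by adjust_membership() above, shown outside the group lock:

/* Illustrative restatement of adjust_membership() in the patch above. */
static void example_adjust_membership(int members[NUM_JOIN_MEMBERSHIP_TYPES],
				      u8 join_state, int inc)
{
	int i;

	/* bit 0 = FullMember, bit 1 = NonMember,
	 * bit 2 = SendOnlyNonMember, bit 3 = SendOnlyFullMember
	 */
	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			members[i] += inc;	/* +1 on join, -1 on leave */
}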
 
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 3ebd108..e955386 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -53,10 +53,6 @@
 #include "sa.h"
 #include "core_priv.h"
 
-MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("InfiniBand subnet administration query support");
-MODULE_LICENSE("Dual BSD/GPL");
-
 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
@@ -119,6 +115,12 @@
 	struct ib_sa_query sa_query;
 };
 
+struct ib_sa_classport_info_query {
+	void (*callback)(int, struct ib_class_port_info *, void *);
+	void *context;
+	struct ib_sa_query sa_query;
+};
+
 struct ib_sa_mcmember_query {
 	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
 	void *context;
@@ -392,6 +394,82 @@
 	  .size_bits    = 2*64 },
 };
 
+#define CLASSPORTINFO_REC_FIELD(field) \
+	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
+	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
+	.field_name          = "ib_class_port_info:" #field
+
+static const struct ib_field classport_info_rec_table[] = {
+	{ CLASSPORTINFO_REC_FIELD(base_version),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 8 },
+	{ CLASSPORTINFO_REC_FIELD(class_version),
+	  .offset_words = 0,
+	  .offset_bits  = 8,
+	  .size_bits    = 8 },
+	{ CLASSPORTINFO_REC_FIELD(capability_mask),
+	  .offset_words = 0,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 },
+	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
+	  .offset_words = 1,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
+	  .offset_words = 2,
+	  .offset_bits  = 0,
+	  .size_bits    = 128 },
+	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
+	  .offset_words = 6,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
+	  .offset_words = 7,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
+	  .offset_words = 7,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 },
+
+	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
+	  .offset_words = 8,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
+	  .offset_words = 9,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+
+	{ CLASSPORTINFO_REC_FIELD(trap_gid),
+	  .offset_words = 10,
+	  .offset_bits  = 0,
+	  .size_bits    = 128 },
+	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
+	  .offset_words = 14,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+
+	{ CLASSPORTINFO_REC_FIELD(trap_lid),
+	  .offset_words = 15,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
+	  .offset_words = 15,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 },
+
+	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
+	  .offset_words = 16,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
+	  .offset_words = 17,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+};
+
 #define GUIDINFO_REC_FIELD(field) \
 	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
 	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
@@ -705,8 +783,8 @@
 	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 }
 
-static int ib_nl_handle_set_timeout(struct sk_buff *skb,
-				    struct netlink_callback *cb)
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+			     struct netlink_callback *cb)
 {
 	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
 	int timeout, delta, abs_delta;
@@ -782,8 +860,8 @@
 	return 1;
 }
 
-static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
-				     struct netlink_callback *cb)
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+			      struct netlink_callback *cb)
 {
 	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
 	unsigned long flags;
@@ -838,15 +916,6 @@
 	return skb->len;
 }
 
-static struct ibnl_client_cbs ib_sa_cb_table[] = {
-	[RDMA_NL_LS_OP_RESOLVE] = {
-		.dump = ib_nl_handle_resolve_resp,
-		.module = THIS_MODULE },
-	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
-		.dump = ib_nl_handle_set_timeout,
-		.module = THIS_MODULE },
-};
-
 static void free_sm_ah(struct kref *kref)
 {
 	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -1645,6 +1714,97 @@
 }
 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
 
+/* Support GET of SA ClassPortInfo */
+static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
+					      int status,
+					      struct ib_sa_mad *mad)
+{
+	struct ib_sa_classport_info_query *query =
+		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
+
+	if (mad) {
+		struct ib_class_port_info rec;
+
+		ib_unpack(classport_info_rec_table,
+			  ARRAY_SIZE(classport_info_rec_table),
+			  mad->data, &rec);
+		query->callback(status, &rec, query->context);
+	} else {
+		query->callback(status, NULL, query->context);
+	}
+}
+
+static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
+{
+	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
+			   sa_query));
+}
+
+int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
+				   struct ib_device *device, u8 port_num,
+				   int timeout_ms, gfp_t gfp_mask,
+				   void (*callback)(int status,
+						    struct ib_class_port_info *resp,
+						    void *context),
+				   void *context,
+				   struct ib_sa_query **sa_query)
+{
+	struct ib_sa_classport_info_query *query;
+	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+	struct ib_sa_port *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
+	int ret;
+
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
+	query = kzalloc(sizeof(*query), gfp_mask);
+	if (!query)
+		return -ENOMEM;
+
+	query->sa_query.port = port;
+	ret = alloc_mad(&query->sa_query, gfp_mask);
+	if (ret)
+		goto err1;
+
+	ib_sa_client_get(client);
+	query->sa_query.client = client;
+	query->callback        = callback;
+	query->context         = context;
+
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
+
+	query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;
+
+	query->sa_query.release  = ib_sa_portclass_info_rec_release;
+	/* support GET only */
+	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
+	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
+	mad->sa_hdr.comp_mask	 = 0;
+	*sa_query = &query->sa_query;
+
+	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+	if (ret < 0)
+		goto err2;
+
+	return ret;
+
+err2:
+	*sa_query = NULL;
+	ib_sa_client_put(query->sa_query.client);
+	free_mad(&query->sa_query);
+
+err1:
+	kfree(query);
+	return ret;
+}
+EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
@@ -1794,7 +1954,7 @@
 	kfree(sa_dev);
 }
 
-static int __init ib_sa_init(void)
+int ib_sa_init(void)
 {
 	int ret;
 
@@ -1820,17 +1980,10 @@
 		goto err3;
 	}
 
-	if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table),
-			    ib_sa_cb_table)) {
-		pr_err("Failed to add netlink callback\n");
-		ret = -EINVAL;
-		goto err4;
-	}
 	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
 
 	return 0;
-err4:
-	destroy_workqueue(ib_nl_wq);
+
 err3:
 	mcast_cleanup();
 err2:
@@ -1839,9 +1992,8 @@
 	return ret;
 }
 
-static void __exit ib_sa_cleanup(void)
+void ib_sa_cleanup(void)
 {
-	ibnl_remove_client(RDMA_NL_LS);
 	cancel_delayed_work(&ib_nl_timed_work);
 	flush_workqueue(ib_nl_wq);
 	destroy_workqueue(ib_nl_wq);
@@ -1849,6 +2001,3 @@
 	ib_unregister_client(&sa_client);
 	idr_destroy(&query_idr);
 }
-
-module_init(ib_sa_init);
-module_exit(ib_sa_cleanup);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 14606af..a5793c8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -56,8 +56,10 @@
 	struct gid_attr_group *gid_attr_group;
 	struct attribute_group gid_group;
 	struct attribute_group pkey_group;
-	u8                     port_num;
 	struct attribute_group *pma_table;
+	struct attribute_group *hw_stats_ag;
+	struct rdma_hw_stats   *hw_stats;
+	u8                     port_num;
 };
 
 struct port_attribute {
@@ -80,6 +82,18 @@
 	__be16			attr_id;
 };
 
+struct hw_stats_attribute {
+	struct attribute	attr;
+	ssize_t			(*show)(struct kobject *kobj,
+					struct attribute *attr, char *buf);
+	ssize_t			(*store)(struct kobject *kobj,
+					 struct attribute *attr,
+					 const char *buf,
+					 size_t count);
+	int			index;
+	u8			port_num;
+};
+
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -733,6 +747,220 @@
 	return &pma_group;
 }
 
+static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
+			   u8 port_num, int index)
+{
+	int ret;
+
+	if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
+		return 0;
+	ret = dev->get_hw_stats(dev, stats, port_num, index);
+	if (ret < 0)
+		return ret;
+	if (ret == stats->num_counters)
+		stats->timestamp = jiffies;
+
+	return 0;
+}
+
+static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
+{
+	return sprintf(buf, "%llu\n", stats->value[index]);
+}
+
+static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct ib_device *dev;
+	struct ib_port *port;
+	struct hw_stats_attribute *hsa;
+	struct rdma_hw_stats *stats;
+	int ret;
+
+	hsa = container_of(attr, struct hw_stats_attribute, attr);
+	if (!hsa->port_num) {
+		dev = container_of((struct device *)kobj,
+				   struct ib_device, dev);
+		stats = dev->hw_stats;
+	} else {
+		port = container_of(kobj, struct ib_port, kobj);
+		dev = port->ibdev;
+		stats = port->hw_stats;
+	}
+	ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
+	if (ret)
+		return ret;
+	return print_hw_stat(stats, hsa->index, buf);
+}
+
+static ssize_t show_stats_lifespan(struct kobject *kobj,
+				   struct attribute *attr,
+				   char *buf)
+{
+	struct hw_stats_attribute *hsa;
+	int msecs;
+
+	hsa = container_of(attr, struct hw_stats_attribute, attr);
+	if (!hsa->port_num) {
+		struct ib_device *dev = container_of((struct device *)kobj,
+						     struct ib_device, dev);
+		msecs = jiffies_to_msecs(dev->hw_stats->lifespan);
+	} else {
+		struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+		msecs = jiffies_to_msecs(p->hw_stats->lifespan);
+	}
+	return sprintf(buf, "%d\n", msecs);
+}
+
+static ssize_t set_stats_lifespan(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct hw_stats_attribute *hsa;
+	int msecs;
+	int jiffies;
+	int ret;
+
+	ret = kstrtoint(buf, 10, &msecs);
+	if (ret)
+		return ret;
+	if (msecs < 0 || msecs > 10000)
+		return -EINVAL;
+	jiffies = msecs_to_jiffies(msecs);
+	hsa = container_of(attr, struct hw_stats_attribute, attr);
+	if (!hsa->port_num) {
+		struct ib_device *dev = container_of((struct device *)kobj,
+						     struct ib_device, dev);
+		dev->hw_stats->lifespan = jiffies;
+	} else {
+		struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+		p->hw_stats->lifespan = jiffies;
+	}
+	return count;
+}
+
+static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
+{
+	struct attribute **attr;
+
+	sysfs_remove_group(kobj, attr_group);
+
+	for (attr = attr_group->attrs; *attr; attr++)
+		kfree(*attr);
+	kfree(attr_group);
+}
+
+static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+{
+	struct hw_stats_attribute *hsa;
+
+	hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
+	if (!hsa)
+		return NULL;
+
+	hsa->attr.name = (char *)name;
+	hsa->attr.mode = S_IRUGO;
+	hsa->show = show_hw_stats;
+	hsa->store = NULL;
+	hsa->index = index;
+	hsa->port_num = port_num;
+
+	return &hsa->attr;
+}
+
+static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+{
+	struct hw_stats_attribute *hsa;
+
+	hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
+	if (!hsa)
+		return NULL;
+
+	hsa->attr.name = name;
+	hsa->attr.mode = S_IWUSR | S_IRUGO;
+	hsa->show = show_stats_lifespan;
+	hsa->store = set_stats_lifespan;
+	hsa->index = 0;
+	hsa->port_num = port_num;
+
+	return &hsa->attr;
+}
+
+static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
+			   u8 port_num)
+{
+	struct attribute_group *hsag;
+	struct rdma_hw_stats *stats;
+	int i, ret;
+
+	stats = device->alloc_hw_stats(device, port_num);
+
+	if (!stats)
+		return;
+
+	if (!stats->names || stats->num_counters <= 0)
+		goto err_free_stats;
+
+	/*
+	 * Two extra attribute elements here, one for the lifespan entry and
+	 * one to NULL terminate the list for the sysfs core code
+	 */
+	hsag = kzalloc(sizeof(*hsag) +
+		       sizeof(void *) * (stats->num_counters + 2),
+		       GFP_KERNEL);
+	if (!hsag)
+		goto err_free_stats;
+
+	ret = device->get_hw_stats(device, stats, port_num,
+				   stats->num_counters);
+	if (ret != stats->num_counters)
+		goto err_free_hsag;
+
+	stats->timestamp = jiffies;
+
+	hsag->name = "hw_counters";
+	hsag->attrs = (void *)hsag + sizeof(*hsag);
+
+	for (i = 0; i < stats->num_counters; i++) {
+		hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
+		if (!hsag->attrs[i])
+			goto err;
+		sysfs_attr_init(hsag->attrs[i]);
+	}
+
+	/* treat an error here as non-fatal */
+	hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+	if (hsag->attrs[i])
+		sysfs_attr_init(hsag->attrs[i]);
+
+	if (port) {
+		struct kobject *kobj = &port->kobj;
+		ret = sysfs_create_group(kobj, hsag);
+		if (ret)
+			goto err;
+		port->hw_stats_ag = hsag;
+		port->hw_stats = stats;
+	} else {
+		struct kobject *kobj = &device->dev.kobj;
+		ret = sysfs_create_group(kobj, hsag);
+		if (ret)
+			goto err;
+		device->hw_stats_ag = hsag;
+		device->hw_stats = stats;
+	}
+
+	return;
+
+err:
+	for (; i >= 0; i--)
+		kfree(hsag->attrs[i]);
+err_free_hsag:
+	kfree(hsag);
+err_free_stats:
+	kfree(stats);
+	return;
+}
+
 static int add_port(struct ib_device *device, int port_num,
 		    int (*port_callback)(struct ib_device *,
 					 u8, struct kobject *))
@@ -835,6 +1063,14 @@
 			goto err_remove_pkey;
 	}
 
+	/*
+	 * If port == 0, it means we have only one port and the parent
+	 * device, not this port device, should be the holder of the
+	 * hw_counters
+	 */
+	if (device->alloc_hw_stats && port_num)
+		setup_hw_stats(device, p, port_num);
+
 	list_add_tail(&p->kobj.entry, &device->port_list);
 
 	kobject_uevent(&p->kobj, KOBJ_ADD);
@@ -972,120 +1208,6 @@
 	&dev_attr_node_desc
 };
 
-/* Show a given an attribute in the statistics group */
-static ssize_t show_protocol_stat(const struct device *device,
-			    struct device_attribute *attr, char *buf,
-			    unsigned offset)
-{
-	struct ib_device *dev = container_of(device, struct ib_device, dev);
-	union rdma_protocol_stats stats;
-	ssize_t ret;
-
-	ret = dev->get_protocol_stats(dev, &stats);
-	if (ret)
-		return ret;
-
-	return sprintf(buf, "%llu\n",
-		       (unsigned long long) ((u64 *) &stats)[offset]);
-}
-
-/* generate a read-only iwarp statistics attribute */
-#define IW_STATS_ENTRY(name)						\
-static ssize_t show_##name(struct device *device,			\
-			   struct device_attribute *attr, char *buf)	\
-{									\
-	return show_protocol_stat(device, attr, buf,			\
-				  offsetof(struct iw_protocol_stats, name) / \
-				  sizeof (u64));			\
-}									\
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-
-IW_STATS_ENTRY(ipInReceives);
-IW_STATS_ENTRY(ipInHdrErrors);
-IW_STATS_ENTRY(ipInTooBigErrors);
-IW_STATS_ENTRY(ipInNoRoutes);
-IW_STATS_ENTRY(ipInAddrErrors);
-IW_STATS_ENTRY(ipInUnknownProtos);
-IW_STATS_ENTRY(ipInTruncatedPkts);
-IW_STATS_ENTRY(ipInDiscards);
-IW_STATS_ENTRY(ipInDelivers);
-IW_STATS_ENTRY(ipOutForwDatagrams);
-IW_STATS_ENTRY(ipOutRequests);
-IW_STATS_ENTRY(ipOutDiscards);
-IW_STATS_ENTRY(ipOutNoRoutes);
-IW_STATS_ENTRY(ipReasmTimeout);
-IW_STATS_ENTRY(ipReasmReqds);
-IW_STATS_ENTRY(ipReasmOKs);
-IW_STATS_ENTRY(ipReasmFails);
-IW_STATS_ENTRY(ipFragOKs);
-IW_STATS_ENTRY(ipFragFails);
-IW_STATS_ENTRY(ipFragCreates);
-IW_STATS_ENTRY(ipInMcastPkts);
-IW_STATS_ENTRY(ipOutMcastPkts);
-IW_STATS_ENTRY(ipInBcastPkts);
-IW_STATS_ENTRY(ipOutBcastPkts);
-IW_STATS_ENTRY(tcpRtoAlgorithm);
-IW_STATS_ENTRY(tcpRtoMin);
-IW_STATS_ENTRY(tcpRtoMax);
-IW_STATS_ENTRY(tcpMaxConn);
-IW_STATS_ENTRY(tcpActiveOpens);
-IW_STATS_ENTRY(tcpPassiveOpens);
-IW_STATS_ENTRY(tcpAttemptFails);
-IW_STATS_ENTRY(tcpEstabResets);
-IW_STATS_ENTRY(tcpCurrEstab);
-IW_STATS_ENTRY(tcpInSegs);
-IW_STATS_ENTRY(tcpOutSegs);
-IW_STATS_ENTRY(tcpRetransSegs);
-IW_STATS_ENTRY(tcpInErrs);
-IW_STATS_ENTRY(tcpOutRsts);
-
-static struct attribute *iw_proto_stats_attrs[] = {
-	&dev_attr_ipInReceives.attr,
-	&dev_attr_ipInHdrErrors.attr,
-	&dev_attr_ipInTooBigErrors.attr,
-	&dev_attr_ipInNoRoutes.attr,
-	&dev_attr_ipInAddrErrors.attr,
-	&dev_attr_ipInUnknownProtos.attr,
-	&dev_attr_ipInTruncatedPkts.attr,
-	&dev_attr_ipInDiscards.attr,
-	&dev_attr_ipInDelivers.attr,
-	&dev_attr_ipOutForwDatagrams.attr,
-	&dev_attr_ipOutRequests.attr,
-	&dev_attr_ipOutDiscards.attr,
-	&dev_attr_ipOutNoRoutes.attr,
-	&dev_attr_ipReasmTimeout.attr,
-	&dev_attr_ipReasmReqds.attr,
-	&dev_attr_ipReasmOKs.attr,
-	&dev_attr_ipReasmFails.attr,
-	&dev_attr_ipFragOKs.attr,
-	&dev_attr_ipFragFails.attr,
-	&dev_attr_ipFragCreates.attr,
-	&dev_attr_ipInMcastPkts.attr,
-	&dev_attr_ipOutMcastPkts.attr,
-	&dev_attr_ipInBcastPkts.attr,
-	&dev_attr_ipOutBcastPkts.attr,
-	&dev_attr_tcpRtoAlgorithm.attr,
-	&dev_attr_tcpRtoMin.attr,
-	&dev_attr_tcpRtoMax.attr,
-	&dev_attr_tcpMaxConn.attr,
-	&dev_attr_tcpActiveOpens.attr,
-	&dev_attr_tcpPassiveOpens.attr,
-	&dev_attr_tcpAttemptFails.attr,
-	&dev_attr_tcpEstabResets.attr,
-	&dev_attr_tcpCurrEstab.attr,
-	&dev_attr_tcpInSegs.attr,
-	&dev_attr_tcpOutSegs.attr,
-	&dev_attr_tcpRetransSegs.attr,
-	&dev_attr_tcpInErrs.attr,
-	&dev_attr_tcpOutRsts.attr,
-	NULL
-};
-
-static struct attribute_group iw_stats_group = {
-	.name	= "proto_stats",
-	.attrs	= iw_proto_stats_attrs,
-};
-
 static void free_port_list_attributes(struct ib_device *device)
 {
 	struct kobject *p, *t;
@@ -1093,6 +1215,10 @@
 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
 		struct ib_port *port = container_of(p, struct ib_port, kobj);
 		list_del(&p->entry);
+		if (port->hw_stats) {
+			kfree(port->hw_stats);
+			free_hsag(&port->kobj, port->hw_stats_ag);
+		}
 		sysfs_remove_group(p, port->pma_table);
 		sysfs_remove_group(p, &port->pkey_group);
 		sysfs_remove_group(p, &port->gid_group);
@@ -1149,11 +1275,8 @@
 		}
 	}
 
-	if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) {
-		ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group);
-		if (ret)
-			goto err_put;
-	}
+	if (device->alloc_hw_stats)
+		setup_hw_stats(device, NULL, 0);
 
 	return 0;
 
@@ -1169,15 +1292,18 @@
 
 void ib_device_unregister_sysfs(struct ib_device *device)
 {
-	/* Hold kobject until ib_dealloc_device() */
-	struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
 	int i;
 
-	if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats)
-		sysfs_remove_group(kobj_dev, &iw_stats_group);
+	/* Hold kobject until ib_dealloc_device() */
+	kobject_get(&device->dev.kobj);
 
 	free_port_list_attributes(device);
 
+	if (device->hw_stats) {
+		kfree(device->hw_stats);
+		free_hsag(&device->dev.kobj, device->hw_stats_ag);
+	}
+
 	for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
 		device_remove_file(&device->dev, ib_class_attributes[i]);
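
The sysfs rework above replaces the iWARP-only proto_stats group with a generic hw_counters interface driven by two driver hooks, alloc_hw_stats() and get_hw_stats(); the cxgb3 conversion below is a real instance. A stripped-down sketch of what a driver supplies, with illustrative counter names:

/* Illustrative sketch of the driver side of the new hw_counters interface. */
static const char * const example_counter_names[] = {
	"example_rx_pkts",
	"example_tx_pkts",
};

static struct rdma_hw_stats *example_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	/* One set of counters per port; port 0 means device-wide counters. */
	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int example_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port_num, int index)
{
	stats->value[0] = 0;	/* placeholder: read the hardware counter here */
	stats->value[1] = 0;

	/* Returning num_counters lets the core refresh its cache timestamp. */
	return ARRAY_SIZE(example_counter_names);
}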
 
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index c7ad0a4..c0c7cf8 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_INFINIBAND_NES)		+= nes/
 obj-$(CONFIG_INFINIBAND_OCRDMA)		+= ocrdma/
 obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
+obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index de1c61b4..ada2e50 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -327,7 +327,7 @@
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (cq->size_log2))
-			  * sizeof(struct t3_cqe), cq->queue,
+			  * sizeof(struct t3_cqe) + 1, cq->queue,
 			  dma_unmap_addr(cq, mapping));
 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
 	return err;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 47cb927..bb1a839 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1218,59 +1218,119 @@
 		       iwch_dev->rdev.rnic_info.pdev->device);
 }
 
-static int iwch_get_mib(struct ib_device *ibdev,
-			union rdma_protocol_stats *stats)
+enum counters {
+	IPINRECEIVES,
+	IPINHDRERRORS,
+	IPINADDRERRORS,
+	IPINUNKNOWNPROTOS,
+	IPINDISCARDS,
+	IPINDELIVERS,
+	IPOUTREQUESTS,
+	IPOUTDISCARDS,
+	IPOUTNOROUTES,
+	IPREASMTIMEOUT,
+	IPREASMREQDS,
+	IPREASMOKS,
+	IPREASMFAILS,
+	TCPACTIVEOPENS,
+	TCPPASSIVEOPENS,
+	TCPATTEMPTFAILS,
+	TCPESTABRESETS,
+	TCPCURRESTAB,
+	TCPINSEGS,
+	TCPOUTSEGS,
+	TCPRETRANSSEGS,
+	TCPINERRS,
+	TCPOUTRSTS,
+	TCPRTOMIN,
+	TCPRTOMAX,
+	NR_COUNTERS
+};
+
+static const char * const names[] = {
+	[IPINRECEIVES] = "ipInReceives",
+	[IPINHDRERRORS] = "ipInHdrErrors",
+	[IPINADDRERRORS] = "ipInAddrErrors",
+	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
+	[IPINDISCARDS] = "ipInDiscards",
+	[IPINDELIVERS] = "ipInDelivers",
+	[IPOUTREQUESTS] = "ipOutRequests",
+	[IPOUTDISCARDS] = "ipOutDiscards",
+	[IPOUTNOROUTES] = "ipOutNoRoutes",
+	[IPREASMTIMEOUT] = "ipReasmTimeout",
+	[IPREASMREQDS] = "ipReasmReqds",
+	[IPREASMOKS] = "ipReasmOKs",
+	[IPREASMFAILS] = "ipReasmFails",
+	[TCPACTIVEOPENS] = "tcpActiveOpens",
+	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
+	[TCPATTEMPTFAILS] = "tcpAttemptFails",
+	[TCPESTABRESETS] = "tcpEstabResets",
+	[TCPCURRESTAB] = "tcpCurrEstab",
+	[TCPINSEGS] = "tcpInSegs",
+	[TCPOUTSEGS] = "tcpOutSegs",
+	[TCPRETRANSSEGS] = "tcpRetransSegs",
+	[TCPINERRS] = "tcpInErrs",
+	[TCPOUTRSTS] = "tcpOutRsts",
+	[TCPRTOMIN] = "tcpRtoMin",
+	[TCPRTOMAX] = "tcpRtoMax",
+};
+
+static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
+					      u8 port_num)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+	/* Our driver only supports device level stats */
+	if (port_num != 0)
+		return NULL;
+
+	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+			u8 port, int index)
 {
 	struct iwch_dev *dev;
 	struct tp_mib_stats m;
 	int ret;
 
+	if (port != 0 || !stats)
+		return -ENOSYS;
+
 	PDBG("%s ibdev %p\n", __func__, ibdev);
 	dev = to_iwch_dev(ibdev);
 	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
 	if (ret)
 		return -ENOSYS;
 
-	memset(stats, 0, sizeof *stats);
-	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
-				m.ipInReceive_lo;
-	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
-				  m.ipInHdrErrors_lo;
-	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
-				   m.ipInAddrErrors_lo;
-	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
-				      m.ipInUnknownProtos_lo;
-	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
-				 m.ipInDiscards_lo;
-	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
-				 m.ipInDelivers_lo;
-	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
-				  m.ipOutRequests_lo;
-	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
-				  m.ipOutDiscards_lo;
-	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
-				  m.ipOutNoRoutes_lo;
-	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
-	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
-	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
-	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
-	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
-	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
-	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
-	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
-	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
-	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
-	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
-			      m.tcpInSegs_lo;
-	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
-			       m.tcpOutSegs_lo;
-	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
-				  m.tcpRetransSeg_lo;
-	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
-			      m.tcpInErrs_lo;
-	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
-	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
-	return 0;
+	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
+	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
+	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
+	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
+	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
+	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
+	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
+	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
+	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
+	stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
+	stats->value[IPREASMREQDS] = m.ipReasmReqds;
+	stats->value[IPREASMOKS] = m.ipReasmOKs;
+	stats->value[IPREASMFAILS] = m.ipReasmFails;
+	stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
+	stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
+	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
+	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
+	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
+	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
+	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
+	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
+	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
+	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
+	stats->value[TCPRTOMIN] = m.tcpRtoMin;
+	stats->value[TCPRTOMAX] = m.tcpRtoMax;
+
+	return stats->num_counters;
 }
 
 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -1373,7 +1433,8 @@
 	dev->ibdev.req_notify_cq = iwch_arm_cq;
 	dev->ibdev.post_send = iwch_post_send;
 	dev->ibdev.post_recv = iwch_post_receive;
-	dev->ibdev.get_protocol_stats = iwch_get_mib;
+	dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
+	dev->ibdev.get_hw_stats = iwch_get_mib;
 	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = iwch_port_immutable;
 
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7574f394..dd8a86b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -446,20 +446,59 @@
 		       c4iw_dev->rdev.lldi.pdev->device);
 }
 
+enum counters {
+	IP4INSEGS,
+	IP4OUTSEGS,
+	IP4RETRANSSEGS,
+	IP4OUTRSTS,
+	IP6INSEGS,
+	IP6OUTSEGS,
+	IP6RETRANSSEGS,
+	IP6OUTRSTS,
+	NR_COUNTERS
+};
+
+static const char * const names[] = {
+	[IP4INSEGS] = "ip4InSegs",
+	[IP4OUTSEGS] = "ip4OutSegs",
+	[IP4RETRANSSEGS] = "ip4RetransSegs",
+	[IP4OUTRSTS] = "ip4OutRsts",
+	[IP6INSEGS] = "ip6InSegs",
+	[IP6OUTSEGS] = "ip6OutSegs",
+	[IP6RETRANSSEGS] = "ip6RetransSegs",
+	[IP6OUTRSTS] = "ip6OutRsts"
+};
+
+static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
+					      u8 port_num)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+	if (port_num != 0)
+		return NULL;
+
+	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
 static int c4iw_get_mib(struct ib_device *ibdev,
-			union rdma_protocol_stats *stats)
+			struct rdma_hw_stats *stats,
+			u8 port, int index)
 {
 	struct tp_tcp_stats v4, v6;
 	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
 
 	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
-	memset(stats, 0, sizeof *stats);
-	stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
-	stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
-	stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
-	stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
+	stats->value[IP4INSEGS] = v4.tcp_in_segs;
+	stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
+	stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
+	stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
+	stats->value[IP6INSEGS] = v6.tcp_in_segs;
+	stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
+	stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
+	stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
 
-	return 0;
+	return stats->num_counters;
 }
 
 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -562,7 +601,8 @@
 	dev->ibdev.req_notify_cq = c4iw_arm_cq;
 	dev->ibdev.post_send = c4iw_post_send;
 	dev->ibdev.post_recv = c4iw_post_receive;
-	dev->ibdev.get_protocol_stats = c4iw_get_mib;
+	dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
+	dev->ibdev.get_hw_stats = c4iw_get_mib;
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
 	dev->ibdev.drain_sq = c4iw_drain_sq;
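
Both Chelsio conversions above follow the same shape: the driver stops filling the fixed union rdma_protocol_stats and instead publishes a named counter table through the new per-device hooks. Below is a minimal sketch of that shape, not part of the patch: the foo_* driver, its counter set and foo_read_counter() are invented for illustration, while rdma_alloc_hw_stats_struct(), struct rdma_hw_stats and the alloc_hw_stats/get_hw_stats hooks are the ones used in the changes above.

#include <rdma/ib_verbs.h>

enum { FOO_RX_PKTS, FOO_TX_PKTS, FOO_NR_COUNTERS };

static const char * const foo_counter_names[] = {
	[FOO_RX_PKTS] = "rx_pkts",
	[FOO_TX_PKTS] = "tx_pkts",
};

/* stand-in for the device-specific register or firmware read */
static u64 foo_read_counter(struct ib_device *ibdev, int idx)
{
	return 0;
}

static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *ibdev,
						u8 port_num)
{
	/* port_num == 0 asks for device-level stats, as in the cxgb drivers */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(foo_counter_names, FOO_NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int foo_get_hw_stats(struct ib_device *ibdev,
			    struct rdma_hw_stats *stats, u8 port, int index)
{
	stats->value[FOO_RX_PKTS] = foo_read_counter(ibdev, FOO_RX_PKTS);
	stats->value[FOO_TX_PKTS] = foo_read_counter(ibdev, FOO_TX_PKTS);

	/* on success, return how many counters were filled in */
	return stats->num_counters;
}

static void foo_set_stats_ops(struct ib_device *ibdev)
{
	/* set before ib_register_device(); the core builds the sysfs
	 * counter group from the names table on its own */
	ibdev->alloc_hw_stats = foo_alloc_hw_stats;
	ibdev->get_hw_stats = foo_get_hw_stats;
}

The lifespan argument only throttles how often the core re-reads the hardware; per-port counters come from returning a table for port_num != 0 instead of NULL.
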
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
similarity index 100%
rename from drivers/staging/rdma/hfi1/Kconfig
rename to drivers/infiniband/hw/hfi1/Kconfig
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
similarity index 88%
rename from drivers/staging/rdma/hfi1/Makefile
rename to drivers/infiniband/hw/hfi1/Makefile
index 8dc5938..9b5382c 100644
--- a/drivers/staging/rdma/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -7,7 +7,7 @@
 #
 obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
 
-hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \
+hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
 	eprom.o file_ops.o firmware.o \
 	init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
 	qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
similarity index 92%
rename from drivers/staging/rdma/hfi1/affinity.c
rename to drivers/infiniband/hw/hfi1/affinity.c
index 6e7050a..14d7eeb 100644
--- a/drivers/staging/rdma/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -300,16 +300,15 @@
 	const struct cpumask *node_mask,
 		*proc_mask = tsk_cpus_allowed(current);
 	struct cpu_mask_set *set = &dd->affinity->proc;
-	char buf[1024];
 
 	/*
 	 * check whether process/context affinity has already
 	 * been set
 	 */
 	if (cpumask_weight(proc_mask) == 1) {
-		scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
-			  current->pid, current->comm, buf);
+		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+			  current->pid, current->comm,
+			  cpumask_pr_args(proc_mask));
 		/*
 		 * Mark the pre-set CPU as used. This is atomic so we don't
 		 * need the lock
@@ -318,9 +317,9 @@
 		cpumask_set_cpu(cpu, &set->used);
 		goto done;
 	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
-		scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
-			  current->pid, current->comm, buf);
+		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+			  current->pid, current->comm,
+			  cpumask_pr_args(proc_mask));
 		goto done;
 	}
 
@@ -356,8 +355,8 @@
 	cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
 				  &dd->affinity->rcv_intr.mask :
 				  &dd->affinity->rcv_intr.used));
-	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
-	hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+		  cpumask_pr_args(intrs));
 
 	/*
 	 * If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@
 	if (node == -1)
 		node = dd->node;
 	node_mask = cpumask_of_node(node);
-	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
-	hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+	hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+		  cpumask_pr_args(node_mask));
 
 	/* diff will hold all unused cpus */
 	cpumask_andnot(diff, &set->mask, &set->used);
-	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
-	hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+	hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
 
 	/* get cpumask of available CPUs on preferred NUMA */
 	cpumask_and(mask, diff, node_mask);
-	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-	hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+	hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
 
 	/*
 	 * At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@
 		cpumask_andnot(diff, &set->mask, &set->used);
 		cpumask_andnot(mask, diff, node_mask);
 	}
-	scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-	hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+	hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+		  cpumask_pr_args(mask));
 
 	cpu = cpumask_first(mask);
 	if (cpu >= nr_cpu_ids) /* empty */
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/affinity.h
rename to drivers/infiniband/hw/hfi1/affinity.h
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/aspm.h
rename to drivers/infiniband/hw/hfi1/aspm.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
similarity index 99%
rename from drivers/staging/rdma/hfi1/chip.c
rename to drivers/infiniband/hw/hfi1/chip.c
index dcae8e7..81619fb 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1037,6 +1037,7 @@
 static void dc_start(struct hfi1_devdata *);
 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
 			   unsigned int *np);
+static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
 
 /*
  * Error interrupt table entry.  This is used as input to the interrupt
@@ -6105,7 +6106,7 @@
 	}
 
 	/* this access is valid only when the link is up */
-	if ((ppd->host_link_state & HLS_UP) == 0) {
+	if (ppd->host_link_state & HLS_DOWN) {
 		dd_dev_info(dd, "%s: link state %s not up\n",
 			    __func__, link_state_name(ppd->host_link_state));
 		ret = -EBUSY;
@@ -6961,6 +6962,8 @@
 	}
 
 	reset_neighbor_info(ppd);
+	if (ppd->mgmt_allowed)
+		remove_full_mgmt_pkey(ppd);
 
 	/* disable the port */
 	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7069,6 +7072,12 @@
 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
 }
 
+static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+	ppd->pkeys[2] = 0;
+	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+}
+
 /*
  * Convert the given link width to the OPA link width bitmask.
  */
@@ -7429,7 +7438,7 @@
 retry:
 	mutex_lock(&ppd->hls_lock);
 	/* only apply if the link is up */
-	if (!(ppd->host_link_state & HLS_UP)) {
+	if (ppd->host_link_state & HLS_DOWN) {
 		/* still going up..wait and retry */
 		if (ppd->host_link_state & HLS_GOING_UP) {
 			if (++tries < 1000) {
@@ -7823,8 +7832,8 @@
 			 * save first 2 flits in the packet that caused
 			 * the error
 			 */
-			 dd->err_info_rcvport.packet_flit1 = hdr0;
-			 dd->err_info_rcvport.packet_flit2 = hdr1;
+			dd->err_info_rcvport.packet_flit1 = hdr0;
+			dd->err_info_rcvport.packet_flit2 = hdr1;
 		}
 		switch (info) {
 		case 1:
@@ -9212,9 +9221,6 @@
 
 	/* Reset the QSFP */
 	mask = (u64)QSFP_HFI0_RESET_N;
-	qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
-	qsfp_mask |= mask;
-	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
 
 	qsfp_mask = read_csr(dd,
 			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
@@ -9252,6 +9258,12 @@
 		dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
 			    __func__);
 
+	/*
+	 * The remaining alarms/warnings don't matter if the link is down.
+	 */
+	if (ppd->host_link_state & HLS_DOWN)
+		return 0;
+
 	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
 	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
 		dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
@@ -9346,9 +9358,8 @@
 		return;
 
 	/*
-	 * Turn DC back on after cables has been
-	 * re-inserted. Up until now, the DC has been in
-	 * reset to save power.
+	 * Turn DC back on after cable has been re-inserted. Up until
+	 * now, the DC has been in reset to save power.
 	 */
 	dc_start(dd);
 
@@ -9480,7 +9491,15 @@
 			return ret;
 	}
 
-	/* tune the SERDES to a ballpark setting for
+	get_port_type(ppd);
+	if (ppd->port_type == PORT_TYPE_QSFP) {
+		set_qsfp_int_n(ppd, 0);
+		wait_for_qsfp_init(ppd);
+		set_qsfp_int_n(ppd, 1);
+	}
+
+	/*
+	 * Tune the SerDes to a ballpark setting for
 	 * optimal signal and bit error rate
 	 * Needs to be done before starting the link
 	 */
@@ -10074,7 +10093,7 @@
  */
 u32 driver_logical_state(struct hfi1_pportdata *ppd)
 {
-	if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
+	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
 		return IB_PORT_DOWN;
 
 	switch (ppd->host_link_state & HLS_UP) {
@@ -11887,7 +11906,7 @@
 		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
 	}
 
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
 }
 
 #define C_MAX_NAME 13 /* 12 chars + one for /0 */
@@ -14578,7 +14597,7 @@
 		   (reason), (ret))
 
 /*
- * Initialize the Avago Thermal sensor.
+ * Initialize the thermal sensor.
  *
  * After initialization, enable polling of thermal sensor through
  * SBus interface. In order for this to work, the SBus Master
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
similarity index 99%
rename from drivers/staging/rdma/hfi1/chip.h
rename to drivers/infiniband/hw/hfi1/chip.h
index 1948706..66a3279 100644
--- a/drivers/staging/rdma/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -398,6 +398,12 @@
 /* Lane ID for general configuration registers */
 #define GENERAL_CONFIG 4
 
+/* LINK_TUNING_PARAMETERS fields */
+#define TUNING_METHOD_SHIFT 24
+
+/* LINK_OPTIMIZATION_SETTINGS fields */
+#define ENABLE_EXT_DEV_CONFIG_SHIFT 24
+
 /* LOAD_DATA 8051 command shifts and fields */
 #define LOAD_DATA_FIELD_ID_SHIFT 40
 #define LOAD_DATA_FIELD_ID_MASK 0xfull
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/chip_registers.h
rename to drivers/infiniband/hw/hfi1/chip_registers.h
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
similarity index 98%
rename from drivers/staging/rdma/hfi1/common.h
rename to drivers/infiniband/hw/hfi1/common.h
index e9b6bb3..fcc9c21 100644
--- a/drivers/staging/rdma/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -178,7 +178,8 @@
 		     HFI1_CAP_PKEY_CHECK |		\
 		     HFI1_CAP_NO_INTEGRITY)
 
-#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR)
+#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
+			     HFI1_USER_SWMINOR)
 
 #ifndef HFI1_KERN_TYPE
 #define HFI1_KERN_TYPE 0
@@ -349,6 +350,8 @@
 #define HFI1_BECN_MASK 1
 #define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
 
+#define HFI1_PSM_IOC_BASE_SEQ 0x0
+
 static inline __u64 rhf_to_cpu(const __le32 *rbuf)
 {
 	return __le64_to_cpu(*((__le64 *)rbuf));
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/debugfs.c
rename to drivers/infiniband/hw/hfi1/debugfs.c
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/debugfs.h
rename to drivers/infiniband/hw/hfi1/debugfs.h
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
similarity index 94%
rename from drivers/staging/rdma/hfi1/device.c
rename to drivers/infiniband/hw/hfi1/device.c
index c05c39d..bf64b5a 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -60,7 +60,8 @@
 int hfi1_cdev_init(int minor, const char *name,
 		   const struct file_operations *fops,
 		   struct cdev *cdev, struct device **devp,
-		   bool user_accessible)
+		   bool user_accessible,
+		   struct kobject *parent)
 {
 	const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
 	struct device *device = NULL;
@@ -68,6 +69,7 @@
 
 	cdev_init(cdev, fops);
 	cdev->owner = THIS_MODULE;
+	cdev->kobj.parent = parent;
 	kobject_set_name(&cdev->kobj, name);
 
 	ret = cdev_add(cdev, dev, 1);
@@ -82,13 +84,13 @@
 	else
 		device = device_create(class, NULL, dev, NULL, "%s", name);
 
-	if (!IS_ERR(device))
-		goto done;
-	ret = PTR_ERR(device);
-	device = NULL;
-	pr_err("Could not create device for minor %d, %s (err %d)\n",
-	       minor, name, -ret);
-	cdev_del(cdev);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		device = NULL;
+		pr_err("Could not create device for minor %d, %s (err %d)\n",
+			minor, name, -ret);
+		cdev_del(cdev);
+	}
 done:
 	*devp = device;
 	return ret;
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h
similarity index 97%
rename from drivers/staging/rdma/hfi1/device.h
rename to drivers/infiniband/hw/hfi1/device.h
index 5bb3e83..c3ec19c 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/infiniband/hw/hfi1/device.h
@@ -50,7 +50,8 @@
 int hfi1_cdev_init(int minor, const char *name,
 		   const struct file_operations *fops,
 		   struct cdev *cdev, struct device **devp,
-		   bool user_accessible);
+		   bool user_accessible,
+		   struct kobject *parent);
 void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
 const char *class_name(void);
 int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/dma.c
rename to drivers/infiniband/hw/hfi1/dma.c
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
similarity index 99%
rename from drivers/staging/rdma/hfi1/driver.c
rename to drivers/infiniband/hw/hfi1/driver.c
index 700c6fa..c75b0ae 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1161,7 +1161,7 @@
 	ppd->lmc = lmc;
 	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
 
-	dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);
+	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
 
 	return 0;
 }
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/efivar.c
rename to drivers/infiniband/hw/hfi1/efivar.c
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/efivar.h
rename to drivers/infiniband/hw/hfi1/efivar.h
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/eprom.c
similarity index 60%
copy from drivers/staging/rdma/hfi1/device.h
copy to drivers/infiniband/hw/hfi1/eprom.c
index 5bb3e83..36b7794 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -1,5 +1,3 @@
-#ifndef _HFI1_DEVICE_H
-#define _HFI1_DEVICE_H
 /*
  * Copyright(c) 2015, 2016 Intel Corporation.
  *
@@ -46,14 +44,59 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
+#include <linux/delay.h>
+#include "hfi.h"
+#include "common.h"
+#include "eprom.h"
 
-int hfi1_cdev_init(int minor, const char *name,
-		   const struct file_operations *fops,
-		   struct cdev *cdev, struct device **devp,
-		   bool user_accessible);
-void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
-const char *class_name(void);
-int __init dev_init(void);
-void dev_cleanup(void);
+#define CMD_SHIFT 24
+#define CMD_RELEASE_POWERDOWN_NOID  ((0xab << CMD_SHIFT))
 
-#endif                          /* _HFI1_DEVICE_H */
+/* controller interface speeds */
+#define EP_SPEED_FULL 0x2	/* full speed */
+
+/*
+ * How long to wait for the EPROM to become available, in ms.
+ * The spec 32 Mb EPROM takes around 40s to erase then write.
+ * Double it for safety.
+ */
+#define EPROM_TIMEOUT 80000 /* ms */
+/*
+ * Initialize the EPROM handler.
+ */
+int eprom_init(struct hfi1_devdata *dd)
+{
+	int ret = 0;
+
+	/* only the discrete chip has an EPROM */
+	if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
+		return 0;
+
+	/*
+	 * It is OK if both HFIs reset the EPROM as long as they don't
+	 * do it at the same time.
+	 */
+	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
+	if (ret) {
+		dd_dev_err(dd,
+			   "%s: unable to acquire EPROM resource, no EPROM support\n",
+			   __func__);
+		goto done_asic;
+	}
+
+	/* reset EPROM to be sure it is in a good state */
+
+	/* set reset */
+	write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
+	/* clear reset, set speed */
+	write_csr(dd, ASIC_EEP_CTL_STAT,
+		  EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
+
+	/* wake the device with command "release powerdown NoID" */
+	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
+
+	dd->eprom_available = true;
+	release_chip_resource(dd, CR_EPROM);
+done_asic:
+	return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/eprom.h
rename to drivers/infiniband/hw/hfi1/eprom.h
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
similarity index 78%
rename from drivers/staging/rdma/hfi1/file_ops.c
rename to drivers/infiniband/hw/hfi1/file_ops.c
index c1c5bf8..7a5b0e6 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -72,8 +72,6 @@
  */
 static int hfi1_file_open(struct inode *, struct file *);
 static int hfi1_file_close(struct inode *, struct file *);
-static ssize_t hfi1_file_write(struct file *, const char __user *,
-			       size_t, loff_t *);
 static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
 static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
 static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
@@ -86,8 +84,7 @@
 static int get_base_info(struct file *, void __user *, __u32);
 static int setup_ctxt(struct file *);
 static int setup_subctxt(struct hfi1_ctxtdata *);
-static int get_user_context(struct file *, struct hfi1_user_info *,
-			    int, unsigned);
+static int get_user_context(struct file *, struct hfi1_user_info *, int);
 static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
 static int allocate_ctxt(struct file *, struct hfi1_devdata *,
 			 struct hfi1_user_info *);
@@ -97,13 +94,15 @@
 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
 static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+			    unsigned long arg);
 
 static const struct file_operations hfi1_file_ops = {
 	.owner = THIS_MODULE,
-	.write = hfi1_file_write,
 	.write_iter = hfi1_write_iter,
 	.open = hfi1_file_open,
 	.release = hfi1_file_close,
+	.unlocked_ioctl = hfi1_file_ioctl,
 	.poll = hfi1_poll,
 	.mmap = hfi1_file_mmap,
 	.llseek = noop_llseek,
@@ -169,6 +168,13 @@
 
 static int hfi1_file_open(struct inode *inode, struct file *fp)
 {
+	struct hfi1_devdata *dd = container_of(inode->i_cdev,
+					       struct hfi1_devdata,
+					       user_cdev);
+
+	/* Just take a ref now. Not all opens result in a context assign */
+	kobject_get(&dd->kobj);
+
 	/* The real work is performed later in assign_ctxt() */
 	fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
 	if (fp->private_data) /* no cpu affinity by default */
@@ -176,127 +182,59 @@
 	return fp->private_data ? 0 : -ENOMEM;
 }
 
-static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
-			       size_t count, loff_t *offset)
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+			    unsigned long arg)
 {
-	const struct hfi1_cmd __user *ucmd;
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct hfi1_cmd cmd;
 	struct hfi1_user_info uinfo;
 	struct hfi1_tid_info tinfo;
+	int ret = 0;
 	unsigned long addr;
-	ssize_t consumed = 0, copy = 0, ret = 0;
-	void *dest = NULL;
-	__u64 user_val = 0;
-	int uctxt_required = 1;
-	int must_be_root = 0;
+	int uval = 0;
+	unsigned long ul_uval = 0;
+	u16 uval16 = 0;
 
-	/* FIXME: This interface cannot continue out of staging */
-	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
-		return -EACCES;
+	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
+	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
+	    cmd != HFI1_IOCTL_GET_VERS &&
+	    !uctxt)
+		return -EINVAL;
 
-	if (count < sizeof(cmd)) {
-		ret = -EINVAL;
-		goto bail;
-	}
+	switch (cmd) {
+	case HFI1_IOCTL_ASSIGN_CTXT:
+		if (copy_from_user(&uinfo,
+				   (struct hfi1_user_info __user *)arg,
+				   sizeof(uinfo)))
+			return -EFAULT;
 
-	ucmd = (const struct hfi1_cmd __user *)data;
-	if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	consumed = sizeof(cmd);
-
-	switch (cmd.type) {
-	case HFI1_CMD_ASSIGN_CTXT:
-		uctxt_required = 0;	/* assigned user context not required */
-		copy = sizeof(uinfo);
-		dest = &uinfo;
-		break;
-	case HFI1_CMD_SDMA_STATUS_UPD:
-	case HFI1_CMD_CREDIT_UPD:
-		copy = 0;
-		break;
-	case HFI1_CMD_TID_UPDATE:
-	case HFI1_CMD_TID_FREE:
-	case HFI1_CMD_TID_INVAL_READ:
-		copy = sizeof(tinfo);
-		dest = &tinfo;
-		break;
-	case HFI1_CMD_USER_INFO:
-	case HFI1_CMD_RECV_CTRL:
-	case HFI1_CMD_POLL_TYPE:
-	case HFI1_CMD_ACK_EVENT:
-	case HFI1_CMD_CTXT_INFO:
-	case HFI1_CMD_SET_PKEY:
-	case HFI1_CMD_CTXT_RESET:
-		copy = 0;
-		user_val = cmd.addr;
-		break;
-	case HFI1_CMD_EP_INFO:
-	case HFI1_CMD_EP_ERASE_CHIP:
-	case HFI1_CMD_EP_ERASE_RANGE:
-	case HFI1_CMD_EP_READ_RANGE:
-	case HFI1_CMD_EP_WRITE_RANGE:
-		uctxt_required = 0;	/* assigned user context not required */
-		must_be_root = 1;	/* validate user */
-		copy = 0;
-		break;
-	default:
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* If the command comes with user data, copy it. */
-	if (copy) {
-		if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
-			ret = -EFAULT;
-			goto bail;
-		}
-		consumed += copy;
-	}
-
-	/*
-	 * Make sure there is a uctxt when needed.
-	 */
-	if (uctxt_required && !uctxt) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* only root can do these operations */
-	if (must_be_root && !capable(CAP_SYS_ADMIN)) {
-		ret = -EPERM;
-		goto bail;
-	}
-
-	switch (cmd.type) {
-	case HFI1_CMD_ASSIGN_CTXT:
 		ret = assign_ctxt(fp, &uinfo);
 		if (ret < 0)
-			goto bail;
-		ret = setup_ctxt(fp);
+			return ret;
+		ret = setup_ctxt(fp);
 		if (ret)
-			goto bail;
+			return ret;
 		ret = user_init(fp);
 		break;
-	case HFI1_CMD_CTXT_INFO:
-		ret = get_ctxt_info(fp, (void __user *)(unsigned long)
-				    user_val, cmd.len);
+	case HFI1_IOCTL_CTXT_INFO:
+		ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
+				    sizeof(struct hfi1_ctxt_info));
 		break;
-	case HFI1_CMD_USER_INFO:
-		ret = get_base_info(fp, (void __user *)(unsigned long)
-				    user_val, cmd.len);
+	case HFI1_IOCTL_USER_INFO:
+		ret = get_base_info(fp, (void __user *)(unsigned long)arg,
+				    sizeof(struct hfi1_base_info));
 		break;
-	case HFI1_CMD_SDMA_STATUS_UPD:
-		break;
-	case HFI1_CMD_CREDIT_UPD:
+	case HFI1_IOCTL_CREDIT_UPD:
 		if (uctxt && uctxt->sc)
 			sc_return_credits(uctxt->sc);
 		break;
-	case HFI1_CMD_TID_UPDATE:
+
+	case HFI1_IOCTL_TID_UPDATE:
+		if (copy_from_user(&tinfo,
+				   (struct hfi1_tid_info __user *)arg,
+				   sizeof(tinfo)))
+			return -EFAULT;
+
 		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
 		if (!ret) {
 			/*
@@ -305,57 +243,82 @@
 			 * These fields are adjacent in the structure so
 			 * we can copy them at the same time.
 			 */
-			addr = (unsigned long)cmd.addr +
-				offsetof(struct hfi1_tid_info, tidcnt);
+			addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
 			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
 					 sizeof(tinfo.tidcnt) +
 					 sizeof(tinfo.length)))
 				ret = -EFAULT;
 		}
 		break;
-	case HFI1_CMD_TID_INVAL_READ:
-		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
-		if (ret)
-			break;
-		addr = (unsigned long)cmd.addr +
-			offsetof(struct hfi1_tid_info, tidcnt);
-		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-				 sizeof(tinfo.tidcnt)))
-			ret = -EFAULT;
-		break;
-	case HFI1_CMD_TID_FREE:
+
+	case HFI1_IOCTL_TID_FREE:
+		if (copy_from_user(&tinfo,
+				   (struct hfi1_tid_info __user *)arg,
+				   sizeof(tinfo)))
+			return -EFAULT;
+
 		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
 		if (ret)
 			break;
-		addr = (unsigned long)cmd.addr +
-			offsetof(struct hfi1_tid_info, tidcnt);
+		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
 		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
 				 sizeof(tinfo.tidcnt)))
 			ret = -EFAULT;
 		break;
-	case HFI1_CMD_RECV_CTRL:
-		ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
+
+	case HFI1_IOCTL_TID_INVAL_READ:
+		if (copy_from_user(&tinfo,
+				   (struct hfi1_tid_info __user *)arg,
+				   sizeof(tinfo)))
+			return -EFAULT;
+
+		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
+		if (ret)
+			break;
+		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+				 sizeof(tinfo.tidcnt)))
+			ret = -EFAULT;
 		break;
-	case HFI1_CMD_POLL_TYPE:
-		uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
+
+	case HFI1_IOCTL_RECV_CTRL:
+		ret = get_user(uval, (int __user *)arg);
+		if (ret != 0)
+			return -EFAULT;
+		ret = manage_rcvq(uctxt, fd->subctxt, uval);
 		break;
-	case HFI1_CMD_ACK_EVENT:
-		ret = user_event_ack(uctxt, fd->subctxt, user_val);
+
+	case HFI1_IOCTL_POLL_TYPE:
+		ret = get_user(uval, (int __user *)arg);
+		if (ret != 0)
+			return -EFAULT;
+		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
 		break;
-	case HFI1_CMD_SET_PKEY:
+
+	case HFI1_IOCTL_ACK_EVENT:
+		ret = get_user(ul_uval, (unsigned long __user *)arg);
+		if (ret != 0)
+			return -EFAULT;
+		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
+		break;
+
+	case HFI1_IOCTL_SET_PKEY:
+		ret = get_user(uval16, (u16 __user *)arg);
+		if (ret != 0)
+			return -EFAULT;
 		if (HFI1_CAP_IS_USET(PKEY_CHECK))
-			ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
+			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
 		else
-			ret = -EPERM;
+			return -EPERM;
 		break;
-	case HFI1_CMD_CTXT_RESET: {
+
+	case HFI1_IOCTL_CTXT_RESET: {
 		struct send_context *sc;
 		struct hfi1_devdata *dd;
 
-		if (!uctxt || !uctxt->dd || !uctxt->sc) {
-			ret = -EINVAL;
-			break;
-		}
+		if (!uctxt || !uctxt->dd || !uctxt->sc)
+			return -EINVAL;
+
 		/*
 		 * There is no protection here. User level has to
 		 * guarantee that no one will be writing to the send
@@ -373,10 +336,9 @@
 		wait_event_interruptible_timeout(
 			sc->halt_wait, (sc->flags & SCF_HALTED),
 			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
-		if (!(sc->flags & SCF_HALTED)) {
-			ret = -ENOLCK;
-			break;
-		}
+		if (!(sc->flags & SCF_HALTED))
+			return -ENOLCK;
+
 		/*
 		 * If the send context was halted due to a Freeze,
 		 * wait until the device has been "unfrozen" before
@@ -387,18 +349,16 @@
 				dd->event_queue,
 				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
 				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
-			if (dd->flags & HFI1_FROZEN) {
-				ret = -ENOLCK;
-				break;
-			}
-			if (dd->flags & HFI1_FORCED_FREEZE) {
+			if (dd->flags & HFI1_FROZEN)
+				return -ENOLCK;
+
+			if (dd->flags & HFI1_FORCED_FREEZE)
 				/*
 				 * Don't allow context reset if we are into
 				 * forced freeze
 				 */
-				ret = -ENODEV;
-				break;
-			}
+				return -ENODEV;
+
 			sc_disable(sc);
 			ret = sc_enable(sc);
 			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
@@ -410,18 +370,17 @@
 			sc_return_credits(sc);
 		break;
 	}
-	case HFI1_CMD_EP_INFO:
-	case HFI1_CMD_EP_ERASE_CHIP:
-	case HFI1_CMD_EP_ERASE_RANGE:
-	case HFI1_CMD_EP_READ_RANGE:
-	case HFI1_CMD_EP_WRITE_RANGE:
-		ret = handle_eprom_command(fp, &cmd);
+
+	case HFI1_IOCTL_GET_VERS:
+		uval = HFI1_USER_SWVERSION;
+		if (put_user(uval, (int __user *)arg))
+			return -EFAULT;
 		break;
+
+	default:
+		return -EINVAL;
 	}
 
-	if (ret >= 0)
-		ret = consumed;
-bail:
 	return ret;
 }
 
@@ -738,7 +697,9 @@
 {
 	struct hfi1_filedata *fdata = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
-	struct hfi1_devdata *dd;
+	struct hfi1_devdata *dd = container_of(inode->i_cdev,
+					       struct hfi1_devdata,
+					       user_cdev);
 	unsigned long flags, *ev;
 
 	fp->private_data = NULL;
@@ -747,7 +708,6 @@
 		goto done;
 
 	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
-	dd = uctxt->dd;
 	mutex_lock(&hfi1_mutex);
 
 	flush_wc();
@@ -813,6 +773,7 @@
 	mutex_unlock(&hfi1_mutex);
 	hfi1_free_ctxtdata(dd, uctxt);
 done:
+	kobject_put(&dd->kobj);
 	kfree(fdata);
 	return 0;
 }
@@ -836,7 +797,7 @@
 static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
 {
 	int i_minor, ret = 0;
-	unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
+	unsigned int swmajor, swminor;
 
 	swmajor = uinfo->userversion >> 16;
 	if (swmajor != HFI1_USER_SWMAJOR) {
@@ -846,9 +807,6 @@
 
 	swminor = uinfo->userversion & 0xffff;
 
-	if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
-		alg = uinfo->hfi1_alg;
-
 	mutex_lock(&hfi1_mutex);
 	/* First, lets check if we need to setup a shared context? */
 	if (uinfo->subctxt_cnt) {
@@ -868,7 +826,7 @@
 	 */
 	if (!ret) {
 		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
-		ret = get_user_context(fp, uinfo, i_minor - 1, alg);
+		ret = get_user_context(fp, uinfo, i_minor);
 	}
 done_unlock:
 	mutex_unlock(&hfi1_mutex);
@@ -876,71 +834,26 @@
 	return ret;
 }
 
-/* return true if the device available for general use */
-static int usable_device(struct hfi1_devdata *dd)
-{
-	struct hfi1_pportdata *ppd = dd->pport;
-
-	return driver_lstate(ppd) == IB_PORT_ACTIVE;
-}
-
 static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
-			    int devno, unsigned alg)
+			    int devno)
 {
 	struct hfi1_devdata *dd = NULL;
-	int ret = 0, devmax, npresent, nup, dev;
+	int devmax, npresent, nup;
 
 	devmax = hfi1_count_units(&npresent, &nup);
-	if (!npresent) {
-		ret = -ENXIO;
-		goto done;
-	}
-	if (!nup) {
-		ret = -ENETDOWN;
-		goto done;
-	}
-	if (devno >= 0) {
-		dd = hfi1_lookup(devno);
-		if (!dd)
-			ret = -ENODEV;
-		else if (!dd->freectxts)
-			ret = -EBUSY;
-	} else {
-		struct hfi1_devdata *pdd;
+	if (!npresent)
+		return -ENXIO;
 
-		if (alg == HFI1_ALG_ACROSS) {
-			unsigned free = 0U;
+	if (!nup)
+		return -ENETDOWN;
 
-			for (dev = 0; dev < devmax; dev++) {
-				pdd = hfi1_lookup(dev);
-				if (!pdd)
-					continue;
-				if (!usable_device(pdd))
-					continue;
-				if (pdd->freectxts &&
-				    pdd->freectxts > free) {
-					dd = pdd;
-					free = pdd->freectxts;
-				}
-			}
-		} else {
-			for (dev = 0; dev < devmax; dev++) {
-				pdd = hfi1_lookup(dev);
-				if (!pdd)
-					continue;
-				if (!usable_device(pdd))
-					continue;
-				if (pdd->freectxts) {
-					dd = pdd;
-					break;
-				}
-			}
-		}
-		if (!dd)
-			ret = -EBUSY;
-	}
-done:
-	return ret ? ret : allocate_ctxt(fp, dd, uinfo);
+	dd = hfi1_lookup(devno);
+	if (!dd)
+		return -ENODEV;
+	else if (!dd->freectxts)
+		return -EBUSY;
+
+	return allocate_ctxt(fp, dd, uinfo);
 }
 
 static int find_shared_ctxt(struct file *fp,
@@ -1546,170 +1459,10 @@
 	return ret;
 }
 
-static int ui_open(struct inode *inode, struct file *filp)
-{
-	struct hfi1_devdata *dd;
-
-	dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
-	filp->private_data = dd; /* for other methods */
-	return 0;
-}
-
-static int ui_release(struct inode *inode, struct file *filp)
-{
-	/* nothing to do */
-	return 0;
-}
-
-static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
-{
-	struct hfi1_devdata *dd = filp->private_data;
-
-	return fixed_size_llseek(filp, offset, whence,
-		(dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
-		       loff_t *f_pos)
-{
-	struct hfi1_devdata *dd = filp->private_data;
-	void __iomem *base = dd->kregbase;
-	unsigned long total, csr_off,
-		barlen = (dd->kregend - dd->kregbase);
-	u64 data;
-
-	/* only read 8 byte quantities */
-	if ((count % 8) != 0)
-		return -EINVAL;
-	/* offset must be 8-byte aligned */
-	if ((*f_pos % 8) != 0)
-		return -EINVAL;
-	/* destination buffer must be 8-byte aligned */
-	if ((unsigned long)buf % 8 != 0)
-		return -EINVAL;
-	/* must be in range */
-	if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
-		return -EINVAL;
-	/* only set the base if we are not starting past the BAR */
-	if (*f_pos < barlen)
-		base += *f_pos;
-	csr_off = *f_pos;
-	for (total = 0; total < count; total += 8, csr_off += 8) {
-		/* accessing LCB CSRs requires more checks */
-		if (is_lcb_offset(csr_off)) {
-			if (read_lcb_csr(dd, csr_off, (u64 *)&data))
-				break; /* failed */
-		}
-		/*
-		 * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
-		 * false parity error.  Avoid the whole issue by not reading
-		 * them.  These registers are defined as having a read value
-		 * of 0.
-		 */
-		else if (csr_off == ASIC_GPIO_CLEAR ||
-			 csr_off == ASIC_GPIO_FORCE ||
-			 csr_off == ASIC_QSFP1_CLEAR ||
-			 csr_off == ASIC_QSFP1_FORCE ||
-			 csr_off == ASIC_QSFP2_CLEAR ||
-			 csr_off == ASIC_QSFP2_FORCE)
-			data = 0;
-		else if (csr_off >= barlen) {
-			/*
-			 * read_8051_data can read more than just 8 bytes at
-			 * a time. However, folding this into the loop and
-			 * handling the reads in 8 byte increments allows us
-			 * to smoothly transition from chip memory to 8051
-			 * memory.
-			 */
-			if (read_8051_data(dd,
-					   (u32)(csr_off - barlen),
-					   sizeof(data), &data))
-				break; /* failed */
-		} else
-			data = readq(base + total);
-		if (put_user(data, (unsigned long __user *)(buf + total)))
-			break;
-	}
-	*f_pos += total;
-	return total;
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_write(struct file *filp, const char __user *buf,
-			size_t count, loff_t *f_pos)
-{
-	struct hfi1_devdata *dd = filp->private_data;
-	void __iomem *base;
-	unsigned long total, data, csr_off;
-	int in_lcb;
-
-	/* only write 8 byte quantities */
-	if ((count % 8) != 0)
-		return -EINVAL;
-	/* offset must be 8-byte aligned */
-	if ((*f_pos % 8) != 0)
-		return -EINVAL;
-	/* source buffer must be 8-byte aligned */
-	if ((unsigned long)buf % 8 != 0)
-		return -EINVAL;
-	/* must be in range */
-	if (*f_pos + count > dd->kregend - dd->kregbase)
-		return -EINVAL;
-
-	base = (void __iomem *)dd->kregbase + *f_pos;
-	csr_off = *f_pos;
-	in_lcb = 0;
-	for (total = 0; total < count; total += 8, csr_off += 8) {
-		if (get_user(data, (unsigned long __user *)(buf + total)))
-			break;
-		/* accessing LCB CSRs requires a special procedure */
-		if (is_lcb_offset(csr_off)) {
-			if (!in_lcb) {
-				int ret = acquire_lcb_access(dd, 1);
-
-				if (ret)
-					break;
-				in_lcb = 1;
-			}
-		} else {
-			if (in_lcb) {
-				release_lcb_access(dd, 1);
-				in_lcb = 0;
-			}
-		}
-		writeq(data, base + total);
-	}
-	if (in_lcb)
-		release_lcb_access(dd, 1);
-	*f_pos += total;
-	return total;
-}
-
-static const struct file_operations ui_file_ops = {
-	.owner = THIS_MODULE,
-	.llseek = ui_lseek,
-	.read = ui_read,
-	.write = ui_write,
-	.open = ui_open,
-	.release = ui_release,
-};
-
-#define UI_OFFSET 192	/* device minor offset for UI devices */
-static int create_ui = 1;
-
-static struct cdev wildcard_cdev;
-static struct device *wildcard_device;
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
 static void user_remove(struct hfi1_devdata *dd)
 {
-	if (atomic_dec_return(&user_count) == 0)
-		hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
 
 	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
-	hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
 }
 
 static int user_add(struct hfi1_devdata *dd)
@@ -1717,34 +1470,13 @@
 	char name[10];
 	int ret;
 
-	if (atomic_inc_return(&user_count) == 1) {
-		ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
-				     &wildcard_cdev, &wildcard_device,
-				     true);
-		if (ret)
-			goto done;
-	}
-
 	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
-	ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
+	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
 			     &dd->user_cdev, &dd->user_device,
-			     true);
+			     true, &dd->kobj);
 	if (ret)
-		goto done;
+		user_remove(dd);
 
-	if (create_ui) {
-		snprintf(name, sizeof(name),
-			 "%s_ui%d", class_name(), dd->unit);
-		ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
-				     &dd->ui_cdev, &dd->ui_device,
-				     false);
-		if (ret)
-			goto done;
-	}
-
-	return 0;
-done:
-	user_remove(dd);
 	return ret;
 }
 
@@ -1753,13 +1485,7 @@
  */
 int hfi1_device_create(struct hfi1_devdata *dd)
 {
-	int r, ret;
-
-	r = user_add(dd);
-	ret = hfi1_diag_add(dd);
-	if (r && !ret)
-		ret = r;
-	return ret;
+	return user_add(dd);
 }
 
 /*
@@ -1769,5 +1495,4 @@
 void hfi1_device_remove(struct hfi1_devdata *dd)
 {
 	user_remove(dd);
-	hfi1_diag_remove(dd);
 }
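
With hfi1_file_write() gone, every user command now goes through the unlocked_ioctl handler above. A rough userspace sketch follows; the device node name and the uapi header path are assumptions, while the HFI1_IOCTL_* names, the int argument of GET_VERS and the struct hfi1_user_info argument of ASSIGN_CTXT are taken from the handler itself.

/* Illustration only; error handling kept minimal. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <rdma/hfi/hfi1_user.h>	/* assumed location of the HFI1_IOCTL_* uapi */

static int hfi1_open_and_assign(struct hfi1_user_info *uinfo)
{
	int fd, vers;

	fd = open("/dev/hfi1_0", O_RDWR);	/* per-unit node; name assumed */
	if (fd < 0)
		return -1;

	/* GET_VERS and ASSIGN_CTXT are the only commands accepted before a
	 * context is assigned, mirroring the check in hfi1_file_ioctl() */
	if (ioctl(fd, HFI1_IOCTL_GET_VERS, &vers) == 0)
		printf("user sw version 0x%x\n", vers);

	if (ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, uinfo) < 0) {
		perror("HFI1_IOCTL_ASSIGN_CTXT");
		close(fd);
		return -1;
	}

	return fd;	/* later: CTXT_INFO, USER_INFO, TID_UPDATE, ... */
}
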
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/firmware.c
rename to drivers/infiniband/hw/hfi1/firmware.c
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
similarity index 99%
rename from drivers/staging/rdma/hfi1/hfi.h
rename to drivers/infiniband/hw/hfi1/hfi.h
index 7b78d56..4417a0f 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -453,6 +453,7 @@
 #define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
 
 #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
+#define HLS_DOWN ~(HLS_UP)
 
 /* use this MTU size if none other is given */
 #define HFI1_DEFAULT_ACTIVE_MTU 10240
@@ -1168,6 +1169,7 @@
 	atomic_t aspm_disabled_cnt;
 
 	struct hfi1_affinity *affinity;
+	struct kobject kobj;
 };
 
 /* 8051 firmware version helper */
@@ -1882,9 +1884,8 @@
 		get_unit_name((dd)->unit), ##__VA_ARGS__)
 
 #define hfi1_dev_porterr(dd, port, fmt, ...) \
-	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
-			get_unit_name((dd)->unit), (dd)->unit, (port), \
-			##__VA_ARGS__)
+	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
+			get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
 
 /*
  * this is used for formatting hw error messages...
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
similarity index 98%
rename from drivers/staging/rdma/hfi1/init.c
rename to drivers/infiniband/hw/hfi1/init.c
index 502b7cf..0d28a5a 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -732,12 +732,12 @@
 		lastfail = hfi1_create_rcvhdrq(dd, rcd);
 		if (!lastfail)
 			lastfail = hfi1_setup_eagerbufs(rcd);
-		if (lastfail)
+		if (lastfail) {
 			dd_dev_err(dd,
 				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
+			ret = lastfail;
+		}
 	}
-	if (lastfail)
-		ret = lastfail;
 
 	/* Allocate enough memory for user event notification. */
 	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
@@ -989,8 +989,10 @@
 	dd->asic_data = NULL;
 }
 
-void hfi1_free_devdata(struct hfi1_devdata *dd)
+static void __hfi1_free_devdata(struct kobject *kobj)
 {
+	struct hfi1_devdata *dd =
+		container_of(kobj, struct hfi1_devdata, kobj);
 	unsigned long flags;
 
 	spin_lock_irqsave(&hfi1_devs_lock, flags);
@@ -1007,6 +1009,15 @@
 	rvt_dealloc_device(&dd->verbs_dev.rdi);
 }
 
+static struct kobj_type hfi1_devdata_type = {
+	.release = __hfi1_free_devdata,
+};
+
+void hfi1_free_devdata(struct hfi1_devdata *dd)
+{
+	kobject_put(&dd->kobj);
+}
+
 /*
  * Allocate our primary per-unit data structure.  Must be done via verbs
  * allocator, because the verbs cleanup process both does cleanup and
@@ -1102,6 +1113,7 @@
 			&pdev->dev,
 			"Could not alloc cpulist info, cpu affinity might be wrong\n");
 	}
+	kobject_init(&dd->kobj, &hfi1_devdata_type);
 	return dd;
 
 bail:
@@ -1300,7 +1312,7 @@
 
 		spin_lock(&ppd->cc_state_lock);
 		cc_state = get_cc_state(ppd);
-		rcu_assign_pointer(ppd->cc_state, NULL);
+		RCU_INIT_POINTER(ppd->cc_state, NULL);
 		spin_unlock(&ppd->cc_state_lock);
 
 		if (cc_state)
@@ -1325,7 +1337,7 @@
 		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
 				  (void *)dd->rcvhdrtail_dummy_kvaddr,
 				  dd->rcvhdrtail_dummy_physaddr);
-				  dd->rcvhdrtail_dummy_kvaddr = NULL;
+		dd->rcvhdrtail_dummy_kvaddr = NULL;
 	}
 
 	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/intr.c
rename to drivers/infiniband/hw/hfi1/intr.c
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/iowait.h
rename to drivers/infiniband/hw/hfi1/iowait.h
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
similarity index 98%
rename from drivers/staging/rdma/hfi1/mad.c
rename to drivers/infiniband/hw/hfi1/mad.c
index ed58cf2..21902957 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1403,6 +1403,12 @@
 		if (key == okey)
 			continue;
 		/*
+		 * Don't update pkeys[2] if MgmtAllowed is not set for this
+		 * HFI port and the neighbor is a switch.
+		 */
+		if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
+			continue;
+		/*
 		 * The SM gives us the complete PKey table. We have
 		 * to ensure that we put the PKeys in the matching
 		 * slots.
@@ -3363,6 +3369,50 @@
 	return reply((struct ib_mad_hdr *)smp);
 }
 
+/*
+ * Apply congestion control information stored in the ppd to the
+ * active structure.
+ */
+static void apply_cc_state(struct hfi1_pportdata *ppd)
+{
+	struct cc_state *old_cc_state, *new_cc_state;
+
+	new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
+	if (!new_cc_state)
+		return;
+
+	/*
+	 * Hold the lock for updating *and* to prevent ppd information
+	 * from changing during the update.
+	 */
+	spin_lock(&ppd->cc_state_lock);
+
+	old_cc_state = get_cc_state(ppd);
+	if (!old_cc_state) {
+		/* never active, or shutting down */
+		spin_unlock(&ppd->cc_state_lock);
+		kfree(new_cc_state);
+		return;
+	}
+
+	*new_cc_state = *old_cc_state;
+
+	new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+	memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
+	       ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
+
+	new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
+	new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
+	memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
+	       OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
+
+	rcu_assign_pointer(ppd->cc_state, new_cc_state);
+
+	spin_unlock(&ppd->cc_state_lock);
+
+	call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+}
+
 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
 				       struct ib_device *ibdev, u8 port,
 				       u32 *resp_len)
@@ -3374,6 +3424,11 @@
 	struct opa_congestion_setting_entry_shadow *entries;
 	int i;
 
+	/*
+	 * Save details from packet into the ppd.  Hold the cc_state_lock so
+	 * our information is consistent with anyone trying to apply the state.
+	 */
+	spin_lock(&ppd->cc_state_lock);
 	ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
 
 	entries = ppd->congestion_entries;
@@ -3384,6 +3439,10 @@
 			p->entries[i].trigger_threshold;
 		entries[i].ccti_min = p->entries[i].ccti_min;
 	}
+	spin_unlock(&ppd->cc_state_lock);
+
+	/* now apply the information */
+	apply_cc_state(ppd);
 
 	return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
 					   resp_len);
@@ -3526,7 +3585,6 @@
 	int i, j;
 	u32 sentry, eentry;
 	u16 ccti_limit;
-	struct cc_state *old_cc_state, *new_cc_state;
 
 	/* sanity check n_blocks, start_block */
 	if (n_blocks == 0 ||
@@ -3546,45 +3604,20 @@
 		return reply((struct ib_mad_hdr *)smp);
 	}
 
-	new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
-	if (!new_cc_state)
-		goto getit;
-
+	/*
+	 * Save details from packet into the ppd.  Hold the cc_state_lock so
+	 * our information is consistent with anyone trying to apply the state.
+	 */
 	spin_lock(&ppd->cc_state_lock);
-
-	old_cc_state = get_cc_state(ppd);
-
-	if (!old_cc_state) {
-		spin_unlock(&ppd->cc_state_lock);
-		kfree(new_cc_state);
-		return reply((struct ib_mad_hdr *)smp);
-	}
-
-	*new_cc_state = *old_cc_state;
-
-	new_cc_state->cct.ccti_limit = ccti_limit;
-
-	entries = ppd->ccti_entries;
 	ppd->total_cct_entry = ccti_limit + 1;
-
+	entries = ppd->ccti_entries;
 	for (j = 0, i = sentry; i < eentry; j++, i++)
 		entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
-
-	memcpy(new_cc_state->cct.entries, entries,
-	       eentry * sizeof(struct ib_cc_table_entry));
-
-	new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
-	new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
-	memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
-	       OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
-
-	rcu_assign_pointer(ppd->cc_state, new_cc_state);
-
 	spin_unlock(&ppd->cc_state_lock);
 
-	call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+	/* now apply the information */
+	apply_cc_state(ppd);
 
-getit:
 	return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
 }
 
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/mad.h
rename to drivers/infiniband/hw/hfi1/mad.h
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
similarity index 95%
rename from drivers/staging/rdma/hfi1/mmu_rb.c
rename to drivers/infiniband/hw/hfi1/mmu_rb.c
index 2b0e91d..b7a80aa 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -45,6 +45,7 @@
  *
  */
 #include <linux/list.h>
+#include <linux/rculist.h>
 #include <linux/mmu_notifier.h>
 #include <linux/interval_tree_generic.h>
 
@@ -97,7 +98,6 @@
 int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
 {
 	struct mmu_rb_handler *handlr;
-	unsigned long flags;
 
 	if (!ops->invalidate)
 		return -EINVAL;
@@ -111,9 +111,9 @@
 	INIT_HLIST_NODE(&handlr->mn.hlist);
 	spin_lock_init(&handlr->lock);
 	handlr->mn.ops = &mn_opts;
-	spin_lock_irqsave(&mmu_rb_lock, flags);
-	list_add_tail(&handlr->list, &mmu_rb_handlers);
-	spin_unlock_irqrestore(&mmu_rb_lock, flags);
+	spin_lock(&mmu_rb_lock);
+	list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
+	spin_unlock(&mmu_rb_lock);
 
 	return mmu_notifier_register(&handlr->mn, current->mm);
 }
@@ -130,9 +130,10 @@
 	if (current->mm)
 		mmu_notifier_unregister(&handler->mn, current->mm);
 
-	spin_lock_irqsave(&mmu_rb_lock, flags);
-	list_del(&handler->list);
-	spin_unlock_irqrestore(&mmu_rb_lock, flags);
+	spin_lock(&mmu_rb_lock);
+	list_del_rcu(&handler->list);
+	spin_unlock(&mmu_rb_lock);
+	synchronize_rcu();
 
 	spin_lock_irqsave(&handler->lock, flags);
 	if (!RB_EMPTY_ROOT(root)) {
@@ -271,16 +272,15 @@
 static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
 {
 	struct mmu_rb_handler *handler;
-	unsigned long flags;
 
-	spin_lock_irqsave(&mmu_rb_lock, flags);
-	list_for_each_entry(handler, &mmu_rb_handlers, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
 		if (handler->root == root)
 			goto unlock;
 	}
 	handler = NULL;
 unlock:
-	spin_unlock_irqrestore(&mmu_rb_lock, flags);
+	rcu_read_unlock();
 	return handler;
 }
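
The mmu_rb change above keeps the spinlock for writers but lets find_mmu_handler() run under RCU. The same pattern, boiled down to a self-contained sketch (the names here are illustrative, not the driver's): additions and removals use the *_rcu list helpers under the lock, synchronize_rcu() runs before an entry may be freed so a concurrent reader never follows the list into freed memory, and lookups use list_for_each_entry_rcu() inside rcu_read_lock().

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct handler {
	struct list_head list;
	struct rb_root *root;
};

static LIST_HEAD(handler_list);
static DEFINE_SPINLOCK(handler_lock);	/* serializes writers only */

static void handler_add(struct handler *h)
{
	spin_lock(&handler_lock);
	list_add_tail_rcu(&h->list, &handler_list);
	spin_unlock(&handler_lock);
}

static void handler_remove(struct handler *h)
{
	spin_lock(&handler_lock);
	list_del_rcu(&h->list);
	spin_unlock(&handler_lock);
	synchronize_rcu();	/* wait out readers before the caller frees h */
}

static struct handler *handler_find(struct rb_root *root)
{
	struct handler *h, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(h, &handler_list, list) {
		if (h->root == root) {
			found = h;
			break;
		}
	}
	rcu_read_unlock();
	/* as in the driver, the caller must ensure the handler is not
	 * removed concurrently with its use of the returned pointer */
	return found;
}
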
 
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/mmu_rb.h
rename to drivers/infiniband/hw/hfi1/mmu_rb.h
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/opa_compat.h
rename to drivers/infiniband/hw/hfi1/opa_compat.h
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/pcie.c
rename to drivers/infiniband/hw/hfi1/pcie.c
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
similarity index 99%
rename from drivers/staging/rdma/hfi1/pio.c
rename to drivers/infiniband/hw/hfi1/pio.c
index c67b9ad..d5edb1a 100644
--- a/drivers/staging/rdma/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1835,8 +1835,7 @@
 	struct pio_vl_map *oldmap, *newmap;
 
 	if (!vl_scontexts) {
-		/* send context 0 reserved for VL15 */
-		for (i = 1; i < dd->num_send_contexts; i++)
+		for (i = 0; i < dd->num_send_contexts; i++)
 			if (dd->send_contexts[i].type == SC_KERNEL)
 				num_kernel_send_contexts++;
 		/* truncate divide */
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
similarity index 98%
rename from drivers/staging/rdma/hfi1/pio.h
rename to drivers/infiniband/hw/hfi1/pio.h
index 53a08ed..464cbd2 100644
--- a/drivers/staging/rdma/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -49,10 +49,10 @@
 
 /* send context types */
 #define SC_KERNEL 0
-#define SC_ACK    1
-#define SC_USER   2
-#define SC_VL15   3
-#define SC_MAX    4
+#define SC_VL15   1
+#define SC_ACK    2
+#define SC_USER   3	/* must be the last one: it may take all that is left */
+#define SC_MAX    4	/* count of send context types */
 
 /* invalid send context index */
 #define INVALID_SCI 0xff
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/pio_copy.c
rename to drivers/infiniband/hw/hfi1/pio_copy.c
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
similarity index 98%
rename from drivers/staging/rdma/hfi1/platform.c
rename to drivers/infiniband/hw/hfi1/platform.c
index 8fe8a20..03df932 100644
--- a/drivers/staging/rdma/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -87,6 +87,17 @@
 	 */
 }
 
+void get_port_type(struct hfi1_pportdata *ppd)
+{
+	int ret;
+
+	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+					PORT_TABLE_PORT_TYPE, &ppd->port_type,
+					4);
+	if (ret)
+		ppd->port_type = PORT_TYPE_UNKNOWN;
+}
+
 int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
 {
 	u8 tx_ctrl_byte = on ? 0x0 : 0xF;
@@ -529,7 +540,8 @@
 	/* Enable external device config if channel is limiting active */
 	read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
 			 GENERAL_CONFIG, &config_data);
-	config_data |= limiting_active;
+	config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
+	config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
 	ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
 			       GENERAL_CONFIG, config_data);
 	if (ret != HCMD_SUCCESS)
@@ -542,7 +554,8 @@
 	/* Pass tuning method to 8051 */
 	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
 			 &config_data);
-	config_data |= tuning_method;
+	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
+	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
 	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
 			       config_data);
 	if (ret != HCMD_SUCCESS)
@@ -564,8 +577,8 @@
 		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
 				       GENERAL_CONFIG, &config_data);
 		/* Clear, then set the external device config field */
-		config_data &= ~(0xFF << 24);
-		config_data |= (external_device_config << 24);
+		config_data &= ~(u32)0xFF;
+		config_data |= external_device_config;
 		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
 				       GENERAL_CONFIG, config_data);
 		if (ret != HCMD_SUCCESS)
@@ -784,12 +797,6 @@
 		return;
 	}
 
-	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
-					PORT_TABLE_PORT_TYPE, &ppd->port_type,
-					4);
-	if (ret)
-		ppd->port_type = PORT_TYPE_UNKNOWN;
-
 	switch (ppd->port_type) {
 	case PORT_TYPE_DISCONNECTED:
 		ppd->offline_disabled_reason =
diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
similarity index 99%
rename from drivers/staging/rdma/hfi1/platform.h
rename to drivers/infiniband/hw/hfi1/platform.h
index 19620cf..e2c2161 100644
--- a/drivers/staging/rdma/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -298,6 +298,7 @@
 /* platform.c */
 void get_platform_config(struct hfi1_devdata *dd);
 void free_platform_config(struct hfi1_devdata *dd);
+void get_port_type(struct hfi1_pportdata *ppd);
 int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
 void tune_serdes(struct hfi1_pportdata *ppd);
 
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
similarity index 98%
rename from drivers/staging/rdma/hfi1/qp.c
rename to drivers/infiniband/hw/hfi1/qp.c
index 91eb423..1a942ff 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -49,7 +49,6 @@
 #include <linux/vmalloc.h>
 #include <linux/hash.h>
 #include <linux/module.h>
-#include <linux/random.h>
 #include <linux/seq_file.h>
 #include <rdma/rdma_vt.h>
 #include <rdma/rdmavt_qp.h>
@@ -161,9 +160,6 @@
  * This function is what we would push to the core layer if we wanted to be a
  * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
  * to blindly pass the MTU enum value from the PathRecord to us.
- *
- * The actual flag used to determine "8k MTU" will change and is currently
- * unknown.
  */
 static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
@@ -516,6 +512,7 @@
 static void iowait_sdma_drained(struct iowait *wait)
 {
 	struct rvt_qp *qp = iowait_to_qp(wait);
+	unsigned long flags;
 
 	/*
 	 * This happens when the send engine notes
@@ -523,12 +520,12 @@
 	 * do the flush work until that QP's
 	 * sdma work has finished.
 	 */
-	spin_lock(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	if (qp->s_flags & RVT_S_WAIT_DMA) {
 		qp->s_flags &= ~RVT_S_WAIT_DMA;
 		hfi1_schedule_send(qp);
 	}
-	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/qp.h
rename to drivers/infiniband/hw/hfi1/qp.h
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/qsfp.c
rename to drivers/infiniband/hw/hfi1/qsfp.c
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/qsfp.h
rename to drivers/infiniband/hw/hfi1/qsfp.h
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/rc.c
rename to drivers/infiniband/hw/hfi1/rc.c
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/ruc.c
rename to drivers/infiniband/hw/hfi1/ruc.c
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
similarity index 99%
rename from drivers/staging/rdma/hfi1/sdma.c
rename to drivers/infiniband/hw/hfi1/sdma.c
index abb8ebc..f9befc0 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -134,6 +134,7 @@
 	[sdma_state_s99_running]                = "s99_Running",
 };
 
+#ifdef CONFIG_SDMA_VERBOSITY
 static const char * const sdma_event_names[] = {
 	[sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
 	[sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
@@ -150,6 +151,7 @@
 	[sdma_event_e85_link_down]    = "e85_LinkDown",
 	[sdma_event_e90_sw_halted]    = "e90_SwHalted",
 };
+#endif
 
 static const struct sdma_set_state_action sdma_action_table[] = {
 	[sdma_state_s00_hw_down] = {
@@ -376,7 +378,7 @@
 	sdma_txclean(sde->dd, tx);
 	if (complete)
 		(*complete)(tx, res);
-	if (iowait_sdma_dec(wait) && wait)
+	if (wait && iowait_sdma_dec(wait))
 		iowait_drain_wakeup(wait);
 }
 
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/sdma.h
rename to drivers/infiniband/hw/hfi1/sdma.h
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/sdma_txreq.h
rename to drivers/infiniband/hw/hfi1/sdma_txreq.h
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
similarity index 99%
rename from drivers/staging/rdma/hfi1/sysfs.c
rename to drivers/infiniband/hw/hfi1/sysfs.c
index 8cd6df8..91fc2ae 100644
--- a/drivers/staging/rdma/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -721,8 +721,8 @@
 	}
 
 	dd_dev_info(dd,
-		    "IB%u: Congestion Control Agent enabled for port %d\n",
-		    dd->unit, port_num);
+		    "Congestion Control Agent enabled for port %d\n",
+		    port_num);
 
 	return 0;
 
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
similarity index 96%
rename from drivers/staging/rdma/hfi1/trace.c
rename to drivers/infiniband/hw/hfi1/trace.c
index 8b62fef..4cfb137 100644
--- a/drivers/staging/rdma/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -66,6 +66,7 @@
 #define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
 #define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
 #define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
+#define IETH_PRN "ieth rkey 0x%.8x"
 #define ATOMICACKETH_PRN "origdata %lld"
 #define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
 
@@ -166,6 +167,12 @@
 				 be32_to_cpu(eh->ud.deth[0]),
 				 be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
 		break;
+	/* ieth */
+	case OP(RC, SEND_LAST_WITH_INVALIDATE):
+	case OP(RC, SEND_ONLY_WITH_INVALIDATE):
+		trace_seq_printf(p, IETH_PRN,
+				 be32_to_cpu(eh->ieth));
+		break;
 	}
 	trace_seq_putc(p, 0);
 	return ret;
@@ -207,19 +214,6 @@
 	return ret;
 }
 
-const char *print_u64_array(
-	struct trace_seq *p,
-	u64 *arr, int len)
-{
-	int i;
-	const char *ret = trace_seq_buffer_ptr(p);
-
-	for (i = 0; i < len; i++)
-		trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
-	trace_seq_putc(p, 0);
-	return ret;
-}
-
 __hfi1_trace_fn(PKT);
 __hfi1_trace_fn(PROC);
 __hfi1_trace_fn(SDMA);
@@ -233,3 +227,4 @@
 __hfi1_trace_fn(RCVCTRL);
 __hfi1_trace_fn(TID);
 __hfi1_trace_fn(MMU);
+__hfi1_trace_fn(IOCTL);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
similarity index 99%
rename from drivers/staging/rdma/hfi1/trace.h
rename to drivers/infiniband/hw/hfi1/trace.h
index 963dc94..28c1d08 100644
--- a/drivers/staging/rdma/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -74,8 +74,8 @@
 
 TRACE_EVENT(hfi1_rcvhdr,
 	    TP_PROTO(struct hfi1_devdata *dd,
-		     u64 eflags,
 		     u32 ctxt,
+		     u64 eflags,
 		     u32 etype,
 		     u32 hlen,
 		     u32 tlen,
@@ -392,6 +392,8 @@
 	ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE),             \
 	ib_opcode_name(RC_COMPARE_SWAP),                   \
 	ib_opcode_name(RC_FETCH_ADD),                      \
+	ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE),      \
+	ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE),      \
 	ib_opcode_name(UC_SEND_FIRST),                     \
 	ib_opcode_name(UC_SEND_MIDDLE),                    \
 	ib_opcode_name(UC_SEND_LAST),                      \
@@ -1341,6 +1343,7 @@
 __hfi1_trace_def(RCVCTRL);
 __hfi1_trace_def(TID);
 __hfi1_trace_def(MMU);
+__hfi1_trace_def(IOCTL);
 
 #define hfi1_cdbg(which, fmt, ...) \
 	__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/twsi.c
rename to drivers/infiniband/hw/hfi1/twsi.c
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/twsi.h
rename to drivers/infiniband/hw/hfi1/twsi.h
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/uc.c
rename to drivers/infiniband/hw/hfi1/uc.c
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/ud.c
rename to drivers/infiniband/hw/hfi1/ud.c
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/user_exp_rcv.c
rename to drivers/infiniband/hw/hfi1/user_exp_rcv.c
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/user_exp_rcv.h
rename to drivers/infiniband/hw/hfi1/user_exp_rcv.h
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/user_pages.c
rename to drivers/infiniband/hw/hfi1/user_pages.c
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
similarity index 98%
rename from drivers/staging/rdma/hfi1/user_sdma.c
rename to drivers/infiniband/hw/hfi1/user_sdma.c
index 0014c9c..47ffd27 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -166,6 +166,8 @@
 
 #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
 
+struct sdma_mmu_node;
+
 struct user_sdma_iovec {
 	struct list_head list;
 	struct iovec iov;
@@ -178,9 +180,10 @@
 	 * which we last left off.
 	 */
 	u64 offset;
+	struct sdma_mmu_node *node;
 };
 
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
 
 struct sdma_mmu_node {
 	struct mmu_rb_node rb;
@@ -507,6 +510,7 @@
 	struct sdma_req_info info;
 	struct user_sdma_request *req;
 	u8 opcode, sc, vl;
+	int req_queued = 0;
 
 	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
 		hfi1_cdbg(
@@ -703,6 +707,7 @@
 
 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
 	atomic_inc(&pq->n_reqs);
+	req_queued = 1;
 	/* Send the first N packets in the request to buy us some time */
 	ret = user_sdma_send_pkts(req, pcount);
 	if (unlikely(ret < 0 && ret != -EBUSY)) {
@@ -747,7 +752,8 @@
 	return 0;
 free_req:
 	user_sdma_free_request(req, true);
-	pq_update(pq);
+	if (req_queued)
+		pq_update(pq);
 	set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
 	return ret;
 }
@@ -1153,6 +1159,7 @@
 	}
 	iovec->pages = node->pages;
 	iovec->npages = npages;
+	iovec->node = node;
 
 	ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
 	if (ret) {
@@ -1348,11 +1355,11 @@
 		 */
 		SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
 			 req->tidoffset, req->tidoffset / req->omfactor,
-			 !!(req->omfactor - KDETH_OM_SMALL));
+			 req->omfactor != KDETH_OM_SMALL);
 		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
 			  req->tidoffset / req->omfactor);
 		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
-			  !!(req->omfactor - KDETH_OM_SMALL));
+			  req->omfactor != KDETH_OM_SMALL);
 	}
 done:
 	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
@@ -1519,18 +1526,13 @@
 	}
 	if (req->data_iovs) {
 		struct sdma_mmu_node *node;
-		struct mmu_rb_node *mnode;
 		int i;
 
 		for (i = 0; i < req->data_iovs; i++) {
-			mnode = hfi1_mmu_rb_search(
-				&req->pq->sdma_rb_root,
-				(unsigned long)req->iovs[i].iov.iov_base,
-				req->iovs[i].iov.iov_len);
-			if (!mnode || IS_ERR(mnode))
+			node = req->iovs[i].node;
+			if (!node)
 				continue;
 
-			node = container_of(mnode, struct sdma_mmu_node, rb);
 			if (unpin)
 				hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
 						   &node->rb);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/user_sdma.h
rename to drivers/infiniband/hw/hfi1/user_sdma.h
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
similarity index 99%
rename from drivers/staging/rdma/hfi1/verbs.c
rename to drivers/infiniband/hw/hfi1/verbs.c
index 9cdc85f..849c4b9 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -52,7 +52,6 @@
 #include <linux/utsname.h>
 #include <linux/rculist.h>
 #include <linux/mm.h>
-#include <linux/random.h>
 #include <linux/vmalloc.h>
 
 #include "hfi.h"
@@ -336,6 +335,8 @@
 	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = 12 + 8 + 4,
 	[IB_OPCODE_RC_COMPARE_SWAP]                   = 12 + 8 + 28,
 	[IB_OPCODE_RC_FETCH_ADD]                      = 12 + 8 + 28,
+	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = 12 + 8 + 4,
+	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = 12 + 8 + 4,
 	/* UC */
 	[IB_OPCODE_UC_SEND_FIRST]                     = 12 + 8,
 	[IB_OPCODE_UC_SEND_MIDDLE]                    = 12 + 8,
@@ -946,7 +947,6 @@
 
 			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
 			dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
-			dev->n_piowait++;
 			qp->s_flags |= flag;
 			was_empty = list_empty(&sc->piowait);
 			list_add_tail(&priv->s_iowait.list, &sc->piowait);
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
similarity index 99%
rename from drivers/staging/rdma/hfi1/verbs.h
rename to drivers/infiniband/hw/hfi1/verbs.h
index 3ee2239..4883567 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -152,6 +152,7 @@
 	} at;
 	__be32 imm_data;
 	__be32 aeth;
+	__be32 ieth;
 	struct ib_atomic_eth atomic_eth;
 }  __packed;
 
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
similarity index 100%
rename from drivers/staging/rdma/hfi1/verbs_txreq.c
rename to drivers/infiniband/hw/hfi1/verbs_txreq.c
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
similarity index 100%
rename from drivers/staging/rdma/hfi1/verbs_txreq.h
rename to drivers/infiniband/hw/hfi1/verbs_txreq.h
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4a740f7..02a735b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2361,58 +2361,130 @@
 	return 0;
 }
 
+static const char * const i40iw_hw_stat_names[] = {
+	/* 32-bit names */
+	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
+	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
+	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
+	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
+	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
+	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
+	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
+	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
+	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
+	/* 64-bit names */
+	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4InOctets",
+	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4InPkts",
+	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4InReasmRqd",
+	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4InMcastPkts",
+	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4OutOctets",
+	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4OutPkts",
+	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4OutSegRqd",
+	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip4OutMcastPkts",
+	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6InOctets",
+	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6InPkts",
+	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6InReasmRqd",
+	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6InMcastPkts",
+	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6OutOctets",
+	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6OutPkts",
+	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6OutSegRqd",
+	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"ip6OutMcastPkts",
+	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"tcpInSegs",
+	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
+		"tcpOutSegs",
+	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwInRdmaReads",
+	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwInRdmaSends",
+	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwInRdmaWrites",
+	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwOutRdmaReads",
+	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwOutRdmaSends",
+	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwOutRdmaWrites",
+	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwRdmaBnd",
+	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
+		"iwRdmaInv"
+};
+
 /**
- * i40iw_get_protocol_stats - Populates the rdma_stats structure
- * @ibdev: ib dev struct
- * @stats: iw protocol stats struct
+ * i40iw_alloc_hw_stats - Allocate a hw stats structure
+ * @ibdev: device pointer from stack
+ * @port_num: port number
  */
-static int i40iw_get_protocol_stats(struct ib_device *ibdev,
-				    union rdma_protocol_stats *stats)
+static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
+						  u8 port_num)
+{
+	struct i40iw_device *iwdev = to_iwdev(ibdev);
+	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
+		I40IW_HW_STAT_INDEX_MAX_64;
+	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
+
+	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
+		     (I40IW_HW_STAT_INDEX_MAX_32 +
+		      I40IW_HW_STAT_INDEX_MAX_64));
+
+	/*
+	 * PFs get the default update lifespan, but VFs only update once
+	 * per second
+	 */
+	if (!dev->is_pf)
+		lifespan = 1000;
+	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
+					  lifespan);
+}
+
+/**
+ * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
+ * @ibdev: device pointer from stack
+ * @stats: stats pointer from stack
+ * @port_num: port number
+ * @index: which hw counter the stack is requesting we update
+ */
+static int i40iw_get_hw_stats(struct ib_device *ibdev,
+			      struct rdma_hw_stats *stats,
+			      u8 port_num, int index)
 {
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
 	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
-	struct timespec curr_time;
-	static struct timespec last_rd_time = {0, 0};
 	unsigned long flags;
 
-	curr_time = current_kernel_time();
-	memset(stats, 0, sizeof(*stats));
-
 	if (dev->is_pf) {
 		spin_lock_irqsave(&devstat->stats_lock, flags);
 		devstat->ops.iw_hw_stat_read_all(devstat,
 			&devstat->hw_stats);
 		spin_unlock_irqrestore(&devstat->stats_lock, flags);
 	} else {
-		if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1)
-			if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
-				return -ENOSYS;
+		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
+			return -ENOSYS;
 	}
 
-	stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] +
-				 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS];
-	stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] +
-				      hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
-	stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
-				 hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
-	stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
-				  hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
-	stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
-				 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
-	stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
-				  hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
-	stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
-				  hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
-	stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
-				   hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
-	stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
-	stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
-	stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
+	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
 
-	last_rd_time = curr_time;
-	return 0;
+	return stats->num_counters;
 }
 
 /**
@@ -2551,7 +2623,8 @@
 	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
 	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
 	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
-	iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats;
+	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
+	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
 	iwibdev->ibdev.query_device = i40iw_query_device;
 	iwibdev->ibdev.create_ah = i40iw_create_ah;
 	iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b01ef6e..0eb09e1 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -505,9 +505,9 @@
 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
 		else
 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
-	if (dev->steering_support ==  MLX4_STEERING_MODE_DEVICE_MANAGED)
-		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 	}
+	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 
 	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dabcc65..9c0e67b 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -822,7 +822,8 @@
 	int eqn;
 	int err;
 
-	if (entries < 0)
+	if (entries < 0 ||
+	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
 		return ERR_PTR(-EINVAL);
 
 	if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@
 		return -ENOSYS;
 	}
 
-	if (entries < 1)
+	if (entries < 1 ||
+	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+			     entries,
+			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
 		return -EINVAL;
+	}
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries >  (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c72797c..b48ad85 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -524,6 +524,9 @@
 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
 
+	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
 	props->vendor_part_id	   = mdev->pdev->device;
 	props->hw_ver		   = mdev->pdev->revision;
 
@@ -915,7 +918,8 @@
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
 	resp.cache_line_size = L1_CACHE_BYTES;
 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@
 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
 		resp.response_length += sizeof(resp.cqe_version);
 
-	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+	/*
+	 * We don't want to expose information from the PCI bar that is located
+	 * after 4096 bytes, so if the arch only supports larger pages, let's
+	 * pretend we don't support reading the HCA's core clock. This is also
+	 * forced by the mmap function.
+	 */
+	if (PAGE_SIZE <= 4096 &&
+	    field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
 		resp.comp_mask |=
 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
 		resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+	return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
 		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
@@ -1866,14 +1877,11 @@
 		break;
 
 	case MLX5_DEV_EVENT_PORT_DOWN:
+	case MLX5_DEV_EVENT_PORT_INITIALIZED:
 		ibev.event = IB_EVENT_PORT_ERR;
 		port = (u8)param;
 		break;
 
-	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		/* not used by ULPs */
-		return;
-
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
 		port = (u8)param;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5041176..ce43422 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -235,6 +235,8 @@
 		qp->rq.max_gs = 0;
 		qp->rq.wqe_cnt = 0;
 		qp->rq.wqe_shift = 0;
+		cap->max_recv_wr = 0;
+		cap->max_recv_sge = 0;
 	} else {
 		if (ucmd) {
 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@
 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			 const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
-			 u32 path_flags, const struct ib_qp_attr *attr)
+			 u32 path_flags, const struct ib_qp_attr *attr,
+			 bool alt)
 {
 	enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
 	int err;
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
-		path->pkey_index = attr->pkey_index;
+		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+						     attr->pkey_index);
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@
 							  ah->grh.sgid_index);
 		path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
 	} else {
-		path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-		path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
-									0;
+		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+		path->fl_free_ar |=
+			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
 		path->rlid = cpu_to_be16(ah->dlid);
 		path->grh_mlid = ah->src_path_bits & 0x7f;
 		if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@
 	path->port = port;
 
 	if (attr_mask & IB_QP_TIMEOUT)
-		path->ackto_lt = attr->timeout << 3;
+		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
 
 	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
 		return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@
 		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
-		context->pri_path.pkey_index = attr->pkey_index;
+		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
 
 	/* todo implement counter_index functionality */
 
@@ -2277,7 +2281,7 @@
 	if (attr_mask & IB_QP_AV) {
 		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
 				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
-				    attr_mask, 0, attr);
+				    attr_mask, 0, attr, false);
 		if (err)
 			goto out;
 	}
@@ -2288,7 +2292,9 @@
 	if (attr_mask & IB_QP_ALT_PATH) {
 		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
 				    &context->alt_path,
-				    attr->alt_port_num, attr_mask, 0, attr);
+				    attr->alt_port_num,
+				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+				    0, attr, true);
 		if (err)
 			goto out;
 	}
@@ -4013,11 +4019,12 @@
 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
 		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
-		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+		qp_attr->alt_pkey_index =
+			be16_to_cpu(context->alt_path.pkey_index);
 		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
 	}
 
-	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
 	qp_attr->port_num = context->pri_path.port;
 
 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4086,19 @@
 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
 	if (!ibqp->uobject) {
-		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+		qp_attr->cap.max_send_wr  = qp->sq.max_post;
 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
+		qp_init_attr->qp_context = ibqp->qp_context;
 	} else {
 		qp_attr->cap.max_send_wr  = 0;
 		qp_attr->cap.max_send_sge = 0;
 	}
 
-	/* We don't support inline sends for kernel QPs (yet), and we
-	 * don't know what userspace's value should be.
-	 */
-	qp_attr->cap.max_inline_data = 0;
+	qp_init_attr->qp_type = ibqp->qp_type;
+	qp_init_attr->recv_cq = ibqp->recv_cq;
+	qp_init_attr->send_cq = ibqp->send_cq;
+	qp_init_attr->srq = ibqp->srq;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
 	qp_init_attr->cap	     = qp_attr->cap;
 
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 82d7c4b..ce40340 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1308,21 +1308,6 @@
 	SYM_LSB(IntMask, fldname##17IntMask)), \
 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
 
-static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
-	INTR_AUTO_P(SDmaInt),
-	INTR_AUTO_P(SDmaProgressInt),
-	INTR_AUTO_P(SDmaIdleInt),
-	INTR_AUTO_P(SDmaCleanupDone),
-	INTR_AUTO_C(RcvUrg),
-	INTR_AUTO_P(ErrInt),
-	INTR_AUTO(ErrInt),      /* non-port-specific errs */
-	INTR_AUTO(AssertGPIOInt),
-	INTR_AUTO_P(SendDoneInt),
-	INTR_AUTO(SendBufAvailInt),
-	INTR_AUTO_C(RcvAvail),
-	{ .mask = 0, .sz = 0 }
-};
-
 #define TXSYMPTOM_AUTO_P(fldname) \
 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
 	.msg = #fldname, .sz = sizeof(#fldname) }
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 0bd1837..d2ac298 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1172,11 +1172,13 @@
 	 * Set the most significant bit of CM2 to indicate support for
 	 * congestion statistics
 	 */
-	p->reserved[0] = dd->psxmitwait_supported << 7;
+	ib_set_cpi_capmask2(p,
+			    dd->psxmitwait_supported <<
+			    (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
 	/*
 	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
 	 */
-	p->resp_time_value = 18;
+	ib_set_cpi_resp_time(p, 18);
 
 	return reply((struct ib_smp *) pmp);
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 6888f03..4f87815 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -159,6 +159,7 @@
 		} at;
 		__be32 imm_data;
 		__be32 aeth;
+		__be32 ieth;
 		struct ib_atomic_eth atomic_eth;
 	} u;
 } __packed;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 7209fbc..a0b6ebe 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -36,7 +36,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
 #include <linux/iommu.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
@@ -112,10 +111,6 @@
 	int i;
 	int flags;
 	dma_addr_t pa;
-	DEFINE_DMA_ATTRS(attrs);
-
-	if (dmasync)
-		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
 	if (!can_do_mlock())
 		return -EPERM;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index b1ffc8b..6ca6fa8 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -525,6 +525,7 @@
 		return PTR_ERR(task);
 	}
 
+	set_user_nice(task, MIN_NICE);
 	cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
 	kthread_bind(task, cpu);
 	wake_up_process(task);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 0ff765b..0f4d450 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -124,11 +124,13 @@
 			    int count)
 {
 	int m, i = 0;
+	struct rvt_dev_info *dev = ib_to_rvt(pd->device);
 
 	mr->mapsz = 0;
 	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
 	for (; i < m; i++) {
-		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
+		mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
+					  dev->dparms.node);
 		if (!mr->map[i]) {
 			rvt_deinit_mregion(mr);
 			return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0f12c21..7de5134 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -397,6 +397,7 @@
 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
 {
 	unsigned n;
+	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
 
 	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
 		rvt_put_ss(&qp->s_rdma_read_sge);
@@ -431,7 +432,7 @@
 	if (qp->ibqp.qp_type != IB_QPT_RC)
 		return;
 
-	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+	for (n = 0; n < rvt_max_atomic(rdi); n++) {
 		struct rvt_ack_entry *e = &qp->s_ack_queue[n];
 
 		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
@@ -501,6 +502,12 @@
  */
 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 		  enum ib_qp_type type)
+	__releases(&qp->s_lock)
+	__releases(&qp->s_hlock)
+	__releases(&qp->r_lock)
+	__acquires(&qp->r_lock)
+	__acquires(&qp->s_hlock)
+	__acquires(&qp->s_lock)
 {
 	if (qp->state != IB_QPS_RESET) {
 		qp->state = IB_QPS_RESET;
@@ -569,7 +576,12 @@
 	qp->s_ssn = 1;
 	qp->s_lsn = 0;
 	qp->s_mig_state = IB_MIG_MIGRATED;
-	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+	if (qp->s_ack_queue)
+		memset(
+			qp->s_ack_queue,
+			0,
+			rvt_max_atomic(rdi) *
+				sizeof(*qp->s_ack_queue));
 	qp->r_head_ack_queue = 0;
 	qp->s_tail_ack_queue = 0;
 	qp->s_num_rd_atomic = 0;
@@ -653,9 +665,9 @@
 		if (gfp == GFP_NOIO)
 			swq = __vmalloc(
 				(init_attr->cap.max_send_wr + 1) * sz,
-				gfp, PAGE_KERNEL);
+				gfp | __GFP_ZERO, PAGE_KERNEL);
 		else
-			swq = vmalloc_node(
+			swq = vzalloc_node(
 				(init_attr->cap.max_send_wr + 1) * sz,
 				rdi->dparms.node);
 		if (!swq)
@@ -677,6 +689,16 @@
 			goto bail_swq;
 
 		RCU_INIT_POINTER(qp->next, NULL);
+		if (init_attr->qp_type == IB_QPT_RC) {
+			qp->s_ack_queue =
+				kzalloc_node(
+					sizeof(*qp->s_ack_queue) *
+					 rvt_max_atomic(rdi),
+					gfp,
+					rdi->dparms.node);
+			if (!qp->s_ack_queue)
+				goto bail_qp;
+		}
 
 		/*
 		 * Driver needs to set up it's private QP structure and do any
@@ -704,9 +726,9 @@
 				qp->r_rq.wq = __vmalloc(
 						sizeof(struct rvt_rwq) +
 						qp->r_rq.size * sz,
-						gfp, PAGE_KERNEL);
+						gfp | __GFP_ZERO, PAGE_KERNEL);
 			else
-				qp->r_rq.wq = vmalloc_node(
+				qp->r_rq.wq = vzalloc_node(
 						sizeof(struct rvt_rwq) +
 						qp->r_rq.size * sz,
 						rdi->dparms.node);
@@ -857,6 +879,7 @@
 	rdi->driver_f.qp_priv_free(rdi, qp);
 
 bail_qp:
+	kfree(qp->s_ack_queue);
 	kfree(qp);
 
 bail_swq:
@@ -1284,6 +1307,7 @@
 		vfree(qp->r_rq.wq);
 	vfree(qp->s_wq);
 	rdi->driver_f.qp_priv_free(rdi, qp);
+	kfree(qp->s_ack_queue);
 	kfree(qp);
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index caec8e9..4f7d9b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -92,6 +92,9 @@
 	IPOIB_FLAG_UMCAST	  = 10,
 	IPOIB_STOP_NEIGH_GC	  = 11,
 	IPOIB_NEIGH_TBL_FLUSH	  = 12,
+	IPOIB_FLAG_DEV_ADDR_SET	  = 13,
+	IPOIB_FLAG_DEV_ADDR_CTRL  = 14,
+	IPOIB_FLAG_GOING_DOWN	  = 15,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
 
@@ -392,6 +395,7 @@
 	struct ipoib_ethtool_st ethtool;
 	struct timer_list poll_timer;
 	unsigned max_send_sge;
+	bool sm_fullmember_sendonly_support;
 };
 
 struct ipoib_ah {
@@ -476,6 +480,7 @@
 
 void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index b2f4283..951d9ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1486,6 +1486,10 @@
 {
 	struct net_device *dev = to_net_dev(d);
 	int ret;
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+		return -EPERM;
 
 	if (!rtnl_trylock())
 		return restart_syscall();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 418e5a1c..dc6d241 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -997,6 +997,106 @@
 	return 0;
 }
 
+/*
+ * returns true if the device address of the ipoib interface has changed and the
+ * new address is a valid one (i.e. in the gid table), returns false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
+{
+	union ib_gid search_gid;
+	union ib_gid gid0;
+	union ib_gid *netdev_gid;
+	int err;
+	u16 index;
+	u8 port;
+	bool ret = false;
+
+	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
+	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
+		return false;
+
+	netif_addr_lock_bh(priv->dev);
+
+	/* The subnet prefix may have changed, update it now so we won't have
+	 * to do it later
+	 */
+	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+	search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+	netif_addr_unlock_bh(priv->dev);
+
+	err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
+			  priv->dev, &port, &index);
+
+	netif_addr_lock_bh(priv->dev);
+
+	if (search_gid.global.interface_id !=
+	    priv->local_gid.global.interface_id)
+		/* There was a change while we were looking up the gid, bail
+		 * here and let the next work sort this out
+		 */
+		goto out;
+
+	/* The next section of code needs some background:
+	 * Per IB spec the port GUID can't change if the HCA is powered on.
+	 * The port GUID is the basis for GID at index 0, which is the basis for
+	 * the default device address of an ipoib interface.
+	 *
+	 * so it seems the flow should be:
+	 * if user_changed_dev_addr && gid in gid tbl
+	 *	set bit dev_addr_set
+	 *	return true
+	 * else
+	 *	return false
+	 *
+	 * The issue is that there are devices that don't follow the spec:
+	 * they change the port GUID when the HCA is powered on, so in order
+	 * not to break userspace applications, we need to check if the
+	 * user wanted to control the device address and we assume that
+	 * if he sets the device address back to be based on GID index 0,
+	 * he no longer wishes to control it.
+	 *
+	 * If the user doesn't control the device address,
+	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
+	 * the port GUID has changed and GID at index 0 has changed,
+	 * so we need to change priv->local_gid and priv->dev->dev_addr
+	 * to reflect the new GID.
+	 */
+	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+		if (!err && port == priv->port) {
+			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+			if (index == 0)
+				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+					  &priv->flags);
+			else
+				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+			ret = true;
+		} else {
+			ret = false;
+		}
+	} else {
+		if (!err && port == priv->port) {
+			ret = true;
+		} else {
+			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+				memcpy(&priv->local_gid, &gid0,
+				       sizeof(priv->local_gid));
+				memcpy(priv->dev->dev_addr + 4, &gid0,
+				       sizeof(priv->local_gid));
+				ret = true;
+			}
+		}
+	}
+
+out:
+	netif_addr_unlock_bh(priv->dev);
+
+	return ret;
+}
+
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 				enum ipoib_flush_level level,
 				int nesting)
@@ -1018,6 +1118,9 @@
 
 	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
 	    level != IPOIB_FLUSH_HEAVY) {
+		/* Make sure the dev_addr is set even if not flushing */
+		if (level == IPOIB_FLUSH_LIGHT)
+			ipoib_dev_addr_changed_valid(priv);
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
 		return;
 	}
@@ -1029,7 +1132,8 @@
 				update_parent_pkey(priv);
 			else
 				update_child_pkey(priv);
-		}
+		} else if (level == IPOIB_FLUSH_LIGHT)
+			ipoib_dev_addr_changed_valid(priv);
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
 		return;
 	}
@@ -1081,7 +1185,8 @@
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		if (level >= IPOIB_FLUSH_NORMAL)
 			ipoib_ib_dev_up(dev);
-		ipoib_mcast_restart_task(&priv->restart_task);
+		if (ipoib_dev_addr_changed_valid(priv))
+			ipoib_mcast_restart_task(&priv->restart_task);
 	}
 }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b940ef1..5f58c41 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -99,6 +99,7 @@
 		struct ib_device *dev, u8 port, u16 pkey,
 		const union ib_gid *gid, const struct sockaddr *addr,
 		void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
 
 static struct ib_client ipoib_client = {
 	.name   = "ipoib",
@@ -117,6 +118,8 @@
 
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
+	priv->sm_fullmember_sendonly_support = false;
+
 	if (ipoib_ib_dev_open(dev)) {
 		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 			return 0;
@@ -629,6 +632,77 @@
 	spin_unlock_irq(&priv->lock);
 }
 
+struct classport_info_context {
+	struct ipoib_dev_priv	*priv;
+	struct completion	done;
+	struct ib_sa_query	*sa_query;
+};
+
+static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
+				    void *context)
+{
+	struct classport_info_context *cb_ctx = context;
+	struct ipoib_dev_priv *priv;
+
+	WARN_ON(!context);
+
+	priv = cb_ctx->priv;
+
+	if (status || !rec) {
+		pr_debug("device: %s failed query classport_info status: %d\n",
+			 priv->dev->name, status);
+		/* keeps the default, will try next mcast_restart */
+		priv->sm_fullmember_sendonly_support = false;
+		goto out;
+	}
+
+	if (ib_get_cpi_capmask2(rec) &
+	    IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
+		pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
+			 priv->dev->name);
+		priv->sm_fullmember_sendonly_support = true;
+	} else {
+		pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
+			 priv->dev->name);
+		priv->sm_fullmember_sendonly_support = false;
+	}
+
+out:
+	complete(&cb_ctx->done);
+}
+
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
+{
+	struct classport_info_context *callback_context;
+	int ret;
+
+	callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
+	if (!callback_context)
+		return -ENOMEM;
+
+	callback_context->priv = priv;
+	init_completion(&callback_context->done);
+
+	ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
+					     priv->ca, priv->port, 3000,
+					     GFP_KERNEL,
+					     classport_info_query_cb,
+					     callback_context,
+					     &callback_context->sa_query);
+	if (ret < 0) {
+		pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
+			priv->dev->name, ret);
+		kfree(callback_context);
+		return ret;
+	}
+
+	/* wait for the callback to finish before returning */
+	wait_for_completion(&callback_context->done);
+	kfree(callback_context);
+
+	return ret;
+}
+
 void ipoib_flush_paths(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1132,7 +1206,9 @@
 				neigh = NULL;
 				goto out_unlock;
 			}
-			neigh->alive = jiffies;
+
+			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+				neigh->alive = jiffies;
 			goto out_unlock;
 		}
 	}
@@ -1649,6 +1725,7 @@
 	.ndo_get_vf_config	 = ipoib_get_vf_config,
 	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
 	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
+	.ndo_set_mac_address	 = ipoib_set_mac,
 };
 
 static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1771,6 +1848,70 @@
 	return device_create_file(&dev->dev, &dev_attr_umcast);
 }
 
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+	struct ipoib_dev_priv *child_priv;
+	struct net_device *netdev = priv->dev;
+
+	netif_addr_lock_bh(netdev);
+
+	memcpy(&priv->local_gid.global.interface_id,
+	       &gid->global.interface_id,
+	       sizeof(gid->global.interface_id));
+	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+	netif_addr_unlock_bh(netdev);
+
+	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+		down_read(&priv->vlan_rwsem);
+		list_for_each_entry(child_priv, &priv->child_intfs, list)
+			set_base_guid(child_priv, gid);
+		up_read(&priv->vlan_rwsem);
+	}
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+			      struct sockaddr_storage *ss)
+{
+	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+	int ret = 0;
+
+	netif_addr_lock_bh(dev);
+
+	/* Make sure the QPN, reserved and subnet prefix match the current
+	 * lladdr; it also makes sure the lladdr is unicast.
+	 */
+	if (memcmp(dev->dev_addr, ss->__data,
+		   4 + sizeof(gid->global.subnet_prefix)) ||
+	    gid->global.interface_id == 0)
+		ret = -EINVAL;
+
+	netif_addr_unlock_bh(dev);
+
+	return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct sockaddr_storage *ss = addr;
+	int ret;
+
+	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+		return -EBUSY;
+
+	ret = ipoib_check_lladdr(dev, ss);
+	if (ret)
+		return ret;
+
+	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+	queue_work(ipoib_workqueue, &priv->flush_light);
+
+	return 0;
+}
+
 static ssize_t create_child(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t count)
@@ -1894,6 +2035,7 @@
 		goto device_init_failed;
 	} else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
 	result = ipoib_dev_init(priv->dev, hca, port);
 	if (result < 0) {
@@ -2001,6 +2143,9 @@
 		ib_unregister_event_handler(&priv->event_handler);
 		flush_workqueue(ipoib_workqueue);
 
+		/* mark interface in the middle of destruction */
+		set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
 		rtnl_lock();
 		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
 		rtnl_unlock();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 2588931..d3394b6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -64,6 +64,9 @@
 	unsigned int       send_only;
 };
 
+/* join state that allows creating mcg with sendonly member request */
+#define SENDONLY_FULLMEMBER_JOIN	8
+
 /*
  * This should be called with the priv->lock held
  */
@@ -326,12 +329,23 @@
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   carrier_on_task);
 	struct ib_port_attr attr;
+	int ret;
 
 	if (ib_query_port(priv->ca, priv->port, &attr) ||
 	    attr.state != IB_PORT_ACTIVE) {
 		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
 		return;
 	}
+	/*
+	 * Check if we can send sendonly MCGs with the sendonly-fullmember join state.
+	 * This is done here, after the successful join to the broadcast group,
+	 * because the broadcast group must always be joined first and is always
+	 * re-joined if the SM changes substantially.
+	 */
+	ret = ipoib_check_sm_sendonly_fullmember_support(priv);
+	if (ret < 0)
+		pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
+			 priv->dev->name, ret);
 
 	/*
 	 * Take rtnl_lock to avoid racing with ipoib_stop() and
@@ -515,22 +529,20 @@
 		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
 
 		/*
-		 * Send-only IB Multicast joins do not work at the core
-		 * IB layer yet, so we can't use them here.  However,
-		 * we are emulating an Ethernet multicast send, which
-		 * does not require a multicast subscription and will
-		 * still send properly.  The most appropriate thing to
+		 * Send-only IB Multicast joins work at the core IB layer but
+		 * require specific SM support.
+		 * We can use such joins here only if the current SM supports that feature.
+		 * If it does not, we emulate an Ethernet multicast send,
+		 * which does not require a multicast subscription and will
+		 * still send properly. The most appropriate thing to
 		 * do is to create the group if it doesn't exist as that
 		 * most closely emulates the behavior, from a user space
-		 * application perspecitive, of Ethernet multicast
-		 * operation.  For now, we do a full join, maybe later
-		 * when the core IB layers support send only joins we
-		 * will use them.
+		 * application perspective, of Ethernet multicast operation.
 		 */
-#if 0
-		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-			rec.join_state = 4;
-#endif
+		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+		    priv->sm_fullmember_sendonly_support)
+			/* SM supports sendonly-fullmember; otherwise fall back to full-member */
+			rec.join_state = SENDONLY_FULLMEMBER_JOIN;
 	}
 	spin_unlock_irq(&priv->lock);
 
@@ -570,11 +582,13 @@
 		return;
 	}
 	priv->local_lid = port_attr.lid;
+	netif_addr_lock_bh(dev);
 
-	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL))
-		ipoib_warn(priv, "ib_query_gid() failed\n");
-	else
-		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+		netif_addr_unlock_bh(dev);
+		return;
+	}
+	netif_addr_unlock_bh(dev);
 
 	spin_lock_irq(&priv->lock);
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b809c37..1e7cbba 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -307,5 +307,8 @@
 		queue_work(ipoib_workqueue, &priv->flush_normal);
 	} else if (record->event == IB_EVENT_PKEY_CHANGE) {
 		queue_work(ipoib_workqueue, &priv->flush_heavy);
+	} else if (record->event == IB_EVENT_GID_CHANGE &&
+		   !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+		queue_work(ipoib_workqueue, &priv->flush_light);
 	}
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a88..a2f9f29 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -68,6 +68,8 @@
 	priv->pkey = pkey;
 
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+	memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
+	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 	priv->dev->broadcast[8] = pkey >> 8;
 	priv->dev->broadcast[9] = pkey & 0xff;
 
@@ -129,6 +131,9 @@
 
 	ppriv = netdev_priv(pdev);
 
+	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+		return -EPERM;
+
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 	priv = ipoib_intf_alloc(intf_name);
@@ -181,6 +186,9 @@
 
 	ppriv = netdev_priv(pdev);
 
+	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+		return -EPERM;
+
 	if (!rtnl_trylock())
 		return restart_syscall();
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 897b5a4..a990c04 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2596,9 +2596,19 @@
 	isert_put_conn(isert_conn);
 }
 
+static void isert_get_rx_pdu(struct iscsi_conn *conn)
+{
+	struct completion comp;
+
+	init_completion(&comp);
+
+	wait_for_completion_interruptible(&comp);
+}
+
 static struct iscsit_transport iser_target_transport = {
 	.name			= "IB/iSER",
 	.transport_type		= ISCSI_INFINIBAND,
+	.rdma_shutdown		= true,
 	.priv_size		= sizeof(struct isert_cmd),
 	.owner			= THIS_MODULE,
 	.iscsit_setup_np	= isert_setup_np,
@@ -2614,6 +2624,7 @@
 	.iscsit_queue_data_in	= isert_put_datain,
 	.iscsit_queue_status	= isert_put_response,
 	.iscsit_aborted_task	= isert_aborted_task,
+	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
 	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
 };
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 646de17..3322ed7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1457,7 +1457,6 @@
 {
 	unsigned int sg_offset = 0;
 
-	state->desc = req->indirect_desc;
 	state->fr.next = req->fr_list;
 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
 	state->sg = scat;
@@ -1489,7 +1488,6 @@
 	struct scatterlist *sg;
 	int i;
 
-	state->desc = req->indirect_desc;
 	for_each_sg(scat, sg, count, i) {
 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
 			     ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@
 				   target->indirect_size, DMA_TO_DEVICE);
 
 	memset(&state, 0, sizeof(state));
+	state.desc = req->indirect_desc;
 	if (dev->use_fast_reg)
 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
 	else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@
 	int mr_page_shift, p;
 	u64 max_pages_per_mr;
 
-	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
 	if (!srp_dev)
 		return;
 
@@ -3586,8 +3585,6 @@
 						   IB_ACCESS_REMOTE_WRITE);
 		if (IS_ERR(srp_dev->global_mr))
 			goto err_pd;
-	} else {
-		srp_dev->global_mr = NULL;
 	}
 
 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2843f1a..e68b20cb 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -254,8 +254,8 @@
 	memset(cif, 0, sizeof(*cif));
 	cif->base_version = 1;
 	cif->class_version = 1;
-	cif->resp_time_value = 20;
 
+	ib_set_cpi_resp_time(cif, 20);
 	mad->mad_hdr.status = 0;
 }
 
@@ -1767,14 +1767,6 @@
 	}
 }
 
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
-	return 1;
-}
-
 static void srpt_free_ch(struct kref *kref)
 {
 	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
@@ -3064,7 +3056,6 @@
 	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
 	.release_cmd			= srpt_release_cmd,
 	.check_stop_free		= srpt_check_stop_free,
-	.shutdown_session		= srpt_shutdown_session,
 	.close_session			= srpt_close_session,
 	.sess_get_index			= srpt_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1142a93..804dbcc 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -87,7 +87,7 @@
 #define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
 #define DRIVER_DESC "X-Box pad driver"
 
-#define XPAD_PKT_LEN 32
+#define XPAD_PKT_LEN 64
 
 /* xbox d-pads should map to buttons, as is required for DDR pads
    but we map them to axes when possible to simplify things */
@@ -129,6 +129,7 @@
 	{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
 	{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
 	{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
+	{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
 	{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
 	{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -173,9 +174,11 @@
 	{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
 	{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+	{ 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
 	{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+	{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
 	{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
 	{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
 	{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
@@ -183,6 +186,7 @@
 	{ 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
 	{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+	{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
@@ -199,6 +203,7 @@
 	{ 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+	{ 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
 	{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -212,6 +217,8 @@
 	{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
 	{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+	{ 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
+	{ 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
 	{ 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
 	{ 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
 	{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
@@ -307,13 +314,16 @@
 	{ USB_DEVICE(0x0738, 0x4540) },		/* Mad Catz Beat Pad */
 	XPAD_XBOXONE_VENDOR(0x0738),		/* Mad Catz FightStick TE 2 */
 	XPAD_XBOX360_VENDOR(0x0e6f),		/* 0x0e6f X-Box 360 controllers */
+	XPAD_XBOXONE_VENDOR(0x0e6f),		/* 0x0e6f X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
 	XPAD_XBOX360_VENDOR(0x1bad),		/* Harminix Rock Band Guitar and Drums */
 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
+	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
 	XPAD_XBOX360_VENDOR(0x24c6),		/* PowerA Controllers */
+	XPAD_XBOXONE_VENDOR(0x24c6),		/* PowerA Controllers */
 	XPAD_XBOX360_VENDOR(0x1532),		/* Razer Sabertooth */
 	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
@@ -457,6 +467,10 @@
 static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
 				   u16 cmd, unsigned char *data)
 {
+	/* valid pad data */
+	if (data[0] != 0x00)
+		return;
+
 	/* digital pad */
 	if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
 		/* dpad as buttons (left, right, up, down) */
@@ -756,6 +770,7 @@
 	if (packet) {
 		memcpy(xpad->odata, packet->data, packet->len);
 		xpad->irq_out->transfer_buffer_length = packet->len;
+		packet->pending = false;
 		return true;
 	}
 
@@ -797,7 +812,6 @@
 	switch (status) {
 	case 0:
 		/* success */
-		xpad->out_packets[xpad->last_out_packet].pending = false;
 		xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
 		break;
 
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 6d96bff..29ddeb7 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -70,10 +70,13 @@
 
 static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
 {
-	int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
+	struct pwm_args pargs;
+	int delta;
 	int error;
 
-	error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
+	pwm_get_args(haptic->pwm_dev, &pargs);
+	delta = (pargs.period + haptic->pwm_duty) / 2;
+	error = pwm_config(haptic->pwm_dev, delta, pargs.period);
 	if (error) {
 		dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
 		return error;
@@ -234,6 +237,7 @@
 				       struct ff_effect *effect)
 {
 	struct max77693_haptic *haptic = input_get_drvdata(dev);
+	struct pwm_args pargs;
 	u64 period_mag_multi;
 
 	haptic->magnitude = effect->u.rumble.strong_magnitude;
@@ -245,7 +249,8 @@
 	 * The formula to convert magnitude to pwm_duty as follows:
 	 * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF)
 	 */
-	period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
+	pwm_get_args(haptic->pwm_dev, &pargs);
+	period_mag_multi = (u64)pargs.period * haptic->magnitude;
 	haptic->pwm_duty = (unsigned int)(period_mag_multi >>
 						MAX_MAGNITUDE_SHIFT);
 
@@ -329,6 +334,12 @@
 		return PTR_ERR(haptic->pwm_dev);
 	}
 
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to the
+	 * atomic PWM API.
+	 */
+	pwm_apply_args(haptic->pwm_dev);
+
 	haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic");
 	if (IS_ERR(haptic->motor_reg)) {
 		dev_err(&pdev->dev, "failed to get regulator\n");
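
The pwm_get_args()/pwm_apply_args() conversions above (and in the PWM users that follow) stop reading pwm_dev->period directly and instead take the reference period from the PWM arguments, with pwm_apply_args() kept as a temporary shim until the atomic PWM API is adopted. A rough, illustrative sketch only (the helper name and the 50% duty choice are invented, not taken from any of these patches):

#include <linux/pwm.h>

/* Hypothetical helper: program a 50% duty cycle from the reference
 * period in the PWM args instead of peeking at pwm_dev->period. */
static int example_pwm_set_half_duty(struct pwm_device *pwm)
{
	struct pwm_args pargs;

	/* FIXME-style shim, as in the patches above, until the atomic API */
	pwm_apply_args(pwm);

	pwm_get_args(pwm, &pargs);
	return pwm_config(pwm, pargs.period / 2, pargs.period);
}
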
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 8d6326d..99bc762 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -306,6 +306,12 @@
 				error);
 			goto err_free_mem;
 		}
+
+		/*
+		 * FIXME: pwm_apply_args() should be removed when switching to
+		 * the atomic PWM API.
+		 */
+		pwm_apply_args(chip->pwm);
 		break;
 
 	default:
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index f2261ab..5f9655d 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -20,21 +20,40 @@
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 struct pwm_beeper {
 	struct input_dev *input;
 	struct pwm_device *pwm;
+	struct work_struct work;
 	unsigned long period;
 };
 
 #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
 
+static void __pwm_beeper_set(struct pwm_beeper *beeper)
+{
+	unsigned long period = beeper->period;
+
+	if (period) {
+		pwm_config(beeper->pwm, period / 2, period);
+		pwm_enable(beeper->pwm);
+	} else
+		pwm_disable(beeper->pwm);
+}
+
+static void pwm_beeper_work(struct work_struct *work)
+{
+	struct pwm_beeper *beeper =
+		container_of(work, struct pwm_beeper, work);
+
+	__pwm_beeper_set(beeper);
+}
+
 static int pwm_beeper_event(struct input_dev *input,
 			    unsigned int type, unsigned int code, int value)
 {
-	int ret = 0;
 	struct pwm_beeper *beeper = input_get_drvdata(input);
-	unsigned long period;
 
 	if (type != EV_SND || value < 0)
 		return -EINVAL;
@@ -49,22 +68,31 @@
 		return -EINVAL;
 	}
 
-	if (value == 0) {
-		pwm_disable(beeper->pwm);
-	} else {
-		period = HZ_TO_NANOSECONDS(value);
-		ret = pwm_config(beeper->pwm, period / 2, period);
-		if (ret)
-			return ret;
-		ret = pwm_enable(beeper->pwm);
-		if (ret)
-			return ret;
-		beeper->period = period;
-	}
+	if (value == 0)
+		beeper->period = 0;
+	else
+		beeper->period = HZ_TO_NANOSECONDS(value);
+
+	schedule_work(&beeper->work);
 
 	return 0;
 }
 
+static void pwm_beeper_stop(struct pwm_beeper *beeper)
+{
+	cancel_work_sync(&beeper->work);
+
+	if (beeper->period)
+		pwm_disable(beeper->pwm);
+}
+
+static void pwm_beeper_close(struct input_dev *input)
+{
+	struct pwm_beeper *beeper = input_get_drvdata(input);
+
+	pwm_beeper_stop(beeper);
+}
+
 static int pwm_beeper_probe(struct platform_device *pdev)
 {
 	unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
@@ -87,6 +115,14 @@
 		goto err_free;
 	}
 
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to
+	 * the atomic PWM API.
+	 */
+	pwm_apply_args(beeper->pwm);
+
+	INIT_WORK(&beeper->work, pwm_beeper_work);
+
 	beeper->input = input_allocate_device();
 	if (!beeper->input) {
 		dev_err(&pdev->dev, "Failed to allocate input device\n");
@@ -106,6 +142,7 @@
 	beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
 
 	beeper->input->event = pwm_beeper_event;
+	beeper->input->close = pwm_beeper_close;
 
 	input_set_drvdata(beeper->input, beeper);
 
@@ -135,7 +172,6 @@
 
 	input_unregister_device(beeper->input);
 
-	pwm_disable(beeper->pwm);
 	pwm_free(beeper->pwm);
 
 	kfree(beeper);
@@ -147,8 +183,7 @@
 {
 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
 
-	if (beeper->period)
-		pwm_disable(beeper->pwm);
+	pwm_beeper_stop(beeper);
 
 	return 0;
 }
@@ -157,10 +192,8 @@
 {
 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
 
-	if (beeper->period) {
-		pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
-		pwm_enable(beeper->pwm);
-	}
+	if (beeper->period)
+		__pwm_beeper_set(beeper);
 
 	return 0;
 }
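
The pwm-beeper rework above exists because the input event callback can run in atomic context while pwm_config()/pwm_enable() may sleep, so the PWM programming is deferred to a workqueue. A minimal, hedged sketch of that pattern (all names here are invented for illustration; INIT_WORK() on the work item is assumed to have been done at probe time):

#include <linux/pwm.h>
#include <linux/workqueue.h>

struct example_buzzer {
	struct pwm_device *pwm;
	struct work_struct work;	/* INIT_WORK(&work, example_buzzer_work) at probe */
	unsigned long period;		/* requested period in ns, 0 means off */
};

/* Runs in process context, where the PWM calls are allowed to sleep. */
static void example_buzzer_work(struct work_struct *work)
{
	struct example_buzzer *b = container_of(work, struct example_buzzer, work);

	if (b->period) {
		pwm_config(b->pwm, b->period / 2, b->period);
		pwm_enable(b->pwm);
	} else {
		pwm_disable(b->pwm);
	}
}

/* May be called from atomic context: record the request and defer the work. */
static void example_buzzer_set(struct example_buzzer *b, unsigned long period_ns)
{
	b->period = period_ns;
	schedule_work(&b->work);
}
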
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index abe1a92..65ebbd1 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -981,9 +981,15 @@
 }
 
 #ifdef CONFIG_COMPAT
+
+#define UI_SET_PHYS_COMPAT	_IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+
 static long uinput_compat_ioctl(struct file *file,
 				unsigned int cmd, unsigned long arg)
 {
+	if (cmd == UI_SET_PHYS_COMPAT)
+		cmd = UI_SET_PHYS;
+
 	return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
 }
 #endif
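
The UI_SET_PHYS_COMPAT translation above is needed because _IOW() encodes the sizeof() of the argument type into the ioctl number, and UI_SET_PHYS takes a pointer, so 32-bit and 64-bit callers compute different commands. A small user-space sketch (illustrative only, assuming the uinput definitions from include/uapi/linux/uinput.h) that prints the two values:

#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>

#define UINPUT_IOCTL_BASE	'U'

int main(void)
{
	/* On a 64-bit build char * is 8 bytes; a 32-bit (compat) caller
	 * encodes a 4-byte pointer, hence the different command value. */
	unsigned int native = _IOW(UINPUT_IOCTL_BASE, 108, char *);
	unsigned int compat = _IOW(UINPUT_IOCTL_BASE, 108, uint32_t);

	printf("UI_SET_PHYS native=%#x compat=%#x\n", native, compat);
	return 0;
}
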
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 4857943..d07dd29 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -115,7 +115,6 @@
 struct sun4i_ts_data {
 	struct device *dev;
 	struct input_dev *input;
-	struct thermal_zone_device *tz;
 	void __iomem *base;
 	unsigned int irq;
 	bool ignore_fifo_data;
@@ -366,10 +365,7 @@
 	if (IS_ERR(hwmon))
 		return PTR_ERR(hwmon);
 
-	ts->tz = thermal_zone_of_sensor_register(ts->dev, 0, ts,
-						 &sun4i_ts_tz_ops);
-	if (IS_ERR(ts->tz))
-		ts->tz = NULL;
+	devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
 
 	writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
 
@@ -377,7 +373,6 @@
 		error = input_register_device(ts->input);
 		if (error) {
 			writel(0, ts->base + TP_INT_FIFOC);
-			thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
 			return error;
 		}
 	}
@@ -394,8 +389,6 @@
 	if (ts->input)
 		input_unregister_device(ts->input);
 
-	thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
-
 	/* Deactivate all IRQs */
 	writel(0, ts->base + TP_INT_FIFOC);
 
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ebab33e..94b6821 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1477,7 +1477,7 @@
 	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
 	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
-	if (IS_ERR_VALUE(asid))
+	if (asid < 0)
 		return asid;
 
 	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
@@ -1508,7 +1508,7 @@
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 
 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
-	if (IS_ERR_VALUE(vmid))
+	if (vmid < 0)
 		return vmid;
 
 	cfg->vmid	= (u16)vmid;
@@ -1569,7 +1569,7 @@
 	smmu_domain->pgtbl_ops = pgtbl_ops;
 
 	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		free_io_pgtable_ops(pgtbl_ops);
 
 	return ret;
@@ -1642,7 +1642,7 @@
 	struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
 
 	smmu_group->ste.bypass = true;
-	if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+	if (arm_smmu_install_ste_for_group(smmu_group) < 0)
 		dev_warn(dev, "failed to install bypass STE\n");
 
 	smmu_group->domain = NULL;
@@ -1694,7 +1694,7 @@
 	smmu_group->ste.bypass	= domain->type == IOMMU_DOMAIN_DMA;
 
 	ret = arm_smmu_install_ste_for_group(smmu_group);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		smmu_group->domain = NULL;
 
 out_unlock:
@@ -2235,7 +2235,7 @@
 						arm_smmu_evtq_handler,
 						arm_smmu_evtq_thread,
 						0, "arm-smmu-v3-evtq", smmu);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable evtq irq\n");
 	}
 
@@ -2244,7 +2244,7 @@
 		ret = devm_request_irq(smmu->dev, irq,
 				       arm_smmu_cmdq_sync_handler, 0,
 				       "arm-smmu-v3-cmdq-sync", smmu);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
 	}
 
@@ -2252,7 +2252,7 @@
 	if (irq) {
 		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
 				       0, "arm-smmu-v3-gerror", smmu);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(smmu->dev, "failed to enable gerror irq\n");
 	}
 
@@ -2264,7 +2264,7 @@
 							arm_smmu_priq_thread,
 							0, "arm-smmu-v3-priq",
 							smmu);
-			if (IS_ERR_VALUE(ret))
+			if (ret < 0)
 				dev_warn(smmu->dev,
 					 "failed to enable priq irq\n");
 			else
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e206ce7..9345a3f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -950,7 +950,7 @@
 
 	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 				      smmu->num_context_banks);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto out_unlock;
 
 	cfg->cbndx = ret;
@@ -989,7 +989,7 @@
 	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
 			  "arm-smmu-context-fault", domain);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
 			cfg->irptndx, irq);
 		cfg->irptndx = INVALID_IRPTNDX;
@@ -1099,7 +1099,7 @@
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
 						  smmu->num_mapping_groups);
-		if (IS_ERR_VALUE(idx)) {
+		if (idx < 0) {
 			dev_err(smmu->dev, "failed to allocate free SMR\n");
 			goto err_free_smrs;
 		}
@@ -1233,7 +1233,7 @@
 
 	/* Ensure that the domain is finalised */
 	ret = arm_smmu_init_domain_context(domain, smmu);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	/*
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b2bfb95..a644d0c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
+#include <linux/cpu.h>
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <linux/iova.h>
@@ -390,6 +391,7 @@
 					 * domain ids are 16 bit wide according
 					 * to VT-d spec, section 9.3 */
 
+	bool has_iotlb_device;
 	struct list_head devices;	/* all devices' list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
 
@@ -456,27 +458,32 @@
 
 static void flush_unmaps_timeout(unsigned long data);
 
-static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
-
-#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
-	int next;
-	struct iova *iova[HIGH_WATER_MARK];
-	struct dmar_domain *domain[HIGH_WATER_MARK];
-	struct page *freelist[HIGH_WATER_MARK];
+struct deferred_flush_entry {
+	unsigned long iova_pfn;
+	unsigned long nrpages;
+	struct dmar_domain *domain;
+	struct page *freelist;
 };
 
-static struct deferred_flush_tables *deferred_flush;
+#define HIGH_WATER_MARK 250
+struct deferred_flush_table {
+	int next;
+	struct deferred_flush_entry entries[HIGH_WATER_MARK];
+};
+
+struct deferred_flush_data {
+	spinlock_t lock;
+	int timer_on;
+	struct timer_list timer;
+	long size;
+	struct deferred_flush_table *tables;
+};
+
+DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
 
 /* bitmap for indexing intel_iommus */
 static int g_num_of_iommus;
 
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
 static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
@@ -1458,10 +1465,35 @@
 	return NULL;
 }
 
+static void domain_update_iotlb(struct dmar_domain *domain)
+{
+	struct device_domain_info *info;
+	bool has_iotlb_device = false;
+
+	assert_spin_locked(&device_domain_lock);
+
+	list_for_each_entry(info, &domain->devices, link) {
+		struct pci_dev *pdev;
+
+		if (!info->dev || !dev_is_pci(info->dev))
+			continue;
+
+		pdev = to_pci_dev(info->dev);
+		if (pdev->ats_enabled) {
+			has_iotlb_device = true;
+			break;
+		}
+	}
+
+	domain->has_iotlb_device = has_iotlb_device;
+}
+
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
 
+	assert_spin_locked(&device_domain_lock);
+
 	if (!info || !dev_is_pci(info->dev))
 		return;
 
@@ -1481,6 +1513,7 @@
 #endif
 	if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
 		info->ats_enabled = 1;
+		domain_update_iotlb(info->domain);
 		info->ats_qdep = pci_ats_queue_depth(pdev);
 	}
 }
@@ -1489,6 +1522,8 @@
 {
 	struct pci_dev *pdev;
 
+	assert_spin_locked(&device_domain_lock);
+
 	if (!dev_is_pci(info->dev))
 		return;
 
@@ -1497,6 +1532,7 @@
 	if (info->ats_enabled) {
 		pci_disable_ats(pdev);
 		info->ats_enabled = 0;
+		domain_update_iotlb(info->domain);
 	}
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	if (info->pri_enabled) {
@@ -1517,6 +1553,9 @@
 	unsigned long flags;
 	struct device_domain_info *info;
 
+	if (!domain->has_iotlb_device)
+		return;
+
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (!info->ats_enabled)
@@ -1734,6 +1773,7 @@
 	memset(domain, 0, sizeof(*domain));
 	domain->nid = -1;
 	domain->flags = flags;
+	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
 
 	return domain;
@@ -1918,8 +1958,12 @@
 		return;
 
 	/* Flush any lazy unmaps that may reference this domain */
-	if (!intel_iommu_strict)
-		flush_unmaps_timeout(0);
+	if (!intel_iommu_strict) {
+		int cpu;
+
+		for_each_possible_cpu(cpu)
+			flush_unmaps_timeout(cpu);
+	}
 
 	/* Remove associated devices and clear attached or cached domains */
 	rcu_read_lock();
@@ -3077,7 +3121,7 @@
 	bool copied_tables = false;
 	struct device *dev;
 	struct intel_iommu *iommu;
-	int i, ret;
+	int i, ret, cpu;
 
 	/*
 	 * for each drhd
@@ -3110,11 +3154,20 @@
 		goto error;
 	}
 
-	deferred_flush = kzalloc(g_num_of_iommus *
-		sizeof(struct deferred_flush_tables), GFP_KERNEL);
-	if (!deferred_flush) {
-		ret = -ENOMEM;
-		goto free_g_iommus;
+	for_each_possible_cpu(cpu) {
+		struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
+							      cpu);
+
+		dfd->tables = kzalloc(g_num_of_iommus *
+				      sizeof(struct deferred_flush_table),
+				      GFP_KERNEL);
+		if (!dfd->tables) {
+			ret = -ENOMEM;
+			goto free_g_iommus;
+		}
+
+		spin_lock_init(&dfd->lock);
+		setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
 	}
 
 	for_each_active_iommu(iommu, drhd) {
@@ -3291,19 +3344,20 @@
 		disable_dmar_iommu(iommu);
 		free_dmar_iommu(iommu);
 	}
-	kfree(deferred_flush);
 free_g_iommus:
+	for_each_possible_cpu(cpu)
+		kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
 	kfree(g_iommus);
 error:
 	return ret;
 }
 
 /* This takes a number of _MM_ pages, not VTD pages */
-static struct iova *intel_alloc_iova(struct device *dev,
+static unsigned long intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
 {
-	struct iova *iova = NULL;
+	unsigned long iova_pfn = 0;
 
 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3316,19 +3370,19 @@
 		 * DMA_BIT_MASK(32) and if that fails then try allocating
 		 * from higher range
 		 */
-		iova = alloc_iova(&domain->iovad, nrpages,
-				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
-		if (iova)
-			return iova;
+		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+					   IOVA_PFN(DMA_BIT_MASK(32)));
+		if (iova_pfn)
+			return iova_pfn;
 	}
-	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
-	if (unlikely(!iova)) {
+	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+	if (unlikely(!iova_pfn)) {
 		pr_err("Allocating %ld-page iova for %s failed",
 		       nrpages, dev_name(dev));
-		return NULL;
+		return 0;
 	}
 
-	return iova;
+	return iova_pfn;
 }
 
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
@@ -3426,7 +3480,7 @@
 {
 	struct dmar_domain *domain;
 	phys_addr_t start_paddr;
-	struct iova *iova;
+	unsigned long iova_pfn;
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
@@ -3444,8 +3498,8 @@
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
-	if (!iova)
+	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
+	if (!iova_pfn)
 		goto error;
 
 	/*
@@ -3463,7 +3517,7 @@
 	 * might have two guest_addr mapping to the same host paddr, but this
 	 * is not a big problem
 	 */
-	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
 				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
@@ -3471,18 +3525,18 @@
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
 		iommu_flush_iotlb_psi(iommu, domain,
-				      mm_to_dma_pfn(iova->pfn_lo),
+				      mm_to_dma_pfn(iova_pfn),
 				      size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
-	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
 	start_paddr += paddr & ~PAGE_MASK;
 	return start_paddr;
 
 error:
-	if (iova)
-		__free_iova(&domain->iovad, iova);
+	if (iova_pfn)
+		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
 	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
 		dev_name(dev), size, (unsigned long long)paddr, dir);
 	return 0;
@@ -3497,91 +3551,120 @@
 				  dir, *dev->dma_mask);
 }
 
-static void flush_unmaps(void)
+static void flush_unmaps(struct deferred_flush_data *flush_data)
 {
 	int i, j;
 
-	timer_on = 0;
+	flush_data->timer_on = 0;
 
 	/* just flush them all */
 	for (i = 0; i < g_num_of_iommus; i++) {
 		struct intel_iommu *iommu = g_iommus[i];
+		struct deferred_flush_table *flush_table =
+				&flush_data->tables[i];
 		if (!iommu)
 			continue;
 
-		if (!deferred_flush[i].next)
+		if (!flush_table->next)
 			continue;
 
 		/* In caching mode, global flushes turn emulation expensive */
 		if (!cap_caching_mode(iommu->cap))
 			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH);
-		for (j = 0; j < deferred_flush[i].next; j++) {
+		for (j = 0; j < flush_table->next; j++) {
 			unsigned long mask;
-			struct iova *iova = deferred_flush[i].iova[j];
-			struct dmar_domain *domain = deferred_flush[i].domain[j];
+			struct deferred_flush_entry *entry =
+						&flush_table->entries[j];
+			unsigned long iova_pfn = entry->iova_pfn;
+			unsigned long nrpages = entry->nrpages;
+			struct dmar_domain *domain = entry->domain;
+			struct page *freelist = entry->freelist;
 
 			/* On real hardware multiple invalidations are expensive */
 			if (cap_caching_mode(iommu->cap))
 				iommu_flush_iotlb_psi(iommu, domain,
-					iova->pfn_lo, iova_size(iova),
-					!deferred_flush[i].freelist[j], 0);
+					mm_to_dma_pfn(iova_pfn),
+					nrpages, !freelist, 0);
 			else {
-				mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
-				iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-						(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+				mask = ilog2(nrpages);
+				iommu_flush_dev_iotlb(domain,
+						(uint64_t)iova_pfn << PAGE_SHIFT, mask);
 			}
-			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
-			if (deferred_flush[i].freelist[j])
-				dma_free_pagelist(deferred_flush[i].freelist[j]);
+			free_iova_fast(&domain->iovad, iova_pfn, nrpages);
+			if (freelist)
+				dma_free_pagelist(freelist);
 		}
-		deferred_flush[i].next = 0;
+		flush_table->next = 0;
 	}
 
-	list_size = 0;
+	flush_data->size = 0;
 }
 
-static void flush_unmaps_timeout(unsigned long data)
+static void flush_unmaps_timeout(unsigned long cpuid)
 {
+	struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
 	unsigned long flags;
 
-	spin_lock_irqsave(&async_umap_flush_lock, flags);
-	flush_unmaps();
-	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+	spin_lock_irqsave(&flush_data->lock, flags);
+	flush_unmaps(flush_data);
+	spin_unlock_irqrestore(&flush_data->lock, flags);
 }
 
-static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
+static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+		      unsigned long nrpages, struct page *freelist)
 {
 	unsigned long flags;
-	int next, iommu_id;
+	int entry_id, iommu_id;
 	struct intel_iommu *iommu;
+	struct deferred_flush_entry *entry;
+	struct deferred_flush_data *flush_data;
+	unsigned int cpuid;
 
-	spin_lock_irqsave(&async_umap_flush_lock, flags);
-	if (list_size == HIGH_WATER_MARK)
-		flush_unmaps();
+	cpuid = get_cpu();
+	flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+
+	/* Flush all CPUs' entries to avoid deferring too much.  If
+	 * this becomes a bottleneck, we could flush only this CPU
+	 * and rely on the flush timer for the rest.
+	 */
+	if (flush_data->size == HIGH_WATER_MARK) {
+		int cpu;
+
+		for_each_online_cpu(cpu)
+			flush_unmaps_timeout(cpu);
+	}
+
+	spin_lock_irqsave(&flush_data->lock, flags);
 
 	iommu = domain_get_iommu(dom);
 	iommu_id = iommu->seq_id;
 
-	next = deferred_flush[iommu_id].next;
-	deferred_flush[iommu_id].domain[next] = dom;
-	deferred_flush[iommu_id].iova[next] = iova;
-	deferred_flush[iommu_id].freelist[next] = freelist;
-	deferred_flush[iommu_id].next++;
+	entry_id = flush_data->tables[iommu_id].next;
+	++(flush_data->tables[iommu_id].next);
 
-	if (!timer_on) {
-		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
-		timer_on = 1;
+	entry = &flush_data->tables[iommu_id].entries[entry_id];
+	entry->domain = dom;
+	entry->iova_pfn = iova_pfn;
+	entry->nrpages = nrpages;
+	entry->freelist = freelist;
+
+	if (!flush_data->timer_on) {
+		mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
+		flush_data->timer_on = 1;
 	}
-	list_size++;
-	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+	flush_data->size++;
+	spin_unlock_irqrestore(&flush_data->lock, flags);
+
+	put_cpu();
 }
 
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 {
 	struct dmar_domain *domain;
 	unsigned long start_pfn, last_pfn;
-	struct iova *iova;
+	unsigned long nrpages;
+	unsigned long iova_pfn;
 	struct intel_iommu *iommu;
 	struct page *freelist;
 
@@ -3593,13 +3676,11 @@
 
 	iommu = domain_get_iommu(domain);
 
-	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
-	if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
-		      (unsigned long long)dev_addr))
-		return;
+	iova_pfn = IOVA_PFN(dev_addr);
 
-	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
-	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+	nrpages = aligned_nrpages(dev_addr, size);
+	start_pfn = mm_to_dma_pfn(iova_pfn);
+	last_pfn = start_pfn + nrpages - 1;
 
 	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
 		 dev_name(dev), start_pfn, last_pfn);
@@ -3608,12 +3689,12 @@
 
 	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
-				      last_pfn - start_pfn + 1, !freelist, 0);
+				      nrpages, !freelist, 0);
 		/* free iova */
-		__free_iova(&domain->iovad, iova);
+		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
 		dma_free_pagelist(freelist);
 	} else {
-		add_unmap(domain, iova, freelist);
+		add_unmap(domain, iova_pfn, nrpages, freelist);
 		/*
 		 * queue up the release of the unmap to save the 1/6th of the
 		 * cpu used up by the iotlb flush operation...
@@ -3625,7 +3706,7 @@
 			     size_t size, enum dma_data_direction dir,
 			     struct dma_attrs *attrs)
 {
-	intel_unmap(dev, dev_addr);
+	intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3684,7 +3765,7 @@
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap(dev, dma_handle);
+	intel_unmap(dev, dma_handle, size);
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, order);
 }
@@ -3693,7 +3774,16 @@
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
 {
-	intel_unmap(dev, sglist[0].dma_address);
+	dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
+	unsigned long nrpages = 0;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i) {
+		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
+	}
+
+	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3717,7 +3807,7 @@
 	struct dmar_domain *domain;
 	size_t size = 0;
 	int prot = 0;
-	struct iova *iova = NULL;
+	unsigned long iova_pfn;
 	int ret;
 	struct scatterlist *sg;
 	unsigned long start_vpfn;
@@ -3736,9 +3826,9 @@
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
 				*dev->dma_mask);
-	if (!iova) {
+	if (!iova_pfn) {
 		sglist->dma_length = 0;
 		return 0;
 	}
@@ -3753,13 +3843,13 @@
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
 
-	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+	start_vpfn = mm_to_dma_pfn(iova_pfn);
 
 	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		dma_pte_free_pagetable(domain, start_vpfn,
 				       start_vpfn + size - 1);
-		__free_iova(&domain->iovad, iova);
+		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
 		return 0;
 	}
 
@@ -4498,6 +4588,46 @@
 	.priority = 0
 };
 
+static void free_all_cpu_cached_iovas(unsigned int cpu)
+{
+	int i;
+
+	for (i = 0; i < g_num_of_iommus; i++) {
+		struct intel_iommu *iommu = g_iommus[i];
+		struct dmar_domain *domain;
+		u16 did;
+
+		if (!iommu)
+			continue;
+
+		for (did = 0; did < 0xffff; did++) {
+			domain = get_iommu_domain(iommu, did);
+
+			if (!domain)
+				continue;
+			free_cpu_cached_iovas(cpu, &domain->iovad);
+		}
+	}
+}
+
+static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
+				    unsigned long action, void *v)
+{
+	unsigned int cpu = (unsigned long)v;
+
+	switch (action) {
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		free_all_cpu_cached_iovas(cpu);
+		flush_unmaps_timeout(cpu);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_cpu_nb = {
+	.notifier_call = intel_iommu_cpu_notifier,
+};
 
 static ssize_t intel_iommu_show_version(struct device *dev,
 					struct device_attribute *attr,
@@ -4631,7 +4761,6 @@
 	up_write(&dmar_global_lock);
 	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
-	init_timer(&unmap_timer);
 #ifdef CONFIG_SWIOTLB
 	swiotlb = 0;
 #endif
@@ -4648,6 +4777,7 @@
 	bus_register_notifier(&pci_bus_type, &device_nb);
 	if (si_domain && !hw_pass_through)
 		register_memory_notifier(&intel_iommu_memory_nb);
+	register_hotcpu_notifier(&intel_iommu_cpu_nb);
 
 	intel_iommu_enabled = 1;
 
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef..ba764a0 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,6 +20,17 @@
 #include <linux/iova.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/bitops.h>
+
+static bool iova_rcache_insert(struct iova_domain *iovad,
+			       unsigned long pfn,
+			       unsigned long size);
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+				     unsigned long size,
+				     unsigned long limit_pfn);
+static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_iova_rcaches(struct iova_domain *iovad);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -38,6 +49,7 @@
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
+	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
@@ -291,33 +303,18 @@
 }
 EXPORT_SYMBOL_GPL(alloc_iova);
 
-/**
- * find_iova - find's an iova for a given pfn
- * @iovad: - iova domain in question.
- * @pfn: - page frame number
- * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+static struct iova *
+private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 {
-	unsigned long flags;
-	struct rb_node *node;
+	struct rb_node *node = iovad->rbroot.rb_node;
 
-	/* Take the lock so that no other thread is manipulating the rbtree */
-	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	node = iovad->rbroot.rb_node;
+	assert_spin_locked(&iovad->iova_rbtree_lock);
+
 	while (node) {
 		struct iova *iova = container_of(node, struct iova, node);
 
 		/* If pfn falls within iova's range, return iova */
 		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
-			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-			/* We are not holding the lock while this iova
-			 * is referenced by the caller as the same thread
-			 * which called this function also calls __free_iova()
-			 * and it is by design that only one thread can possibly
-			 * reference a particular iova and hence no conflict.
-			 */
 			return iova;
 		}
 
@@ -327,9 +324,35 @@
 			node = node->rb_right;
 	}
 
-	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return NULL;
 }
+
+static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+	assert_spin_locked(&iovad->iova_rbtree_lock);
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+	free_iova_mem(iova);
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+	unsigned long flags;
+	struct iova *iova;
+
+	/* Take the lock so that no other thread is manipulating the rbtree */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	iova = private_find_iova(iovad, pfn);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return iova;
+}
 EXPORT_SYMBOL_GPL(find_iova);
 
 /**
@@ -344,10 +367,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	__cached_rbnode_delete_update(iovad, iova);
-	rb_erase(&iova->node, &iovad->rbroot);
+	private_free_iova(iovad, iova);
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-	free_iova_mem(iova);
 }
 EXPORT_SYMBOL_GPL(__free_iova);
 
@@ -370,6 +391,63 @@
 EXPORT_SYMBOL_GPL(free_iova);
 
 /**
+ * alloc_iova_fast - allocates an iova from rcache
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ * This function tries to satisfy an iova allocation from the rcache,
+ * and falls back to regular allocation on failure.
+ */
+unsigned long
+alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+		unsigned long limit_pfn)
+{
+	bool flushed_rcache = false;
+	unsigned long iova_pfn;
+	struct iova *new_iova;
+
+	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+	if (iova_pfn)
+		return iova_pfn;
+
+retry:
+	new_iova = alloc_iova(iovad, size, limit_pfn, true);
+	if (!new_iova) {
+		unsigned int cpu;
+
+		if (flushed_rcache)
+			return 0;
+
+		/* Try replenishing IOVAs by flushing rcache. */
+		flushed_rcache = true;
+		for_each_online_cpu(cpu)
+			free_cpu_cached_iovas(cpu, iovad);
+		goto retry;
+	}
+
+	return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
+
+/**
+ * free_iova_fast - free iova pfn range into rcache
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * @size: - # of pages in range
+ * This function frees an iova range by trying to put it into the rcache,
+ * falling back to regular iova deallocation via free_iova() if this fails.
+ */
+void
+free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
+{
+	if (iova_rcache_insert(iovad, pfn, size))
+		return;
+
+	free_iova(iovad, pfn);
+}
+EXPORT_SYMBOL_GPL(free_iova_fast);
+
+/**
  * put_iova_domain - destroys the iova doamin
  * @iovad: - iova domain in question.
  * All the iova's in that domain are destroyed.
@@ -379,6 +457,7 @@
 	struct rb_node *node;
 	unsigned long flags;
 
+	free_iova_rcaches(iovad);
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	node = rb_first(&iovad->rbroot);
 	while (node) {
@@ -550,5 +629,295 @@
 	return NULL;
 }
 
+/*
+ * Magazine caches for IOVA ranges.  For an introduction to magazines,
+ * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
+ * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
+ * For simplicity, we use a static magazine size and don't implement the
+ * dynamic size tuning described in the paper.
+ */
+
+#define IOVA_MAG_SIZE 128
+
+struct iova_magazine {
+	unsigned long size;
+	unsigned long pfns[IOVA_MAG_SIZE];
+};
+
+struct iova_cpu_rcache {
+	spinlock_t lock;
+	struct iova_magazine *loaded;
+	struct iova_magazine *prev;
+};
+
+static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
+{
+	return kzalloc(sizeof(struct iova_magazine), flags);
+}
+
+static void iova_magazine_free(struct iova_magazine *mag)
+{
+	kfree(mag);
+}
+
+static void
+iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
+{
+	unsigned long flags;
+	int i;
+
+	if (!mag)
+		return;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+	for (i = 0 ; i < mag->size; ++i) {
+		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
+
+		BUG_ON(!iova);
+		private_free_iova(iovad, iova);
+	}
+
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	mag->size = 0;
+}
+
+static bool iova_magazine_full(struct iova_magazine *mag)
+{
+	return (mag && mag->size == IOVA_MAG_SIZE);
+}
+
+static bool iova_magazine_empty(struct iova_magazine *mag)
+{
+	return (!mag || mag->size == 0);
+}
+
+static unsigned long iova_magazine_pop(struct iova_magazine *mag,
+				       unsigned long limit_pfn)
+{
+	BUG_ON(iova_magazine_empty(mag));
+
+	if (mag->pfns[mag->size - 1] >= limit_pfn)
+		return 0;
+
+	return mag->pfns[--mag->size];
+}
+
+static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
+{
+	BUG_ON(iova_magazine_full(mag));
+
+	mag->pfns[mag->size++] = pfn;
+}
+
+static void init_iova_rcaches(struct iova_domain *iovad)
+{
+	struct iova_cpu_rcache *cpu_rcache;
+	struct iova_rcache *rcache;
+	unsigned int cpu;
+	int i;
+
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		spin_lock_init(&rcache->lock);
+		rcache->depot_size = 0;
+		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
+		if (WARN_ON(!rcache->cpu_rcaches))
+			continue;
+		for_each_possible_cpu(cpu) {
+			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+			spin_lock_init(&cpu_rcache->lock);
+			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
+			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+		}
+	}
+}
+
+/*
+ * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
+ * return true on success.  Can fail if rcache is full and we can't free
+ * space, in which case free_iova_fast() (our only caller) falls back to
+ * free_iova() and returns the IOVA range to the rbtree instead.
+ */
+static bool __iova_rcache_insert(struct iova_domain *iovad,
+				 struct iova_rcache *rcache,
+				 unsigned long iova_pfn)
+{
+	struct iova_magazine *mag_to_free = NULL;
+	struct iova_cpu_rcache *cpu_rcache;
+	bool can_insert = false;
+	unsigned long flags;
+
+	cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+	spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+	if (!iova_magazine_full(cpu_rcache->loaded)) {
+		can_insert = true;
+	} else if (!iova_magazine_full(cpu_rcache->prev)) {
+		swap(cpu_rcache->prev, cpu_rcache->loaded);
+		can_insert = true;
+	} else {
+		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);
+
+		if (new_mag) {
+			spin_lock(&rcache->lock);
+			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
+				rcache->depot[rcache->depot_size++] =
+						cpu_rcache->loaded;
+			} else {
+				mag_to_free = cpu_rcache->loaded;
+			}
+			spin_unlock(&rcache->lock);
+
+			cpu_rcache->loaded = new_mag;
+			can_insert = true;
+		}
+	}
+
+	if (can_insert)
+		iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+
+	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+
+	if (mag_to_free) {
+		iova_magazine_free_pfns(mag_to_free, iovad);
+		iova_magazine_free(mag_to_free);
+	}
+
+	return can_insert;
+}
+
+static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
+			       unsigned long size)
+{
+	unsigned int log_size = order_base_2(size);
+
+	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+		return false;
+
+	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
+}
+
+/*
+ * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
+ * satisfy the request, return a matching non-NULL range and remove
+ * it from the 'rcache'.
+ */
+static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
+				       unsigned long limit_pfn)
+{
+	struct iova_cpu_rcache *cpu_rcache;
+	unsigned long iova_pfn = 0;
+	bool has_pfn = false;
+	unsigned long flags;
+
+	cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+	spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+	if (!iova_magazine_empty(cpu_rcache->loaded)) {
+		has_pfn = true;
+	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
+		swap(cpu_rcache->prev, cpu_rcache->loaded);
+		has_pfn = true;
+	} else {
+		spin_lock(&rcache->lock);
+		if (rcache->depot_size > 0) {
+			iova_magazine_free(cpu_rcache->loaded);
+			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+			has_pfn = true;
+		}
+		spin_unlock(&rcache->lock);
+	}
+
+	if (has_pfn)
+		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+
+	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+
+	return iova_pfn;
+}
+
+/*
+ * Try to satisfy IOVA allocation range from rcache.  Fail if requested
+ * size is too big or the DMA limit we are given isn't satisfied by the
+ * top element in the magazine.
+ */
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+				     unsigned long size,
+				     unsigned long limit_pfn)
+{
+	unsigned int log_size = order_base_2(size);
+
+	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+		return 0;
+
+	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
+}
+
+/*
+ * Free a cpu's rcache.
+ */
+static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
+				 struct iova_rcache *rcache)
+{
+	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+	iova_magazine_free(cpu_rcache->loaded);
+
+	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+	iova_magazine_free(cpu_rcache->prev);
+
+	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+}
+
+/*
+ * free rcache data structures.
+ */
+static void free_iova_rcaches(struct iova_domain *iovad)
+{
+	struct iova_rcache *rcache;
+	unsigned long flags;
+	unsigned int cpu;
+	int i, j;
+
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		for_each_possible_cpu(cpu)
+			free_cpu_iova_rcache(cpu, iovad, rcache);
+		spin_lock_irqsave(&rcache->lock, flags);
+		free_percpu(rcache->cpu_rcaches);
+		for (j = 0; j < rcache->depot_size; ++j) {
+			iova_magazine_free_pfns(rcache->depot[j], iovad);
+			iova_magazine_free(rcache->depot[j]);
+		}
+		spin_unlock_irqrestore(&rcache->lock, flags);
+	}
+}
+
+/*
+ * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
+ */
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+{
+	struct iova_cpu_rcache *cpu_rcache;
+	struct iova_rcache *rcache;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+		spin_lock_irqsave(&cpu_rcache->lock, flags);
+		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+	}
+}
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
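
For drivers layered on the IOVA allocator, the new entry points form a drop-in fast path: init_iova_domain() now sets up the per-CPU caches itself, so callers only need to switch their hot paths over. A hedged usage sketch (the wrapper names are invented; the signatures follow the definitions added in iova.c above):

#include <linux/iova.h>

/* Allocate 'nrpages' of IOVA space below 'limit_pfn', preferring the
 * per-CPU caches and falling back to the rbtree allocator. Returns the
 * starting pfn, or 0 on failure. */
static unsigned long example_get_window(struct iova_domain *iovad,
					unsigned long nrpages,
					unsigned long limit_pfn)
{
	return alloc_iova_fast(iovad, nrpages, limit_pfn);
}

/* Release a window: cached per-CPU when possible, otherwise returned to
 * the rbtree via free_iova(). A CPU-hotplug notifier should also call
 * free_cpu_cached_iovas(cpu, iovad) so a dead CPU's magazines get drained,
 * as the intel-iommu hunks above do. */
static void example_put_window(struct iova_domain *iovad,
			       unsigned long pfn, unsigned long nrpages)
{
	free_iova_fast(iovad, pfn, nrpages);
}
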
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index eb5eb0c..2223b3f 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -182,7 +182,7 @@
 	writel_relaxed(0, clps711x_intc->intmr[2]);
 
 	err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
-	if (IS_ERR_VALUE(err))
+	if (err < 0)
 		goto out_iounmap;
 
 	clps711x_intc->ops.map = clps711x_intc_irq_map;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 6bd881b..5eb1f9e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
 
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
 
@@ -82,6 +83,7 @@
 	u64			flags;
 	u32			ite_size;
 	u32			device_ids;
+	int			numa_node;
 };
 
 #define ITS_ITT_ALIGN		SZ_256
@@ -613,11 +615,23 @@
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
-	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	unsigned int cpu;
+	const struct cpumask *cpu_mask = cpu_online_mask;
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
 
+	/* LPI cannot be routed to a redistributor on a foreign node */
+	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+		if (its_dev->its->numa_node >= 0) {
+			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+			if (!cpumask_intersects(mask_val, cpu_mask))
+				return -EINVAL;
+		}
+	}
+
+	cpu = cpumask_any_and(mask_val, cpu_mask);
+
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
@@ -1101,6 +1115,16 @@
 	list_for_each_entry(its, &its_nodes, entry) {
 		u64 target;
 
+		/* avoid cross node collections and its mapping */
+		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+			struct device_node *cpu_node;
+
+			cpu_node = of_get_cpu_node(cpu, NULL);
+			if (its->numa_node != NUMA_NO_NODE &&
+				its->numa_node != of_node_to_nid(cpu_node))
+				continue;
+		}
+
 		/*
 		 * We now have to bind each collection to its target
 		 * redistributor.
@@ -1351,9 +1375,14 @@
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
+	const struct cpumask *cpu_mask = cpu_online_mask;
+
+	/* get the cpu_mask of local node */
+	if (its_dev->its->numa_node >= 0)
+		cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
 	/* Bind the LPI to the first possible CPU */
-	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
 	/* Map the GIC IRQ and event to the device */
 	its_send_mapvi(its_dev, d->hwirq, event);
@@ -1443,6 +1472,13 @@
 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
 }
 
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+	struct its_node *its = data;
+
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
 	{
@@ -1452,6 +1488,14 @@
 		.init	= its_enable_quirk_cavium_22375,
 	},
 #endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+	{
+		.desc	= "ITS: Cavium erratum 23144",
+		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
+		.mask	= 0xffff0fff,
+		.init	= its_enable_quirk_cavium_23144,
+	},
+#endif
 	{
 	}
 };
@@ -1514,6 +1558,7 @@
 	its->base = its_base;
 	its->phys_base = res.start;
 	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+	its->numa_node = of_node_to_nid(node);
 
 	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
 	if (!its->cmd_base) {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fb042ba..2c5ba0e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -155,7 +155,7 @@
 
 	while (count--) {
 		val = readl_relaxed(rbase + GICR_WAKER);
-		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
 			break;
 		cpu_relax();
 		udelay(1);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b4e6471..fbc4ae2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1123,7 +1123,7 @@
 
 		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
 					   numa_node_id());
-		if (IS_ERR_VALUE(irq_base)) {
+		if (irq_base < 0) {
 			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 			     irq_start);
 			irq_base = irq_start;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9688d2e..9e25d8c 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -402,7 +402,7 @@
 	nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
 
 	irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
-	if (IS_ERR_VALUE(irq_base)) {
+	if (irq_base < 0) {
 		pr_err("failed to allocate IRQ numbers\n");
 		return -EINVAL;
 	}
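
Several hunks in this section replace IS_ERR_VALUE(ret) with a plain ret < 0 where ret is an ordinary int error code. The macro is aimed at unsigned long "pointer or errno" values and essentially tests x >= (unsigned long)-MAX_ERRNO, which makes it fragile on plain integers. A small hedged sketch of the difference (illustrative only; it spells out the underlying comparison rather than using the macro itself):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void example_err_checks(void)
{
	int ret = -EINVAL;
	unsigned int uret = ret;	/* the same errno stored in a u32 */

	/* the signed value sign-extends, so the range test happens to work */
	pr_info("int:     %d\n", (unsigned long)ret >= (unsigned long)-MAX_ERRNO);
	/* the unsigned copy zero-extends on 64-bit and is silently missed */
	pr_info("uint:    %d\n", (unsigned long)uret >= (unsigned long)-MAX_ERRNO);
	/* the plain check has no such trap */
	pr_info("ret < 0: %d\n", ret < 0);
}
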
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c089f49..3b5e10a 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -968,7 +968,7 @@
 			      unsigned int cpu_vec, unsigned int irqbase,
 			      struct device_node *node)
 {
-	unsigned int gicconfig;
+	unsigned int gicconfig, cpu;
 	unsigned int v[2];
 
 	__gic_base_addr = gic_base_addr;
@@ -985,6 +985,14 @@
 	gic_vpes = gic_vpes + 1;
 
 	if (cpu_has_veic) {
+		/* Set EIC mode for all VPEs */
+		for_each_present_cpu(cpu) {
+			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+				  mips_cm_vp_id(cpu));
+			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
+				  GIC_VPE_CTL_EIC_MODE_MSK);
+		}
+
 		/* Always use vector 1 in EIC mode */
 		gic_cpu_pin = 0;
 		timer_cpu_pin = gic_cpu_pin;
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index e7155db..73addb4 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -91,7 +91,7 @@
 	/* set polarity for external interrupts only */
 	for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
 		if (priv->ext_irqs[i] == data->hwirq) {
-			ret = pic32_set_ext_polarity(i + 1, flow_type);
+			ret = pic32_set_ext_polarity(i, flow_type);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1ccd2ab..1518ba3 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -232,7 +232,7 @@
 		nr_irqs += shirq_blocks[i]->nr_irqs;
 
 	virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
-	if (IS_ERR_VALUE(virq_base)) {
+	if (virq_base < 0) {
 		pr_err("%s: irq desc alloc failed\n", __func__);
 		goto err_unmap;
 	}
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 4783bac..a9145aa 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -91,6 +91,7 @@
 		       struct led_pwm *led, struct device_node *child)
 {
 	struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
+	struct pwm_args pargs;
 	int ret;
 
 	led_data->active_low = led->active_low;
@@ -117,7 +118,15 @@
 	else
 		led_data->cdev.brightness_set_blocking = led_pwm_set_blocking;
 
-	led_data->period = pwm_get_period(led_data->pwm);
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to the
+	 * atomic PWM API.
+	 */
+	pwm_apply_args(led_data->pwm);
+
+	pwm_get_args(led_data->pwm, &pargs);
+
+	led_data->period = pargs.period;
 	if (!led_data->period && (led->pwm_period_ns > 0))
 		led_data->period = led->pwm_period_ns;
 
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8eeab72..ca4abe1 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -64,7 +64,6 @@
 #include "btree.h"
 
 #include <linux/blkdev.h>
-#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <trace/events/bcache.h>
@@ -288,7 +287,6 @@
 		if (kthread_should_stop())				\
 			return 0;					\
 									\
-		try_to_freeze();					\
 		schedule();						\
 		mutex_lock(&(ca)->set->bucket_lock);			\
 	}								\
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 22b9e34..eab505e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -27,7 +27,6 @@
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
-#include <linux/freezer.h>
 #include <linux/hash.h>
 #include <linux/kthread.h>
 #include <linux/prefetch.h>
@@ -1787,7 +1786,6 @@
 
 		mutex_unlock(&c->bucket_lock);
 
-		try_to_freeze();
 		schedule();
 	}
 
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd..6012367 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -12,7 +12,6 @@
 #include "writeback.h"
 
 #include <linux/delay.h>
-#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <trace/events/bcache.h>
 
@@ -228,7 +227,6 @@
 	 */
 
 	while (!kthread_should_stop()) {
-		try_to_freeze();
 
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
@@ -433,7 +431,6 @@
 			if (kthread_should_stop())
 				return 0;
 
-			try_to_freeze();
 			schedule();
 			continue;
 		}
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 9e1731c..e191e29 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -95,7 +95,7 @@
 	int rval;
 
 	fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
-	if (IS_ERR_VALUE(fault))
+	if (fault < 0)
 		return fault;
 
 	flash->fault |= fault;
@@ -105,13 +105,13 @@
 
 	/* Clear faults. */
 	rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
-	if (IS_ERR_VALUE(rval))
+	if (rval < 0)
 		return rval;
 
 	flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
 
 	rval = adp1653_update_hw(flash);
-	if (IS_ERR_VALUE(rval))
+	if (rval)
 		return rval;
 
 	return flash->fault;
@@ -158,7 +158,7 @@
 	int rval;
 
 	rval = adp1653_get_fault(flash);
-	if (IS_ERR_VALUE(rval))
+	if (rval)
 		return rval;
 
 	ctrl->cur.val = 0;
@@ -184,7 +184,7 @@
 	int rval;
 
 	rval = adp1653_get_fault(flash);
-	if (IS_ERR_VALUE(rval))
+	if (rval)
 		return rval;
 	if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
 		     ADP1653_REG_FAULT_FLT_OT |
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 5ef6777..8a5d194 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -146,7 +146,7 @@
 
 	/* returning 1 means that power is already enabled,
 	 * so zero success be returned */
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	return 0;
 }
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 95a7388..09e0f58 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -398,6 +398,8 @@
 }
 
 #define AF9015_EEPROM_SIZE 256
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
 
 /* hash (and dump) eeprom */
 static int af9015_eeprom_hash(struct dvb_usb_device *d)
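
The new comment spells the constant out as a sum of powers of two, and the arithmetic does come to 0x9e370001 (2654404609); a throwaway check, not part of the driver:

#include <assert.h>

int main(void)
{
	/* the power-of-two expansion quoted in the comment above */
	unsigned long v = (1UL << 31) + (1UL << 29) - (1UL << 25)
			+ (1UL << 22) - (1UL << 19) - (1UL << 16) + 1;

	assert(v == 0x9e370001UL);	/* 2654404609 */
	return 0;
}
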
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c61a284..81ddb17 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -51,6 +51,7 @@
 
 config OMAP_GPMC
 	bool
+	select GPIOLIB
 	help
 	  This driver is for the General Purpose Memory Controller (GPMC)
 	  present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2a691da..904b4af 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -59,11 +59,11 @@
 {
 	int i = 0;
 
-	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs)
 		return -ENODEV;
 
 	for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
-		u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+		u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
 		if (cspr & CSPR_V && (cspr & CSPR_BA) ==
 				convert_ifc_address(addr_base))
 			return i;
@@ -75,7 +75,7 @@
 
 static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
 {
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
 
 	/*
 	 * Clear all the common status and event registers
@@ -104,7 +104,7 @@
 	irq_dispose_mapping(ctrl->nand_irq);
 	irq_dispose_mapping(ctrl->irq);
 
-	iounmap(ctrl->regs);
+	iounmap(ctrl->gregs);
 
 	dev_set_drvdata(&dev->dev, NULL);
 	kfree(ctrl);
@@ -122,7 +122,7 @@
 
 static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
 {
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
 	unsigned long flags;
 	u32 stat;
 
@@ -157,7 +157,7 @@
 static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
 {
 	struct fsl_ifc_ctrl *ctrl = data;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
 	u32 err_axiid, err_srcid, status, cs_err, err_addr;
 	irqreturn_t ret = IRQ_NONE;
 
@@ -215,6 +215,7 @@
 {
 	int ret = 0;
 	int version, banks;
+	void __iomem *addr;
 
 	dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
 
@@ -225,22 +226,13 @@
 	dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
 
 	/* IOMAP the entire IFC region */
-	fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
-	if (!fsl_ifc_ctrl_dev->regs) {
+	fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
+	if (!fsl_ifc_ctrl_dev->gregs) {
 		dev_err(&dev->dev, "failed to get memory region\n");
 		ret = -ENODEV;
 		goto err;
 	}
 
-	version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
-			FSL_IFC_VERSION_MASK;
-	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
-	dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
-		version >> 24, (version >> 16) & 0xf, banks);
-
-	fsl_ifc_ctrl_dev->version = version;
-	fsl_ifc_ctrl_dev->banks = banks;
-
 	if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
 		fsl_ifc_ctrl_dev->little_endian = true;
 		dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
@@ -249,8 +241,9 @@
 		dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
 	}
 
-	version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+	version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) &
 			FSL_IFC_VERSION_MASK;
+
 	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
 	dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
 		version >> 24, (version >> 16) & 0xf, banks);
@@ -258,6 +251,13 @@
 	fsl_ifc_ctrl_dev->version = version;
 	fsl_ifc_ctrl_dev->banks = banks;
 
+	addr = fsl_ifc_ctrl_dev->gregs;
+	if (version >= FSL_IFC_VERSION_2_0_0)
+		addr += PGOFFSET_64K;
+	else
+		addr += PGOFFSET_4K;
+	fsl_ifc_ctrl_dev->rregs = addr;
+
 	/* get the Controller level irq */
 	fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
 	if (fsl_ifc_ctrl_dev->irq == 0) {
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 089091f..f6b5757 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -91,6 +91,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
 
 void mtk_smi_larb_put(struct device *larbdev)
 {
@@ -106,6 +107,7 @@
 	mtk_smi_disable(&larb->smi);
 	mtk_smi_disable(common);
 }
+EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
 
 static int
 mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 21825dd..af4884b 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -21,15 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_mtd.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/platform_data/mtd-nand-omap2.h>
@@ -81,6 +81,8 @@
 
 #define GPMC_CONFIG_LIMITEDADDRESS		BIT(1)
 
+#define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS	BIT(0)
+
 #define	GPMC_CONFIG2_CSEXTRADELAY		BIT(7)
 #define	GPMC_CONFIG3_ADVEXTRADELAY		BIT(7)
 #define	GPMC_CONFIG4_OEEXTRADELAY		BIT(7)
@@ -92,6 +94,14 @@
 #define GPMC_CS_SIZE		0x30
 #define	GPMC_BCH_SIZE		0x10
 
+/*
+ * The first 1MB of GPMC address space is typically mapped to
+ * the internal ROM. Never allocate the first page, to
+ * facilitate bug detection; even if we didn't boot from ROM.
+ * As GPMC minimum partition size is 16MB we can only start from
+ * there.
+ */
+#define GPMC_MEM_START		0x1000000
 #define GPMC_MEM_END		0x3FFFFFFF
 
 #define GPMC_CHUNK_SHIFT	24		/* 16 MB */
@@ -125,7 +135,6 @@
 #define GPMC_CONFIG_RDY_BSY	0x00000001
 #define GPMC_CONFIG_DEV_SIZE	0x00000002
 #define GPMC_CONFIG_DEV_TYPE	0x00000003
-#define GPMC_SET_IRQ_STATUS	0x00000004
 
 #define GPMC_CONFIG1_WRAPBURST_SUPP     (1 << 31)
 #define GPMC_CONFIG1_READMULTIPLE_SUPP  (1 << 30)
@@ -174,16 +183,12 @@
 #define GPMC_CONFIG_WRITEPROTECT	0x00000010
 #define WR_RD_PIN_MONITORING		0x00600000
 
-#define GPMC_ENABLE_IRQ		0x0000000d
-
 /* ECC commands */
 #define GPMC_ECC_READ		0 /* Reset Hardware ECC for read */
 #define GPMC_ECC_WRITE		1 /* Reset Hardware ECC for write */
 #define GPMC_ECC_READSYN	2 /* Reset before syndrom is read back */
 
-/* XXX: Only NAND irq has been considered,currently these are the only ones used
- */
-#define	GPMC_NR_IRQ		2
+#define	GPMC_NR_NAND_IRQS	2 /* number of NAND specific IRQs */
 
 enum gpmc_clk_domain {
 	GPMC_CD_FCLK,
@@ -199,11 +204,6 @@
 	struct resource mem;
 };
 
-struct gpmc_client_irq	{
-	unsigned		irq;
-	u32			bitmask;
-};
-
 /* Structure to save gpmc cs context */
 struct gpmc_cs_config {
 	u32 config1;
@@ -231,9 +231,15 @@
 	struct gpmc_cs_config cs_context[GPMC_CS_NUM];
 };
 
-static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
-static struct irq_chip gpmc_irq_chip;
-static int gpmc_irq_start;
+struct gpmc_device {
+	struct device *dev;
+	int irq;
+	struct irq_chip irq_chip;
+	struct gpio_chip gpio_chip;
+	int nirqs;
+};
+
+static struct irq_domain *gpmc_irq_domain;
 
 static struct resource	gpmc_mem_root;
 static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
@@ -241,8 +247,6 @@
 /* Define chip-selects as reserved by default until probe completes */
 static unsigned int gpmc_cs_num = GPMC_CS_NUM;
 static unsigned int gpmc_nr_waitpins;
-static struct device *gpmc_dev;
-static int gpmc_irq;
 static resource_size_t phys_base, mem_size;
 static unsigned gpmc_capability;
 static void __iomem *gpmc_base;
@@ -1054,14 +1058,6 @@
 	u32 regval;
 
 	switch (cmd) {
-	case GPMC_ENABLE_IRQ:
-		gpmc_write_reg(GPMC_IRQENABLE, wval);
-		break;
-
-	case GPMC_SET_IRQ_STATUS:
-		gpmc_write_reg(GPMC_IRQSTATUS, wval);
-		break;
-
 	case GPMC_CONFIG_WP:
 		regval = gpmc_read_reg(GPMC_CONFIG);
 		if (wval)
@@ -1084,7 +1080,7 @@
 {
 	int i;
 
-	reg->gpmc_status = gpmc_base + GPMC_STATUS;
+	reg->gpmc_status = NULL;	/* deprecated */
 	reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
 				GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
 	reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
@@ -1118,87 +1114,201 @@
 	}
 }
 
-int gpmc_get_client_irq(unsigned irq_config)
+static bool gpmc_nand_writebuffer_empty(void)
 {
-	int i;
+	if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
+		return true;
 
-	if (hweight32(irq_config) > 1)
-		return 0;
-
-	for (i = 0; i < GPMC_NR_IRQ; i++)
-		if (gpmc_client_irq[i].bitmask & irq_config)
-			return gpmc_client_irq[i].irq;
-
-	return 0;
+	return false;
 }
 
-static int gpmc_irq_endis(unsigned irq, bool endis)
+static struct gpmc_nand_ops nand_ops = {
+	.nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
+};
+
+/**
+ * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
+ * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @cs: GPMC chip select number on which the NAND sits. The
+ *      register map returned will be specific to this chip select.
+ *
+ * Returns NULL on error e.g. invalid cs.
+ */
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
 {
-	int i;
+	if (cs >= gpmc_cs_num)
+		return NULL;
+
+	gpmc_update_nand_reg(reg, cs);
+
+	return &nand_ops;
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+
+int gpmc_get_client_irq(unsigned irq_config)
+{
+	if (!gpmc_irq_domain) {
+		pr_warn("%s called before GPMC IRQ domain available\n",
+			__func__);
+		return 0;
+	}
+
+	/* we restrict this to NAND IRQs only */
+	if (irq_config >= GPMC_NR_NAND_IRQS)
+		return 0;
+
+	return irq_create_mapping(gpmc_irq_domain, irq_config);
+}
+
+static int gpmc_irq_endis(unsigned long hwirq, bool endis)
+{
 	u32 regval;
 
-	for (i = 0; i < GPMC_NR_IRQ; i++)
-		if (irq == gpmc_client_irq[i].irq) {
-			regval = gpmc_read_reg(GPMC_IRQENABLE);
-			if (endis)
-				regval |= gpmc_client_irq[i].bitmask;
-			else
-				regval &= ~gpmc_client_irq[i].bitmask;
-			gpmc_write_reg(GPMC_IRQENABLE, regval);
-			break;
-		}
+	/* bits GPMC_NR_NAND_IRQS to 8 are reserved */
+	if (hwirq >= GPMC_NR_NAND_IRQS)
+		hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+	regval = gpmc_read_reg(GPMC_IRQENABLE);
+	if (endis)
+		regval |= BIT(hwirq);
+	else
+		regval &= ~BIT(hwirq);
+	gpmc_write_reg(GPMC_IRQENABLE, regval);
 
 	return 0;
 }
 
 static void gpmc_irq_disable(struct irq_data *p)
 {
-	gpmc_irq_endis(p->irq, false);
+	gpmc_irq_endis(p->hwirq, false);
 }
 
 static void gpmc_irq_enable(struct irq_data *p)
 {
-	gpmc_irq_endis(p->irq, true);
+	gpmc_irq_endis(p->hwirq, true);
 }
 
-static void gpmc_irq_noop(struct irq_data *data) { }
-
-static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
-
-static int gpmc_setup_irq(void)
+static void gpmc_irq_mask(struct irq_data *d)
 {
-	int i;
+	gpmc_irq_endis(d->hwirq, false);
+}
+
+static void gpmc_irq_unmask(struct irq_data *d)
+{
+	gpmc_irq_endis(d->hwirq, true);
+}
+
+static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge)
+{
 	u32 regval;
 
-	if (!gpmc_irq)
+	/* NAND IRQ polarity is not configurable */
+	if (hwirq < GPMC_NR_NAND_IRQS)
+		return;
+
+	/* WAITPIN starts at BIT 8 */
+	hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+	regval = gpmc_read_reg(GPMC_CONFIG);
+	if (rising_edge)
+		regval &= ~BIT(hwirq);
+	else
+		regval |= BIT(hwirq);
+
+	gpmc_write_reg(GPMC_CONFIG, regval);
+}
+
+static void gpmc_irq_ack(struct irq_data *d)
+{
+	unsigned int hwirq = d->hwirq;
+
+	/* skip reserved bits */
+	if (hwirq >= GPMC_NR_NAND_IRQS)
+		hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+	/* Setting bit to 1 clears (or Acks) the interrupt */
+	gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq));
+}
+
+static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+	/* can't set type for NAND IRQs */
+	if (d->hwirq < GPMC_NR_NAND_IRQS)
 		return -EINVAL;
 
-	gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
-	if (gpmc_irq_start < 0) {
-		pr_err("irq_alloc_descs failed\n");
-		return gpmc_irq_start;
+	/* We support either a rising or a falling edge, not both at once */
+	if (trigger == IRQ_TYPE_EDGE_FALLING)
+		gpmc_irq_edge_config(d->hwirq, false);
+	else if (trigger == IRQ_TYPE_EDGE_RISING)
+		gpmc_irq_edge_config(d->hwirq, true);
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
+			irq_hw_number_t hw)
+{
+	struct gpmc_device *gpmc = d->host_data;
+
+	irq_set_chip_data(virq, gpmc);
+	if (hw < GPMC_NR_NAND_IRQS) {
+		irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
+		irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+					 handle_simple_irq);
+	} else {
+		irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+					 handle_edge_irq);
 	}
 
-	gpmc_irq_chip.name = "gpmc";
-	gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
-	gpmc_irq_chip.irq_enable = gpmc_irq_enable;
-	gpmc_irq_chip.irq_disable = gpmc_irq_disable;
-	gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
-	gpmc_irq_chip.irq_ack = gpmc_irq_noop;
-	gpmc_irq_chip.irq_mask = gpmc_irq_noop;
-	gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
+	return 0;
+}
 
-	gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
-	gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
+static const struct irq_domain_ops gpmc_irq_domain_ops = {
+	.map    = gpmc_irq_map,
+	.xlate  = irq_domain_xlate_twocell,
+};
 
-	for (i = 0; i < GPMC_NR_IRQ; i++) {
-		gpmc_client_irq[i].irq = gpmc_irq_start + i;
-		irq_set_chip_and_handler(gpmc_client_irq[i].irq,
-					&gpmc_irq_chip, handle_simple_irq);
-		irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST,
-				  IRQ_NOAUTOEN);
+static irqreturn_t gpmc_handle_irq(int irq, void *data)
+{
+	int hwirq, virq;
+	u32 regval, regvalx;
+	struct gpmc_device *gpmc = data;
+
+	regval = gpmc_read_reg(GPMC_IRQSTATUS);
+	regvalx = regval;
+
+	if (!regval)
+		return IRQ_NONE;
+
+	for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) {
+		/* skip reserved status bits */
+		if (hwirq == GPMC_NR_NAND_IRQS)
+			regvalx >>= 8 - GPMC_NR_NAND_IRQS;
+
+		if (regvalx & BIT(hwirq)) {
+			virq = irq_find_mapping(gpmc_irq_domain, hwirq);
+			if (!virq) {
+				dev_warn(gpmc->dev,
+					 "spurious irq detected hwirq %d, virq %d\n",
+					 hwirq, virq);
+			}
+
+			generic_handle_irq(virq);
+		}
 	}
 
+	gpmc_write_reg(GPMC_IRQSTATUS, regval);
+
+	return IRQ_HANDLED;
+}
+
+static int gpmc_setup_irq(struct gpmc_device *gpmc)
+{
+	u32 regval;
+	int rc;
+
 	/* Disable interrupts */
 	gpmc_write_reg(GPMC_IRQENABLE, 0);
 
@@ -1206,22 +1316,45 @@
 	regval = gpmc_read_reg(GPMC_IRQSTATUS);
 	gpmc_write_reg(GPMC_IRQSTATUS, regval);
 
-	return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
-}
+	gpmc->irq_chip.name = "gpmc";
+	gpmc->irq_chip.irq_enable = gpmc_irq_enable;
+	gpmc->irq_chip.irq_disable = gpmc_irq_disable;
+	gpmc->irq_chip.irq_ack = gpmc_irq_ack;
+	gpmc->irq_chip.irq_mask = gpmc_irq_mask;
+	gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
+	gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
 
-static int gpmc_free_irq(void)
-{
-	int i;
-
-	if (gpmc_irq)
-		free_irq(gpmc_irq, NULL);
-
-	for (i = 0; i < GPMC_NR_IRQ; i++) {
-		irq_set_handler(gpmc_client_irq[i].irq, NULL);
-		irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
+	gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
+						gpmc->nirqs,
+						&gpmc_irq_domain_ops,
+						gpmc);
+	if (!gpmc_irq_domain) {
+		dev_err(gpmc->dev, "IRQ domain add failed\n");
+		return -ENODEV;
 	}
 
-	irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
+	rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
+	if (rc) {
+		dev_err(gpmc->dev, "failed to request irq %d: %d\n",
+			gpmc->irq, rc);
+		irq_domain_remove(gpmc_irq_domain);
+		gpmc_irq_domain = NULL;
+	}
+
+	return rc;
+}
+
+static int gpmc_free_irq(struct gpmc_device *gpmc)
+{
+	int hwirq;
+
+	free_irq(gpmc->irq, gpmc);
+
+	for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++)
+		irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq));
+
+	irq_domain_remove(gpmc_irq_domain);
+	gpmc_irq_domain = NULL;
 
 	return 0;
 }
@@ -1242,12 +1375,7 @@
 {
 	int cs;
 
-	/*
-	 * The first 1MB of GPMC address space is typically mapped to
-	 * the internal ROM. Never allocate the first page, to
-	 * facilitate bug detection; even if we didn't boot from ROM.
-	 */
-	gpmc_mem_root.start = SZ_1M;
+	gpmc_mem_root.start = GPMC_MEM_START;
 	gpmc_mem_root.end = GPMC_MEM_END;
 
 	/* Reserve all regions that has been set up by bootloader */
@@ -1796,105 +1924,6 @@
 		of_property_read_bool(np, "gpmc,time-para-granularity");
 }
 
-#if IS_ENABLED(CONFIG_MTD_NAND)
-
-static const char * const nand_xfer_types[] = {
-	[NAND_OMAP_PREFETCH_POLLED]		= "prefetch-polled",
-	[NAND_OMAP_POLLED]			= "polled",
-	[NAND_OMAP_PREFETCH_DMA]		= "prefetch-dma",
-	[NAND_OMAP_PREFETCH_IRQ]		= "prefetch-irq",
-};
-
-static int gpmc_probe_nand_child(struct platform_device *pdev,
-				 struct device_node *child)
-{
-	u32 val;
-	const char *s;
-	struct gpmc_timings gpmc_t;
-	struct omap_nand_platform_data *gpmc_nand_data;
-
-	if (of_property_read_u32(child, "reg", &val) < 0) {
-		dev_err(&pdev->dev, "%s has no 'reg' property\n",
-			child->full_name);
-		return -ENODEV;
-	}
-
-	gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
-				      GFP_KERNEL);
-	if (!gpmc_nand_data)
-		return -ENOMEM;
-
-	gpmc_nand_data->cs = val;
-	gpmc_nand_data->of_node = child;
-
-	/* Detect availability of ELM module */
-	gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
-	if (gpmc_nand_data->elm_of_node == NULL)
-		gpmc_nand_data->elm_of_node =
-					of_parse_phandle(child, "elm_id", 0);
-
-	/* select ecc-scheme for NAND */
-	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
-		pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
-		return -ENODEV;
-	}
-
-	if (!strcmp(s, "sw"))
-		gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
-	else if (!strcmp(s, "ham1") ||
-		 !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
-		gpmc_nand_data->ecc_opt =
-				OMAP_ECC_HAM1_CODE_HW;
-	else if (!strcmp(s, "bch4"))
-		if (gpmc_nand_data->elm_of_node)
-			gpmc_nand_data->ecc_opt =
-				OMAP_ECC_BCH4_CODE_HW;
-		else
-			gpmc_nand_data->ecc_opt =
-				OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
-	else if (!strcmp(s, "bch8"))
-		if (gpmc_nand_data->elm_of_node)
-			gpmc_nand_data->ecc_opt =
-				OMAP_ECC_BCH8_CODE_HW;
-		else
-			gpmc_nand_data->ecc_opt =
-				OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
-	else if (!strcmp(s, "bch16"))
-		if (gpmc_nand_data->elm_of_node)
-			gpmc_nand_data->ecc_opt =
-				OMAP_ECC_BCH16_CODE_HW;
-		else
-			pr_err("%s: BCH16 requires ELM support\n", __func__);
-	else
-		pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
-
-	/* select data transfer mode for NAND controller */
-	if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
-		for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
-			if (!strcasecmp(s, nand_xfer_types[val])) {
-				gpmc_nand_data->xfer_type = val;
-				break;
-			}
-
-	gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);
-
-	val = of_get_nand_bus_width(child);
-	if (val == 16)
-		gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
-
-	gpmc_read_timings_dt(child, &gpmc_t);
-	gpmc_nand_init(gpmc_nand_data, &gpmc_t);
-
-	return 0;
-}
-#else
-static int gpmc_probe_nand_child(struct platform_device *pdev,
-				 struct device_node *child)
-{
-	return 0;
-}
-#endif
-
 #if IS_ENABLED(CONFIG_MTD_ONENAND)
 static int gpmc_probe_onenand_child(struct platform_device *pdev,
 				 struct device_node *child)
@@ -1950,6 +1979,8 @@
 	const char *name;
 	int ret, cs;
 	u32 val;
+	struct gpio_desc *waitpin_desc = NULL;
+	struct gpmc_device *gpmc = platform_get_drvdata(pdev);
 
 	if (of_property_read_u32(child, "reg", &cs) < 0) {
 		dev_err(&pdev->dev, "%s has no 'reg' property\n",
@@ -2010,23 +2041,80 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
 			cs, &res.start);
+		if (res.start < GPMC_MEM_START) {
+			dev_info(&pdev->dev,
+				 "GPMC CS %d start cannot be lesser than 0x%x\n",
+				 cs, GPMC_MEM_START);
+		} else if (res.end > GPMC_MEM_END) {
+			dev_info(&pdev->dev,
+				 "GPMC CS %d end cannot be greater than 0x%x\n",
+				 cs, GPMC_MEM_END);
+		}
 		goto err;
 	}
 
-	ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width);
-	if (ret < 0)
-		goto err;
+	if (of_node_cmp(child->name, "nand") == 0) {
+		/* Warn about older DT blobs with no compatible property */
+		if (!of_property_read_bool(child, "compatible")) {
+			dev_warn(&pdev->dev,
+				 "Incompatible NAND node: missing compatible");
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	if (of_device_is_compatible(child, "ti,omap2-nand")) {
+		/* NAND specific setup */
+		val = 8;
+		of_property_read_u32(child, "nand-bus-width", &val);
+		switch (val) {
+		case 8:
+			gpmc_s.device_width = GPMC_DEVWIDTH_8BIT;
+			break;
+		case 16:
+			gpmc_s.device_width = GPMC_DEVWIDTH_16BIT;
+			break;
+		default:
+			dev_err(&pdev->dev, "%s: invalid 'nand-bus-width'\n",
+				child->name);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* disable write protect */
+		gpmc_configure(GPMC_CONFIG_WP, 0);
+		gpmc_s.device_nand = true;
+	} else {
+		ret = of_property_read_u32(child, "bank-width",
+					   &gpmc_s.device_width);
+		if (ret < 0)
+			goto err;
+	}
+
+	/* Reserve wait pin if it is required and valid */
+	if (gpmc_s.wait_on_read || gpmc_s.wait_on_write) {
+		unsigned int wait_pin = gpmc_s.wait_pin;
+
+		waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip,
+							 wait_pin, "WAITPIN");
+		if (IS_ERR(waitpin_desc)) {
+			dev_err(&pdev->dev, "invalid wait-pin: %d\n", wait_pin);
+			ret = PTR_ERR(waitpin_desc);
+			goto err;
+		}
+	}
 
 	gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
+
 	ret = gpmc_cs_program_settings(cs, &gpmc_s);
 	if (ret < 0)
-		goto err;
+		goto err_cs;
 
 	ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
 			child->name);
-		goto err;
+		goto err_cs;
 	}
 
 	/* Clear limited address i.e. enable A26-A11 */
@@ -2057,16 +2145,81 @@
 	dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
 	ret = -ENODEV;
 
+err_cs:
+	if (waitpin_desc)
+		gpiochip_free_own_desc(waitpin_desc);
+
 err:
 	gpmc_cs_free(cs);
 
 	return ret;
 }
 
+static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+	return 1;	/* we're input only */
+}
+
+static int gpmc_gpio_direction_input(struct gpio_chip *chip,
+				     unsigned int offset)
+{
+	return 0;	/* we're input only */
+}
+
+static int gpmc_gpio_direction_output(struct gpio_chip *chip,
+				      unsigned int offset, int value)
+{
+	return -EINVAL;	/* we're input only */
+}
+
+static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			  int value)
+{
+}
+
+static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	u32 reg;
+
+	offset += 8;
+
+	reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
+
+	return !!reg;
+}
+
+static int gpmc_gpio_init(struct gpmc_device *gpmc)
+{
+	int ret;
+
+	gpmc->gpio_chip.parent = gpmc->dev;
+	gpmc->gpio_chip.owner = THIS_MODULE;
+	gpmc->gpio_chip.label = DEVICE_NAME;
+	gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
+	gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
+	gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
+	gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
+	gpmc->gpio_chip.set = gpmc_gpio_set;
+	gpmc->gpio_chip.get = gpmc_gpio_get;
+	gpmc->gpio_chip.base = -1;
+
+	ret = gpiochip_add(&gpmc->gpio_chip);
+	if (ret < 0) {
+		dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void gpmc_gpio_exit(struct gpmc_device *gpmc)
+{
+	gpiochip_remove(&gpmc->gpio_chip);
+}
+
 static int gpmc_probe_dt(struct platform_device *pdev)
 {
 	int ret;
-	struct device_node *child;
 	const struct of_device_id *of_id =
 		of_match_device(gpmc_dt_ids, &pdev->dev);
 
@@ -2094,17 +2247,26 @@
 		return ret;
 	}
 
+	return 0;
+}
+
+static int gpmc_probe_dt_children(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *child;
+
 	for_each_available_child_of_node(pdev->dev.of_node, child) {
 
 		if (!child->name)
 			continue;
 
-		if (of_node_cmp(child->name, "nand") == 0)
-			ret = gpmc_probe_nand_child(pdev, child);
-		else if (of_node_cmp(child->name, "onenand") == 0)
+		if (of_node_cmp(child->name, "onenand") == 0)
 			ret = gpmc_probe_onenand_child(pdev, child);
 		else
 			ret = gpmc_probe_generic_child(pdev, child);
+
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -2114,6 +2276,11 @@
 {
 	return 0;
 }
+
+static int gpmc_probe_dt_children(struct platform_device *pdev)
+{
+	return 0;
+}
 #endif
 
 static int gpmc_probe(struct platform_device *pdev)
@@ -2121,6 +2288,14 @@
 	int rc;
 	u32 l;
 	struct resource *res;
+	struct gpmc_device *gpmc;
+
+	gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL);
+	if (!gpmc)
+		return -ENOMEM;
+
+	gpmc->dev = &pdev->dev;
+	platform_set_drvdata(pdev, gpmc);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL)
@@ -2134,15 +2309,16 @@
 		return PTR_ERR(gpmc_base);
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res == NULL)
-		dev_warn(&pdev->dev, "Failed to get resource: irq\n");
-	else
-		gpmc_irq = res->start;
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get resource: irq\n");
+		return -ENOENT;
+	}
+
+	gpmc->irq = res->start;
 
 	gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
 	if (IS_ERR(gpmc_l3_clk)) {
 		dev_err(&pdev->dev, "Failed to get GPMC fck\n");
-		gpmc_irq = 0;
 		return PTR_ERR(gpmc_l3_clk);
 	}
 
@@ -2151,11 +2327,18 @@
 		return -EINVAL;
 	}
 
+	if (pdev->dev.of_node) {
+		rc = gpmc_probe_dt(pdev);
+		if (rc)
+			return rc;
+	} else {
+		gpmc_cs_num = GPMC_CS_NUM;
+		gpmc_nr_waitpins = GPMC_NR_WAITPINS;
+	}
+
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	gpmc_dev = &pdev->dev;
-
 	l = gpmc_read_reg(GPMC_REVISION);
 
 	/*
@@ -2174,36 +2357,51 @@
 		gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
 	if (GPMC_REVISION_MAJOR(l) > 0x5)
 		gpmc_capability |= GPMC_HAS_MUX_AAD;
-	dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
+	dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
 		 GPMC_REVISION_MINOR(l));
 
 	gpmc_mem_init();
+	rc = gpmc_gpio_init(gpmc);
+	if (rc)
+		goto gpio_init_failed;
 
-	if (gpmc_setup_irq() < 0)
-		dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
-
-	if (!pdev->dev.of_node) {
-		gpmc_cs_num	 = GPMC_CS_NUM;
-		gpmc_nr_waitpins = GPMC_NR_WAITPINS;
+	gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins;
+	rc = gpmc_setup_irq(gpmc);
+	if (rc) {
+		dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
+		goto setup_irq_failed;
 	}
 
-	rc = gpmc_probe_dt(pdev);
+	rc = gpmc_probe_dt_children(pdev);
 	if (rc < 0) {
-		pm_runtime_put_sync(&pdev->dev);
-		dev_err(gpmc_dev, "failed to probe DT parameters\n");
-		return rc;
+		dev_err(gpmc->dev, "failed to probe DT children\n");
+		goto dt_children_failed;
 	}
 
 	return 0;
+
+dt_children_failed:
+	gpmc_free_irq(gpmc);
+setup_irq_failed:
+	gpmc_gpio_exit(gpmc);
+gpio_init_failed:
+	gpmc_mem_exit();
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return rc;
 }
 
 static int gpmc_remove(struct platform_device *pdev)
 {
-	gpmc_free_irq();
+	struct gpmc_device *gpmc = platform_get_drvdata(pdev);
+
+	gpmc_free_irq(gpmc);
+	gpmc_gpio_exit(gpmc);
 	gpmc_mem_exit();
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	gpmc_dev = NULL;
+
 	return 0;
 }
 
@@ -2249,25 +2447,6 @@
 postcore_initcall(gpmc_init);
 module_exit(gpmc_exit);
 
-static irqreturn_t gpmc_handle_irq(int irq, void *dev)
-{
-	int i;
-	u32 regval;
-
-	regval = gpmc_read_reg(GPMC_IRQSTATUS);
-
-	if (!regval)
-		return IRQ_NONE;
-
-	for (i = 0; i < GPMC_NR_IRQ; i++)
-		if (regval & gpmc_client_irq[i].bitmask)
-			generic_handle_irq(gpmc_client_irq[i].irq);
-
-	gpmc_write_reg(GPMC_IRQSTATUS, regval);
-
-	return IRQ_HANDLED;
-}
-
 static struct omap3_gpmc_regs gpmc_context;
 
 void omap3_gpmc_save_context(void)
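
The new GPMC helpers fold two kinds of interrupt sources into one register: hwirqs 0 and 1 are the NAND events and map straight to bits 0 and 1 of GPMC_IRQENABLE/GPMC_IRQSTATUS, while bits 2-7 are reserved, so waitpin hwirqs are shifted up to start at bit 8 (the "hwirq += 8 - GPMC_NR_NAND_IRQS" adjustment in gpmc_irq_endis() and gpmc_irq_ack()). A standalone sketch of that mapping, for reference only:

#include <stdio.h>

#define GPMC_NR_NAND_IRQS	2

/* same arithmetic as gpmc_irq_endis()/gpmc_irq_ack() above */
static unsigned int gpmc_hwirq_to_bit(unsigned int hwirq)
{
	if (hwirq >= GPMC_NR_NAND_IRQS)
		hwirq += 8 - GPMC_NR_NAND_IRQS;
	return hwirq;
}

int main(void)
{
	unsigned int hwirq;

	for (hwirq = 0; hwirq < 4; hwirq++)	/* 2 NAND IRQs + 2 waitpins */
		printf("hwirq %u -> register bit %u\n",
		       hwirq, gpmc_hwirq_to_bit(hwirq));
	/* prints: 0->0, 1->1, 2->8, 3->9 */
	return 0;
}
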
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 922a750..0fb27d3 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1033,12 +1033,11 @@
 	}
 	msb->attr_group.name = "media_attributes";
 
-	buffer = kmalloc(attr_len, GFP_KERNEL);
+	buffer = kmemdup(attr, attr_len, GFP_KERNEL);
 	if (!buffer) {
 		rc = -ENOMEM;
 		goto out_free_attr;
 	}
-	memcpy(buffer, (char *)attr, attr_len);
 
 	for (cnt = 0; cnt < attr_count; ++cnt) {
 		s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 1105db2..d34bc35 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -706,7 +706,7 @@
 		if (host->eject)
 			break;
 
-		msleep(1000);
+		schedule_timeout_idle(HZ);
 	}
 
 	complete(&host->detect_ms_exit);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 40e51b0..b46c0cf 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -696,7 +696,7 @@
 	nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS;
 
 	irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
-	if (IS_ERR_VALUE(irq_base)) {
+	if (irq_base < 0) {
 		dev_err(dev, "Fail to allocate IRQ descs\n");
 		return irq_base;
 	}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ddc9620..e62fde3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -618,6 +618,10 @@
 
 	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
 
+	/* Always switch back to main area after RPMB access */
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
 	mmc_put_card(card);
 
 	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
@@ -685,6 +689,10 @@
 	for (i = 0; i < num_of_cmds && !ioc_err; i++)
 		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
 
+	/* Always switch back to main area after RPMB access */
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
 	mmc_put_card(card);
 
 	/* copy to user if data and response */
@@ -748,16 +756,25 @@
 	if (mmc_card_mmc(card)) {
 		u8 part_config = card->ext_csd.part_config;
 
+		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+			mmc_retune_pause(card->host);
+
 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 		part_config |= md->part_type;
 
 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
-		if (ret)
+		if (ret) {
+			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+				mmc_retune_unpause(card->host);
 			return ret;
+		}
 
 		card->ext_csd.part_config = part_config;
+
+		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
+			mmc_retune_unpause(card->host);
 	}
 
 	main_md->part_curr = md->part_type;
@@ -2519,11 +2536,12 @@
 		  MMC_QUIRK_BLK_NO_CMD23),
 
 	/*
-	 * Some Micron MMC cards needs longer data read timeout than
-	 * indicated in CSD.
+	 * Some MMC cards need longer data read timeout than indicated in CSD.
 	 */
 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
 		  MMC_QUIRK_LONG_READ_TIME),
+	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
 
 	/*
 	 * On these Samsung MoviNAND parts, performing secure erase or
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 99275e4..8b4dfd4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -875,11 +875,11 @@
 	/*
 	 * Some cards require longer data read timeout than indicated in CSD.
 	 * Address this by setting the read timeout to a "reasonably high"
-	 * value. For the cards tested, 300ms has proven enough. If necessary,
+	 * value. For the cards tested, 600ms has proven enough. If necessary,
 	 * this value can be increased if other problematic cards require this.
 	 */
 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
-		data->timeout_ns = 300000000;
+		data->timeout_ns = 600000000;
 		data->timeout_clks = 0;
 	}
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e0a3ee1..1be42fa 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -68,8 +68,32 @@
 			  jiffies + host->retune_period * HZ);
 }
 
+/*
+ * Pause re-tuning for a small set of operations.  The pause begins after the
+ * next command and after first doing re-tuning.
+ */
+void mmc_retune_pause(struct mmc_host *host)
+{
+	if (!host->retune_paused) {
+		host->retune_paused = 1;
+		mmc_retune_needed(host);
+		mmc_retune_hold(host);
+	}
+}
+EXPORT_SYMBOL(mmc_retune_pause);
+
+void mmc_retune_unpause(struct mmc_host *host)
+{
+	if (host->retune_paused) {
+		host->retune_paused = 0;
+		mmc_retune_release(host);
+	}
+}
+EXPORT_SYMBOL(mmc_retune_unpause);
+
 void mmc_retune_disable(struct mmc_host *host)
 {
+	mmc_retune_unpause(host);
 	host->can_retune = 0;
 	del_timer_sync(&host->retune_timer);
 	host->retune_now = 0;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b81b08f..5d438ad 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@
 	 * switch to HS200 mode if bus width is set successfully.
 	 */
 	err = mmc_select_bus_width(card);
-	if (!IS_ERR_VALUE(err)) {
+	if (err >= 0) {
 		val = EXT_CSD_TIMING_HS200 |
 		      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
 		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@
 	} else if (mmc_card_hs(card)) {
 		/* Select the desired bus width optionally */
 		err = mmc_select_bus_width(card);
-		if (!IS_ERR_VALUE(err)) {
+		if (err >= 0) {
 			err = mmc_select_hs_ddr(card);
 			if (err)
 				goto free_card;
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 8c20b81..358b0dc 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -66,6 +66,70 @@
 	/* Make sure we use phases which we can enumerate with */
 	if (!IS_ERR(priv->sample_clk))
 		clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+
+	/*
+	 * Set the drive phase offset based on speed mode to achieve hold times.
+	 *
+	 * NOTE: this is _not_ a value that is dynamically tuned and is also
+	 * _not_ a value that will vary from board to board.  It is a value
+	 * that could vary between different SoC models if they had massively
+	 * different output clock delays inside their dw_mmc IP block (delay_o),
+	 * but since it's OK to overshoot a little we don't need to do complex
+	 * calculations and can pick values that will just work for everyone.
+	 *
+	 * When picking values we'll stick with picking 0/90/180/270 since
+	 * those can be made very accurately on all known Rockchip SoCs.
+	 *
+	 * Note that these values match values from the DesignWare Databook
+	 * tables for the most part except for SDR12 and "ID mode".  For those
+	 * two modes the databook calculations assume a clock in of 50MHz.  As
+	 * seen above, we always use a clock in rate that is exactly the
+	 * card's input clock (times RK3288_CLKGEN_DIV, but that gets divided
+	 * back out before the controller sees it).
+	 *
+	 * From measurement of a single device, it appears that delay_o is
+	 * about .5 ns.  Since we try to leave a bit of margin, it's expected
+	 * that numbers here will be fine even with much larger delay_o
+	 * (the 1.4 ns assumed by the DesignWare Databook would result in the
+	 * same results, for instance).
+	 */
+	if (!IS_ERR(priv->drv_clk)) {
+		int phase;
+
+		/*
+		 * In almost all cases a 90 degree phase offset will provide
+		 * sufficient hold times across all valid input clock rates
+		 * assuming delay_o is not absurd for a given SoC.  We'll use
+		 * that as a default.
+		 */
+		phase = 90;
+
+		switch (ios->timing) {
+		case MMC_TIMING_MMC_DDR52:
+			/*
+			 * Since clock in rate with MMC_DDR52 is doubled when
+			 * bus width is 8 we need to double the phase offset
+			 * to get the same timings.
+			 */
+			if (ios->bus_width == MMC_BUS_WIDTH_8)
+				phase = 180;
+			break;
+		case MMC_TIMING_UHS_SDR104:
+		case MMC_TIMING_MMC_HS200:
+			/*
+			 * In the case of 150 MHz clock (typical max for
+			 * Rockchip SoCs), 90 degree offset will add a delay
+			 * of 1.67 ns.  That will meet min hold time of .8 ns
+			 * as long as clock output delay is < .87 ns.  On
+			 * SoCs measured this seems to be OK, but it doesn't
+			 * hurt to give margin here, so we use 180.
+			 */
+			phase = 180;
+			break;
+		}
+
+		clk_set_phase(priv->drv_clk, phase);
+	}
 }
 
 #define NUM_PHASES			360
@@ -233,10 +297,10 @@
 
 /* Common capabilities of RK3288 SoC */
 static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
-	MMC_CAP_ERASE,
-	MMC_CAP_ERASE,
-	MMC_CAP_ERASE,
-	MMC_CAP_ERASE,
+	MMC_CAP_ERASE | MMC_CAP_CMD23,
+	MMC_CAP_ERASE | MMC_CAP_CMD23,
+	MMC_CAP_ERASE | MMC_CAP_CMD23,
+	MMC_CAP_ERASE | MMC_CAP_CMD23,
 };
 
 static const struct dw_mci_drv_data rk2928_drv_data = {
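
The long comment above quotes roughly 1.67 ns of added hold time for a 90 degree drive-phase offset at 150 MHz; that follows directly from delay = phase/360 * clock period. A quick check of the numbers, not part of the driver:

#include <stdio.h>

static double phase_delay_ns(double phase_deg, double clk_mhz)
{
	double period_ns = 1000.0 / clk_mhz;	/* 150 MHz -> ~6.67 ns period */

	return phase_deg / 360.0 * period_ns;
}

int main(void)
{
	/* 90 degrees at 150 MHz -> ~1.67 ns, as quoted for SDR104/HS200 */
	printf("90 deg  @ 150 MHz = %.2f ns\n", phase_delay_ns(90, 150));
	printf("180 deg @ 150 MHz = %.2f ns\n", phase_delay_ns(180, 150));
	return 0;
}
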
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9dd1bd3..2cc6123 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1431,7 +1431,7 @@
 	int gpio_ro = mmc_gpio_get_ro(mmc);
 
 	/* Use platform get_ro function, else try on board write protect */
-	if (!IS_ERR_VALUE(gpio_ro))
+	if (gpio_ro >= 0)
 		read_only = gpio_ro;
 	else
 		read_only =
@@ -1454,7 +1454,7 @@
 	if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
 	    (mmc->caps & MMC_CAP_NONREMOVABLE))
 		present = 1;
-	else if (!IS_ERR_VALUE(gpio_cd))
+	else if (gpio_cd >= 0)
 		present = gpio_cd;
 	else
 		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -2595,13 +2595,13 @@
 	/* Useful defaults if platform data is unset. */
 	if (host->use_dma == TRANS_MODE_IDMAC) {
 		mmc->max_segs = host->ring_size;
-		mmc->max_blk_size = 65536;
+		mmc->max_blk_size = 65535;
 		mmc->max_seg_size = 0x1000;
 		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
 		mmc->max_blk_count = mmc->max_req_size / 512;
 	} else if (host->use_dma == TRANS_MODE_EDMAC) {
 		mmc->max_segs = 64;
-		mmc->max_blk_size = 65536;
+		mmc->max_blk_size = 65535;
 		mmc->max_blk_count = 65535;
 		mmc->max_req_size =
 				mmc->max_blk_size * mmc->max_blk_count;
@@ -2609,7 +2609,7 @@
 	} else {
 		/* TRANS_MODE_PIO */
 		mmc->max_segs = 64;
-		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
 		mmc->max_blk_count = 512;
 		mmc->max_req_size = mmc->max_blk_size *
 				    mmc->max_blk_count;
@@ -2927,7 +2927,7 @@
 		if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
 			return;
 
-		if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
+		if (mmc_gpio_get_cd(slot->mmc) < 0)
 			break;
 	}
 	if (i == host->num_slots)
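
The max_blk_size changes earlier in this file follow from the comment on the PIO branch: BLKSIZ is a 16-bit register field, so the largest block size it can actually hold is 65535, and 65536 would truncate to 0. A trivial illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t blksiz = (uint16_t)65536;	/* what a 16-bit BLKSIZ field would keep */

	printf("65536 in a 16-bit field: %u (max representable is %u)\n",
	       blksiz, UINT16_MAX);
	return 0;
}
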
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b2d70ba..458ffb7 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -274,7 +274,7 @@
 	.chip    = &sdhci_acpi_chip_int,
 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
 		   MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+		   MMC_CAP_WAIT_WHILE_BUSY,
 	.caps2   = MMC_CAP2_HC_ERASE_SZ,
 	.flags   = SDHCI_ACPI_RUNTIME_PM,
 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
@@ -289,7 +289,7 @@
 		   SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
 	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+		   MMC_CAP_WAIT_WHILE_BUSY,
 	.flags   = SDHCI_ACPI_RUNTIME_PM,
 	.pm_caps = MMC_PM_KEEP_POWER,
 	.probe_slot	= sdhci_acpi_sdio_probe_slot,
@@ -301,7 +301,7 @@
 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
 		   SDHCI_QUIRK2_STOP_WITH_TC,
-	.caps    = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+	.caps    = MMC_CAP_WAIT_WHILE_BUSY,
 	.probe_slot	= sdhci_acpi_sd_probe_slot,
 };
 
@@ -378,7 +378,7 @@
 {
 	struct device *dev = &pdev->dev;
 	acpi_handle handle = ACPI_HANDLE(dev);
-	struct acpi_device *device;
+	struct acpi_device *device, *child;
 	struct sdhci_acpi_host *c;
 	struct sdhci_host *host;
 	struct resource *iomem;
@@ -390,6 +390,11 @@
 	if (acpi_bus_get_device(handle, &device))
 		return -ENODEV;
 
+	/* Power on the SDHCI controller and its children */
+	acpi_device_fix_up_power(device);
+	list_for_each_entry(child, &device->children, node)
+		acpi_device_fix_up_power(child);
+
 	if (acpi_bus_get_status(device) || !device->status.present)
 		return -ENODEV;
 
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 2d300d8..9d3ae1f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1011,7 +1011,7 @@
 	if (ret)
 		return ret;
 
-	if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+	if (mmc_gpio_get_cd(host->mmc) >= 0)
 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 
 	return 0;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 25f779e..d4cef71 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -289,7 +289,7 @@
 	 * to enable polling via device tree with broken-cd property.
 	 */
 	if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
-	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) {
+	    mmc_gpio_get_cd(host->mmc) < 0) {
 		host->mmc->caps |= MMC_CAP_NEEDS_POLL;
 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 	}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 97d4eeb..a4dbf74 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -356,7 +356,6 @@
 {
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
-				 MMC_CAP_BUS_WIDTH_TEST |
 				 MMC_CAP_WAIT_WHILE_BUSY;
 	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
 	slot->hw_reset = sdhci_pci_int_hw_reset;
@@ -372,15 +371,13 @@
 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
-				 MMC_CAP_BUS_WIDTH_TEST |
 				 MMC_CAP_WAIT_WHILE_BUSY;
 	return 0;
 }
 
 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
 {
-	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
-				 MMC_CAP_WAIT_WHILE_BUSY;
+	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
 	slot->cd_con_id = NULL;
 	slot->cd_idx = 0;
 	slot->cd_override_level = true;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e010ea4..0e3d7c0 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1624,7 +1624,7 @@
 	 * Try slot gpio detect, if defined it take precedence
 	 * over build in controller functionality
 	 */
-	if (!IS_ERR_VALUE(gpio_cd))
+	if (gpio_cd >= 0)
 		return !!gpio_cd;
 
 	/* If polling, assume that the card is always present. */
@@ -3077,7 +3077,7 @@
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
 	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
-	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+	    mmc_gpio_get_cd(host->mmc) < 0)
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	/* If there are external regulators, get them */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 7fc8b7a..2ee4c21 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -970,8 +970,8 @@
 	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
 	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
 	[SDXC_CLK_50M]		= { .output = 150, .sample = 120 },
-	[SDXC_CLK_50M_DDR]	= { .output =  90, .sample = 120 },
-	[SDXC_CLK_50M_DDR_8BIT]	= { .output =  90, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  54, .sample =  36 },
+	[SDXC_CLK_50M_DDR_8BIT]	= { .output =  72, .sample =  72 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@
 				  MMC_CAP_1_8V_DDR |
 				  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
-	/* TODO MMC DDR is not working on A80 */
-	if (of_device_is_compatible(pdev->dev.of_node,
-				    "allwinner,sun9i-a80-mmc"))
-		mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
 	ret = mmc_of_parse(mmc);
 	if (ret)
 		goto error_free_dma;
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 3b3dabc..bbfa1f1 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -115,6 +115,7 @@
 
 config MTD_MAP_BANK_WIDTH_32
 	bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
+	select MTD_COMPLEX_MAPPINGS if HAS_IOMEM
 	default n
 	help
 	  If you wish to support CFI devices on a physical bus which is
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 347bb83..1c65c15 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -2,6 +2,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/ioport.h>
 #include <linux/mtd/mtd.h>
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
@@ -109,8 +110,7 @@
 	if ((from + len) > mtd->size)
 		return -EINVAL;
 
-	memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
-		      len);
+	memcpy_fromio(buf, b47s->window + from, len);
 	*retlen = len;
 
 	return len;
@@ -275,15 +275,33 @@
 
 static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
 {
-	struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+	struct device *dev = &pdev->dev;
+	struct bcma_sflash *sflash = dev_get_platdata(dev);
 	struct bcm47xxsflash *b47s;
+	struct resource *res;
 	int err;
 
-	b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL);
+	b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL);
 	if (!b47s)
 		return -ENOMEM;
 	sflash->priv = b47s;
 
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "invalid resource\n");
+		return -EINVAL;
+	}
+	if (!devm_request_mem_region(dev, res->start, resource_size(res),
+				     res->name)) {
+		dev_err(dev, "can't request region for resource %pR\n", res);
+		return -EBUSY;
+	}
+	b47s->window = ioremap_cache(res->start, resource_size(res));
+	if (!b47s->window) {
+		dev_err(dev, "ioremap failed for resource %pR\n", res);
+		return -ENOMEM;
+	}
+
 	b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
 	b47s->cc_read = bcm47xxsflash_bcma_cc_read;
 	b47s->cc_write = bcm47xxsflash_bcma_cc_write;
@@ -297,7 +315,6 @@
 		break;
 	}
 
-	b47s->window = sflash->window;
 	b47s->blocksize = sflash->blocksize;
 	b47s->numblocks = sflash->numblocks;
 	b47s->size = sflash->size;
@@ -306,6 +323,7 @@
 	err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
 	if (err) {
 		pr_err("Failed to register MTD device: %d\n", err);
+		iounmap(b47s->window);
 		return err;
 	}
 
@@ -321,6 +339,7 @@
 	struct bcm47xxsflash *b47s = sflash->priv;
 
 	mtd_device_unregister(&b47s->mtd);
+	iounmap(b47s->window);
 
 	return 0;
 }
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index fe93daf..1564b62 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -65,7 +65,8 @@
 
 	enum bcm47xxsflash_type type;
 
-	u32 window;
+	void __iomem *window;
+
 	u32 blocksize;
 	u16 numblocks;
 	u32 size;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index e7b2e43..b833e6c 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -67,16 +67,40 @@
 MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
 		 "2=reliable) : MLC normal operations are in normal mode");
 
-/**
- * struct docg3_oobinfo - DiskOnChip G3 OOB layout
- * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
- * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
- * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15
- */
-static struct nand_ecclayout docg3_oobinfo = {
-	.eccbytes = 8,
-	.eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
-	.oobfree = {{0, 7}, {15, 1} },
+static int docg3_ooblayout_ecc(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	/* byte 7 is Hamming ECC, byte 8-14 are BCH ECC */
+	oobregion->offset = 7;
+	oobregion->length = 8;
+
+	return 0;
+}
+
+static int docg3_ooblayout_free(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	/* free bytes: byte 0 until byte 6, byte 15 */
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 7;
+	} else {
+		oobregion->offset = 15;
+		oobregion->length = 1;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_docg3_ops = {
+	.ecc = docg3_ooblayout_ecc,
+	.free = docg3_ooblayout_free,
 };
 
 static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
@@ -1857,7 +1881,7 @@
 	mtd->_read_oob = doc_read_oob;
 	mtd->_write_oob = doc_write_oob;
 	mtd->_block_isbad = doc_block_isbad;
-	mtd->ecclayout = &docg3_oobinfo;
+	mtd_set_ooblayout(mtd, &nand_ooblayout_docg3_ops);
 	mtd->oobavail = 8;
 	mtd->ecc_strength = DOC_ECC_BCH_T;
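
Instead of a fixed nand_ecclayout table, the driver now answers per-section queries and returns -ERANGE once the section index runs past the layout. Consumers walk the sections the same way, as the mtdchar.c helpers later in this series do; a minimal sketch of that pattern, assuming only the mtd_ooblayout_ecc() accessor those helpers use:

#include <linux/mtd/mtd.h>

/*
 * Sketch only (not part of the patch): count the ECC bytes a layout
 * describes by walking its sections until the callback reports -ERANGE.
 */
static int count_ecc_bytes(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0, total = 0, ret;

	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &region)))
		total += region.length;

	return ret == -ERANGE ? total : ret;
}
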
 
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c9c3b7f..9d68544 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -131,6 +131,28 @@
 	/* convert the dummy cycles to the number of bytes */
 	dummy /= 8;
 
+	if (spi_flash_read_supported(spi)) {
+		struct spi_flash_read_message msg;
+		int ret;
+
+		memset(&msg, 0, sizeof(msg));
+
+		msg.buf = buf;
+		msg.from = from;
+		msg.len = len;
+		msg.read_opcode = nor->read_opcode;
+		msg.addr_width = nor->addr_width;
+		msg.dummy_bytes = dummy;
+		/* TODO: Support other combinations */
+		msg.opcode_nbits = SPI_NBITS_SINGLE;
+		msg.addr_nbits = SPI_NBITS_SINGLE;
+		msg.data_nbits = m25p80_rx_nbits(nor);
+
+		ret = spi_flash_read(spi, &msg);
+		*retlen = msg.retlen;
+		return ret;
+	}
+
 	spi_message_init(&m);
 	memset(t, 0, (sizeof t));
 
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 708b7e8..220f920 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -353,7 +353,7 @@
  * mechanism
  * returns the size of the memory region found.
  */
-static int fixup_pmc551(struct pci_dev *dev)
+static int __init fixup_pmc551(struct pci_dev *dev)
 {
 #ifdef CONFIG_MTD_PMC551_BUGFIX
 	u32 dram_data;
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 0455166..4f206a9 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@
 }
 
 
-static int ck804xrom_init_one(struct pci_dev *pdev,
-			      const struct pci_device_id *ent)
+static int __init ck804xrom_init_one(struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
 {
 	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
 	u8 byte;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 76ed651..9646b07 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@
 	pci_dev_put(window->pdev);
 }
 
-static int esb2rom_init_one(struct pci_dev *pdev,
-			    const struct pci_device_id *ent)
+static int __init esb2rom_init_one(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
 {
 	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
 	struct esb2rom_window *window = &esb2rom_window;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 8636bba..e17d02a 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@
 }
 
 
-static int ichxrom_init_one(struct pci_dev *pdev,
-			    const struct pci_device_id *ent)
+static int __init ichxrom_init_one(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
 {
 	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
 	struct ichxrom_window *window = &ichxrom_window;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index c1af83d..00a8190 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -4,11 +4,13 @@
  *	uclinux.c -- generic memory mapped MTD driver for uclinux
  *
  *	(C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ *      License: GPL
  */
 
 /****************************************************************************/
 
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -117,27 +119,6 @@
 
 	return(0);
 }
-
-/****************************************************************************/
-
-static void __exit uclinux_mtd_cleanup(void)
-{
-	if (uclinux_ram_mtdinfo) {
-		mtd_device_unregister(uclinux_ram_mtdinfo);
-		map_destroy(uclinux_ram_mtdinfo);
-		uclinux_ram_mtdinfo = NULL;
-	}
-	if (uclinux_ram_map.virt)
-		uclinux_ram_map.virt = 0;
-}
-
-/****************************************************************************/
-
-module_init(uclinux_mtd_init);
-module_exit(uclinux_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic MTD for uClinux");
+device_initcall(uclinux_mtd_init);
 
 /****************************************************************************/
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 6d19835..2a47a3f 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -465,38 +465,111 @@
 }
 
 /*
- * Copies (and truncates, if necessary) data from the larger struct,
- * nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to support the deprecated
- * API ioctl ECCGETLAYOUT while allowing all new functionality to use
- * nand_ecclayout flexibly (i.e. the struct may change size in new
- * releases without requiring major rewrites).
+ * Copies (and truncates, if necessary) OOB layout information to the
+ * deprecated layout struct, nand_ecclayout_user. This is necessary only to
+ * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
+ * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
+ * can describe any kind of OOB layout with almost zero overhead from a
+ * memory usage point of view).
  */
-static int shrink_ecclayout(const struct nand_ecclayout *from,
-		struct nand_ecclayout_user *to)
+static int shrink_ecclayout(struct mtd_info *mtd,
+			    struct nand_ecclayout_user *to)
 {
-	int i;
+	struct mtd_oob_region oobregion;
+	int i, section = 0, ret;
 
-	if (!from || !to)
+	if (!mtd || !to)
 		return -EINVAL;
 
 	memset(to, 0, sizeof(*to));
 
-	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
-	for (i = 0; i < to->eccbytes; i++)
-		to->eccpos[i] = from->eccpos[i];
+	to->eccbytes = 0;
+	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
+		u32 eccpos;
+
+		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		eccpos = oobregion.offset;
+		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
+		       eccpos < oobregion.offset + oobregion.length; i++) {
+			to->eccpos[i] = eccpos++;
+			to->eccbytes++;
+		}
+	}
 
 	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
-		if (from->oobfree[i].length == 0 &&
-				from->oobfree[i].offset == 0)
+		ret = mtd_ooblayout_free(mtd, i, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
 			break;
-		to->oobavail += from->oobfree[i].length;
-		to->oobfree[i] = from->oobfree[i];
+		}
+
+		to->oobfree[i].offset = oobregion.offset;
+		to->oobfree[i].length = oobregion.length;
+		to->oobavail += to->oobfree[i].length;
 	}
 
 	return 0;
 }
 
+static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
+{
+	struct mtd_oob_region oobregion;
+	int i, section = 0, ret;
+
+	if (!mtd || !to)
+		return -EINVAL;
+
+	memset(to, 0, sizeof(*to));
+
+	to->eccbytes = 0;
+	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
+		u32 eccpos;
+
+		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
+			return -EINVAL;
+
+		eccpos = oobregion.offset;
+		for (; eccpos < oobregion.offset + oobregion.length; i++) {
+			to->eccpos[i] = eccpos++;
+			to->eccbytes++;
+		}
+	}
+
+	for (i = 0; i < 8; i++) {
+		ret = mtd_ooblayout_free(mtd, i, &oobregion);
+		if (ret < 0) {
+			if (ret != -ERANGE)
+				return ret;
+
+			break;
+		}
+
+		to->oobfree[i][0] = oobregion.offset;
+		to->oobfree[i][1] = oobregion.length;
+	}
+
+	to->useecc = MTD_NANDECC_AUTOPLACE;
+
+	return 0;
+}
+
 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
 			       struct blkpg_ioctl_arg *arg)
 {
@@ -815,16 +888,12 @@
 	{
 		struct nand_oobinfo oi;
 
-		if (!mtd->ecclayout)
+		if (!mtd->ooblayout)
 			return -EOPNOTSUPP;
-		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
-			return -EINVAL;
 
-		oi.useecc = MTD_NANDECC_AUTOPLACE;
-		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
-		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
-		       sizeof(oi.oobfree));
-		oi.eccbytes = mtd->ecclayout->eccbytes;
+		ret = get_oobinfo(mtd, &oi);
+		if (ret)
+			return ret;
 
 		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
 			return -EFAULT;
@@ -913,14 +982,14 @@
 	{
 		struct nand_ecclayout_user *usrlay;
 
-		if (!mtd->ecclayout)
+		if (!mtd->ooblayout)
 			return -EOPNOTSUPP;
 
 		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
 		if (!usrlay)
 			return -ENOMEM;
 
-		shrink_ecclayout(mtd->ecclayout, usrlay);
+		shrink_ecclayout(mtd, usrlay);
 
 		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
 			ret = -EFAULT;
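
For reviewers who want to see the consumer side of the hunk above: a minimal userspace sketch (not part of this patch; the device path, includes, and error handling are illustrative) of the deprecated ECCGETLAYOUT ioctl that shrink_ecclayout() keeps working on top of the new mtd_ooblayout_ops:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct nand_ecclayout_user layout;
	int fd = open("/dev/mtd0", O_RDONLY);	/* hypothetical device */

	if (fd < 0)
		return 1;

	/* still served by shrink_ecclayout() inside the kernel */
	if (ioctl(fd, ECCGETLAYOUT, &layout) == 0)
		printf("eccbytes=%u oobavail=%u\n",
		       layout.eccbytes, layout.oobavail);

	close(fd);
	return 0;
}
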
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 239a8c8..d573606 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -777,7 +777,7 @@
 
 	}
 
-	concat->mtd.ecclayout = subdev[0]->ecclayout;
+	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
 
 	concat->num_subdev = num_devs;
 	concat->mtd.name = name;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bee180bd..e3936b8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1016,6 +1016,366 @@
 }
 EXPORT_SYMBOL_GPL(mtd_write_oob);
 
+/**
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ *	     bytes stored in a single contiguous section, or one section
+ *	     per ECC chunk (and sometimes several sections for a single
+ *	     ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ *	    information
+ *
+ * This function returns ECC section information in the OOB area. If you want
+ * to get all the ECC bytes information, then you should call
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+		      struct mtd_oob_region *oobecc)
+{
+	memset(oobecc, 0, sizeof(*oobecc));
+
+	if (!mtd || section < 0)
+		return -EINVAL;
+
+	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+		return -ENOTSUPP;
+
+	return mtd->ooblayout->ecc(mtd, section, oobecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
+
+/**
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
+ *			section
+ * @mtd: MTD device structure
+ * @section: Free section you are interested in. Depending on the layout
+ *	     you may have all the free bytes stored in a single contiguous
+ *	     section, or one section per ECC chunk plus an extra section
+ *	     for the remaining bytes (or other funky layout).
+ * @oobfree: OOB region struct filled with the appropriate free position
+ *	     information
+ *
+ * This function returns the free bytes position in the OOB area. If you want
+ * to get all the free bytes information, then you should call
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+		       struct mtd_oob_region *oobfree)
+{
+	memset(oobfree, 0, sizeof(*oobfree));
+
+	if (!mtd || section < 0)
+		return -EINVAL;
+
+	if (!mtd->ooblayout || !mtd->ooblayout->free)
+		return -ENOTSUPP;
+
+	return mtd->ooblayout->free(mtd, section, oobfree);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
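
As a usage note for the two accessors above (a sketch, not part of the patch; the function name is made up): the intended pattern is to bump the section index until the callback reports -ERANGE, which works for any layout regardless of how many sections it exposes.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static void dump_ooblayout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section, ret;

	/* walk every ECC section until the layout runs out of them */
	for (section = 0; ; section++) {
		ret = mtd_ooblayout_ecc(mtd, section, &region);
		if (ret == -ERANGE)
			break;			/* no more ECC sections */
		if (ret < 0)
			return;			/* e.g. -ENOTSUPP */

		pr_info("ECC section %d: offset %u, length %u\n",
			section, region.offset, region.length);
	}

	/* same pattern for the free OOB sections */
	for (section = 0; ; section++) {
		ret = mtd_ooblayout_free(mtd, section, &region);
		if (ret == -ERANGE)
			break;
		if (ret < 0)
			return;

		pr_info("free section %d: offset %u, length %u\n",
			section, region.offset, region.length);
	}
}
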
+
+/**
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
+ * @mtd: mtd info structure
+ * @byte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: used to retrieve the ECC position
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
+ *	  mtd_ooblayout_ecc depending on the region type you're searching for
+ *
+ * This function returns the section id and oobregion information of a
+ * specific byte. For example, say you want to know where the 4th ECC byte is
+ * stored, you'll use:
+ *
+ * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
+				int *sectionp, struct mtd_oob_region *oobregion,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	int pos = 0, ret, section = 0;
+
+	memset(oobregion, 0, sizeof(*oobregion));
+
+	while (1) {
+		ret = iter(mtd, section, oobregion);
+		if (ret)
+			return ret;
+
+		if (pos + oobregion->length > byte)
+			break;
+
+		pos += oobregion->length;
+		section++;
+	}
+
+	/*
+	 * Adjust the region info so that it starts at the requested byte
+	 * instead of at the beginning of the section.
+	 */
+	oobregion->offset += byte - pos;
+	oobregion->length -= byte - pos;
+	*sectionp = section;
+
+	return 0;
+}
+
+/**
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
+ *				  ECC byte
+ * @mtd: mtd info structure
+ * @eccbyte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: OOB region information
+ *
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
+ * byte.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+				 int *section,
+				 struct mtd_oob_region *oobregion)
+{
+	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
+					 mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
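
A short sketch of the lookup described in the kerneldoc above (not part of the patch; the caller name is hypothetical): finding where the 4th ECC byte lives.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static int locate_fourth_ecc_byte(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section, ret;

	/* byte index is zero-based, so the 4th ECC byte is index 3 */
	ret = mtd_ooblayout_find_eccregion(mtd, 3, &section, &region);
	if (ret)
		return ret;

	/* region.offset is now the OOB offset of that ECC byte */
	pr_info("ECC byte 3: section %d, OOB offset %u (%u bytes left in region)\n",
		section, region.offset, region.length);

	return 0;
}
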
+
+/**
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @buf: destination buffer to store OOB bytes
+ * @oobbuf: OOB buffer
+ * @start: first byte to retrieve
+ * @nbytes: number of bytes to retrieve
+ * @iter: section iterator
+ *
+ * Extract bytes attached to a specific category (ECC or free)
+ * from the OOB buffer and copy them into buf.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
+				const u8 *oobbuf, int start, int nbytes,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	struct mtd_oob_region oobregion = { };
+	int section = 0, ret;
+
+	ret = mtd_ooblayout_find_region(mtd, start, &section,
+					&oobregion, iter);
+
+	while (!ret) {
+		int cnt;
+
+		cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+		memcpy(buf, oobbuf + oobregion.offset, cnt);
+		buf += cnt;
+		nbytes -= cnt;
+
+		if (!nbytes)
+			break;
+
+		ret = iter(mtd, ++section, &oobregion);
+	}
+
+	return ret;
+}
+
+/**
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @buf: source buffer to get OOB bytes from
+ * @oobbuf: OOB buffer
+ * @start: first OOB byte to set
+ * @nbytes: number of OOB bytes to set
+ * @iter: section iterator
+ *
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
+ * is selected by passing the appropriate iterator.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
+				u8 *oobbuf, int start, int nbytes,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	struct mtd_oob_region oobregion = { };
+	int section = 0, ret;
+
+	ret = mtd_ooblayout_find_region(mtd, start, &section,
+					&oobregion, iter);
+
+	while (!ret) {
+		int cnt;
+
+		cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+		memcpy(oobbuf + oobregion.offset, buf, cnt);
+		buf += cnt;
+		nbytes -= cnt;
+
+		if (!nbytes)
+			break;
+
+		ret = iter(mtd, ++section, &oobregion);
+	}
+
+	return ret;
+}
+
+/**
+ * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
+ * @mtd: mtd info structure
+ * @iter: category iterator
+ *
+ * Count the number of bytes in a given category.
+ *
+ * Returns the number of bytes in this category, a negative error code otherwise.
+ */
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
+				int (*iter)(struct mtd_info *,
+					    int section,
+					    struct mtd_oob_region *oobregion))
+{
+	struct mtd_oob_region oobregion = { };
+	int section = 0, ret, nbytes = 0;
+
+	while (1) {
+		ret = iter(mtd, section++, &oobregion);
+		if (ret) {
+			if (ret == -ERANGE)
+				ret = nbytes;
+			break;
+		}
+
+		nbytes += oobregion.length;
+	}
+
+	return ret;
+}
+
+/**
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+			       const u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
+
+/**
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: source buffer to get ECC bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+			       u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
+
+/**
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: destination buffer to store data bytes
+ * @oobbuf: OOB buffer
+ * @start: first data byte to retrieve
+ * @nbytes: number of data bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+				const u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
+
+/**
+ * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: source buffer to get data bytes from
+ * @oobbuf: OOB buffer
+ * @start: first data byte to set
+ * @nbytes: number of data bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+				u8 *oobbuf, int start, int nbytes)
+{
+	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
+				       mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
+
+/**
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
+ *
+ * Returns the number of free bytes, a negative error code otherwise.
+ */
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
+{
+	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
+
+/**
+ * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
+ *
+ * Returns the number of ECC bytes, a negative error code otherwise.
+ */
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
+{
+	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
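
Putting the count and extract helpers together, a hedged sketch of how a caller might pull the scattered ECC bytes out of a raw OOB dump (not part of the patch; names and buffer handling are purely illustrative):

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int extract_ecc(struct mtd_info *mtd, const u8 *oob_raw,
		       u8 *ecc_out, int ecc_out_len)
{
	int nbytes = mtd_ooblayout_count_eccbytes(mtd);

	if (nbytes < 0)
		return nbytes;		/* no layout or no ->ecc() hook */
	if (nbytes > ecc_out_len)
		return -ENOBUFS;

	/* gathers the bytes no matter how the layout scatters them */
	return mtd_ooblayout_get_eccbytes(mtd, ecc_out, oob_raw, 0, nbytes);
}
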
+
 /*
  * Method to access the protection register area, present in some flash
  * devices. The user data is one time programmable but the factory data is read
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 08de4b2..1f13e32 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -317,6 +317,27 @@
 	return res;
 }
 
+static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct mtd_part *part = mtd_to_part(mtd);
+
+	return mtd_ooblayout_ecc(part->master, section, oobregion);
+}
+
+static int part_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct mtd_part *part = mtd_to_part(mtd);
+
+	return mtd_ooblayout_free(part->master, section, oobregion);
+}
+
+static const struct mtd_ooblayout_ops part_ooblayout_ops = {
+	.ecc = part_ooblayout_ecc,
+	.free = part_ooblayout_free,
+};
+
 static inline void free_partition(struct mtd_part *p)
 {
 	kfree(p->mtd.name);
@@ -533,7 +554,7 @@
 			part->name);
 	}
 
-	slave->mtd.ecclayout = master->ecclayout;
+	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
 	slave->mtd.ecc_step_size = master->ecc_step_size;
 	slave->mtd.ecc_strength = master->ecc_strength;
 	slave->mtd.bitflip_threshold = master->bitflip_threshold;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 68b58c8..78e12cc 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -224,6 +224,7 @@
 	/* 25 us command delay time */
 	this->chip_delay = 30;
 	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
 
 	platform_set_drvdata(pdev, io_base);
 
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 20cbaab..68b9160 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -36,7 +36,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
-#include <linux/of_mtd.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
@@ -68,34 +67,44 @@
 	uint8_t pmecc_max_correction;
 };
 
-struct atmel_nand_nfc_caps {
-	uint32_t rb_mask;
-};
-
-/* oob layout for large page size
+/*
+ * oob layout for large page size
  * bad block info is on bytes 0 and 1
  * the bytes have to be consecutives to avoid
  * several NAND_CMD_RNDOUT during read
- */
-static struct nand_ecclayout atmel_oobinfo_large = {
-	.eccbytes = 4,
-	.eccpos = {60, 61, 62, 63},
-	.oobfree = {
-		{2, 58}
-	},
-};
-
-/* oob layout for small page size
+ *
+ * oob layout for small page size
  * bad block info is on bytes 4 and 5
  * the bytes have to be consecutives to avoid
  * several NAND_CMD_RNDOUT during read
  */
-static struct nand_ecclayout atmel_oobinfo_small = {
-	.eccbytes = 4,
-	.eccpos = {0, 1, 2, 3},
-	.oobfree = {
-		{6, 10}
-	},
+static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 4;
+	oobregion->offset = 0;
+
+	return 0;
+}
+
+static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 6;
+	oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = {
+	.ecc = atmel_ooblayout_ecc_sp,
+	.free = atmel_ooblayout_free_sp,
 };
 
 struct atmel_nfc {
@@ -116,7 +125,6 @@
 	/* Point to the sram bank which include readed data via NFC */
 	void			*data_in_sram;
 	bool			will_write_sram;
-	const struct atmel_nand_nfc_caps *caps;
 };
 static struct atmel_nfc	nand_nfc;
 
@@ -163,8 +171,6 @@
 	int			*pmecc_delta;
 };
 
-static struct nand_ecclayout atmel_pmecc_oobinfo;
-
 /*
  * Enable NAND.
  */
@@ -434,14 +440,13 @@
 static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
 
 	if (use_dma && len > mtd->oobsize)
 		/* only use DMA for bigger than oob size: better performances */
 		if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
 			return;
 
-	if (host->board.bus_width_16)
+	if (chip->options & NAND_BUSWIDTH_16)
 		atmel_read_buf16(mtd, buf, len);
 	else
 		atmel_read_buf8(mtd, buf, len);
@@ -450,14 +455,13 @@
 static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
 
 	if (use_dma && len > mtd->oobsize)
 		/* only use DMA for bigger than oob size: better performances */
 		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
 			return;
 
-	if (host->board.bus_width_16)
+	if (chip->options & NAND_BUSWIDTH_16)
 		atmel_write_buf16(mtd, buf, len);
 	else
 		atmel_write_buf8(mtd, buf, len);
@@ -483,22 +487,6 @@
 	return (m * cap + 7) / 8;
 }
 
-static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
-				    int oobsize, int ecc_len)
-{
-	int i;
-
-	layout->eccbytes = ecc_len;
-
-	/* ECC will occupy the last ecc_len bytes continuously */
-	for (i = 0; i < ecc_len; i++)
-		layout->eccpos[i] = oobsize - ecc_len + i;
-
-	layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES;
-	layout->oobfree[0].length =
-		oobsize - ecc_len - layout->oobfree[0].offset;
-}
-
 static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
 {
 	int table_size;
@@ -836,13 +824,16 @@
 			dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
 				pos, bit_pos, err_byte, *(buf + byte_pos));
 		} else {
+			struct mtd_oob_region oobregion;
+
 			/* Bit flip in OOB area */
 			tmp = sector_num * nand_chip->ecc.bytes
 					+ (byte_pos - sector_size);
 			err_byte = ecc[tmp];
 			ecc[tmp] ^= (1 << bit_pos);
 
-			pos = tmp + nand_chip->ecc.layout->eccpos[0];
+			mtd_ooblayout_ecc(mtd, 0, &oobregion);
+			pos = tmp + oobregion.offset;
 			dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
 				pos, bit_pos, err_byte, ecc[tmp]);
 		}
@@ -863,17 +854,6 @@
 	uint8_t *buf_pos;
 	int max_bitflips = 0;
 
-	/* If can correct bitfilps from erased page, do the normal check */
-	if (host->caps->pmecc_correct_erase_page)
-		goto normal_check;
-
-	for (i = 0; i < nand_chip->ecc.total; i++)
-		if (ecc[i] != 0xff)
-			goto normal_check;
-	/* Erased page, return OK */
-	return 0;
-
-normal_check:
 	for (i = 0; i < nand_chip->ecc.steps; i++) {
 		err_nbr = 0;
 		if (pmecc_stat & 0x1) {
@@ -884,16 +864,30 @@
 			pmecc_get_sigma(mtd);
 
 			err_nbr = pmecc_err_location(mtd);
-			if (err_nbr == -1) {
+			if (err_nbr >= 0) {
+				pmecc_correct_data(mtd, buf_pos, ecc, i,
+						   nand_chip->ecc.bytes,
+						   err_nbr);
+			} else if (!host->caps->pmecc_correct_erase_page) {
+				u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes);
+
+				/* Try to detect erased pages */
+				err_nbr = nand_check_erased_ecc_chunk(buf_pos,
+							host->pmecc_sector_size,
+							ecc_pos,
+							nand_chip->ecc.bytes,
+							NULL, 0,
+							nand_chip->ecc.strength);
+			}
+
+			if (err_nbr < 0) {
 				dev_err(host->dev, "PMECC: Too many errors\n");
 				mtd->ecc_stats.failed++;
 				return -EIO;
-			} else {
-				pmecc_correct_data(mtd, buf_pos, ecc, i,
-					nand_chip->ecc.bytes, err_nbr);
-				mtd->ecc_stats.corrected += err_nbr;
-				max_bitflips = max_t(int, max_bitflips, err_nbr);
 			}
+
+			mtd->ecc_stats.corrected += err_nbr;
+			max_bitflips = max_t(int, max_bitflips, err_nbr);
 		}
 		pmecc_stat >>= 1;
 	}
@@ -931,7 +925,6 @@
 	struct atmel_nand_host *host = nand_get_controller_data(chip);
 	int eccsize = chip->ecc.size * chip->ecc.steps;
 	uint8_t *oob = chip->oob_poi;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	uint32_t stat;
 	unsigned long end_time;
 	int bitflips = 0;
@@ -953,7 +946,11 @@
 
 	stat = pmecc_readl_relaxed(host->ecc, ISR);
 	if (stat != 0) {
-		bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+		struct mtd_oob_region oobregion;
+
+		mtd_ooblayout_ecc(mtd, 0, &oobregion);
+		bitflips = pmecc_correction(mtd, stat, buf,
+					    &oob[oobregion.offset]);
 		if (bitflips < 0)
 			/* uncorrectable errors */
 			return 0;
@@ -967,8 +964,8 @@
 		int page)
 {
 	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
-	int i, j;
+	struct mtd_oob_region oobregion = { };
+	int i, j, section = 0;
 	unsigned long end_time;
 
 	if (!host->nfc || !host->nfc->write_by_sram) {
@@ -987,11 +984,14 @@
 
 	for (i = 0; i < chip->ecc.steps; i++) {
 		for (j = 0; j < chip->ecc.bytes; j++) {
-			int pos;
+			if (!oobregion.length)
+				mtd_ooblayout_ecc(mtd, section, &oobregion);
 
-			pos = i * chip->ecc.bytes + j;
-			chip->oob_poi[eccpos[pos]] =
+			chip->oob_poi[oobregion.offset] =
 				pmecc_readb_ecc_relaxed(host->ecc, i, j);
+			oobregion.length--;
+			oobregion.offset++;
+			section++;
 		}
 	}
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1003,8 +1003,9 @@
 {
 	struct nand_chip *nand_chip = mtd_to_nand(mtd);
 	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
 	uint32_t val = 0;
-	struct nand_ecclayout *ecc_layout;
+	struct mtd_oob_region oobregion;
 
 	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
 	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -1054,11 +1055,11 @@
 		| PMECC_CFG_AUTO_DISABLE);
 	pmecc_writel(host->ecc, CFG, val);
 
-	ecc_layout = nand_chip->ecc.layout;
 	pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
-	pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
+	mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	pmecc_writel(host->ecc, SADDR, oobregion.offset);
 	pmecc_writel(host->ecc, EADDR,
-			ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+		     oobregion.offset + eccbytes - 1);
 	/* See datasheet about PMECC Clock Control Register */
 	pmecc_writel(host->ecc, CLK, 2);
 	pmecc_writel(host->ecc, IDR, 0xff);
@@ -1206,6 +1207,7 @@
 		dev_warn(host->dev,
 			"Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
 		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
 		return 0;
 	}
 
@@ -1280,11 +1282,8 @@
 			err_no = -EINVAL;
 			goto err;
 		}
-		pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
-					mtd->oobsize,
-					nand_chip->ecc.total);
 
-		nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 		break;
 	default:
 		dev_warn(host->dev,
@@ -1292,6 +1291,7 @@
 		/* page size not handled by HW ECC */
 		/* switching back to soft ECC */
 		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
 		return 0;
 	}
 
@@ -1359,12 +1359,12 @@
 {
 	int eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	uint8_t *p = buf;
 	uint8_t *oob = chip->oob_poi;
 	uint8_t *ecc_pos;
 	int stat;
 	unsigned int max_bitflips = 0;
+	struct mtd_oob_region oobregion = {};
 
 	/*
 	 * Errata: ALE is incorrectly wired up to the ECC controller
@@ -1382,19 +1382,20 @@
 	chip->read_buf(mtd, p, eccsize);
 
 	/* move to ECC position if needed */
-	if (eccpos[0] != 0) {
-		/* This only works on large pages
-		 * because the ECC controller waits for
-		 * NAND_CMD_RNDOUTSTART after the
-		 * NAND_CMD_RNDOUT.
-		 * anyway, for small pages, the eccpos[0] == 0
+	mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	if (oobregion.offset != 0) {
+		/*
+		 * This only works on large pages because the ECC controller
+		 * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT.
+		 * Anyway, for small pages, the first ECC byte is at offset
+		 * 0 in the OOB area.
 		 */
 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
-				mtd->writesize + eccpos[0], -1);
+			      mtd->writesize + oobregion.offset, -1);
 	}
 
 	/* the ECC controller needs to read the ECC just after the data */
-	ecc_pos = oob + eccpos[0];
+	ecc_pos = oob + oobregion.offset;
 	chip->read_buf(mtd, ecc_pos, eccbytes);
 
 	/* check if there's an error */
@@ -1504,58 +1505,17 @@
 		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
 }
 
-static int atmel_of_init_port(struct atmel_nand_host *host,
-			      struct device_node *np)
+static int atmel_of_init_ecc(struct atmel_nand_host *host,
+			     struct device_node *np)
 {
-	u32 val;
 	u32 offset[2];
-	int ecc_mode;
-	struct atmel_nand_data *board = &host->board;
-	enum of_gpio_flags flags = 0;
-
-	host->caps = (struct atmel_nand_caps *)
-		of_device_get_match_data(host->dev);
-
-	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
-		if (val >= 32) {
-			dev_err(host->dev, "invalid addr-offset %u\n", val);
-			return -EINVAL;
-		}
-		board->ale = val;
-	}
-
-	if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
-		if (val >= 32) {
-			dev_err(host->dev, "invalid cmd-offset %u\n", val);
-			return -EINVAL;
-		}
-		board->cle = val;
-	}
-
-	ecc_mode = of_get_nand_ecc_mode(np);
-
-	board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
-
-	board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
-
-	board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
-
-	if (of_get_nand_bus_width(np) == 16)
-		board->bus_width_16 = 1;
-
-	board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
-	board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
-
-	board->enable_pin = of_get_gpio(np, 1);
-	board->det_pin = of_get_gpio(np, 2);
+	u32 val;
 
 	host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
 
-	/* load the nfc driver if there is */
-	of_platform_populate(np, NULL, NULL, host->dev);
-
-	if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
-		return 0;	/* Not using PMECC */
+	/* Not using PMECC */
+	if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc)
+		return 0;
 
 	/* use PMECC, get correction capability, sector size and lookup
 	 * table offset.
@@ -1596,16 +1556,65 @@
 		/* Will build a lookup table and initialize the offset later */
 		return 0;
 	}
+
 	if (!offset[0] && !offset[1]) {
 		dev_err(host->dev, "Invalid PMECC lookup table offset\n");
 		return -EINVAL;
 	}
+
 	host->pmecc_lookup_table_offset_512 = offset[0];
 	host->pmecc_lookup_table_offset_1024 = offset[1];
 
 	return 0;
 }
 
+static int atmel_of_init_port(struct atmel_nand_host *host,
+			      struct device_node *np)
+{
+	u32 val;
+	struct atmel_nand_data *board = &host->board;
+	enum of_gpio_flags flags = 0;
+
+	host->caps = (struct atmel_nand_caps *)
+		of_device_get_match_data(host->dev);
+
+	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+		if (val >= 32) {
+			dev_err(host->dev, "invalid addr-offset %u\n", val);
+			return -EINVAL;
+		}
+		board->ale = val;
+	}
+
+	if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+		if (val >= 32) {
+			dev_err(host->dev, "invalid cmd-offset %u\n", val);
+			return -EINVAL;
+		}
+		board->cle = val;
+	}
+
+	board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
+	board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+	board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+	board->enable_pin = of_get_gpio(np, 1);
+	board->det_pin = of_get_gpio(np, 2);
+
+	/* load the NFC driver if there is one */
+	of_platform_populate(np, NULL, NULL, host->dev);
+
+	/*
+	 * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value
+	 * even if the nand-ecc-mode property is not defined.
+	 */
+	host->nand_chip.ecc.mode = NAND_ECC_SOFT;
+	host->nand_chip.ecc.algo = NAND_ECC_HAMMING;
+
+	return 0;
+}
+
 static int atmel_hw_nand_init_params(struct platform_device *pdev,
 					 struct atmel_nand_host *host)
 {
@@ -1618,6 +1627,7 @@
 		dev_err(host->dev,
 			"Can't get I/O resource regs, use software ECC\n");
 		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
 		return 0;
 	}
 
@@ -1631,25 +1641,26 @@
 	/* set ECC page size and oob layout */
 	switch (mtd->writesize) {
 	case 512:
-		nand_chip->ecc.layout = &atmel_oobinfo_small;
+		mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops);
 		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
 		break;
 	case 1024:
-		nand_chip->ecc.layout = &atmel_oobinfo_large;
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
 		break;
 	case 2048:
-		nand_chip->ecc.layout = &atmel_oobinfo_large;
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
 		break;
 	case 4096:
-		nand_chip->ecc.layout = &atmel_oobinfo_large;
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
 		break;
 	default:
 		/* page size not handled by HW ECC */
 		/* switching back to soft ECC */
 		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
 		return 0;
 	}
 
@@ -1699,9 +1710,9 @@
 		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
 		ret = IRQ_HANDLED;
 	}
-	if (pending & host->nfc->caps->rb_mask) {
+	if (pending & NFC_SR_RB_EDGE) {
 		complete(&host->nfc->comp_ready);
-		nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
+		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
 		ret = IRQ_HANDLED;
 	}
 	if (pending & NFC_SR_CMD_DONE) {
@@ -1719,7 +1730,7 @@
 	if (flag & NFC_SR_XFR_DONE)
 		init_completion(&host->nfc->comp_xfer_done);
 
-	if (flag & host->nfc->caps->rb_mask)
+	if (flag & NFC_SR_RB_EDGE)
 		init_completion(&host->nfc->comp_ready);
 
 	if (flag & NFC_SR_CMD_DONE)
@@ -1737,7 +1748,7 @@
 	if (flag & NFC_SR_XFR_DONE)
 		comp[index++] = &host->nfc->comp_xfer_done;
 
-	if (flag & host->nfc->caps->rb_mask)
+	if (flag & NFC_SR_RB_EDGE)
 		comp[index++] = &host->nfc->comp_ready;
 
 	if (flag & NFC_SR_CMD_DONE)
@@ -1805,7 +1816,7 @@
 		dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
 				mask & status);
 
-	return status & host->nfc->caps->rb_mask;
+	return status & NFC_SR_RB_EDGE;
 }
 
 static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1978,8 +1989,8 @@
 		}
 		/* fall through */
 	default:
-		nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
-		nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
+		nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
+		nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
 	}
 }
 
@@ -2147,6 +2158,19 @@
 	} else {
 		memcpy(&host->board, dev_get_platdata(&pdev->dev),
 		       sizeof(struct atmel_nand_data));
+		nand_chip->ecc.mode = host->board.ecc_mode;
+
+		/*
+		 * When using software ECC, every supported avr32 board uses
+		 * the Hamming algorithm. If that ever changes we'll need to
+		 * add an ecc_algo field to struct atmel_nand_data.
+		 */
+		if (nand_chip->ecc.mode == NAND_ECC_SOFT)
+			nand_chip->ecc.algo = NAND_ECC_HAMMING;
+
+		/* 16-bit bus width */
+		if (host->board.bus_width_16)
+			nand_chip->options |= NAND_BUSWIDTH_16;
 	}
 
 	 /* link the private data structures */
@@ -2188,11 +2212,8 @@
 		nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
 	}
 
-	nand_chip->ecc.mode = host->board.ecc_mode;
 	nand_chip->chip_delay = 40;		/* 40us command delay time */
 
-	if (host->board.bus_width_16)	/* 16-bit bus width */
-		nand_chip->options |= NAND_BUSWIDTH_16;
 
 	nand_chip->read_buf = atmel_read_buf;
 	nand_chip->write_buf = atmel_write_buf;
@@ -2225,11 +2246,6 @@
 		}
 	}
 
-	if (host->board.on_flash_bbt || on_flash_bbt) {
-		dev_info(&pdev->dev, "Use On Flash BBT\n");
-		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
-	}
-
 	if (!host->board.has_dma)
 		use_dma = 0;
 
@@ -2256,6 +2272,18 @@
 		goto err_scan_ident;
 	}
 
+	if (host->board.on_flash_bbt || on_flash_bbt)
+		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
+		dev_info(&pdev->dev, "Use On Flash BBT\n");
+
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		res = atmel_of_init_ecc(host, pdev->dev.of_node);
+		if (res)
+			goto err_hw_ecc;
+	}
+
 	if (nand_chip->ecc.mode == NAND_ECC_HW) {
 		if (host->has_pmecc)
 			res = atmel_pmecc_nand_init_params(pdev, host);
@@ -2393,11 +2421,6 @@
 		}
 	}
 
-	nfc->caps = (const struct atmel_nand_nfc_caps *)
-		of_device_get_match_data(&pdev->dev);
-	if (!nfc->caps)
-		return -ENODEV;
-
 	nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
 	nfc_readl(nfc->hsmc_regs, SR);	/* clear the NFC_SR */
 
@@ -2426,17 +2449,8 @@
 	return 0;
 }
 
-static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
-	.rb_mask = NFC_SR_RB_EDGE0,
-};
-
-static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
-	.rb_mask = NFC_SR_RB_EDGE3,
-};
-
 static const struct of_device_id atmel_nand_nfc_match[] = {
-	{ .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
-	{ .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
+	{ .compatible = "atmel,sama5d3-nfc" },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 0bbc1fa..4d5d262 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,8 +42,7 @@
 #define		NFC_SR_UNDEF		(1 << 21)
 #define		NFC_SR_AWB		(1 << 22)
 #define		NFC_SR_ASE		(1 << 23)
-#define		NFC_SR_RB_EDGE0		(1 << 24)
-#define		NFC_SR_RB_EDGE3		(1 << 27)
+#define		NFC_SR_RB_EDGE		(1 << 24)
 
 #define ATMEL_HSMC_NFC_IER	0x0c
 #define ATMEL_HSMC_NFC_IDR	0x10
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 341ea49..9bf6d99 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -459,6 +459,7 @@
 	/* 30 us command delay time */
 	this->chip_delay = 30;
 	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
 
 	if (pd->devwidth)
 		this->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 7f6b30e..37da423 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -109,28 +109,33 @@
 	 0};
 
 #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-static struct nand_ecclayout bootrom_ecclayout = {
-	.eccbytes = 24,
-	.eccpos = {
-		0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2,
-		0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2,
-		0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2,
-		0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2,
-		0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2,
-		0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2,
-		0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2,
-		0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2
-	},
-	.oobfree = {
-		{ 0x8 * 0 + 3, 5 },
-		{ 0x8 * 1 + 3, 5 },
-		{ 0x8 * 2 + 3, 5 },
-		{ 0x8 * 3 + 3, 5 },
-		{ 0x8 * 4 + 3, 5 },
-		{ 0x8 * 5 + 3, 5 },
-		{ 0x8 * 6 + 3, 5 },
-		{ 0x8 * 7 + 3, 5 },
-	}
+static int bootrom_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = section * 8;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static int bootrom_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = (section * 8) + 3;
+	oobregion->length = 5;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops bootrom_ooblayout_ops = {
+	.ecc = bootrom_ooblayout_ecc,
+	.free = bootrom_ooblayout_free,
 };
 #endif
 
@@ -800,7 +805,7 @@
 	/* setup hardware ECC data struct */
 	if (hardware_ecc) {
 #ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-		chip->ecc.layout = &bootrom_ecclayout;
+		mtd_set_ooblayout(mtd, &bootrom_ooblayout_ops);
 #endif
 		chip->read_buf      = bf5xx_nand_dma_read_buf;
 		chip->write_buf     = bf5xx_nand_dma_write_buf;
@@ -812,6 +817,7 @@
 		chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
 	} else {
 		chip->ecc.mode	    = NAND_ECC_SOFT;
+		chip->ecc.algo	= NAND_ECC_HAMMING;
 	}
 
 	/* scan hardware nand chip and setup mtd info data struct */
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e052839..b76ad7c 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -32,7 +32,6 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
-#include <linux/of_mtd.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/list.h>
@@ -601,7 +600,7 @@
 
 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
 {
-	if (ctrl->nand_version < 0x0700)
+	if (ctrl->nand_version < 0x0602)
 		return 24;
 	return 0;
 }
@@ -781,55 +780,161 @@
 }
 
 /*
- * Returns a nand_ecclayout strucutre for the given layout/configuration.
- * Returns NULL on failure.
+ * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
+ * the layout/configuration.
+ * Returns a negative error code on failure.
  */
-static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
-						     struct brcmnand_host *host)
+static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
 {
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
 	struct brcmnand_cfg *cfg = &host->hwcfg;
-	int i, j;
-	struct nand_ecclayout *layout;
-	int req;
-	int sectors;
-	int sas;
-	int idx1, idx2;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
 
-	layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
-	if (!layout)
-		return NULL;
+	if (section >= sectors)
+		return -ERANGE;
 
-	sectors = cfg->page_size / (512 << cfg->sector_size_1k);
-	sas = cfg->spare_area_size << cfg->sector_size_1k;
+	oobregion->offset = (section * sas) + 6;
+	oobregion->length = 3;
 
-	/* Hamming */
-	if (is_hamming_ecc(cfg)) {
-		for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
-			/* First sector of each page may have BBI */
-			if (i == 0) {
-				layout->oobfree[idx2].offset = i * sas + 1;
-				/* Small-page NAND use byte 6 for BBI */
-				if (cfg->page_size == 512)
-					layout->oobfree[idx2].offset--;
-				layout->oobfree[idx2].length = 5;
-			} else {
-				layout->oobfree[idx2].offset = i * sas;
-				layout->oobfree[idx2].length = 6;
-			}
-			idx2++;
-			layout->eccpos[idx1++] = i * sas + 6;
-			layout->eccpos[idx1++] = i * sas + 7;
-			layout->eccpos[idx1++] = i * sas + 8;
-			layout->oobfree[idx2].offset = i * sas + 9;
-			layout->oobfree[idx2].length = 7;
-			idx2++;
-			/* Leave zero-terminated entry for OOBFREE */
-			if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
-				    idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
-				break;
+	return 0;
+}
+
+static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
+					   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors * 2)
+		return -ERANGE;
+
+	oobregion->offset = (section / 2) * sas;
+
+	if (section & 1) {
+		oobregion->offset += 9;
+		oobregion->length = 7;
+	} else {
+		oobregion->length = 6;
+
+		/* First sector of each page may have BBI */
+		if (!section) {
+			/*
+			 * Small-page NAND uses byte 6 for BBI while large-page
+			 * NAND uses byte 0.
+			 */
+			if (cfg->page_size > 512)
+				oobregion->offset++;
+			oobregion->length--;
 		}
+	}
 
-		return layout;
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
+	.ecc = brcmnand_hamming_ooblayout_ecc,
+	.free = brcmnand_hamming_ooblayout_free,
+};
+
+static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	if (sas <= chip->ecc.bytes)
+		return 0;
+
+	oobregion->offset = section * sas;
+	oobregion->length = sas - chip->ecc.bytes;
+
+	if (!section) {
+		oobregion->offset++;
+		oobregion->length--;
+	}
+
+	return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+	if (section > 1 || sas - chip->ecc.bytes < 6 ||
+	    (section && sas - chip->ecc.bytes == 6))
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 5;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = sas - chip->ecc.bytes - 6;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
+	.ecc = brcmnand_bch_ooblayout_ecc,
+	.free = brcmnand_bch_ooblayout_free_lp,
+};
+
+static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
+	.ecc = brcmnand_bch_ooblayout_ecc,
+	.free = brcmnand_bch_ooblayout_free_sp,
+};
+
+static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
+{
+	struct brcmnand_cfg *p = &host->hwcfg;
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
+	unsigned int ecc_level = p->ecc_level;
+	int sas = p->spare_area_size << p->sector_size_1k;
+	int sectors = p->page_size / (512 << p->sector_size_1k);
+
+	if (p->sector_size_1k)
+		ecc_level <<= 1;
+
+	if (is_hamming_ecc(p)) {
+		ecc->bytes = 3 * sectors;
+		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
+		return 0;
 	}
 
 	/*
@@ -838,70 +943,20 @@
 	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
 	 * But we will just be conservative.
 	 */
-	req = DIV_ROUND_UP(ecc_level * 14, 8);
-	if (req >= sas) {
+	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
+	if (p->page_size == 512)
+		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
+	else
+		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
+
+	if (ecc->bytes >= sas) {
 		dev_err(&host->pdev->dev,
 			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
-			req, sas);
-		return NULL;
+			ecc->bytes, sas);
+		return -EINVAL;
 	}
 
-	layout->eccbytes = req * sectors;
-	for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
-		for (j = sas - req; j < sas && idx1 <
-				MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
-			layout->eccpos[idx1] = i * sas + j;
-
-		/* First sector of each page may have BBI */
-		if (i == 0) {
-			if (cfg->page_size == 512 && (sas - req >= 6)) {
-				/* Small-page NAND use byte 6 for BBI */
-				layout->oobfree[idx2].offset = 0;
-				layout->oobfree[idx2].length = 5;
-				idx2++;
-				if (sas - req > 6) {
-					layout->oobfree[idx2].offset = 6;
-					layout->oobfree[idx2].length =
-						sas - req - 6;
-					idx2++;
-				}
-			} else if (sas > req + 1) {
-				layout->oobfree[idx2].offset = i * sas + 1;
-				layout->oobfree[idx2].length = sas - req - 1;
-				idx2++;
-			}
-		} else if (sas > req) {
-			layout->oobfree[idx2].offset = i * sas;
-			layout->oobfree[idx2].length = sas - req;
-			idx2++;
-		}
-		/* Leave zero-terminated entry for OOBFREE */
-		if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
-				idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
-			break;
-	}
-
-	return layout;
-}
-
-static struct nand_ecclayout *brcmstb_choose_ecc_layout(
-		struct brcmnand_host *host)
-{
-	struct nand_ecclayout *layout;
-	struct brcmnand_cfg *p = &host->hwcfg;
-	unsigned int ecc_level = p->ecc_level;
-
-	if (p->sector_size_1k)
-		ecc_level <<= 1;
-
-	layout = brcmnand_create_layout(ecc_level, host);
-	if (!layout) {
-		dev_err(&host->pdev->dev,
-				"no proper ecc_layout for this NAND cfg\n");
-		return NULL;
-	}
-
-	return layout;
+	return 0;
 }
 
 static void brcmnand_wp(struct mtd_info *mtd, int wp)
@@ -1870,9 +1925,31 @@
 	cfg->col_adr_bytes = 2;
 	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
 
+	if (chip->ecc.mode != NAND_ECC_HW) {
+		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
+			chip->ecc.mode);
+		return -EINVAL;
+	}
+
+	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
+			/* Default to Hamming for 1-bit ECC, if unspecified */
+			chip->ecc.algo = NAND_ECC_HAMMING;
+		else
+			/* Otherwise, BCH */
+			chip->ecc.algo = NAND_ECC_BCH;
+	}
+
+	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
+						   chip->ecc.size != 512)) {
+		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
+			chip->ecc.strength, chip->ecc.size);
+		return -EINVAL;
+	}
+
 	switch (chip->ecc.size) {
 	case 512:
-		if (chip->ecc.strength == 1) /* Hamming */
+		if (chip->ecc.algo == NAND_ECC_HAMMING)
 			cfg->ecc_level = 15;
 		else
 			cfg->ecc_level = chip->ecc.strength;
@@ -2001,8 +2078,8 @@
 	 */
 	chip->options |= NAND_USE_BOUNCE_BUFFER;
 
-	if (of_get_nand_on_flash_bbt(dn))
-		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
+		chip->bbt_options |= NAND_BBT_NO_OOB;
 
 	if (brcmnand_setup_dev(host))
 		return -ENXIO;
@@ -2011,9 +2088,9 @@
 	/* only use our internal HW threshold */
 	mtd->bitflip_threshold = 1;
 
-	chip->ecc.layout = brcmstb_choose_ecc_layout(host);
-	if (!chip->ecc.layout)
-		return -ENXIO;
+	ret = brcmstb_choose_ecc_layout(host);
+	if (ret)
+		return ret;
 
 	if (nand_scan_tail(mtd))
 		return -ENXIO;
@@ -2115,6 +2192,7 @@
 	{ .compatible = "brcm,brcmnand-v5.0" },
 	{ .compatible = "brcm,brcmnand-v6.0" },
 	{ .compatible = "brcm,brcmnand-v6.1" },
+	{ .compatible = "brcm,brcmnand-v6.2" },
 	{ .compatible = "brcm,brcmnand-v7.0" },
 	{ .compatible = "brcm,brcmnand-v7.1" },
 	{},
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e553aff..0b0c937 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -459,10 +459,37 @@
 	return max_bitflips;
 }
 
-static struct nand_ecclayout cafe_oobinfo_2048 = {
-	.eccbytes = 14,
-	.eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
-	.oobfree = {{14, 50}}
+static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = chip->ecc.total;
+	oobregion->length = mtd->oobsize - chip->ecc.total;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
+	.ecc = cafe_ooblayout_ecc,
+	.free = cafe_ooblayout_free,
 };
 
 /* Ick. The BBT code really ought to be able to work this bit out
@@ -494,12 +521,6 @@
 	.pattern = cafe_mirror_pattern_2048
 };
 
-static struct nand_ecclayout cafe_oobinfo_512 = {
-	.eccbytes = 14,
-	.eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
-	.oobfree = {{14, 2}}
-};
-
 static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
@@ -743,12 +764,11 @@
 		cafe->ctl2 |= 1<<29; /* 2KiB page size */
 
 	/* Set up ECC according to the type of chip we found */
+	mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
 	if (mtd->writesize == 2048) {
-		cafe->nand.ecc.layout = &cafe_oobinfo_2048;
 		cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
 		cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
 	} else if (mtd->writesize == 512) {
-		cafe->nand.ecc.layout = &cafe_oobinfo_512;
 		cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
 		cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
 	} else {
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6f97ebb..4913378 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -187,6 +187,7 @@
 	/* 15 us command delay time */
 	this->chip_delay = 20;
 	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
 
 	/* read/write functions */
 	this->read_byte = cmx270_read_byte;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 8cb821b..cc07ba0 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -34,7 +34,6 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
-#include <linux/of_mtd.h>
 
 #include <linux/platform_data/mtd-davinci.h>
 #include <linux/platform_data/mtd-davinci-aemif.h>
@@ -54,7 +53,6 @@
  */
 struct davinci_nand_info {
 	struct nand_chip	chip;
-	struct nand_ecclayout	ecclayout;
 
 	struct device		*dev;
 	struct clk		*clk;
@@ -480,63 +478,46 @@
  * ten ECC bytes plus the manufacturer's bad block marker byte, and
  * and not overlapping the default BBT markers.
  */
-static struct nand_ecclayout hwecc4_small = {
-	.eccbytes = 10,
-	.eccpos = { 0, 1, 2, 3, 4,
-		/* offset 5 holds the badblock marker */
-		6, 7,
-		13, 14, 15, },
-	.oobfree = {
-		{.offset = 8, .length = 5, },
-		{.offset = 16, },
-	},
-};
+static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	if (section > 2)
+		return -ERANGE;
 
-/* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash,
- * storing ten ECC bytes plus the manufacturer's bad block marker byte,
- * and not overlapping the default BBT markers.
- */
-static struct nand_ecclayout hwecc4_2048 = {
-	.eccbytes = 40,
-	.eccpos = {
-		/* at the end of spare sector */
-		24, 25, 26, 27, 28, 29,	30, 31, 32, 33,
-		34, 35, 36, 37, 38, 39,	40, 41, 42, 43,
-		44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
-		54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
-		},
-	.oobfree = {
-		/* 2 bytes at offset 0 hold manufacturer badblock markers */
-		{.offset = 2, .length = 22, },
-		/* 5 bytes at offset 8 hold BBT markers */
-		/* 8 bytes at offset 16 hold JFFS2 clean markers */
-	},
-};
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 5;
+	} else if (section == 1) {
+		oobregion->offset = 6;
+		oobregion->length = 2;
+	} else {
+		oobregion->offset = 13;
+		oobregion->length = 3;
+	}
 
-/*
- * An ECC layout for using 4-bit ECC with large-page (4096bytes) flash,
- * storing ten ECC bytes plus the manufacturer's bad block marker byte,
- * and not overlapping the default BBT markers.
- */
-static struct nand_ecclayout hwecc4_4096 = {
-	.eccbytes = 80,
-	.eccpos = {
-		/* at the end of spare sector */
-		48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
-		58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
-		68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
-		78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
-		88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
-		98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
-		108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
-		118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
-	},
-	.oobfree = {
-		/* 2 bytes at offset 0 hold manufacturer badblock markers */
-		{.offset = 2, .length = 46, },
-		/* 5 bytes at offset 8 hold BBT markers */
-		/* 8 bytes at offset 16 hold JFFS2 clean markers */
-	},
+	return 0;
+}
+
+static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 8;
+		oobregion->length = 5;
+	} else {
+		oobregion->offset = 16;
+		oobregion->length = mtd->oobsize - 16;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
+	.ecc = hwecc4_ooblayout_small_ecc,
+	.free = hwecc4_ooblayout_small_free,
 };
 
 #if defined(CONFIG_OF)
@@ -577,8 +558,6 @@
 			"ti,davinci-mask-chipsel", &prop))
 			pdata->mask_chipsel = prop;
 		if (!of_property_read_string(pdev->dev.of_node,
-			"nand-ecc-mode", &mode) ||
-		    !of_property_read_string(pdev->dev.of_node,
 			"ti,davinci-ecc-mode", &mode)) {
 			if (!strncmp("none", mode, 4))
 				pdata->ecc_mode = NAND_ECC_NONE;
@@ -591,14 +570,11 @@
 			"ti,davinci-ecc-bits", &prop))
 			pdata->ecc_bits = prop;
 
-		prop = of_get_nand_bus_width(pdev->dev.of_node);
-		if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-nand-buswidth", &prop))
-			if (prop == 16)
-				pdata->options |= NAND_BUSWIDTH_16;
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-nand-buswidth", &prop) && prop == 16)
+			pdata->options |= NAND_BUSWIDTH_16;
+
 		if (of_property_read_bool(pdev->dev.of_node,
-			"nand-on-flash-bbt") ||
-		    of_property_read_bool(pdev->dev.of_node,
 			"ti,davinci-nand-use-bbt"))
 			pdata->bbt_options = NAND_BBT_USE_FLASH;
 
@@ -628,7 +604,6 @@
 	void __iomem			*base;
 	int				ret;
 	uint32_t			val;
-	nand_ecc_modes_t		ecc_mode;
 	struct mtd_info			*mtd;
 
 	pdata = nand_davinci_get_pdata(pdev);
@@ -712,13 +687,53 @@
 	info->chip.write_buf    = nand_davinci_write_buf;
 
 	/* Use board-specific ECC config */
-	ecc_mode		= pdata->ecc_mode;
+	info->chip.ecc.mode	= pdata->ecc_mode;
 
 	ret = -EINVAL;
-	switch (ecc_mode) {
+
+	info->clk = devm_clk_get(&pdev->dev, "aemif");
+	if (IS_ERR(info->clk)) {
+		ret = PTR_ERR(info->clk);
+		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret < 0) {
+		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+			ret);
+		goto err_clk_enable;
+	}
+
+	spin_lock_irq(&davinci_nand_lock);
+
+	/* put CSxNAND into NAND mode */
+	val = davinci_nand_readl(info, NANDFCR_OFFSET);
+	val |= BIT(info->core_chipsel);
+	davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+	spin_unlock_irq(&davinci_nand_lock);
+
+	/* Scan to find existence of the device(s) */
+	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+	if (ret < 0) {
+		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+		goto err;
+	}
+
+	switch (info->chip.ecc.mode) {
 	case NAND_ECC_NONE:
+		pdata->ecc_bits = 0;
+		break;
 	case NAND_ECC_SOFT:
 		pdata->ecc_bits = 0;
+		/*
+		 * This driver expects Hamming based ECC when ecc_mode is set
+		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+		 * avoid adding an extra ->ecc_algo field to
+		 * davinci_nand_pdata.
+		 */
+		info->chip.ecc.algo = NAND_ECC_HAMMING;
 		break;
 	case NAND_ECC_HW:
 		if (pdata->ecc_bits == 4) {
@@ -754,37 +769,6 @@
 	default:
 		return -EINVAL;
 	}
-	info->chip.ecc.mode = ecc_mode;
-
-	info->clk = devm_clk_get(&pdev->dev, "aemif");
-	if (IS_ERR(info->clk)) {
-		ret = PTR_ERR(info->clk);
-		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
-		return ret;
-	}
-
-	ret = clk_prepare_enable(info->clk);
-	if (ret < 0) {
-		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
-			ret);
-		goto err_clk_enable;
-	}
-
-	spin_lock_irq(&davinci_nand_lock);
-
-	/* put CSxNAND into NAND mode */
-	val = davinci_nand_readl(info, NANDFCR_OFFSET);
-	val |= BIT(info->core_chipsel);
-	davinci_nand_writel(info, NANDFCR_OFFSET, val);
-
-	spin_unlock_irq(&davinci_nand_lock);
-
-	/* Scan to find existence of the device(s) */
-	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
-	if (ret < 0) {
-		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
-		goto err;
-	}
 
 	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
 	 * is OK, but it allocates 6 bytes when only 3 are needed (for
@@ -805,26 +789,14 @@
 		 * table marker fits in the free bytes.
 		 */
 		if (chunks == 1) {
-			info->ecclayout = hwecc4_small;
-			info->ecclayout.oobfree[1].length = mtd->oobsize - 16;
-			goto syndrome_done;
-		}
-		if (chunks == 4) {
-			info->ecclayout = hwecc4_2048;
+			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
+		} else if (chunks == 4 || chunks == 8) {
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
-			goto syndrome_done;
+		} else {
+			ret = -EIO;
+			goto err;
 		}
-		if (chunks == 8) {
-			info->ecclayout = hwecc4_4096;
-			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
-			goto syndrome_done;
-		}
-
-		ret = -EIO;
-		goto err;
-
-syndrome_done:
-		info->chip.ecc.layout = &info->ecclayout;
 	}
 
 	ret = nand_scan_tail(mtd);
@@ -850,7 +822,7 @@
 
 err_clk_enable:
 	spin_lock_irq(&davinci_nand_lock);
-	if (ecc_mode == NAND_ECC_HW_SYNDROME)
+	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
 		ecc4_busy = false;
 	spin_unlock_irq(&davinci_nand_lock);
 	return ret;
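
The davinci conversion is the pattern the rest of this series follows: the static nand_ecclayout tables, with their fixed eccpos[]/oobfree[] arrays, are replaced by a pair of section-indexed callbacks registered through mtd_set_ooblayout(). The sketch below is a hedged user-space model (not kernel code; struct and function names are illustrative) of the small-page hwecc4 callbacks above, enumerating sections until the callback reports it has run out of regions:

#include <stdio.h>

struct oob_region { int offset; int length; };

/* Mirrors hwecc4_ooblayout_small_ecc(): three ECC regions, ten bytes total. */
static int small_ecc(int section, struct oob_region *r)
{
	if (section > 2)
		return -1;			/* stands in for -ERANGE */
	if (section == 0)      { r->offset = 0;  r->length = 5; }
	else if (section == 1) { r->offset = 6;  r->length = 2; }
	else                   { r->offset = 13; r->length = 3; }
	return 0;
}

/* Mirrors hwecc4_ooblayout_small_free() for an assumed 16-byte OOB. */
static int small_free(int section, struct oob_region *r, int oobsize)
{
	if (section > 1)
		return -1;
	if (section == 0) { r->offset = 8;  r->length = 5; }
	else              { r->offset = 16; r->length = oobsize - 16; }
	return 0;
}

int main(void)
{
	struct oob_region r;
	int s;

	for (s = 0; small_ecc(s, &r) == 0; s++)
		printf("ecc  region %d: {%d, %d}\n", s, r.offset, r.length);
	for (s = 0; small_free(s, &r, 16) == 0; s++)
		printf("free region %d: {%d, %d}\n", s, r.offset, r.length);
	return 0;
}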
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 30bf5f6..0476ae8 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1374,13 +1374,41 @@
  * correction
  */
 #define ECC_8BITS	14
-static struct nand_ecclayout nand_8bit_oob = {
-	.eccbytes = 14,
-};
-
 #define ECC_15BITS	26
-static struct nand_ecclayout nand_15bit_oob = {
-	.eccbytes = 26,
+
+static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = denali->bbtskipbytes;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int denali_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
+	oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
+	.ecc = denali_ooblayout_ecc,
+	.free = denali_ooblayout_free,
 };
 
 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -1561,7 +1589,6 @@
 			ECC_SECTOR_SIZE)))) {
 		/* if MLC OOB size is large enough, use 15bit ECC*/
 		denali->nand.ecc.strength = 15;
-		denali->nand.ecc.layout = &nand_15bit_oob;
 		denali->nand.ecc.bytes = ECC_15BITS;
 		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
 	} else if (mtd->oobsize < (denali->bbtskipbytes +
@@ -1571,20 +1598,13 @@
 		goto failed_req_irq;
 	} else {
 		denali->nand.ecc.strength = 8;
-		denali->nand.ecc.layout = &nand_8bit_oob;
 		denali->nand.ecc.bytes = ECC_8BITS;
 		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
 	}
 
+	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
 	denali->nand.ecc.bytes *= denali->devnum;
 	denali->nand.ecc.strength *= denali->devnum;
-	denali->nand.ecc.layout->eccbytes *=
-		mtd->writesize / ECC_SECTOR_SIZE;
-	denali->nand.ecc.layout->oobfree[0].offset =
-		denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
-	denali->nand.ecc.layout->oobfree[0].length =
-		mtd->oobsize - denali->nand.ecc.layout->eccbytes -
-		denali->bbtskipbytes;
 
 	/*
 	 * Let driver know the total blocks number and how many blocks
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 547c100..a023ab9 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -950,20 +950,50 @@
 
 //u_char mydatabuf[528];
 
-/* The strange out-of-order .oobfree list below is a (possibly unneeded)
- * attempt to retain compatibility.  It used to read:
- * 	.oobfree = { {8, 8} }
- * Since that leaves two bytes unusable, it was changed.  But the following
- * scheme might affect existing jffs2 installs by moving the cleanmarker:
- * 	.oobfree = { {6, 10} }
- * jffs2 seems to handle the above gracefully, but the current scheme seems
- * safer.  The only problem with it is that any code that parses oobfree must
- * be able to handle out-of-order segments.
- */
-static struct nand_ecclayout doc200x_oobinfo = {
-	.eccbytes = 6,
-	.eccpos = {0, 1, 2, 3, 4, 5},
-	.oobfree = {{8, 8}, {6, 2}}
+static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = 6;
+
+	return 0;
+}
+
+static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	/*
+	 * The strange out-of-order free bytes definition is a (possibly
+	 * unneeded) attempt to retain compatibility.  It used to read:
+	 *	.oobfree = { {8, 8} }
+	 * Since that leaves two bytes unusable, it was changed.  But the
+	 * following scheme might affect existing jffs2 installs by moving the
+	 * cleanmarker:
+	 *	.oobfree = { {6, 10} }
+	 * jffs2 seems to handle the above gracefully, but the current scheme
+	 * seems safer. The only problem with it is that any code retrieving
+	 * free bytes position must be able to handle out-of-order segments.
+	 */
+	if (!section) {
+		oobregion->offset = 8;
+		oobregion->length = 8;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = 2;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
+	.ecc = doc200x_ooblayout_ecc,
+	.free = doc200x_ooblayout_free,
 };
 
 /* Find the (I)NFTL Media Header, and optionally also the mirror media header.
@@ -1537,6 +1567,7 @@
 	nand->bbt_md		= nand->bbt_td + 1;
 
 	mtd->owner		= THIS_MODULE;
+	mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
 
 	nand_set_controller_data(nand, doc);
 	nand->select_chip	= doc200x_select_chip;
@@ -1548,7 +1579,6 @@
 	nand->ecc.calculate	= doc200x_calculate_ecc;
 	nand->ecc.correct	= doc200x_correct_data;
 
-	nand->ecc.layout	= &doc200x_oobinfo;
 	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
 	nand->ecc.size		= 512;
 	nand->ecc.bytes		= 6;
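
Because doc200x deliberately reports its free bytes out of order ({8,8} before {6,2}), consumers of the new interface have to treat each section as an independent region rather than assuming ascending offsets. A hedged user-space sketch (names assumed) of totalling the free bytes in a way that is indifferent to the ordering:

#include <stdio.h>

struct oob_region { int offset; int length; };

/* Mirrors doc200x_ooblayout_free(): regions intentionally out of order. */
static int doc200x_free(int section, struct oob_region *r)
{
	if (section > 1)
		return -1;			/* stands in for -ERANGE */
	if (section == 0) { r->offset = 8; r->length = 8; }
	else              { r->offset = 6; r->length = 2; }
	return 0;
}

int main(void)
{
	struct oob_region r;
	int s, total = 0;

	for (s = 0; doc200x_free(s, &r) == 0; s++)
		total += r.length;
	printf("free OOB bytes: %d\n", total);	/* 10, same as {8,8} + {6,2} */
	return 0;
}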
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index d86a60e..4731699 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -222,10 +222,33 @@
  * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
  * Byte 15 (the last) is used by the driver as a "page written" flag.
  */
-static struct nand_ecclayout docg4_oobinfo = {
-	.eccbytes = 9,
-	.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
-	.oobfree = { {.offset = 2, .length = 5} }
+static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 7;
+	oobregion->length = 9;
+
+	return 0;
+}
+
+static int docg4_ooblayout_free(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 2;
+	oobregion->length = 5;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops docg4_ooblayout_ops = {
+	.ecc = docg4_ooblayout_ecc,
+	.free = docg4_ooblayout_free,
 };
 
 /*
@@ -1209,6 +1232,7 @@
 	mtd->writesize = DOCG4_PAGE_SIZE;
 	mtd->erasesize = DOCG4_BLOCK_SIZE;
 	mtd->oobsize = DOCG4_OOB_SIZE;
+	mtd_set_ooblayout(mtd, &docg4_ooblayout_ops);
 	nand->chipsize = DOCG4_CHIP_SIZE;
 	nand->chip_shift = DOCG4_CHIP_SHIFT;
 	nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
@@ -1217,7 +1241,6 @@
 	nand->pagemask = 0x3ffff;
 	nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
 	nand->badblockbits = 8;
-	nand->ecc.layout = &docg4_oobinfo;
 	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
 	nand->ecc.size = DOCG4_PAGE_SIZE;
 	nand->ecc.prepad = 8;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 059d5f7..60a88f2 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -79,32 +79,53 @@
 
 /* These map to the positions used by the FCM hardware ECC generator */
 
-/* Small Page FLASH with FMR[ECCM] = 0 */
-static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
-	.eccbytes = 3,
-	.eccpos = {6, 7, 8},
-	.oobfree = { {0, 5}, {9, 7} },
-};
+static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
 
-/* Small Page FLASH with FMR[ECCM] = 1 */
-static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
-	.eccbytes = 3,
-	.eccpos = {8, 9, 10},
-	.oobfree = { {0, 5}, {6, 2}, {11, 5} },
-};
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
 
-/* Large Page FLASH with FMR[ECCM] = 0 */
-static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
-	.eccbytes = 12,
-	.eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
-	.oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
-};
+	oobregion->offset = (16 * section) + 6;
+	if (priv->fmr & FMR_ECCM)
+		oobregion->offset += 2;
 
-/* Large Page FLASH with FMR[ECCM] = 1 */
-static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
-	.eccbytes = 12,
-	.eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
-	.oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+	if (section > chip->ecc.steps)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		if (mtd->writesize > 512)
+			oobregion->offset++;
+		oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
+	} else {
+		oobregion->offset = (16 * section) -
+				    ((priv->fmr & FMR_ECCM) ? 5 : 7);
+		if (section < chip->ecc.steps)
+			oobregion->length = 13;
+		else
+			oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
+	.ecc = fsl_elbc_ooblayout_ecc,
+	.free = fsl_elbc_ooblayout_free,
 };
 
 /*
@@ -657,8 +678,8 @@
 	        chip->ecc.bytes);
 	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
 	        chip->ecc.total);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
-	        chip->ecc.layout);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+		mtd->ooblayout);
 	dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
 	dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
 	dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
@@ -675,14 +696,6 @@
 	} else if (mtd->writesize == 2048) {
 		priv->page_size = 1;
 		setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
-		/* adjust ecc setup if needed */
-		if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
-		    BR_DECC_CHK_GEN) {
-			chip->ecc.size = 512;
-			chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
-			                   &fsl_elbc_oob_lp_eccm1 :
-			                   &fsl_elbc_oob_lp_eccm0;
-		}
 	} else {
 		dev_err(priv->dev,
 		        "fsl_elbc_init: page size %d is not supported\n",
@@ -780,15 +793,14 @@
 	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
 	    BR_DECC_CHK_GEN) {
 		chip->ecc.mode = NAND_ECC_HW;
-		/* put in small page settings and adjust later if needed */
-		chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
-				&fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
+		mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
 		chip->ecc.size = 512;
 		chip->ecc.bytes = 3;
 		chip->ecc.strength = 1;
 	} else {
 		/* otherwise fall back to default software ECC */
 		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_HAMMING;
 	}
 
 	return 0;
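
The four removed static elbc tables collapse into arithmetic: every 16-byte spare chunk carries 3 ECC bytes at offset 6, shifted to offset 8 when FMR[ECCM] is set. A hedged user-space check (the 4-step count for a 2KiB page is an assumption) that this reproduces the eccpos arrays of the removed large-page tables:

#include <stdio.h>

int main(void)
{
	int steps = 4, eccm, section, i;

	for (eccm = 0; eccm <= 1; eccm++) {
		printf("FMR[ECCM]=%d:", eccm);
		for (section = 0; section < steps; section++) {
			int offset = (16 * section) + 6 + (eccm ? 2 : 0);

			for (i = 0; i < 3; i++)
				printf(" %d", offset + i);
		}
		printf("\n");
	}
	/*
	 * Expected: 6 7 8 22 23 24 38 39 40 54 55 56   (ECCM=0)
	 *           8 9 10 24 25 26 40 41 42 56 57 58  (ECCM=1)
	 * i.e. the removed fsl_elbc_oob_lp_eccm0/eccm1 eccpos arrays.
	 */
	return 0;
}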
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 43f5a3a..4e9e5fd 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -67,136 +67,6 @@
 
 static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
 
-/* 512-byte page with 4-bit ECC, 8-bit */
-static struct nand_ecclayout oob_512_8bit_ecc4 = {
-	.eccbytes = 8,
-	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
-	.oobfree = { {0, 5}, {6, 2} },
-};
-
-/* 512-byte page with 4-bit ECC, 16-bit */
-static struct nand_ecclayout oob_512_16bit_ecc4 = {
-	.eccbytes = 8,
-	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
-	.oobfree = { {2, 6}, },
-};
-
-/* 2048-byte page size with 4-bit ECC */
-static struct nand_ecclayout oob_2048_ecc4 = {
-	.eccbytes = 32,
-	.eccpos = {
-		8, 9, 10, 11, 12, 13, 14, 15,
-		16, 17, 18, 19, 20, 21, 22, 23,
-		24, 25, 26, 27, 28, 29, 30, 31,
-		32, 33, 34, 35, 36, 37, 38, 39,
-	},
-	.oobfree = { {2, 6}, {40, 24} },
-};
-
-/* 4096-byte page size with 4-bit ECC */
-static struct nand_ecclayout oob_4096_ecc4 = {
-	.eccbytes = 64,
-	.eccpos = {
-		8, 9, 10, 11, 12, 13, 14, 15,
-		16, 17, 18, 19, 20, 21, 22, 23,
-		24, 25, 26, 27, 28, 29, 30, 31,
-		32, 33, 34, 35, 36, 37, 38, 39,
-		40, 41, 42, 43, 44, 45, 46, 47,
-		48, 49, 50, 51, 52, 53, 54, 55,
-		56, 57, 58, 59, 60, 61, 62, 63,
-		64, 65, 66, 67, 68, 69, 70, 71,
-	},
-	.oobfree = { {2, 6}, {72, 56} },
-};
-
-/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
-static struct nand_ecclayout oob_4096_ecc8 = {
-	.eccbytes = 128,
-	.eccpos = {
-		8, 9, 10, 11, 12, 13, 14, 15,
-		16, 17, 18, 19, 20, 21, 22, 23,
-		24, 25, 26, 27, 28, 29, 30, 31,
-		32, 33, 34, 35, 36, 37, 38, 39,
-		40, 41, 42, 43, 44, 45, 46, 47,
-		48, 49, 50, 51, 52, 53, 54, 55,
-		56, 57, 58, 59, 60, 61, 62, 63,
-		64, 65, 66, 67, 68, 69, 70, 71,
-		72, 73, 74, 75, 76, 77, 78, 79,
-		80, 81, 82, 83, 84, 85, 86, 87,
-		88, 89, 90, 91, 92, 93, 94, 95,
-		96, 97, 98, 99, 100, 101, 102, 103,
-		104, 105, 106, 107, 108, 109, 110, 111,
-		112, 113, 114, 115, 116, 117, 118, 119,
-		120, 121, 122, 123, 124, 125, 126, 127,
-		128, 129, 130, 131, 132, 133, 134, 135,
-	},
-	.oobfree = { {2, 6}, {136, 82} },
-};
-
-/* 8192-byte page size with 4-bit ECC */
-static struct nand_ecclayout oob_8192_ecc4 = {
-	.eccbytes = 128,
-	.eccpos = {
-		8, 9, 10, 11, 12, 13, 14, 15,
-		16, 17, 18, 19, 20, 21, 22, 23,
-		24, 25, 26, 27, 28, 29, 30, 31,
-		32, 33, 34, 35, 36, 37, 38, 39,
-		40, 41, 42, 43, 44, 45, 46, 47,
-		48, 49, 50, 51, 52, 53, 54, 55,
-		56, 57, 58, 59, 60, 61, 62, 63,
-		64, 65, 66, 67, 68, 69, 70, 71,
-		72, 73, 74, 75, 76, 77, 78, 79,
-		80, 81, 82, 83, 84, 85, 86, 87,
-		88, 89, 90, 91, 92, 93, 94, 95,
-		96, 97, 98, 99, 100, 101, 102, 103,
-		104, 105, 106, 107, 108, 109, 110, 111,
-		112, 113, 114, 115, 116, 117, 118, 119,
-		120, 121, 122, 123, 124, 125, 126, 127,
-		128, 129, 130, 131, 132, 133, 134, 135,
-	},
-	.oobfree = { {2, 6}, {136, 208} },
-};
-
-/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
-static struct nand_ecclayout oob_8192_ecc8 = {
-	.eccbytes = 256,
-	.eccpos = {
-		8, 9, 10, 11, 12, 13, 14, 15,
-		16, 17, 18, 19, 20, 21, 22, 23,
-		24, 25, 26, 27, 28, 29, 30, 31,
-		32, 33, 34, 35, 36, 37, 38, 39,
-		40, 41, 42, 43, 44, 45, 46, 47,
-		48, 49, 50, 51, 52, 53, 54, 55,
-		56, 57, 58, 59, 60, 61, 62, 63,
-		64, 65, 66, 67, 68, 69, 70, 71,
-		72, 73, 74, 75, 76, 77, 78, 79,
-		80, 81, 82, 83, 84, 85, 86, 87,
-		88, 89, 90, 91, 92, 93, 94, 95,
-		96, 97, 98, 99, 100, 101, 102, 103,
-		104, 105, 106, 107, 108, 109, 110, 111,
-		112, 113, 114, 115, 116, 117, 118, 119,
-		120, 121, 122, 123, 124, 125, 126, 127,
-		128, 129, 130, 131, 132, 133, 134, 135,
-		136, 137, 138, 139, 140, 141, 142, 143,
-		144, 145, 146, 147, 148, 149, 150, 151,
-		152, 153, 154, 155, 156, 157, 158, 159,
-		160, 161, 162, 163, 164, 165, 166, 167,
-		168, 169, 170, 171, 172, 173, 174, 175,
-		176, 177, 178, 179, 180, 181, 182, 183,
-		184, 185, 186, 187, 188, 189, 190, 191,
-		192, 193, 194, 195, 196, 197, 198, 199,
-		200, 201, 202, 203, 204, 205, 206, 207,
-		208, 209, 210, 211, 212, 213, 214, 215,
-		216, 217, 218, 219, 220, 221, 222, 223,
-		224, 225, 226, 227, 228, 229, 230, 231,
-		232, 233, 234, 235, 236, 237, 238, 239,
-		240, 241, 242, 243, 244, 245, 246, 247,
-		248, 249, 250, 251, 252, 253, 254, 255,
-		256, 257, 258, 259, 260, 261, 262, 263,
-	},
-	.oobfree = { {2, 6}, {264, 80} },
-};
-
 /*
  * Generic flash bbt descriptors
  */
@@ -223,6 +93,57 @@
 	.pattern = mirror_pattern,
 };
 
+static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 8;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (mtd->writesize == 512 &&
+	    !(chip->options & NAND_BUSWIDTH_16)) {
+		if (!section) {
+			oobregion->offset = 0;
+			oobregion->length = 5;
+		} else {
+			oobregion->offset = 6;
+			oobregion->length = 2;
+		}
+
+		return 0;
+	}
+
+	if (!section) {
+		oobregion->offset = 2;
+		oobregion->length = 6;
+	} else {
+		oobregion->offset = chip->ecc.total + 8;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
+	.ecc = fsl_ifc_ooblayout_ecc,
+	.free = fsl_ifc_ooblayout_free,
+};
+
 /*
  * Set up the IFC hardware block and page address fields, and the ifc nand
  * structure addr field to point to the correct IFC buffer in memory
@@ -232,7 +153,7 @@
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
 	int buf_num;
 
 	ifc_nand_ctrl->page = page_addr;
@@ -257,18 +178,22 @@
 	u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
 	u32 __iomem *mainarea = (u32 __iomem *)addr;
 	u8 __iomem *oob = addr + mtd->writesize;
-	int i;
+	struct mtd_oob_region oobregion = { };
+	int i, section = 0;
 
 	for (i = 0; i < mtd->writesize / 4; i++) {
 		if (__raw_readl(&mainarea[i]) != 0xffffffff)
 			return 0;
 	}
 
-	for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
-		int pos = chip->ecc.layout->eccpos[i];
+	mtd_ooblayout_ecc(mtd, section++, &oobregion);
+	while (oobregion.length) {
+		for (i = 0; i < oobregion.length; i++) {
+			if (__raw_readb(&oob[oobregion.offset + i]) != 0xff)
+				return 0;
+		}
 
-		if (__raw_readb(&oob[pos]) != 0xff)
-			return 0;
+		mtd_ooblayout_ecc(mtd, section++, &oobregion);
 	}
 
 	return 1;
@@ -295,7 +220,7 @@
 	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
 	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
 	u32 eccstat[4];
 	int i;
 
@@ -371,7 +296,7 @@
 {
 	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
 
 	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
 	if (mtd->writesize > 512) {
@@ -411,7 +336,7 @@
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
 
 	/* clear the read buffer */
 	ifc_nand_ctrl->read_bytes = 0;
@@ -723,7 +648,7 @@
 {
 	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
 	u32 nand_fsr;
 
 	/* Use READ_STATUS command, but wait for the device to be ready */
@@ -808,8 +733,8 @@
 							chip->ecc.bytes);
 	dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
 							chip->ecc.total);
-	dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
-							chip->ecc.layout);
+	dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
+							mtd->ooblayout);
 	dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
 	dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
 	dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
@@ -825,39 +750,42 @@
 static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
 {
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
 	uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
 	uint32_t cs = priv->bank;
 
 	/* Save CSOR and CSOR_ext */
-	csor = ifc_in32(&ifc->csor_cs[cs].csor);
-	csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
+	csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
+	csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
 
 	/* change PageSize 8K and SpareSize 1K */
 	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
-	ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
-	ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
+	ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
 
 	/* READID */
 	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
-		  &ifc->ifc_nand.nand_fir0);
+		    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+		    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+		    &ifc_runtime->ifc_nand.nand_fir0);
 	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
-		  &ifc->ifc_nand.nand_fcr0);
-	ifc_out32(0x0, &ifc->ifc_nand.row3);
+		    &ifc_runtime->ifc_nand.nand_fcr0);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
 
-	ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
 
 	/* Program ROW0/COL0 */
-	ifc_out32(0x0, &ifc->ifc_nand.row0);
-	ifc_out32(0x0, &ifc->ifc_nand.col0);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
 
 	/* set the chip select for NAND Transaction */
-	ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+	ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
+		&ifc_runtime->ifc_nand.nand_csel);
 
 	/* start read seq */
-	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
+		&ifc_runtime->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -867,17 +795,17 @@
 		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
 
 	/* Restore CSOR and CSOR_ext */
-	ifc_out32(csor, &ifc->csor_cs[cs].csor);
-	ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
+	ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
 }
 
 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
 {
 	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
 	struct nand_chip *chip = &priv->chip;
 	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
-	struct nand_ecclayout *layout;
 	u32 csor;
 
 	/* Fill in fsl_ifc_mtd structure */
@@ -886,7 +814,8 @@
 
 	/* fill in nand_chip structure */
 	/* set up function call table */
-	if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+	if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
+		& CSPR_PORT_SIZE_16)
 		chip->read_byte = fsl_ifc_read_byte16;
 	else
 		chip->read_byte = fsl_ifc_read_byte;
@@ -900,13 +829,14 @@
 	chip->bbt_td = &bbt_main_descr;
 	chip->bbt_md = &bbt_mirror_descr;
 
-	ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
 
 	/* set up nand options */
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->options = NAND_NO_SUBPAGE_WRITE;
 
-	if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+	if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
+		& CSPR_PORT_SIZE_16) {
 		chip->read_byte = fsl_ifc_read_byte16;
 		chip->options |= NAND_BUSWIDTH_16;
 	} else {
@@ -919,20 +849,11 @@
 	chip->ecc.read_page = fsl_ifc_read_page;
 	chip->ecc.write_page = fsl_ifc_write_page;
 
-	csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
-
-	/* Hardware generates ECC per 512 Bytes */
-	chip->ecc.size = 512;
-	chip->ecc.bytes = 8;
-	chip->ecc.strength = 4;
+	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
 
 	switch (csor & CSOR_NAND_PGS_MASK) {
 	case CSOR_NAND_PGS_512:
-		if (chip->options & NAND_BUSWIDTH_16) {
-			layout = &oob_512_16bit_ecc4;
-		} else {
-			layout = &oob_512_8bit_ecc4;
-
+		if (!(chip->options & NAND_BUSWIDTH_16)) {
 			/* Avoid conflict with bad block marker */
 			bbt_main_descr.offs = 0;
 			bbt_mirror_descr.offs = 0;
@@ -942,35 +863,16 @@
 		break;
 
 	case CSOR_NAND_PGS_2K:
-		layout = &oob_2048_ecc4;
 		priv->bufnum_mask = 3;
 		break;
 
 	case CSOR_NAND_PGS_4K:
-		if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
-		    CSOR_NAND_ECC_MODE_4) {
-			layout = &oob_4096_ecc4;
-		} else {
-			layout = &oob_4096_ecc8;
-			chip->ecc.bytes = 16;
-			chip->ecc.strength = 8;
-		}
-
 		priv->bufnum_mask = 1;
 		break;
 
 	case CSOR_NAND_PGS_8K:
-		if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
-		    CSOR_NAND_ECC_MODE_4) {
-			layout = &oob_8192_ecc4;
-		} else {
-			layout = &oob_8192_ecc8;
-			chip->ecc.bytes = 16;
-			chip->ecc.strength = 8;
-		}
-
 		priv->bufnum_mask = 0;
-	break;
+		break;
 
 	default:
 		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
@@ -980,9 +882,20 @@
 	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
 	if (csor & CSOR_NAND_ECC_DEC_EN) {
 		chip->ecc.mode = NAND_ECC_HW;
-		chip->ecc.layout = layout;
+		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+		/* Hardware generates ECC per 512 Bytes */
+		chip->ecc.size = 512;
+		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+			chip->ecc.bytes = 8;
+			chip->ecc.strength = 4;
+		} else {
+			chip->ecc.bytes = 16;
+			chip->ecc.strength = 8;
+		}
 	} else {
 		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_HAMMING;
 	}
 
 	if (ctrl->version == FSL_IFC_VERSION_1_1_0)
@@ -1007,10 +920,10 @@
 	return 0;
 }
 
-static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
 		      phys_addr_t addr)
 {
-	u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
+	u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
 
 	if (!(cspr & CSPR_V))
 		return 0;
@@ -1024,7 +937,7 @@
 
 static int fsl_ifc_nand_probe(struct platform_device *dev)
 {
-	struct fsl_ifc_regs __iomem *ifc;
+	struct fsl_ifc_runtime __iomem *ifc;
 	struct fsl_ifc_mtd *priv;
 	struct resource res;
 	static const char *part_probe_types[]
@@ -1034,9 +947,9 @@
 	struct device_node *node = dev->dev.of_node;
 	struct mtd_info *mtd;
 
-	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
 		return -ENODEV;
-	ifc = fsl_ifc_ctrl_dev->regs;
+	ifc = fsl_ifc_ctrl_dev->rregs;
 
 	/* get, allocate and map the memory resource */
 	ret = of_address_to_resource(node, 0, &res);
@@ -1047,7 +960,7 @@
 
 	/* find which chip select it is connected to */
 	for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
-		if (match_bank(ifc, bank, res.start))
+		if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
 			break;
 	}
 
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index cafd12d..d85fa25 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -170,6 +170,7 @@
 	fun->chip.read_buf = fun_read_buf;
 	fun->chip.write_buf = fun_write_buf;
 	fun->chip.ecc.mode = NAND_ECC_SOFT;
+	fun->chip.ecc.algo = NAND_ECC_HAMMING;
 	if (fun->mchip_count > 1)
 		fun->chip.select_chip = fun_select_chip;
 
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1bdcd4f..d4f454a 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -39,210 +39,41 @@
 #include <linux/amba/bus.h>
 #include <mtd/mtd-abi.h>
 
-static struct nand_ecclayout fsmc_ecc1_128_layout = {
-	.eccbytes = 24,
-	.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
-		66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
-	.oobfree = {
-		{.offset = 8, .length = 8},
-		{.offset = 24, .length = 8},
-		{.offset = 40, .length = 8},
-		{.offset = 56, .length = 8},
-		{.offset = 72, .length = 8},
-		{.offset = 88, .length = 8},
-		{.offset = 104, .length = 8},
-		{.offset = 120, .length = 8}
-	}
-};
+static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
 
-static struct nand_ecclayout fsmc_ecc1_64_layout = {
-	.eccbytes = 12,
-	.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
-	.oobfree = {
-		{.offset = 8, .length = 8},
-		{.offset = 24, .length = 8},
-		{.offset = 40, .length = 8},
-		{.offset = 56, .length = 8},
-	}
-};
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
 
-static struct nand_ecclayout fsmc_ecc1_16_layout = {
-	.eccbytes = 3,
-	.eccpos = {2, 3, 4},
-	.oobfree = {
-		{.offset = 8, .length = 8},
-	}
-};
+	oobregion->offset = (section * 16) + 2;
+	oobregion->length = 3;
 
-/*
- * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
- * of OB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_256_layout = {
-	.eccbytes = 208,
-	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
-		9,  10,  11,  12,  13,  14,
-		18,  19,  20,  21,  22,  23,  24,
-		25,  26,  27,  28,  29,  30,
-		34,  35,  36,  37,  38,  39,  40,
-		41,  42,  43,  44,  45,  46,
-		50,  51,  52,  53,  54,  55,  56,
-		57,  58,  59,  60,  61,  62,
-		66,  67,  68,  69,  70,  71,  72,
-		73,  74,  75,  76,  77,  78,
-		82,  83,  84,  85,  86,  87,  88,
-		89,  90,  91,  92,  93,  94,
-		98,  99, 100, 101, 102, 103, 104,
-		105, 106, 107, 108, 109, 110,
-		114, 115, 116, 117, 118, 119, 120,
-		121, 122, 123, 124, 125, 126,
-		130, 131, 132, 133, 134, 135, 136,
-		137, 138, 139, 140, 141, 142,
-		146, 147, 148, 149, 150, 151, 152,
-		153, 154, 155, 156, 157, 158,
-		162, 163, 164, 165, 166, 167, 168,
-		169, 170, 171, 172, 173, 174,
-		178, 179, 180, 181, 182, 183, 184,
-		185, 186, 187, 188, 189, 190,
-		194, 195, 196, 197, 198, 199, 200,
-		201, 202, 203, 204, 205, 206,
-		210, 211, 212, 213, 214, 215, 216,
-		217, 218, 219, 220, 221, 222,
-		226, 227, 228, 229, 230, 231, 232,
-		233, 234, 235, 236, 237, 238,
-		242, 243, 244, 245, 246, 247, 248,
-		249, 250, 251, 252, 253, 254
-	},
-	.oobfree = {
-		{.offset = 15, .length = 3},
-		{.offset = 31, .length = 3},
-		{.offset = 47, .length = 3},
-		{.offset = 63, .length = 3},
-		{.offset = 79, .length = 3},
-		{.offset = 95, .length = 3},
-		{.offset = 111, .length = 3},
-		{.offset = 127, .length = 3},
-		{.offset = 143, .length = 3},
-		{.offset = 159, .length = 3},
-		{.offset = 175, .length = 3},
-		{.offset = 191, .length = 3},
-		{.offset = 207, .length = 3},
-		{.offset = 223, .length = 3},
-		{.offset = 239, .length = 3},
-		{.offset = 255, .length = 1}
-	}
-};
+	return 0;
+}
 
-/*
- * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
- * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_224_layout = {
-	.eccbytes = 104,
-	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
-		9,  10,  11,  12,  13,  14,
-		18,  19,  20,  21,  22,  23,  24,
-		25,  26,  27,  28,  29,  30,
-		34,  35,  36,  37,  38,  39,  40,
-		41,  42,  43,  44,  45,  46,
-		50,  51,  52,  53,  54,  55,  56,
-		57,  58,  59,  60,  61,  62,
-		66,  67,  68,  69,  70,  71,  72,
-		73,  74,  75,  76,  77,  78,
-		82,  83,  84,  85,  86,  87,  88,
-		89,  90,  91,  92,  93,  94,
-		98,  99, 100, 101, 102, 103, 104,
-		105, 106, 107, 108, 109, 110,
-		114, 115, 116, 117, 118, 119, 120,
-		121, 122, 123, 124, 125, 126
-	},
-	.oobfree = {
-		{.offset = 15, .length = 3},
-		{.offset = 31, .length = 3},
-		{.offset = 47, .length = 3},
-		{.offset = 63, .length = 3},
-		{.offset = 79, .length = 3},
-		{.offset = 95, .length = 3},
-		{.offset = 111, .length = 3},
-		{.offset = 127, .length = 97}
-	}
-};
+static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
 
-/*
- * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
- * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_128_layout = {
-	.eccbytes = 104,
-	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
-		9,  10,  11,  12,  13,  14,
-		18,  19,  20,  21,  22,  23,  24,
-		25,  26,  27,  28,  29,  30,
-		34,  35,  36,  37,  38,  39,  40,
-		41,  42,  43,  44,  45,  46,
-		50,  51,  52,  53,  54,  55,  56,
-		57,  58,  59,  60,  61,  62,
-		66,  67,  68,  69,  70,  71,  72,
-		73,  74,  75,  76,  77,  78,
-		82,  83,  84,  85,  86,  87,  88,
-		89,  90,  91,  92,  93,  94,
-		98,  99, 100, 101, 102, 103, 104,
-		105, 106, 107, 108, 109, 110,
-		114, 115, 116, 117, 118, 119, 120,
-		121, 122, 123, 124, 125, 126
-	},
-	.oobfree = {
-		{.offset = 15, .length = 3},
-		{.offset = 31, .length = 3},
-		{.offset = 47, .length = 3},
-		{.offset = 63, .length = 3},
-		{.offset = 79, .length = 3},
-		{.offset = 95, .length = 3},
-		{.offset = 111, .length = 3},
-		{.offset = 127, .length = 1}
-	}
-};
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
 
-/*
- * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
- * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_64_layout = {
-	.eccbytes = 52,
-	.eccpos = {  2,   3,   4,   5,   6,   7,   8,
-		9,  10,  11,  12,  13,  14,
-		18,  19,  20,  21,  22,  23,  24,
-		25,  26,  27,  28,  29,  30,
-		34,  35,  36,  37,  38,  39,  40,
-		41,  42,  43,  44,  45,  46,
-		50,  51,  52,  53,  54,  55,  56,
-		57,  58,  59,  60,  61,  62,
-	},
-	.oobfree = {
-		{.offset = 15, .length = 3},
-		{.offset = 31, .length = 3},
-		{.offset = 47, .length = 3},
-		{.offset = 63, .length = 1},
-	}
-};
+	oobregion->offset = (section * 16) + 8;
 
-/*
- * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
- * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One
- * byte is free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_16_layout = {
-	.eccbytes = 13,
-	.eccpos = { 0,  1,  2,  3,  6,  7, 8,
-		9, 10, 11, 12, 13, 14
-	},
-	.oobfree = {
-		{.offset = 15, .length = 1},
-	}
+	if (section < chip->ecc.steps - 1)
+		oobregion->length = 8;
+	else
+		oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
+	.ecc = fsmc_ecc1_ooblayout_ecc,
+	.free = fsmc_ecc1_ooblayout_free,
 };
 
 /*
@@ -250,28 +81,46 @@
  * There are 13 bytes of ecc for every 512 byte block and it has to be read
  * consecutively and immediately after the 512 byte data block for hardware to
  * generate the error bit offsets in 512 byte data.
- * Managing the ecc bytes in the following way makes it easier for software to
- * read ecc bytes consecutive to data bytes. This way is similar to
- * oobfree structure maintained already in generic nand driver
  */
-static struct fsmc_eccplace fsmc_ecc4_lp_place = {
-	.eccplace = {
-		{.offset = 2, .length = 13},
-		{.offset = 18, .length = 13},
-		{.offset = 34, .length = 13},
-		{.offset = 50, .length = 13},
-		{.offset = 66, .length = 13},
-		{.offset = 82, .length = 13},
-		{.offset = 98, .length = 13},
-		{.offset = 114, .length = 13}
-	}
-};
+static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
 
-static struct fsmc_eccplace fsmc_ecc4_sp_place = {
-	.eccplace = {
-		{.offset = 0, .length = 4},
-		{.offset = 6, .length = 9}
-	}
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->length = chip->ecc.bytes;
+
+	if (!section && mtd->writesize <= 512)
+		oobregion->offset = 0;
+	else
+		oobregion->offset = (section * 16) + 2;
+
+	return 0;
+}
+
+static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 15;
+
+	if (section < chip->ecc.steps - 1)
+		oobregion->length = 3;
+	else
+		oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
+	.ecc = fsmc_ecc4_ooblayout_ecc,
+	.free = fsmc_ecc4_ooblayout_free,
 };
 
 /**
@@ -283,7 +132,6 @@
  * @partitions:		Partition info for a NAND Flash.
  * @nr_partitions:	Total number of partition of a NAND flash.
  *
- * @ecc_place:		ECC placing locations in oobfree type format.
  * @bank:		Bank number for probed device.
  * @clk:		Clock structure for FSMC.
  *
@@ -303,7 +151,6 @@
 	struct mtd_partition	*partitions;
 	unsigned int		nr_partitions;
 
-	struct fsmc_eccplace	*ecc_place;
 	unsigned int		bank;
 	struct device		*dev;
 	enum access_mode	mode;
@@ -710,8 +557,6 @@
 static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				 uint8_t *buf, int oob_required, int page)
 {
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-	struct fsmc_eccplace *ecc_place = host->ecc_place;
 	int i, j, s, stat, eccsize = chip->ecc.size;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
@@ -734,9 +579,15 @@
 		chip->read_buf(mtd, p, eccsize);
 
 		for (j = 0; j < eccbytes;) {
-			off = ecc_place->eccplace[group].offset;
-			len = ecc_place->eccplace[group].length;
-			group++;
+			struct mtd_oob_region oobregion;
+			int ret;
+
+			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
+			if (ret)
+				return ret;
+
+			off = oobregion.offset;
+			len = oobregion.length;
 
 			/*
 			 * length is intentionally kept a higher multiple of 2
@@ -1084,24 +935,10 @@
 	if (AMBA_REV_BITS(host->pid) >= 8) {
 		switch (mtd->oobsize) {
 		case 16:
-			nand->ecc.layout = &fsmc_ecc4_16_layout;
-			host->ecc_place = &fsmc_ecc4_sp_place;
-			break;
 		case 64:
-			nand->ecc.layout = &fsmc_ecc4_64_layout;
-			host->ecc_place = &fsmc_ecc4_lp_place;
-			break;
 		case 128:
-			nand->ecc.layout = &fsmc_ecc4_128_layout;
-			host->ecc_place = &fsmc_ecc4_lp_place;
-			break;
 		case 224:
-			nand->ecc.layout = &fsmc_ecc4_224_layout;
-			host->ecc_place = &fsmc_ecc4_lp_place;
-			break;
 		case 256:
-			nand->ecc.layout = &fsmc_ecc4_256_layout;
-			host->ecc_place = &fsmc_ecc4_lp_place;
 			break;
 		default:
 			dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
@@ -1109,6 +946,8 @@
 			ret = -EINVAL;
 			goto err_probe;
 		}
+
+		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
 	} else {
 		switch (nand->ecc.mode) {
 		case NAND_ECC_HW:
@@ -1119,9 +958,11 @@
 			nand->ecc.strength = 1;
 			break;
 
-		case NAND_ECC_SOFT_BCH:
-			dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
-			break;
+		case NAND_ECC_SOFT:
+			if (nand->ecc.algo == NAND_ECC_BCH) {
+				dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
+				break;
+			}
 
 		default:
 			dev_err(&pdev->dev, "Unsupported ECC mode!\n");
@@ -1132,16 +973,13 @@
 		 * Don't set layout for BCH4 SW ECC. This will be
 		 * generated later in nand_bch_init().
 		 */
-		if (nand->ecc.mode != NAND_ECC_SOFT_BCH) {
+		if (nand->ecc.mode == NAND_ECC_HW) {
 			switch (mtd->oobsize) {
 			case 16:
-				nand->ecc.layout = &fsmc_ecc1_16_layout;
-				break;
 			case 64:
-				nand->ecc.layout = &fsmc_ecc1_64_layout;
-				break;
 			case 128:
-				nand->ecc.layout = &fsmc_ecc1_128_layout;
+				mtd_set_ooblayout(mtd,
+						  &fsmc_ecc1_ooblayout_ops);
 				break;
 			default:
 				dev_warn(&pdev->dev,
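
The fsmc ECC1 callbacks encode the positions the removed tables spelled out byte by byte: 3 ECC bytes at offset 2 and 8 free bytes at offset 8 of every 16-byte chunk, with the last free region running to the end of the OOB. A hedged user-space check (64-byte OOB and 4 ECC steps assumed) that the arithmetic matches the removed fsmc_ecc1_64_layout:

#include <stdio.h>

int main(void)
{
	int steps = 4, oobsize = 64, section, i;

	printf("eccpos:");
	for (section = 0; section < steps; section++)
		for (i = 0; i < 3; i++)
			printf(" %d", (section * 16) + 2 + i);
	printf("\n");	/* expect: 2 3 4 18 19 20 34 35 36 50 51 52 */

	printf("oobfree:");
	for (section = 0; section < steps; section++) {
		int offset = (section * 16) + 8;
		int length = (section < steps - 1) ? 8 : oobsize - offset;

		printf(" {%d,%d}", offset, length);
	}
	printf("\n");	/* expect: {8,8} {24,8} {40,8} {56,8} */
	return 0;
}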
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index ded658f..6317f68 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -273,6 +273,7 @@
 	nand_set_flash_node(chip, pdev->dev.of_node);
 	chip->IO_ADDR_W		= chip->IO_ADDR_R;
 	chip->ecc.mode		= NAND_ECC_SOFT;
+	chip->ecc.algo		= NAND_ECC_HAMMING;
 	chip->options		= gpiomtd->plat.options;
 	chip->chip_delay	= gpiomtd->plat.chip_delay;
 	chip->cmd_ctrl		= gpio_nand_cmd_ctrl;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 8122c69..6e46156 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,7 +25,6 @@
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_mtd.h>
 #include "gpmi-nand.h"
 #include "bch-regs.h"
 
@@ -47,10 +46,44 @@
  * We may change the layout if we can get the ECC info from the datasheet,
  * else we will use all the (page + OOB).
  */
-static struct nand_ecclayout gpmi_hw_ecclayout = {
-	.eccbytes = 0,
-	.eccpos = { 0, },
-	.oobfree = { {.offset = 0, .length = 0} }
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *geo = &this->bch_geometry;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = geo->page_size - mtd->writesize;
+
+	return 0;
+}
+
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *geo = &this->bch_geometry;
+
+	if (section)
+		return -ERANGE;
+
+	/* The available oob size we have. */
+	if (geo->page_size < mtd->writesize + mtd->oobsize) {
+		oobregion->offset = geo->page_size - mtd->writesize;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
+	.ecc = gpmi_ooblayout_ecc,
+	.free = gpmi_ooblayout_free,
 };
 
 static const struct gpmi_devdata gpmi_devdata_imx23 = {
@@ -141,7 +174,6 @@
 	struct bch_geometry *geo = &this->bch_geometry;
 	struct nand_chip *chip = &this->nand;
 	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
 	unsigned int block_mark_bit_offset;
 
 	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
@@ -229,12 +261,6 @@
 	geo->page_size = mtd->writesize + geo->metadata_size +
 		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
 
-	/* The available oob size we have. */
-	if (geo->page_size < mtd->writesize + mtd->oobsize) {
-		of->offset = geo->page_size - mtd->writesize;
-		of->length = mtd->oobsize - of->offset;
-	}
-
 	geo->payload_size = mtd->writesize;
 
 	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
@@ -797,6 +823,7 @@
 
 	this->cmd_buffer	= NULL;
 	this->data_buffer_dma	= NULL;
+	this->raw_buffer	= NULL;
 	this->page_buffer_virt	= NULL;
 	this->page_buffer_size	=  0;
 }
@@ -1037,14 +1064,87 @@
 	/* Loop over status bytes, accumulating ECC status. */
 	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
+	read_page_swap_end(this, buf, nfc_geo->payload_size,
+			   this->payload_virt, this->payload_phys,
+			   nfc_geo->payload_size,
+			   payload_virt, payload_phys);
+
 	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
 		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
 			continue;
 
 		if (*status == STATUS_UNCORRECTABLE) {
+			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+			u8 *eccbuf = this->raw_buffer;
+			int offset, bitoffset;
+			int eccbytes;
+			int flips;
+
+			/* Read ECC bytes into our internal raw_buffer */
+			offset = nfc_geo->metadata_size * 8;
+			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
+			offset -= eccbits;
+			bitoffset = offset % 8;
+			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+			offset /= 8;
+			eccbytes -= offset;
+			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+			chip->read_buf(mtd, eccbuf, eccbytes);
+
+			/*
+			 * ECC data are not byte aligned and we may have
+			 * in-band data in the first and last byte of
+			 * eccbuf. Set non-eccbits to one so that
+			 * nand_check_erased_ecc_chunk() does not count them
+			 * as bitflips.
+			 */
+			if (bitoffset)
+				eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+			bitoffset = (bitoffset + eccbits) % 8;
+			if (bitoffset)
+				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+			/*
+			 * The ECC hardware has an uncorrectable ECC status
+			 * code in case we have bitflips in an erased page. As
+			 * nothing was written into this subpage the ECC is
+			 * obviously wrong and we cannot trust it. We assume
+			 * at this point that we are reading an erased page and
+			 * try to correct the bitflips in the buffer up to
+			 * ecc_strength bitflips. If this is a page with random
+			 * data, we exceed this number of bitflips and have an
+			 * ECC failure. Otherwise we use the corrected buffer.
+			 */
+			if (i == 0) {
+				/* The first block includes metadata */
+				flips = nand_check_erased_ecc_chunk(
+						buf + i * nfc_geo->ecc_chunk_size,
+						nfc_geo->ecc_chunk_size,
+						eccbuf, eccbytes,
+						auxiliary_virt,
+						nfc_geo->metadata_size,
+						nfc_geo->ecc_strength);
+			} else {
+				flips = nand_check_erased_ecc_chunk(
+						buf + i * nfc_geo->ecc_chunk_size,
+						nfc_geo->ecc_chunk_size,
+						eccbuf, eccbytes,
+						NULL, 0,
+						nfc_geo->ecc_strength);
+			}
+
+			if (flips > 0) {
+				max_bitflips = max_t(unsigned int, max_bitflips,
+						     flips);
+				mtd->ecc_stats.corrected += flips;
+				continue;
+			}
+
 			mtd->ecc_stats.failed++;
 			continue;
 		}
+
 		mtd->ecc_stats.corrected += *status;
 		max_bitflips = max_t(unsigned int, max_bitflips, *status);
 	}
@@ -1064,11 +1164,6 @@
 		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
 	}
 
-	read_page_swap_end(this, buf, nfc_geo->payload_size,
-			this->payload_virt, this->payload_phys,
-			nfc_geo->payload_size,
-			payload_virt, payload_phys);
-
 	return max_bitflips;
 }
 
@@ -1327,18 +1422,19 @@
 static int
 gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
 {
-	struct nand_oobfree *of = mtd->ecclayout->oobfree;
+	struct mtd_oob_region of = { };
 	int status = 0;
 
 	/* Do we have available oob area? */
-	if (!of->length)
+	mtd_ooblayout_free(mtd, 0, &of);
+	if (!of.length)
 		return -EPERM;
 
 	if (!nand_is_slc(chip))
 		return -EPERM;
 
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
-	chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
+	chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 
 	status = chip->waitfunc(mtd, chip);
@@ -1840,6 +1936,7 @@
 static int gpmi_init_last(struct gpmi_nand_data *this)
 {
 	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
 	struct bch_geometry *bch_geo = &this->bch_geometry;
 	int ret;
@@ -1861,7 +1958,7 @@
 	ecc->mode	= NAND_ECC_HW;
 	ecc->size	= bch_geo->ecc_chunk_size;
 	ecc->strength	= bch_geo->ecc_strength;
-	ecc->layout	= &gpmi_hw_ecclayout;
+	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
 
 	/*
 	 * We only enable the subpage read when:
@@ -1914,16 +2011,6 @@
 	/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
 	this->swap_block_mark = !GPMI_IS_MX23(this);
 
-	if (of_get_nand_on_flash_bbt(this->dev->of_node)) {
-		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
-
-		if (of_property_read_bool(this->dev->of_node,
-						"fsl,no-blockmark-swap"))
-			this->swap_block_mark = false;
-	}
-	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
-		this->swap_block_mark ? "en" : "dis");
-
 	/*
 	 * Allocate a temporary DMA buffer for reading ID in the
 	 * nand_scan_ident().
@@ -1938,6 +2025,16 @@
 	if (ret)
 		goto err_out;
 
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+		chip->bbt_options |= NAND_BBT_NO_OOB;
+
+		if (of_property_read_bool(this->dev->of_node,
+						"fsl,no-blockmark-swap"))
+			this->swap_block_mark = false;
+	}
+	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+		this->swap_block_mark ? "en" : "dis");
+
 	ret = gpmi_init_last(this);
 	if (ret)
 		goto err_out;
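
The new gpmi erased-page path has to pull ECC bytes that are not byte aligned out of the OOB: the chunk index determines the starting bit, the number of bytes to read, and which bits of the first and last byte must be forced to 1 so nand_check_erased_ecc_chunk() does not count them as bitflips. A hedged user-space model of that arithmetic; the BCH geometry used here (10-byte metadata, 512-byte chunks, strength 14, gf_len 13) is an assumption for illustration only:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* 8-bit equivalent of the kernel's GENMASK(h, l): bits h..l set. */
#define GENMASK(h, l)		((0xffu << (l)) & (0xffu >> (7 - (h))))

int main(void)
{
	int metadata_size = 10, chunk_size = 512, strength = 14, gf_len = 13;
	int eccbits = strength * gf_len;
	int i;

	for (i = 0; i < 4; i++) {
		int offset = metadata_size * 8;
		int bitoffset, eccbytes;
		unsigned int first_mask = 0, last_mask = 0;

		offset += ((8 * chunk_size) + eccbits) * (i + 1);
		offset -= eccbits;
		bitoffset = offset % 8;
		eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
		offset /= 8;
		eccbytes -= offset;

		if (bitoffset)
			first_mask = GENMASK(bitoffset - 1, 0);
		bitoffset = (bitoffset + eccbits) % 8;
		if (bitoffset)
			last_mask = GENMASK(7, bitoffset);

		printf("chunk %d: byte offset %d, %d ecc bytes, masks %#x/%#x\n",
		       i, offset, eccbytes, first_mask, last_mask);
	}
	return 0;
}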
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 96502b6..9432546 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -19,7 +19,6 @@
  * GNU General Public License for more details.
  */
 #include <linux/of.h>
-#include <linux/of_mtd.h>
 #include <linux/mtd/mtd.h>
 #include <linux/sizes.h>
 #include <linux/clk.h>
@@ -631,8 +630,28 @@
 	hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
 }
 
-static struct nand_ecclayout nand_ecc_2K_16bits = {
-	.oobfree = { {2, 6} },
+static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	/* FIXME: add ECC bytes position */
+	return -ENOTSUPP;
+}
+
+static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 2;
+	oobregion->length = 6;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
+	.ecc = hisi_ooblayout_ecc,
+	.free = hisi_ooblayout_free,
 };
 
 static int hisi_nfc_ecc_probe(struct hinfc_host *host)
@@ -642,10 +661,9 @@
 	struct device *dev = host->dev;
 	struct nand_chip *chip = &host->chip;
 	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct device_node *np = host->dev->of_node;
 
-	size = of_get_nand_ecc_step_size(np);
-	strength = of_get_nand_ecc_strength(np);
+	size = chip->ecc.size;
+	strength = chip->ecc.strength;
 	if (size != 1024) {
 		dev_err(dev, "error ecc size: %d\n", size);
 		return -EINVAL;
@@ -668,7 +686,7 @@
 	case 16:
 		ecc_bits = 6;
 		if (mtd->writesize == 2048)
-			chip->ecc.layout = &nand_ecc_2K_16bits;
+			mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
 
 		/* TODO: add more page size support */
 		break;
@@ -695,7 +713,7 @@
 
 static int hisi_nfc_probe(struct platform_device *pdev)
 {
-	int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP;
+	int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP;
 	struct device *dev = &pdev->dev;
 	struct hinfc_host *host;
 	struct nand_chip  *chip;
@@ -747,12 +765,6 @@
 	chip->read_buf		= hisi_nfc_read_buf;
 	chip->chip_delay	= HINFC504_CHIP_DELAY;
 
-	chip->ecc.mode = of_get_nand_ecc_mode(np);
-
-	buswidth = of_get_nand_bus_width(np);
-	if (buswidth == 16)
-		chip->options |= NAND_BUSWIDTH_16;
-
 	hisi_nfc_host_init(host);
 
 	ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 673ceb2..5551c36 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -221,7 +221,6 @@
 	struct jz_nand *nand = mtd_to_jz_nand(mtd);
 	int i, error_count, index;
 	uint32_t reg, status, error;
-	uint32_t t;
 	unsigned int timeout = 1000;
 
 	for (i = 0; i < 9; ++i)
@@ -476,7 +475,7 @@
 	}
 
 	if (pdata && pdata->ident_callback) {
-		pdata->ident_callback(pdev, chip, &pdata->partitions,
+		pdata->ident_callback(pdev, mtd, &pdata->partitions,
 					&pdata->num_partitions);
 	}
 
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
index 755499c..d74f4ba 100644
--- a/drivers/mtd/nand/jz4780_bch.c
+++ b/drivers/mtd/nand/jz4780_bch.c
@@ -287,7 +287,6 @@
 	bch = platform_get_drvdata(pdev);
 	clk_prepare_enable(bch->clk);
 
-	bch->dev = &pdev->dev;
 	return bch;
 }
 
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index e1c016c..daf3c42 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -17,7 +17,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/gpio/consumer.h>
-#include <linux/of_mtd.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
@@ -56,8 +55,6 @@
 	struct nand_chip chip;
 	struct list_head chip_list;
 
-	struct nand_ecclayout ecclayout;
-
 	struct gpio_desc *busy_gpio;
 	struct gpio_desc *wp_gpio;
 	unsigned int reading: 1;
@@ -165,8 +162,7 @@
 	struct nand_chip *chip = &nand->chip;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
-	struct nand_ecclayout *layout = &nand->ecclayout;
-	u32 start, i;
+	int eccbytes;
 
 	chip->ecc.bytes = fls((1 + 8) * chip->ecc.size)	*
 				(chip->ecc.strength / 8);
@@ -183,7 +179,6 @@
 		chip->ecc.correct = jz4780_nand_ecc_correct;
 		/* fall through */
 	case NAND_ECC_SOFT:
-	case NAND_ECC_SOFT_BCH:
 		dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n",
 			(nfc->bch) ? "hardware BCH" : "software ECC",
 			chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
@@ -201,23 +196,17 @@
 		return 0;
 
 	/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
-	layout->eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+	eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
 
-	if (layout->eccbytes > mtd->oobsize - 2) {
+	if (eccbytes > mtd->oobsize - 2) {
 		dev_err(dev,
 			"invalid ECC config: required %d ECC bytes, but only %d are available",
-			layout->eccbytes, mtd->oobsize - 2);
+			eccbytes, mtd->oobsize - 2);
 		return -EINVAL;
 	}
 
-	start = mtd->oobsize - layout->eccbytes;
-	for (i = 0; i < layout->eccbytes; i++)
-		layout->eccpos[i] = start + i;
+	mtd->ooblayout = &nand_ooblayout_lp_ops;
 
-	layout->oobfree[0].offset = 2;
-	layout->oobfree[0].length = mtd->oobsize - layout->eccbytes - 2;
-
-	chip->ecc.layout = layout;
 	return 0;
 }
 
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index d8c3e7a..8523881 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -35,7 +35,6 @@
 #include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
-#include <linux/of_mtd.h>
 #include <linux/of_gpio.h>
 #include <linux/mtd/lpc32xx_mlc.h>
 #include <linux/io.h>
@@ -139,22 +138,37 @@
 	unsigned num_parts;
 };
 
-static struct nand_ecclayout lpc32xx_nand_oob = {
-	.eccbytes = 40,
-	.eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
-		   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-		   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-		   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
-	.oobfree = {
-		{ .offset = 0,
-		  .length = 6, },
-		{ .offset = 16,
-		  .length = 6, },
-		{ .offset = 32,
-		  .length = 6, },
-		{ .offset = 48,
-		  .length = 6, },
-		},
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
+	oobregion->length = nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = 16 * section;
+	oobregion->length = 16 - nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+	.ecc = lpc32xx_ooblayout_ecc,
+	.free = lpc32xx_ooblayout_free,
 };
 
 static struct nand_bbt_descr lpc32xx_nand_bbt = {
@@ -713,6 +727,7 @@
 	nand_chip->ecc.write_oob = lpc32xx_write_oob;
 	nand_chip->ecc.read_oob = lpc32xx_read_oob;
 	nand_chip->ecc.strength = 4;
+	nand_chip->ecc.bytes = 10;
 	nand_chip->waitfunc = lpc32xx_waitfunc;
 
 	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
@@ -751,7 +766,7 @@
 
 	nand_chip->ecc.mode = NAND_ECC_HW;
 	nand_chip->ecc.size = 512;
-	nand_chip->ecc.layout = &lpc32xx_nand_oob;
+	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
 	host->mlcsubpages = mtd->writesize / 512;
 
 	/* initially clear interrupt status */
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 3b8f373..8d3edc3 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -35,7 +35,6 @@
 #include <linux/mtd/nand_ecc.h>
 #include <linux/gpio.h>
 #include <linux/of.h>
-#include <linux/of_mtd.h>
 #include <linux/of_gpio.h>
 #include <linux/mtd/lpc32xx_slc.h>
 
@@ -146,13 +145,38 @@
  * NAND ECC Layout for small page NAND devices
  * Note: For large and huge page devices, the default layouts are used
  */
-static struct nand_ecclayout lpc32xx_nand_oob_16 = {
-	.eccbytes = 6,
-	.eccpos = {10, 11, 12, 13, 14, 15},
-	.oobfree = {
-		{ .offset = 0, .length = 4 },
-		{ .offset = 6, .length = 4 },
-	},
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 6;
+	oobregion->offset = 10;
+
+	return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 4;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = 4;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+	.ecc = lpc32xx_ooblayout_ecc,
+	.free = lpc32xx_ooblayout_free,
 };
 
 static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -194,7 +218,6 @@
 	uint32_t rwidth;
 	uint32_t rhold;
 	uint32_t rsetup;
-	bool use_bbt;
 	int wp_gpio;
 	struct mtd_partition *parts;
 	unsigned num_parts;
@@ -604,7 +627,8 @@
 					   int oob_required, int page)
 {
 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	int stat, i, status;
+	struct mtd_oob_region oobregion = { };
+	int stat, i, status, error;
 	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
 
 	/* Issue read command */
@@ -620,7 +644,11 @@
 	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
 
 	/* Pointer to ECC data retrieved from NAND spare area */
-	oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
+	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	if (error)
+		return error;
+
+	oobecc = chip->oob_poi + oobregion.offset;
 
 	for (i = 0; i < chip->ecc.steps; i++) {
 		stat = chip->ecc.correct(mtd, buf, oobecc,
@@ -666,7 +694,8 @@
 					    int oob_required, int page)
 {
 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
+	struct mtd_oob_region oobregion = { };
+	uint8_t *pb;
 	int error;
 
 	/* Write data, calculate ECC on outbound data */
@@ -678,6 +707,11 @@
 	 * The calculated ECC needs some manual work done to it before
 	 * committing it to NAND. Process the calculated ECC and place
 	 * the resultant values directly into the OOB buffer. */
+	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	if (error)
+		return error;
+
+	pb = chip->oob_poi + oobregion.offset;
 	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
 
 	/* Write ECC data to device */
@@ -747,7 +781,6 @@
 		return NULL;
 	}
 
-	ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
 	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
 
 	return ncfg;
@@ -875,26 +908,22 @@
 	 * custom BBT marker layout.
 	 */
 	if (mtd->writesize <= 512)
-		chip->ecc.layout = &lpc32xx_nand_oob_16;
+		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
 
 	/* These sizes remain the same regardless of page size */
 	chip->ecc.size = 256;
 	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
 	chip->ecc.prepad = chip->ecc.postpad = 0;
 
-	/* Avoid extra scan if using BBT, setup BBT support */
-	if (host->ncfg->use_bbt) {
-		chip->bbt_options |= NAND_BBT_USE_FLASH;
-
-		/*
-		 * Use a custom BBT marker setup for small page FLASH that
-		 * won't interfere with the ECC layout. Large and huge page
-		 * FLASH use the standard layout.
-		 */
-		if (mtd->writesize <= 512) {
-			chip->bbt_td = &bbt_smallpage_main_descr;
-			chip->bbt_md = &bbt_smallpage_mirror_descr;
-		}
+	/*
+	 * Use a custom BBT marker setup for small page FLASH that
+	 * won't interfere with the ECC layout. Large and huge page
+	 * FLASH use the standard layout.
+	 */
+	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+	    mtd->writesize <= 512) {
+		chip->bbt_td = &bbt_smallpage_main_descr;
+		chip->bbt_md = &bbt_smallpage_mirror_descr;
 	}
 
 	/*
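
The two lpc32xx conversions above follow the pattern used throughout this series: a static struct nand_ecclayout with hard-coded eccpos[]/oobfree[] tables is replaced by a pair of per-section callbacks (one describing ECC regions, one describing free OOB bytes) collected in a const struct mtd_ooblayout_ops and registered with mtd_set_ooblayout(). A hypothetical debug helper such as the one below (not part of the patch) can walk those sections with mtd_ooblayout_ecc() and recover exactly the byte positions the old eccpos[] arrays spelled out; on the small-page SLC layout above it prints 10..15, matching the removed lpc32xx_nand_oob_16 table.

    /*
     * Hypothetical helper, for illustration only: dump every ECC byte
     * position described by the ooblayout registered on this mtd.
     * mtd_ooblayout_ecc() returns -ERANGE once the section index runs
     * past the last ECC region, which terminates the loop.
     */
    static void dump_ecc_positions(struct mtd_info *mtd)
    {
        struct mtd_oob_region region;
        int section = 0, pos = 0, i;

        while (!mtd_ooblayout_ecc(mtd, section++, &region))
            for (i = 0; i < region.length; i++)
                pr_info("eccpos[%d] = %d\n", pos++,
                        region.offset + i);
    }
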
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 5d7843f..7eacb2f 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -710,6 +710,7 @@
 	chip->select_chip = mpc5121_nfc_select_chip;
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->ecc.mode = NAND_ECC_SOFT;
+	chip->ecc.algo = NAND_ECC_HAMMING;
 
 	/* Support external chip-select logic on ADS5121 board */
 	if (of_machine_is_compatible("fsl,mpc5121ads")) {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 854c832..5173fad 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -34,7 +34,6 @@
 #include <linux/completion.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_mtd.h>
 
 #include <asm/mach/flash.h>
 #include <linux/platform_data/mtd-mxc_nand.h>
@@ -149,7 +148,7 @@
 	int (*check_int)(struct mxc_nand_host *);
 	void (*irq_control)(struct mxc_nand_host *, int);
 	u32 (*get_ecc_status)(struct mxc_nand_host *);
-	struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+	const struct mtd_ooblayout_ops *ooblayout;
 	void (*select_chip)(struct mtd_info *mtd, int chip);
 	int (*correct_data)(struct mtd_info *mtd, u_char *dat,
 			u_char *read_ecc, u_char *calc_ecc);
@@ -200,73 +199,6 @@
 	struct mxc_nand_platform_data pdata;
 };
 
-/* OOB placement block for use with hardware ecc generation */
-static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
-	.eccbytes = 5,
-	.eccpos = {6, 7, 8, 9, 10},
-	.oobfree = {{0, 5}, {12, 4}, }
-};
-
-static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
-	.eccbytes = 20,
-	.eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
-		   38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
-	.oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
-};
-
-/* OOB description for 512 byte pages with 16 byte OOB */
-static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
-	.eccbytes = 1 * 9,
-	.eccpos = {
-		 7,  8,  9, 10, 11, 12, 13, 14, 15
-	},
-	.oobfree = {
-		{.offset = 0, .length = 5}
-	}
-};
-
-/* OOB description for 2048 byte pages with 64 byte OOB */
-static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
-	.eccbytes = 4 * 9,
-	.eccpos = {
-		 7,  8,  9, 10, 11, 12, 13, 14, 15,
-		23, 24, 25, 26, 27, 28, 29, 30, 31,
-		39, 40, 41, 42, 43, 44, 45, 46, 47,
-		55, 56, 57, 58, 59, 60, 61, 62, 63
-	},
-	.oobfree = {
-		{.offset = 2, .length = 4},
-		{.offset = 16, .length = 7},
-		{.offset = 32, .length = 7},
-		{.offset = 48, .length = 7}
-	}
-};
-
-/* OOB description for 4096 byte pages with 128 byte OOB */
-static struct nand_ecclayout nandv2_hw_eccoob_4k = {
-	.eccbytes = 8 * 9,
-	.eccpos = {
-		7,  8,  9, 10, 11, 12, 13, 14, 15,
-		23, 24, 25, 26, 27, 28, 29, 30, 31,
-		39, 40, 41, 42, 43, 44, 45, 46, 47,
-		55, 56, 57, 58, 59, 60, 61, 62, 63,
-		71, 72, 73, 74, 75, 76, 77, 78, 79,
-		87, 88, 89, 90, 91, 92, 93, 94, 95,
-		103, 104, 105, 106, 107, 108, 109, 110, 111,
-		119, 120, 121, 122, 123, 124, 125, 126, 127,
-	},
-	.oobfree = {
-		{.offset = 2, .length = 4},
-		{.offset = 16, .length = 7},
-		{.offset = 32, .length = 7},
-		{.offset = 48, .length = 7},
-		{.offset = 64, .length = 7},
-		{.offset = 80, .length = 7},
-		{.offset = 96, .length = 7},
-		{.offset = 112, .length = 7},
-	}
-};
-
 static const char * const part_probes[] = {
 	"cmdlinepart", "RedBoot", "ofpart", NULL };
 
@@ -942,6 +874,99 @@
 	}
 }
 
+static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 6;
+	oobregion->length = nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section > nand_chip->ecc.steps)
+		return -ERANGE;
+
+	if (!section) {
+		if (mtd->writesize <= 512) {
+			oobregion->offset = 0;
+			oobregion->length = 5;
+		} else {
+			oobregion->offset = 2;
+			oobregion->length = 4;
+		}
+	} else {
+		oobregion->offset = ((section - 1) * 16) +
+				    nand_chip->ecc.bytes + 6;
+		if (section < nand_chip->ecc.steps)
+			oobregion->length = (section * 16) + 6 -
+					    oobregion->offset;
+		else
+			oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
+	.ecc = mxc_v1_ooblayout_ecc,
+	.free = mxc_v1_ooblayout_free,
+};
+
+static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * stepsize) + 7;
+	oobregion->length = nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+	if (section > nand_chip->ecc.steps)
+		return -ERANGE;
+
+	if (!section) {
+		if (mtd->writesize <= 512) {
+			oobregion->offset = 0;
+			oobregion->length = 5;
+		} else {
+			oobregion->offset = 2;
+			oobregion->length = 4;
+		}
+	} else {
+		oobregion->offset = section * stepsize;
+		oobregion->length = 7;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
+	.ecc = mxc_v2_ooblayout_ecc,
+	.free = mxc_v2_ooblayout_free,
+};
+
 /*
  * v2 and v3 type controllers can do 4bit or 8bit ecc depending
  * on how much oob the nand chip has. For 8bit ecc we need at least
@@ -959,23 +984,6 @@
 		return 8;
 }
 
-static void ecc_8bit_layout_4k(struct nand_ecclayout *layout)
-{
-	int i, j;
-
-	layout->eccbytes = 8*18;
-	for (i = 0; i < 8; i++)
-		for (j = 0; j < 18; j++)
-			layout->eccpos[i*18 + j] = i*26 + j + 7;
-
-	layout->oobfree[0].offset = 2;
-	layout->oobfree[0].length = 4;
-	for (i = 1; i < 8; i++) {
-		layout->oobfree[i].offset = i*26;
-		layout->oobfree[i].length = 7;
-	}
-}
-
 static void preset_v1(struct mtd_info *mtd)
 {
 	struct nand_chip *nand_chip = mtd_to_nand(mtd);
@@ -1269,9 +1277,7 @@
 	.check_int = check_int_v1_v2,
 	.irq_control = irq_control_v1_v2,
 	.get_ecc_status = get_ecc_status_v1,
-	.ecclayout_512 = &nandv1_hw_eccoob_smallpage,
-	.ecclayout_2k = &nandv1_hw_eccoob_largepage,
-	.ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+	.ooblayout = &mxc_v1_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v1_v3,
 	.correct_data = mxc_nand_correct_data_v1,
 	.irqpending_quirk = 1,
@@ -1294,9 +1300,7 @@
 	.check_int = check_int_v1_v2,
 	.irq_control = irq_control_v1_v2,
 	.get_ecc_status = get_ecc_status_v1,
-	.ecclayout_512 = &nandv1_hw_eccoob_smallpage,
-	.ecclayout_2k = &nandv1_hw_eccoob_largepage,
-	.ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+	.ooblayout = &mxc_v1_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v1_v3,
 	.correct_data = mxc_nand_correct_data_v1,
 	.irqpending_quirk = 0,
@@ -1320,9 +1324,7 @@
 	.check_int = check_int_v1_v2,
 	.irq_control = irq_control_v1_v2,
 	.get_ecc_status = get_ecc_status_v2,
-	.ecclayout_512 = &nandv2_hw_eccoob_smallpage,
-	.ecclayout_2k = &nandv2_hw_eccoob_largepage,
-	.ecclayout_4k = &nandv2_hw_eccoob_4k,
+	.ooblayout = &mxc_v2_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v2,
 	.correct_data = mxc_nand_correct_data_v2_v3,
 	.irqpending_quirk = 0,
@@ -1346,9 +1348,7 @@
 	.check_int = check_int_v3,
 	.irq_control = irq_control_v3,
 	.get_ecc_status = get_ecc_status_v3,
-	.ecclayout_512 = &nandv2_hw_eccoob_smallpage,
-	.ecclayout_2k = &nandv2_hw_eccoob_largepage,
-	.ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+	.ooblayout = &mxc_v2_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v1_v3,
 	.correct_data = mxc_nand_correct_data_v2_v3,
 	.irqpending_quirk = 0,
@@ -1373,9 +1373,7 @@
 	.check_int = check_int_v3,
 	.irq_control = irq_control_v3,
 	.get_ecc_status = get_ecc_status_v3,
-	.ecclayout_512 = &nandv2_hw_eccoob_smallpage,
-	.ecclayout_2k = &nandv2_hw_eccoob_largepage,
-	.ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+	.ooblayout = &mxc_v2_ooblayout_ops,
 	.select_chip = mxc_nand_select_chip_v1_v3,
 	.correct_data = mxc_nand_correct_data_v2_v3,
 	.irqpending_quirk = 0,
@@ -1461,25 +1459,12 @@
 static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
 {
 	struct device_node *np = host->dev->of_node;
-	struct mxc_nand_platform_data *pdata = &host->pdata;
 	const struct of_device_id *of_id =
 		of_match_device(mxcnd_dt_ids, host->dev);
-	int buswidth;
 
 	if (!np)
 		return 1;
 
-	if (of_get_nand_ecc_mode(np) >= 0)
-		pdata->hw_ecc = 1;
-
-	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
-
-	buswidth = of_get_nand_bus_width(np);
-	if (buswidth < 0)
-		return buswidth;
-
-	pdata->width = buswidth / 8;
-
 	host->devtype_data = of_id->data;
 
 	return 0;
@@ -1576,27 +1561,22 @@
 
 	this->select_chip = host->devtype_data->select_chip;
 	this->ecc.size = 512;
-	this->ecc.layout = host->devtype_data->ecclayout_512;
+	mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
 
 	if (host->pdata.hw_ecc) {
-		this->ecc.calculate = mxc_nand_calculate_ecc;
-		this->ecc.hwctl = mxc_nand_enable_hwecc;
-		this->ecc.correct = host->devtype_data->correct_data;
 		this->ecc.mode = NAND_ECC_HW;
 	} else {
 		this->ecc.mode = NAND_ECC_SOFT;
+		this->ecc.algo = NAND_ECC_HAMMING;
 	}
 
 	/* NAND bus width determines access functions used by upper layer */
 	if (host->pdata.width == 2)
 		this->options |= NAND_BUSWIDTH_16;
 
-	if (host->pdata.flash_bbt) {
-		this->bbt_td = &bbt_main_descr;
-		this->bbt_md = &bbt_mirror_descr;
-		/* update flash based bbt */
+	/* update flash based bbt */
+	if (host->pdata.flash_bbt)
 		this->bbt_options |= NAND_BBT_USE_FLASH;
-	}
 
 	init_completion(&host->op_completion);
 
@@ -1637,6 +1617,26 @@
 		goto escan;
 	}
 
+	switch (this->ecc.mode) {
+	case NAND_ECC_HW:
+		this->ecc.calculate = mxc_nand_calculate_ecc;
+		this->ecc.hwctl = mxc_nand_enable_hwecc;
+		this->ecc.correct = host->devtype_data->correct_data;
+		break;
+
+	case NAND_ECC_SOFT:
+		break;
+
+	default:
+		err = -EINVAL;
+		goto escan;
+	}
+
+	if (this->bbt_options & NAND_BBT_USE_FLASH) {
+		this->bbt_td = &bbt_main_descr;
+		this->bbt_md = &bbt_mirror_descr;
+	}
+
 	/* allocate the right size buffer now */
 	devm_kfree(&pdev->dev, (void *)host->data_buf);
 	host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
@@ -1649,12 +1649,11 @@
 	/* Call preset again, with correct writesize this time */
 	host->devtype_data->preset(mtd);
 
-	if (mtd->writesize == 2048)
-		this->ecc.layout = host->devtype_data->ecclayout_2k;
-	else if (mtd->writesize == 4096) {
-		this->ecc.layout = host->devtype_data->ecclayout_4k;
-		if (get_eccsize(mtd) == 8)
-			ecc_8bit_layout_4k(this->ecc.layout);
+	if (!this->ecc.bytes) {
+		if (host->eccsize == 8)
+			this->ecc.bytes = 18;
+		else if (host->eccsize == 4)
+			this->ecc.bytes = 9;
 	}
 
 	/*
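
In the mxc conversion above, mxc_v2_ooblayout_ecc()/mxc_v2_ooblayout_free() derive the OOB stride from ecc.bytes (9 ECC bytes per 16-byte chunk, or 18 per 26-byte chunk), so the two callbacks reproduce all three removed nandv2_hw_eccoob_* tables as well as the former ecc_8bit_layout_4k() special case without storing any positions. A small illustrative check (not in the patch), assuming a 2048-byte page with 4-bit ECC, i.e. ecc.bytes == 9:

    /*
     * Section 1 of the v2 layout must start at 1 * 16 + 7 == 23 and
     * span 9 bytes (23..31) -- exactly the second row of the removed
     * nandv2_hw_eccoob_largepage table.
     */
    struct mtd_oob_region r;

    if (!mtd_ooblayout_ecc(mtd, 1, &r))
        WARN_ON(r.offset != 23 || r.length != 9);
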
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ba4f603..0b0dc29 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -45,57 +45,99 @@
 #include <linux/bitops.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
-#include <linux/of_mtd.h>
-
-/* Define default oob placement schemes for large and small page devices */
-static struct nand_ecclayout nand_oob_8 = {
-	.eccbytes = 3,
-	.eccpos = {0, 1, 2},
-	.oobfree = {
-		{.offset = 3,
-		 .length = 2},
-		{.offset = 6,
-		 .length = 2} }
-};
-
-static struct nand_ecclayout nand_oob_16 = {
-	.eccbytes = 6,
-	.eccpos = {0, 1, 2, 3, 6, 7},
-	.oobfree = {
-		{.offset = 8,
-		 . length = 8} }
-};
-
-static struct nand_ecclayout nand_oob_64 = {
-	.eccbytes = 24,
-	.eccpos = {
-		   40, 41, 42, 43, 44, 45, 46, 47,
-		   48, 49, 50, 51, 52, 53, 54, 55,
-		   56, 57, 58, 59, 60, 61, 62, 63},
-	.oobfree = {
-		{.offset = 2,
-		 .length = 38} }
-};
-
-static struct nand_ecclayout nand_oob_128 = {
-	.eccbytes = 48,
-	.eccpos = {
-		   80, 81, 82, 83, 84, 85, 86, 87,
-		   88, 89, 90, 91, 92, 93, 94, 95,
-		   96, 97, 98, 99, 100, 101, 102, 103,
-		   104, 105, 106, 107, 108, 109, 110, 111,
-		   112, 113, 114, 115, 116, 117, 118, 119,
-		   120, 121, 122, 123, 124, 125, 126, 127},
-	.oobfree = {
-		{.offset = 2,
-		 .length = 78} }
-};
+#include <linux/of.h>
 
 static int nand_get_device(struct mtd_info *mtd, int new_state);
 
 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
 			     struct mtd_oob_ops *ops);
 
+/* Define default oob placement schemes for large and small page devices */
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 4;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = ecc->total - 4;
+	}
+
+	return 0;
+}
+
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (mtd->oobsize == 16) {
+		if (section)
+			return -ERANGE;
+
+		oobregion->length = 8;
+		oobregion->offset = 8;
+	} else {
+		oobregion->length = 2;
+		if (!section)
+			oobregion->offset = 3;
+		else
+			oobregion->offset = 6;
+	}
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+	.ecc = nand_ooblayout_ecc_sp,
+	.free = nand_ooblayout_free_sp,
+};
+EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
+
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = ecc->total;
+	oobregion->offset = mtd->oobsize - oobregion->length;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = mtd->oobsize - ecc->total - 2;
+	oobregion->offset = 2;
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+	.ecc = nand_ooblayout_ecc_lp,
+	.free = nand_ooblayout_free_lp,
+};
+EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
+
 static int check_offs_len(struct mtd_info *mtd,
 					loff_t ofs, uint64_t len)
 {
@@ -1279,13 +1321,12 @@
 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
 				uint8_t *buf, int oob_required, int page)
 {
-	int i, eccsize = chip->ecc.size;
+	int i, eccsize = chip->ecc.size, ret;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	uint8_t *p = buf;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	unsigned int max_bitflips = 0;
 
 	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1293,8 +1334,10 @@
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
-	for (i = 0; i < chip->ecc.total; i++)
-		ecc_code[i] = chip->oob_poi[eccpos[i]];
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	eccsteps = chip->ecc.steps;
 	p = buf;
@@ -1326,14 +1369,14 @@
 			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
 			int page)
 {
-	int start_step, end_step, num_steps;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
+	int start_step, end_step, num_steps, ret;
 	uint8_t *p;
 	int data_col_addr, i, gaps = 0;
 	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
 	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
-	int index;
+	int index, section = 0;
 	unsigned int max_bitflips = 0;
+	struct mtd_oob_region oobregion = { };
 
 	/* Column address within the page aligned to ECC size (256bytes) */
 	start_step = data_offs / chip->ecc.size;
@@ -1361,12 +1404,13 @@
 	 * The performance is faster if we position offsets according to
 	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
 	 */
-	for (i = 0; i < eccfrag_len - 1; i++) {
-		if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
-			gaps = 1;
-			break;
-		}
-	}
+	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
+	if (ret)
+		return ret;
+
+	if (oobregion.length < eccfrag_len)
+		gaps = 1;
+
 	if (gaps) {
 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
 		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1375,20 +1419,23 @@
 		 * Send the command to read the particular ECC bytes take care
 		 * about buswidth alignment in read_buf.
 		 */
-		aligned_pos = eccpos[index] & ~(busw - 1);
+		aligned_pos = oobregion.offset & ~(busw - 1);
 		aligned_len = eccfrag_len;
-		if (eccpos[index] & (busw - 1))
+		if (oobregion.offset & (busw - 1))
 			aligned_len++;
-		if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
+		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
+		    (busw - 1))
 			aligned_len++;
 
 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
-					mtd->writesize + aligned_pos, -1);
+			      mtd->writesize + aligned_pos, -1);
 		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
 	}
 
-	for (i = 0; i < eccfrag_len; i++)
-		chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+	ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+					 chip->oob_poi, index, eccfrag_len);
+	if (ret)
+		return ret;
 
 	p = bufpoi + data_col_addr;
 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
@@ -1429,13 +1476,12 @@
 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				uint8_t *buf, int oob_required, int page)
 {
-	int i, eccsize = chip->ecc.size;
+	int i, eccsize = chip->ecc.size, ret;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	uint8_t *p = buf;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	unsigned int max_bitflips = 0;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
@@ -1445,8 +1491,10 @@
 	}
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
-	for (i = 0; i < chip->ecc.total; i++)
-		ecc_code[i] = chip->oob_poi[eccpos[i]];
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	eccsteps = chip->ecc.steps;
 	p = buf;
@@ -1491,12 +1539,11 @@
 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
 	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
 {
-	int i, eccsize = chip->ecc.size;
+	int i, eccsize = chip->ecc.size, ret;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	uint8_t *p = buf;
 	uint8_t *ecc_code = chip->buffers->ecccode;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	unsigned int max_bitflips = 0;
 
@@ -1505,8 +1552,10 @@
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
 
-	for (i = 0; i < chip->ecc.total; i++)
-		ecc_code[i] = chip->oob_poi[eccpos[i]];
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		int stat;
@@ -1607,14 +1656,17 @@
 
 /**
  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
- * @chip: nand chip structure
+ * @mtd: mtd info structure
  * @oob: oob destination address
  * @ops: oob ops structure
  * @len: size of oob to transfer
  */
-static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
 				  struct mtd_oob_ops *ops, size_t len)
 {
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
+
 	switch (ops->mode) {
 
 	case MTD_OPS_PLACE_OOB:
@@ -1622,31 +1674,12 @@
 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
 		return oob + len;
 
-	case MTD_OPS_AUTO_OOB: {
-		struct nand_oobfree *free = chip->ecc.layout->oobfree;
-		uint32_t boffs = 0, roffs = ops->ooboffs;
-		size_t bytes = 0;
+	case MTD_OPS_AUTO_OOB:
+		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
+						  ops->ooboffs, len);
+		BUG_ON(ret);
+		return oob + len;
 
-		for (; free->length && len; free++, len -= bytes) {
-			/* Read request not from offset 0? */
-			if (unlikely(roffs)) {
-				if (roffs >= free->length) {
-					roffs -= free->length;
-					continue;
-				}
-				boffs = free->offset + roffs;
-				bytes = min_t(size_t, len,
-					      (free->length - roffs));
-				roffs = 0;
-			} else {
-				bytes = min_t(size_t, len, free->length);
-				boffs = free->offset;
-			}
-			memcpy(oob, chip->oob_poi + boffs, bytes);
-			oob += bytes;
-		}
-		return oob;
-	}
 	default:
 		BUG();
 	}
@@ -1780,7 +1813,7 @@
 				int toread = min(oobreadlen, max_oobsize);
 
 				if (toread) {
-					oob = nand_transfer_oob(chip,
+					oob = nand_transfer_oob(mtd,
 						oob, ops, toread);
 					oobreadlen -= toread;
 				}
@@ -1893,13 +1926,13 @@
  * @chip: nand chip info structure
  * @page: page number to read
  */
-static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-			     int page)
+int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
 {
 	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 	return 0;
 }
+EXPORT_SYMBOL(nand_read_oob_std);
 
 /**
  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
@@ -1908,8 +1941,8 @@
  * @chip: nand chip info structure
  * @page: page number to read
  */
-static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-				  int page)
+int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			   int page)
 {
 	int length = mtd->oobsize;
 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1937,6 +1970,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(nand_read_oob_syndrome);
 
 /**
  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
@@ -1944,8 +1978,7 @@
  * @chip: nand chip info structure
  * @page: page number to write
  */
-static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-			      int page)
+int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
 {
 	int status = 0;
 	const uint8_t *buf = chip->oob_poi;
@@ -1960,6 +1993,7 @@
 
 	return status & NAND_STATUS_FAIL ? -EIO : 0;
 }
+EXPORT_SYMBOL(nand_write_oob_std);
 
 /**
  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
@@ -1968,8 +2002,8 @@
  * @chip: nand chip info structure
  * @page: page number to write
  */
-static int nand_write_oob_syndrome(struct mtd_info *mtd,
-				   struct nand_chip *chip, int page)
+int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page)
 {
 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
 	int eccsize = chip->ecc.size, length = mtd->oobsize;
@@ -2019,6 +2053,7 @@
 
 	return status & NAND_STATUS_FAIL ? -EIO : 0;
 }
+EXPORT_SYMBOL(nand_write_oob_syndrome);
 
 /**
  * nand_do_read_oob - [INTERN] NAND read out-of-band
@@ -2078,7 +2113,7 @@
 			break;
 
 		len = min(len, readlen);
-		buf = nand_transfer_oob(chip, buf, ops, len);
+		buf = nand_transfer_oob(mtd, buf, ops, len);
 
 		if (chip->options & NAND_NEED_READRDY) {
 			/* Apply delay or wait for ready/busy pin */
@@ -2237,19 +2272,20 @@
 				 const uint8_t *buf, int oob_required,
 				 int page)
 {
-	int i, eccsize = chip->ecc.size;
+	int i, eccsize = chip->ecc.size, ret;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	const uint8_t *p = buf;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 
 	/* Software ECC calculation */
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
-	for (i = 0; i < chip->ecc.total; i++)
-		chip->oob_poi[eccpos[i]] = ecc_calc[i];
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
 }
@@ -2266,12 +2302,11 @@
 				  const uint8_t *buf, int oob_required,
 				  int page)
 {
-	int i, eccsize = chip->ecc.size;
+	int i, eccsize = chip->ecc.size, ret;
 	int eccbytes = chip->ecc.bytes;
 	int eccsteps = chip->ecc.steps;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	const uint8_t *p = buf;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -2279,8 +2314,10 @@
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 	}
 
-	for (i = 0; i < chip->ecc.total; i++)
-		chip->oob_poi[eccpos[i]] = ecc_calc[i];
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
 
@@ -2308,11 +2345,10 @@
 	int ecc_size      = chip->ecc.size;
 	int ecc_bytes     = chip->ecc.bytes;
 	int ecc_steps     = chip->ecc.steps;
-	uint32_t *eccpos  = chip->ecc.layout->eccpos;
 	uint32_t start_step = offset / ecc_size;
 	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
 	int oob_bytes       = mtd->oobsize / ecc_steps;
-	int step, i;
+	int step, ret;
 
 	for (step = 0; step < ecc_steps; step++) {
 		/* configure controller for WRITE access */
@@ -2340,8 +2376,10 @@
 	/* copy calculated ECC for whole page to chip->buffer->oob */
 	/* this include masked-value(0xFF) for unwritten subpages */
 	ecc_calc = chip->buffers->ecccalc;
-	for (i = 0; i < chip->ecc.total; i++)
-		chip->oob_poi[eccpos[i]] = ecc_calc[i];
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	/* write OOB buffer to NAND device */
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -2478,6 +2516,7 @@
 			      struct mtd_oob_ops *ops)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
 
 	/*
 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
@@ -2492,31 +2531,12 @@
 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
 		return oob + len;
 
-	case MTD_OPS_AUTO_OOB: {
-		struct nand_oobfree *free = chip->ecc.layout->oobfree;
-		uint32_t boffs = 0, woffs = ops->ooboffs;
-		size_t bytes = 0;
+	case MTD_OPS_AUTO_OOB:
+		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+						  ops->ooboffs, len);
+		BUG_ON(ret);
+		return oob + len;
 
-		for (; free->length && len; free++, len -= bytes) {
-			/* Write request not from offset 0? */
-			if (unlikely(woffs)) {
-				if (woffs >= free->length) {
-					woffs -= free->length;
-					continue;
-				}
-				boffs = free->offset + woffs;
-				bytes = min_t(size_t, len,
-					      (free->length - woffs));
-				woffs = 0;
-			} else {
-				bytes = min_t(size_t, len, free->length);
-				boffs = free->offset;
-			}
-			memcpy(chip->oob_poi + boffs, oob, bytes);
-			oob += bytes;
-		}
-		return oob;
-	}
 	default:
 		BUG();
 	}
@@ -3951,10 +3971,115 @@
 	return type;
 }
 
+static const char * const nand_ecc_modes[] = {
+	[NAND_ECC_NONE]		= "none",
+	[NAND_ECC_SOFT]		= "soft",
+	[NAND_ECC_HW]		= "hw",
+	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
+	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
+};
+
+static int of_get_nand_ecc_mode(struct device_node *np)
+{
+	const char *pm;
+	int err, i;
+
+	err = of_property_read_string(np, "nand-ecc-mode", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+		if (!strcasecmp(pm, nand_ecc_modes[i]))
+			return i;
+
+	/*
+	 * For backward compatibility we support a few obsolete values that
+	 * no longer have mappings in nand_ecc_modes_t (they were merged
+	 * with other enums).
+	 */
+	if (!strcasecmp(pm, "soft_bch"))
+		return NAND_ECC_SOFT;
+
+	return -ENODEV;
+}
+
+static const char * const nand_ecc_algos[] = {
+	[NAND_ECC_HAMMING]	= "hamming",
+	[NAND_ECC_BCH]		= "bch",
+};
+
+static int of_get_nand_ecc_algo(struct device_node *np)
+{
+	const char *pm;
+	int err, i;
+
+	err = of_property_read_string(np, "nand-ecc-algo", &pm);
+	if (!err) {
+		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
+			if (!strcasecmp(pm, nand_ecc_algos[i]))
+				return i;
+		return -ENODEV;
+	}
+
+	/*
+	 * For backward compatibility we also read "nand-ecc-mode", checking
+	 * for some obsolete values that used to specify the ECC algorithm.
+	 */
+	err = of_property_read_string(np, "nand-ecc-mode", &pm);
+	if (err < 0)
+		return err;
+
+	if (!strcasecmp(pm, "soft"))
+		return NAND_ECC_HAMMING;
+	else if (!strcasecmp(pm, "soft_bch"))
+		return NAND_ECC_BCH;
+
+	return -ENODEV;
+}
+
+static int of_get_nand_ecc_step_size(struct device_node *np)
+{
+	int ret;
+	u32 val;
+
+	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+	return ret ? ret : val;
+}
+
+static int of_get_nand_ecc_strength(struct device_node *np)
+{
+	int ret;
+	u32 val;
+
+	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+	return ret ? ret : val;
+}
+
+static int of_get_nand_bus_width(struct device_node *np)
+{
+	u32 val;
+
+	if (of_property_read_u32(np, "nand-bus-width", &val))
+		return 8;
+
+	switch (val) {
+	case 8:
+	case 16:
+		return val;
+	default:
+		return -EIO;
+	}
+}
+
+static bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+	return of_property_read_bool(np, "nand-on-flash-bbt");
+}
+
 static int nand_dt_init(struct nand_chip *chip)
 {
 	struct device_node *dn = nand_get_flash_node(chip);
-	int ecc_mode, ecc_strength, ecc_step;
+	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
 
 	if (!dn)
 		return 0;
@@ -3966,6 +4091,7 @@
 		chip->bbt_options |= NAND_BBT_USE_FLASH;
 
 	ecc_mode = of_get_nand_ecc_mode(dn);
+	ecc_algo = of_get_nand_ecc_algo(dn);
 	ecc_strength = of_get_nand_ecc_strength(dn);
 	ecc_step = of_get_nand_ecc_step_size(dn);
 
@@ -3978,6 +4104,9 @@
 	if (ecc_mode >= 0)
 		chip->ecc.mode = ecc_mode;
 
+	if (ecc_algo >= 0)
+		chip->ecc.algo = ecc_algo;
+
 	if (ecc_strength >= 0)
 		chip->ecc.strength = ecc_strength;
 
@@ -4054,6 +4183,82 @@
 }
 EXPORT_SYMBOL(nand_scan_ident);
 
+static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
+		return -EINVAL;
+
+	switch (ecc->algo) {
+	case NAND_ECC_HAMMING:
+		ecc->calculate = nand_calculate_ecc;
+		ecc->correct = nand_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
+		if (!ecc->size)
+			ecc->size = 256;
+		ecc->bytes = 3;
+		ecc->strength = 1;
+		return 0;
+	case NAND_ECC_BCH:
+		if (!mtd_nand_has_bch()) {
+			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+			return -EINVAL;
+		}
+		ecc->calculate = nand_bch_calculate_ecc;
+		ecc->correct = nand_bch_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
+		/*
+		 * Board driver should supply ecc.size and ecc.strength
+		 * values to select how many bits are correctable.
+		 * Otherwise, default to 4 bits for large page devices.
+		 */
+		if (!ecc->size && (mtd->oobsize >= 64)) {
+			ecc->size = 512;
+			ecc->strength = 4;
+		}
+
+		/*
+		 * if no ecc placement scheme was provided pickup the default
+		 * large page one.
+		 */
+		if (!mtd->ooblayout) {
+			/* handle large page devices only */
+			if (mtd->oobsize < 64) {
+				WARN(1, "OOB layout is required when using software BCH on small pages\n");
+				return -EINVAL;
+			}
+
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+		}
+
+		/* See nand_bch_init() for details. */
+		ecc->bytes = 0;
+		ecc->priv = nand_bch_init(mtd);
+		if (!ecc->priv) {
+			WARN(1, "BCH ECC initialization failed!\n");
+			return -EINVAL;
+		}
+		return 0;
+	default:
+		WARN(1, "Unsupported ECC algorithm!\n");
+		return -EINVAL;
+	}
+}
+
 /*
  * Check if the chip configuration meet the datasheet requirements.
 
@@ -4098,14 +4303,15 @@
  */
 int nand_scan_tail(struct mtd_info *mtd)
 {
-	int i;
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
 	struct nand_buffers *nbuf;
+	int ret;
 
 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
-	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
-			!(chip->bbt_options & NAND_BBT_USE_FLASH));
+	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+		   !(chip->bbt_options & NAND_BBT_USE_FLASH)))
+		return -EINVAL;
 
 	if (!(chip->options & NAND_OWN_BUFFERS)) {
 		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
@@ -4128,24 +4334,22 @@
 	/*
 	 * If no default placement scheme is given, select an appropriate one.
 	 */
-	if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
+	if (!mtd->ooblayout &&
+	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
 		switch (mtd->oobsize) {
 		case 8:
-			ecc->layout = &nand_oob_8;
-			break;
 		case 16:
-			ecc->layout = &nand_oob_16;
+			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
 			break;
 		case 64:
-			ecc->layout = &nand_oob_64;
-			break;
 		case 128:
-			ecc->layout = &nand_oob_128;
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
 			break;
 		default:
-			pr_warn("No oob scheme defined for oobsize %d\n",
-				   mtd->oobsize);
-			BUG();
+			WARN(1, "No oob scheme defined for oobsize %d\n",
+				mtd->oobsize);
+			ret = -EINVAL;
+			goto err_free;
 		}
 	}
 
@@ -4161,8 +4365,9 @@
 	case NAND_ECC_HW_OOB_FIRST:
 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
 		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
-			pr_warn("No ECC functions supplied; hardware ECC not possible\n");
-			BUG();
+			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+			ret = -EINVAL;
+			goto err_free;
 		}
 		if (!ecc->read_page)
 			ecc->read_page = nand_read_page_hwecc_oob_first;
@@ -4192,8 +4397,9 @@
 		     ecc->read_page == nand_read_page_hwecc ||
 		     !ecc->write_page ||
 		     ecc->write_page == nand_write_page_hwecc)) {
-			pr_warn("No ECC functions supplied; hardware ECC not possible\n");
-			BUG();
+			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+			ret = -EINVAL;
+			goto err_free;
 		}
 		/* Use standard syndrome read/write page function? */
 		if (!ecc->read_page)
@@ -4211,61 +4417,22 @@
 
 		if (mtd->writesize >= ecc->size) {
 			if (!ecc->strength) {
-				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
-				BUG();
+				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
+				ret = -EINVAL;
+				goto err_free;
 			}
 			break;
 		}
 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
 			ecc->size, mtd->writesize);
 		ecc->mode = NAND_ECC_SOFT;
+		ecc->algo = NAND_ECC_HAMMING;
 
 	case NAND_ECC_SOFT:
-		ecc->calculate = nand_calculate_ecc;
-		ecc->correct = nand_correct_data;
-		ecc->read_page = nand_read_page_swecc;
-		ecc->read_subpage = nand_read_subpage;
-		ecc->write_page = nand_write_page_swecc;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
-		ecc->read_oob = nand_read_oob_std;
-		ecc->write_oob = nand_write_oob_std;
-		if (!ecc->size)
-			ecc->size = 256;
-		ecc->bytes = 3;
-		ecc->strength = 1;
-		break;
-
-	case NAND_ECC_SOFT_BCH:
-		if (!mtd_nand_has_bch()) {
-			pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
-			BUG();
-		}
-		ecc->calculate = nand_bch_calculate_ecc;
-		ecc->correct = nand_bch_correct_data;
-		ecc->read_page = nand_read_page_swecc;
-		ecc->read_subpage = nand_read_subpage;
-		ecc->write_page = nand_write_page_swecc;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
-		ecc->read_oob = nand_read_oob_std;
-		ecc->write_oob = nand_write_oob_std;
-		/*
-		 * Board driver should supply ecc.size and ecc.strength values
-		 * to select how many bits are correctable. Otherwise, default
-		 * to 4 bits for large page devices.
-		 */
-		if (!ecc->size && (mtd->oobsize >= 64)) {
-			ecc->size = 512;
-			ecc->strength = 4;
-		}
-
-		/* See nand_bch_init() for details. */
-		ecc->bytes = 0;
-		ecc->priv = nand_bch_init(mtd);
-		if (!ecc->priv) {
-			pr_warn("BCH ECC initialization failed!\n");
-			BUG();
+		ret = nand_set_ecc_soft_ops(mtd);
+		if (ret) {
+			ret = -EINVAL;
+			goto err_free;
 		}
 		break;
 
@@ -4283,8 +4450,9 @@
 		break;
 
 	default:
-		pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
-		BUG();
+		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
+		ret = -EINVAL;
+		goto err_free;
 	}
 
 	/* For many systems, the standard OOB write also works for raw */
@@ -4293,20 +4461,9 @@
 	if (!ecc->write_oob_raw)
 		ecc->write_oob_raw = ecc->write_oob;
 
-	/*
-	 * The number of bytes available for a client to place data into
-	 * the out of band area.
-	 */
-	mtd->oobavail = 0;
-	if (ecc->layout) {
-		for (i = 0; ecc->layout->oobfree[i].length; i++)
-			mtd->oobavail += ecc->layout->oobfree[i].length;
-	}
-
-	/* ECC sanity check: warn if it's too weak */
-	if (!nand_ecc_strength_good(mtd))
-		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
-			mtd->name);
+	/* propagate ecc info to mtd_info */
+	mtd->ecc_strength = ecc->strength;
+	mtd->ecc_step_size = ecc->size;
 
 	/*
 	 * Set the number of read / write steps for one page depending on ECC
@@ -4314,11 +4471,27 @@
 	 */
 	ecc->steps = mtd->writesize / ecc->size;
 	if (ecc->steps * ecc->size != mtd->writesize) {
-		pr_warn("Invalid ECC parameters\n");
-		BUG();
+		WARN(1, "Invalid ECC parameters\n");
+		ret = -EINVAL;
+		goto err_free;
 	}
 	ecc->total = ecc->steps * ecc->bytes;
 
+	/*
+	 * The number of bytes available for a client to place data into
+	 * the out of band area.
+	 */
+	ret = mtd_ooblayout_count_freebytes(mtd);
+	if (ret < 0)
+		ret = 0;
+
+	mtd->oobavail = ret;
+
+	/* ECC sanity check: warn if it's too weak */
+	if (!nand_ecc_strength_good(mtd))
+		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+			mtd->name);
+
 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
 		switch (ecc->steps) {
@@ -4343,7 +4516,6 @@
 	/* Large page NAND with SOFT_ECC should support subpage reads */
 	switch (ecc->mode) {
 	case NAND_ECC_SOFT:
-	case NAND_ECC_SOFT_BCH:
 		if (chip->page_shift > 9)
 			chip->options |= NAND_SUBPAGE_READ;
 		break;
@@ -4375,10 +4547,6 @@
 	mtd->_block_markbad = nand_block_markbad;
 	mtd->writebufsize = mtd->writesize;
 
-	/* propagate ecc info to mtd_info */
-	mtd->ecclayout = ecc->layout;
-	mtd->ecc_strength = ecc->strength;
-	mtd->ecc_step_size = ecc->size;
 	/*
 	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
@@ -4393,6 +4561,10 @@
 
 	/* Build bad block table */
 	return chip->scan_bbt(mtd);
+err_free:
+	if (!(chip->options & NAND_OWN_BUFFERS))
+		kfree(chip->buffers);
+	return ret;
 }
 EXPORT_SYMBOL(nand_scan_tail);
 
@@ -4436,7 +4608,8 @@
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 
-	if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+	if (chip->ecc.mode == NAND_ECC_SOFT &&
+	    chip->ecc.algo == NAND_ECC_BCH)
 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
 
 	mtd_device_unregister(mtd);
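
Two of the nand_base.c changes above are worth restating from a driver author's point of view. First, NAND_ECC_SOFT_BCH is gone: software BCH is now requested as NAND_ECC_SOFT plus ecc.algo == NAND_ECC_BCH (or, via DT, nand-ecc-mode = "soft" with nand-ecc-algo = "bch"), and the soft-ECC function hookup moved into the new nand_set_ecc_soft_ops(). Second, nand_scan_tail() now fails with an error code instead of calling BUG() on bad ECC configurations. A minimal driver-side sketch under those assumptions (the concrete defaults mentioned below are only an example of the large-page fallback, not values taken from this hunk):

    /* Requesting software BCH after this series: */
    chip->ecc.mode = NAND_ECC_SOFT;
    chip->ecc.algo = NAND_ECC_BCH;
    /*
     * ecc.size and ecc.strength may be left at 0; on large-page devices
     * nand_set_ecc_soft_ops() then defaults them to 512 and 4.
     */

    ret = nand_scan_tail(mtd);    /* returns -EINVAL on bad ECC setup */
    if (ret)                      /* instead of calling BUG()          */
        return ret;
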
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index b585bae..44763f8 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -32,13 +32,11 @@
 /**
  * struct nand_bch_control - private NAND BCH control structure
  * @bch:       BCH control structure
- * @ecclayout: private ecc layout for this BCH configuration
  * @errloc:    error location array
  * @eccmask:   XOR ecc mask, allows erased pages to be decoded as valid
  */
 struct nand_bch_control {
 	struct bch_control   *bch;
-	struct nand_ecclayout ecclayout;
 	unsigned int         *errloc;
 	unsigned char        *eccmask;
 };
@@ -124,7 +122,6 @@
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
 	unsigned int m, t, eccsteps, i;
-	struct nand_ecclayout *layout = nand->ecc.layout;
 	struct nand_bch_control *nbc = NULL;
 	unsigned char *erased_page;
 	unsigned int eccsize = nand->ecc.size;
@@ -161,34 +158,10 @@
 
 	eccsteps = mtd->writesize/eccsize;
 
-	/* if no ecc placement scheme was provided, build one */
-	if (!layout) {
-
-		/* handle large page devices only */
-		if (mtd->oobsize < 64) {
-			printk(KERN_WARNING "must provide an oob scheme for "
-			       "oobsize %d\n", mtd->oobsize);
-			goto fail;
-		}
-
-		layout = &nbc->ecclayout;
-		layout->eccbytes = eccsteps*eccbytes;
-
-		/* reserve 2 bytes for bad block marker */
-		if (layout->eccbytes+2 > mtd->oobsize) {
-			printk(KERN_WARNING "no suitable oob scheme available "
-			       "for oobsize %d eccbytes %u\n", mtd->oobsize,
-			       eccbytes);
-			goto fail;
-		}
-		/* put ecc bytes at oob tail */
-		for (i = 0; i < layout->eccbytes; i++)
-			layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
-
-		layout->oobfree[0].offset = 2;
-		layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
-
-		nand->ecc.layout = layout;
+	/* Check that we have an oob layout description. */
+	if (!mtd->ooblayout) {
+		pr_warn("missing oob scheme\n");
+		goto fail;
 	}
 
 	/* sanity checks */
@@ -196,7 +169,18 @@
 		printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
 		goto fail;
 	}
-	if (layout->eccbytes != (eccsteps*eccbytes)) {
+
+	/*
+	 * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
+	 * which is called by mtd_ooblayout_count_eccbytes().
+	 * Make sure they are properly initialized before calling
+	 * mtd_ooblayout_count_eccbytes().
+	 * FIXME: we should probably rework the sequencing in nand_scan_tail()
+	 * to avoid setting those fields twice.
+	 */
+	nand->ecc.steps = eccsteps;
+	nand->ecc.total = eccsteps * eccbytes;
+	if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
 		printk(KERN_WARNING "invalid ecc layout\n");
 		goto fail;
 	}
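
With the layout construction removed from nand_bch_init(), the caller must already have registered an OOB layout (for large-page parts, nand_set_ecc_soft_ops() falls back to nand_ooblayout_lp_ops), and the old "layout->eccbytes != eccsteps * eccbytes" sanity check becomes a comparison against mtd_ooblayout_count_eccbytes(), i.e. the sum of the ECC section lengths. A worked example with an assumed geometry (not taken from this patch):

    /*
     * Assumed geometry: 2048-byte page, 64-byte OOB, ecc.size = 512,
     * 4-bit BCH => ecc.bytes = 7 (4 * 13 ECC bits rounded up to bytes).
     *
     *   eccsteps                 = 2048 / 512 = 4
     *   ecc.total                = 4 * 7      = 28
     *   ECC region (lp layout)   = OOB offsets 36..63
     *   free region (lp layout)  = OOB offsets  2..35 (0..1 keep the BBM)
     *
     * mtd_ooblayout_count_eccbytes() must therefore return 28, or
     * nand_bch_init() rejects the layout as invalid.
     */
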
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a58169a2..1eb9344 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -569,7 +569,7 @@
  *
  * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
  */
-static int alloc_device(struct nandsim *ns)
+static int __init alloc_device(struct nandsim *ns)
 {
 	struct file *cfile;
 	int i, err;
@@ -654,7 +654,7 @@
 	}
 }
 
-static char *get_partition_name(int i)
+static char __init *get_partition_name(int i)
 {
 	return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
 }
@@ -664,7 +664,7 @@
  *
  * RETURNS: 0 if success, -ERRNO if failure.
  */
-static int init_nandsim(struct mtd_info *mtd)
+static int __init init_nandsim(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
 	struct nandsim   *ns   = nand_get_controller_data(chip);
@@ -2261,6 +2261,7 @@
 	chip->read_buf   = ns_nand_read_buf;
 	chip->read_word  = ns_nand_read_word;
 	chip->ecc.mode   = NAND_ECC_SOFT;
+	chip->ecc.algo   = NAND_ECC_HAMMING;
 	/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
 	/* and 'badblocks' parameters to work */
 	chip->options   |= NAND_SKIP_BBTSCAN;
@@ -2338,7 +2339,8 @@
 			retval = -EINVAL;
 			goto error;
 		}
-		chip->ecc.mode = NAND_ECC_SOFT_BCH;
+		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_BCH;
 		chip->ecc.size = 512;
 		chip->ecc.strength = bch;
 		chip->ecc.bytes = eccbytes;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index dbc5b57..8f64011 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -261,6 +261,7 @@
 	chip->chip_delay	= 50;
 	chip->options		= 0;
 	chip->ecc.mode		= NAND_ECC_SOFT;
+	chip->ecc.algo		= NAND_ECC_HAMMING;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0749ca1..08e1588 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -12,6 +12,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
@@ -28,6 +29,7 @@
 #include <linux/mtd/nand_bch.h>
 #include <linux/platform_data/elm.h>
 
+#include <linux/omap-gpmc.h>
 #include <linux/platform_data/mtd-nand-omap2.h>
 
 #define	DRIVER_NAME	"omap2-nand"
@@ -151,13 +153,17 @@
 };
 
 struct omap_nand_info {
-	struct omap_nand_platform_data	*pdata;
 	struct nand_chip		nand;
 	struct platform_device		*pdev;
 
 	int				gpmc_cs;
-	unsigned long			phys_base;
+	bool				dev_ready;
+	enum nand_io			xfer_type;
+	int				devsize;
 	enum omap_ecc			ecc_opt;
+	struct device_node		*elm_of_node;
+
+	unsigned long			phys_base;
 	struct completion		comp;
 	struct dma_chan			*dma;
 	int				gpmc_irq_fifo;
@@ -168,12 +174,14 @@
 	} iomode;
 	u_char				*buf;
 	int					buf_len;
+	/* Interface to GPMC */
 	struct gpmc_nand_regs		reg;
-	/* generated at runtime depending on ECC algorithm and layout selected */
-	struct nand_ecclayout		oobinfo;
+	struct gpmc_nand_ops		*ops;
+	bool				flash_bbt;
 	/* fields specific for BCHx_HW ECC scheme */
 	struct device			*elm_dev;
-	struct device_node		*of_node;
+	/* NAND ready gpio */
+	struct gpio_desc		*ready_gpiod;
 };
 
 static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
@@ -208,7 +216,7 @@
 	 */
 	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
 		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
-		(dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+		(dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
 	writel(val, info->reg.gpmc_prefetch_config1);
 
 	/*  Start the prefetch engine */
@@ -288,14 +296,13 @@
 {
 	struct omap_nand_info *info = mtd_to_omap(mtd);
 	u_char *p = (u_char *)buf;
-	u32	status = 0;
+	bool status;
 
 	while (len--) {
 		iowrite8(*p++, info->nand.IO_ADDR_W);
 		/* wait until buffer is available for write */
 		do {
-			status = readl(info->reg.gpmc_status) &
-					STATUS_BUFF_EMPTY;
+			status = info->ops->nand_writebuffer_empty();
 		} while (!status);
 	}
 }
@@ -323,7 +330,7 @@
 {
 	struct omap_nand_info *info = mtd_to_omap(mtd);
 	u16 *p = (u16 *) buf;
-	u32	status = 0;
+	bool status;
 	/* FIXME try bursts of writesw() or DMA ... */
 	len >>= 1;
 
@@ -331,8 +338,7 @@
 		iowrite16(*p++, info->nand.IO_ADDR_W);
 		/* wait until buffer is available for write */
 		do {
-			status = readl(info->reg.gpmc_status) &
-					STATUS_BUFF_EMPTY;
+			status = info->ops->nand_writebuffer_empty();
 		} while (!status);
 	}
 }
@@ -467,17 +473,8 @@
 	int ret;
 	u32 val;
 
-	if (addr >= high_memory) {
-		struct page *p1;
-
-		if (((size_t)addr & PAGE_MASK) !=
-			((size_t)(addr + len - 1) & PAGE_MASK))
-			goto out_copy;
-		p1 = vmalloc_to_page(addr);
-		if (!p1)
-			goto out_copy;
-		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
-	}
+	if (!virt_addr_valid(addr))
+		goto out_copy;
 
 	sg_init_one(&sg, addr, len);
 	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
@@ -497,6 +494,11 @@
 	tx->callback_param = &info->comp;
 	dmaengine_submit(tx);
 
+	init_completion(&info->comp);
+
+	/* setup and start DMA using dma_addr */
+	dma_async_issue_pending(info->dma);
+
 	/*  configure and start prefetch transfer */
 	ret = omap_prefetch_enable(info->gpmc_cs,
 		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
@@ -504,10 +506,6 @@
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy_unmap;
 
-	init_completion(&info->comp);
-	dma_async_issue_pending(info->dma);
-
-	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
 	tim = 0;
 	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
@@ -1017,21 +1015,16 @@
 }
 
 /**
- * omap_dev_ready - calls the platform specific dev_ready function
+ * omap_dev_ready - checks the NAND Ready GPIO line
  * @mtd: MTD device structure
+ *
+ * Returns true if ready and false if busy.
  */
 static int omap_dev_ready(struct mtd_info *mtd)
 {
-	unsigned int val = 0;
 	struct omap_nand_info *info = mtd_to_omap(mtd);
 
-	val = readl(info->reg.gpmc_status);
-
-	if ((val & 0x100) == 0x100) {
-		return 1;
-	} else {
-		return 0;
-	}
+	return gpiod_get_value(info->ready_gpiod);
 }
 
 /**
@@ -1495,9 +1488,8 @@
 static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
 			       const uint8_t *buf, int oob_required, int page)
 {
-	int i;
+	int ret;
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
 
 	/* Enable GPMC ecc engine */
 	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1508,8 +1500,10 @@
 	/* Update ecc vector from GPMC result registers */
 	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
 
-	for (i = 0; i < chip->ecc.total; i++)
-		chip->oob_poi[eccpos[i]] = ecc_calc[i];
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	/* Write ecc vector to OOB area */
 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1536,10 +1530,7 @@
 {
 	uint8_t *ecc_calc = chip->buffers->ecccalc;
 	uint8_t *ecc_code = chip->buffers->ecccode;
-	uint32_t *eccpos = chip->ecc.layout->eccpos;
-	uint8_t *oob = &chip->oob_poi[eccpos[0]];
-	uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
-	int stat;
+	int stat, ret;
 	unsigned int max_bitflips = 0;
 
 	/* Enable GPMC ecc engine */
@@ -1549,13 +1540,18 @@
 	chip->read_buf(mtd, buf, mtd->writesize);
 
 	/* Read oob bytes */
-	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
-	chip->read_buf(mtd, oob, chip->ecc.total);
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+		      mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
+	chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+		       chip->ecc.total);
 
 	/* Calculate ecc bytes */
 	chip->ecc.calculate(mtd, buf, ecc_calc);
 
-	memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
 
 	stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
 
@@ -1630,7 +1626,7 @@
 			"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
 		return false;
 	}
-	if (ecc_needs_elm && !is_elm_present(info, pdata->elm_of_node)) {
+	if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
 		dev_err(&info->pdev->dev, "ELM not available\n");
 		return false;
 	}
@@ -1638,43 +1634,227 @@
 	return true;
 }
 
+static const char * const nand_xfer_types[] = {
+	[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
+	[NAND_OMAP_POLLED] = "polled",
+	[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
+	[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
+};
+
+static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
+{
+	struct device_node *child = dev->of_node;
+	int i;
+	const char *s;
+	u32 cs;
+
+	if (of_property_read_u32(child, "reg", &cs) < 0) {
+		dev_err(dev, "reg not found in DT\n");
+		return -EINVAL;
+	}
+
+	info->gpmc_cs = cs;
+
+	/* detect availability of ELM module. Won't be present pre-OMAP4 */
+	info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+	if (!info->elm_of_node)
+		dev_dbg(dev, "ti,elm-id not in DT\n");
+
+	/* select ecc-scheme for NAND */
+	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+		dev_err(dev, "ti,nand-ecc-opt not found\n");
+		return -EINVAL;
+	}
+
+	if (!strcmp(s, "sw")) {
+		info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
+	} else if (!strcmp(s, "ham1") ||
+		   !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
+		info->ecc_opt =	OMAP_ECC_HAM1_CODE_HW;
+	} else if (!strcmp(s, "bch4")) {
+		if (info->elm_of_node)
+			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
+		else
+			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+	} else if (!strcmp(s, "bch8")) {
+		if (info->elm_of_node)
+			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+		else
+			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+	} else if (!strcmp(s, "bch16")) {
+		info->ecc_opt =	OMAP_ECC_BCH16_CODE_HW;
+	} else {
+		dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
+		return -EINVAL;
+	}
+
+	/* select data transfer mode */
+	if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
+		for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
+			if (!strcasecmp(s, nand_xfer_types[i])) {
+				info->xfer_type = i;
+				return 0;
+			}
+		}
+
+		dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
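+
+/*
+ * Illustrative DT fragment (not taken from a real board file) that
+ * omap_get_dt_info() above would accept:
+ *
+ *	nand@0,0 {
+ *		compatible = "ti,omap2-nand";
+ *		reg = <0 0 4>;
+ *		ti,nand-ecc-opt = "bch8";
+ *		ti,nand-xfer-type = "prefetch-dma";
+ *		ti,elm-id = <&elm>;
+ *	};
+ */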
+
+static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct nand_chip *chip = &info->nand;
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+	    !(chip->options & NAND_BUSWIDTH_16))
+		off = 1;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = off;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int omap_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct nand_chip *chip = &info->nand;
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+	    !(chip->options & NAND_BUSWIDTH_16))
+		off = 1;
+
+	if (section)
+		return -ERANGE;
+
+	off += chip->ecc.total;
+	if (off >= mtd->oobsize)
+		return -ERANGE;
+
+	oobregion->offset = off;
+	oobregion->length = mtd->oobsize - off;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
+	.ecc = omap_ooblayout_ecc,
+	.free = omap_ooblayout_free,
+};
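+
+/*
+ * Worked example (illustrative figures): with OMAP_ECC_BCH8_CODE_HW on a
+ * 2KiB-page chip with a 64-byte OOB, ecc.bytes = 14 and ecc.total = 56, so
+ * the ECC region above spans OOB bytes 2..57 and the free region 58..63.
+ */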
+
+static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	/*
+	 * When SW correction is employed, one OMAP specific marker byte is
+	 * reserved after each ECC step.
+	 */
+	oobregion->offset = off + (section * (chip->ecc.bytes + 1));
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (section)
+		return -ERANGE;
+
+	/*
+	 * When SW correction is employed, one OMAP specific marker byte is
+	 * reserved after each ECC step.
+	 */
+	off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+	if (off >= mtd->oobsize)
+		return -ERANGE;
+
+	oobregion->offset = off;
+	oobregion->length = mtd->oobsize - off;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
+	.ecc = omap_sw_ooblayout_ecc,
+	.free = omap_sw_ooblayout_free,
+};
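+
+/*
+ * Worked example (illustrative figures): with BCH8 detection done in
+ * software (ecc.bytes = 13, 4 steps on a 2KiB page), the ECC regions start
+ * at OOB bytes 2, 16, 30 and 44 (13 bytes each, followed by one marker
+ * byte), and the free region starts at byte 58.
+ */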
+
 static int omap_nand_probe(struct platform_device *pdev)
 {
 	struct omap_nand_info		*info;
-	struct omap_nand_platform_data	*pdata;
+	struct omap_nand_platform_data	*pdata = NULL;
 	struct mtd_info			*mtd;
 	struct nand_chip		*nand_chip;
-	struct nand_ecclayout		*ecclayout;
 	int				err;
-	int				i;
 	dma_cap_mask_t			mask;
 	unsigned			sig;
-	unsigned			oob_index;
 	struct resource			*res;
-
-	pdata = dev_get_platdata(&pdev->dev);
-	if (pdata == NULL) {
-		dev_err(&pdev->dev, "platform data missing\n");
-		return -ENODEV;
-	}
+	struct device			*dev = &pdev->dev;
+	int				min_oobbytes = BADBLOCK_MARKER_LENGTH;
+	int				oobbytes_per_step;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
 				GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
-	platform_set_drvdata(pdev, info);
+	info->pdev = pdev;
 
-	info->pdev		= pdev;
-	info->gpmc_cs		= pdata->cs;
-	info->reg		= pdata->reg;
-	info->of_node		= pdata->of_node;
-	info->ecc_opt		= pdata->ecc_opt;
+	if (dev->of_node) {
+		if (omap_get_dt_info(dev, info))
+			return -EINVAL;
+	} else {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (!pdata) {
+			dev_err(&pdev->dev, "platform data missing\n");
+			return -EINVAL;
+		}
+
+		info->gpmc_cs = pdata->cs;
+		info->reg = pdata->reg;
+		info->ecc_opt = pdata->ecc_opt;
+		if (pdata->dev_ready)
+			dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
+
+		info->xfer_type = pdata->xfer_type;
+		info->devsize = pdata->devsize;
+		info->elm_of_node = pdata->elm_of_node;
+		info->flash_bbt = pdata->flash_bbt;
+	}
+
+	platform_set_drvdata(pdev, info);
+	info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+	if (!info->ops) {
+		dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+		return -ENODEV;
+	}
+
 	nand_chip		= &info->nand;
 	mtd			= nand_to_mtd(nand_chip);
 	mtd->dev.parent		= &pdev->dev;
 	nand_chip->ecc.priv	= NULL;
-	nand_set_flash_node(nand_chip, pdata->of_node);
+	nand_set_flash_node(nand_chip, dev->of_node);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
@@ -1688,6 +1868,13 @@
 	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
 	nand_chip->cmd_ctrl  = omap_hwcontrol;
 
+	info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+						    GPIOD_IN);
+	if (IS_ERR(info->ready_gpiod)) {
+		dev_err(dev, "failed to get ready gpio\n");
+		return PTR_ERR(info->ready_gpiod);
+	}
+
 	/*
 	 * If RDY/BSY line is connected to OMAP then use the omap ready
 	 * function and the generic nand_wait function which reads the status
@@ -1695,7 +1882,7 @@
 	 * chip delay which is slightly more than tR (AC Timing) of the NAND
 	 * device and read status register until you get a failure or success
 	 */
-	if (pdata->dev_ready) {
+	if (info->ready_gpiod) {
 		nand_chip->dev_ready = omap_dev_ready;
 		nand_chip->chip_delay = 0;
 	} else {
@@ -1703,21 +1890,25 @@
 		nand_chip->chip_delay = 50;
 	}
 
-	if (pdata->flash_bbt)
-		nand_chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
-	else
-		nand_chip->options |= NAND_SKIP_BBTSCAN;
+	if (info->flash_bbt)
+		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
 
 	/* scan NAND device connected to chip controller */
-	nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+	nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
 	if (nand_scan_ident(mtd, 1, NULL)) {
-		dev_err(&info->pdev->dev, "scan failed, may be bus-width mismatch\n");
+		dev_err(&info->pdev->dev,
+			"scan failed, may be bus-width mismatch\n");
 		err = -ENXIO;
 		goto return_error;
 	}
 
+	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
+		nand_chip->bbt_options |= NAND_BBT_NO_OOB;
+	else
+		nand_chip->options |= NAND_SKIP_BBTSCAN;
+
 	/* re-populate low-level callbacks based on xfer modes */
-	switch (pdata->xfer_type) {
+	switch (info->xfer_type) {
 	case NAND_OMAP_PREFETCH_POLLED:
 		nand_chip->read_buf   = omap_read_buf_pref;
 		nand_chip->write_buf  = omap_write_buf_pref;
@@ -1797,7 +1988,7 @@
 
 	default:
 		dev_err(&pdev->dev,
-			"xfer_type(%d) not supported!\n", pdata->xfer_type);
+			"xfer_type(%d) not supported!\n", info->xfer_type);
 		err = -EINVAL;
 		goto return_error;
 	}
@@ -1809,16 +2000,15 @@
 
 	/*
 	 * Bail out earlier to let NAND_ECC_SOFT code create its own
-	 * ecclayout instead of using ours.
+	 * ooblayout instead of using ours.
 	 */
 	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
 		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
 		goto scan_tail;
 	}
 
 	/* populate MTD interface based on ECC scheme */
-	ecclayout		= &info->oobinfo;
-	nand_chip->ecc.layout	= ecclayout;
 	switch (info->ecc_opt) {
 	case OMAP_ECC_HAM1_CODE_HW:
 		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
@@ -1829,19 +2019,12 @@
 		nand_chip->ecc.calculate        = omap_calculate_ecc;
 		nand_chip->ecc.hwctl            = omap_enable_hwecc;
 		nand_chip->ecc.correct          = omap_correct_data;
-		/* define ECC layout */
-		ecclayout->eccbytes		= nand_chip->ecc.bytes *
-							(mtd->writesize /
-							nand_chip->ecc.size);
-		if (nand_chip->options & NAND_BUSWIDTH_16)
-			oob_index		= BADBLOCK_MARKER_LENGTH;
-		else
-			oob_index		= 1;
-		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
-			ecclayout->eccpos[i]	= oob_index;
-		/* no reserved-marker in ecclayout for this ecc-scheme */
-		ecclayout->oobfree->offset	=
-				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
+
+		if (!(nand_chip->options & NAND_BUSWIDTH_16))
+			min_oobbytes		= 1;
+
 		break;
 
 	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1853,19 +2036,9 @@
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= nand_bch_correct_data;
 		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		/* define ECC layout */
-		ecclayout->eccbytes		= nand_chip->ecc.bytes *
-							(mtd->writesize /
-							nand_chip->ecc.size);
-		oob_index			= BADBLOCK_MARKER_LENGTH;
-		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
-			ecclayout->eccpos[i] = oob_index;
-			if (((i + 1) % nand_chip->ecc.bytes) == 0)
-				oob_index++;
-		}
-		/* include reserved-marker in ecclayout->oobfree calculation */
-		ecclayout->oobfree->offset	= 1 +
-				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+		/* Reserve one byte for the OMAP marker */
+		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
 		/* software bch library is used for locating errors */
 		nand_chip->ecc.priv		= nand_bch_init(mtd);
 		if (!nand_chip->ecc.priv) {
@@ -1887,16 +2060,8 @@
 		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
-		/* define ECC layout */
-		ecclayout->eccbytes		= nand_chip->ecc.bytes *
-							(mtd->writesize /
-							nand_chip->ecc.size);
-		oob_index			= BADBLOCK_MARKER_LENGTH;
-		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
-			ecclayout->eccpos[i]	= oob_index;
-		/* reserved marker already included in ecclayout->eccbytes */
-		ecclayout->oobfree->offset	=
-				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
 
 		err = elm_config(info->elm_dev, BCH4_ECC,
 				 mtd->writesize / nand_chip->ecc.size,
@@ -1914,19 +2079,9 @@
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= nand_bch_correct_data;
 		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		/* define ECC layout */
-		ecclayout->eccbytes		= nand_chip->ecc.bytes *
-							(mtd->writesize /
-							nand_chip->ecc.size);
-		oob_index			= BADBLOCK_MARKER_LENGTH;
-		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
-			ecclayout->eccpos[i] = oob_index;
-			if (((i + 1) % nand_chip->ecc.bytes) == 0)
-				oob_index++;
-		}
-		/* include reserved-marker in ecclayout->oobfree calculation */
-		ecclayout->oobfree->offset	= 1 +
-				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+		/* Reserve one byte for the OMAP marker */
+		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
 		/* software bch library is used for locating errors */
 		nand_chip->ecc.priv		= nand_bch_init(mtd);
 		if (!nand_chip->ecc.priv) {
@@ -1948,6 +2103,8 @@
 		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
 
 		err = elm_config(info->elm_dev, BCH8_ECC,
 				 mtd->writesize / nand_chip->ecc.size,
@@ -1955,16 +2112,6 @@
 		if (err < 0)
 			goto return_error;
 
-		/* define ECC layout */
-		ecclayout->eccbytes		= nand_chip->ecc.bytes *
-							(mtd->writesize /
-							nand_chip->ecc.size);
-		oob_index			= BADBLOCK_MARKER_LENGTH;
-		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
-			ecclayout->eccpos[i]	= oob_index;
-		/* reserved marker already included in ecclayout->eccbytes */
-		ecclayout->oobfree->offset	=
-				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		break;
 
 	case OMAP_ECC_BCH16_CODE_HW:
@@ -1978,6 +2125,8 @@
 		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
 
 		err = elm_config(info->elm_dev, BCH16_ECC,
 				 mtd->writesize / nand_chip->ecc.size,
@@ -1985,16 +2134,6 @@
 		if (err < 0)
 			goto return_error;
 
-		/* define ECC layout */
-		ecclayout->eccbytes		= nand_chip->ecc.bytes *
-							(mtd->writesize /
-							nand_chip->ecc.size);
-		oob_index			= BADBLOCK_MARKER_LENGTH;
-		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
-			ecclayout->eccpos[i]	= oob_index;
-		/* reserved marker already included in ecclayout->eccbytes */
-		ecclayout->oobfree->offset	=
-				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		break;
 	default:
 		dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
@@ -2002,13 +2141,13 @@
 		goto return_error;
 	}
 
-	/* all OOB bytes from oobfree->offset till end off OOB are free */
-	ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
 	/* check if NAND device's OOB is enough to store ECC signatures */
-	if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+	min_oobbytes += (oobbytes_per_step *
+			 (mtd->writesize / nand_chip->ecc.size));
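+	/*
+	 * e.g. (illustrative): BCH8 on a 2KiB page needs
+	 * 2 + 14 * (2048 / 512) = 58 bytes, which fits in a 64-byte OOB.
+	 */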
+	if (mtd->oobsize < min_oobbytes) {
 		dev_err(&info->pdev->dev,
 			"not enough OOB bytes required = %d, available=%d\n",
-			ecclayout->eccbytes, mtd->oobsize);
+			min_oobbytes, mtd->oobsize);
 		err = -EINVAL;
 		goto return_error;
 	}
@@ -2020,7 +2159,10 @@
 		goto return_error;
 	}
 
-	mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+	if (dev->of_node)
+		mtd_device_register(mtd, NULL, 0);
+	else
+		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
 
 	platform_set_drvdata(pdev, mtd);
 
@@ -2051,11 +2193,17 @@
 	return 0;
 }
 
+static const struct of_device_id omap_nand_ids[] = {
+	{ .compatible = "ti,omap2-nand", },
+	{},
+};
+
 static struct platform_driver omap_nand_driver = {
 	.probe		= omap_nand_probe,
 	.remove		= omap_nand_remove,
 	.driver		= {
 		.name	= DRIVER_NAME,
+		.of_match_table = of_match_ptr(omap_nand_ids),
 	},
 };
 
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d4614bf..40a7c4a 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -130,6 +130,7 @@
 	nc->cmd_ctrl = orion_nand_cmd_ctrl;
 	nc->read_buf = orion_nand_read_buf;
 	nc->ecc.mode = NAND_ECC_SOFT;
+	nc->ecc.algo = NAND_ECC_HAMMING;
 
 	if (board->chip_delay)
 		nc->chip_delay = board->chip_delay;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 3ab53ca..5de7591 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -92,8 +92,9 @@
 
 static int pasemi_nand_probe(struct platform_device *ofdev)
 {
+	struct device *dev = &ofdev->dev;
 	struct pci_dev *pdev;
-	struct device_node *np = ofdev->dev.of_node;
+	struct device_node *np = dev->of_node;
 	struct resource res;
 	struct nand_chip *chip;
 	int err = 0;
@@ -107,13 +108,11 @@
 	if (pasemi_nand_mtd)
 		return -ENODEV;
 
-	pr_debug("pasemi_nand at %pR\n", &res);
+	dev_dbg(dev, "pasemi_nand at %pR\n", &res);
 
 	/* Allocate memory for MTD device structure and private data */
 	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
 	if (!chip) {
-		printk(KERN_WARNING
-		       "Unable to allocate PASEMI NAND MTD device structure\n");
 		err = -ENOMEM;
 		goto out;
 	}
@@ -121,7 +120,7 @@
 	pasemi_nand_mtd = nand_to_mtd(chip);
 
 	/* Link the private data with the MTD structure */
-	pasemi_nand_mtd->dev.parent = &ofdev->dev;
+	pasemi_nand_mtd->dev.parent = dev;
 
 	chip->IO_ADDR_R = of_iomap(np, 0);
 	chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -151,6 +150,7 @@
 	chip->write_buf = pasemi_write_buf;
 	chip->chip_delay = 0;
 	chip->ecc.mode = NAND_ECC_SOFT;
+	chip->ecc.algo = NAND_ECC_HAMMING;
 
 	/* Enable the following for a flash based bad block table */
 	chip->bbt_options = NAND_BBT_USE_FLASH;
@@ -162,13 +162,13 @@
 	}
 
 	if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
-		printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+		dev_err(dev, "Unable to register MTD device\n");
 		err = -ENODEV;
 		goto out_lpc;
 	}
 
-	printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n",
-	       res.start, lpcctl);
+	dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
+		 lpcctl);
 
 	return 0;
 
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index e4e50da..415a53a 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -74,6 +74,7 @@
 
 	data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
 	data->chip.ecc.mode = NAND_ECC_SOFT;
+	data->chip.ecc.algo = NAND_ECC_HAMMING;
 
 	platform_set_drvdata(pdev, data);
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index d650885..436dd6d 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_mtd.h>
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
 #define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
@@ -324,6 +323,62 @@
 	{ 0xba20, 16, 16, &timing[3] },
 };
 
+static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int nchunks = mtd->writesize / info->chunk_size;
+
+	if (section >= nchunks)
+		return -ERANGE;
+
+	oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
+			    info->spare_size;
+	oobregion->length = info->ecc_size;
+
+	return 0;
+}
+
+static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int nchunks = mtd->writesize / info->chunk_size;
+
+	if (section >= nchunks)
+		return -ERANGE;
+
+	if (!info->spare_size)
+		return 0;
+
+	oobregion->offset = section * (info->ecc_size + info->spare_size);
+	oobregion->length = info->spare_size;
+	if (!section) {
+		/*
+		 * Bootrom looks in bytes 0 & 5 for bad blocks for the
+		 * 4KB page / 4bit BCH combination.
+		 */
+		if (mtd->writesize == 4096 && info->chunk_size == 2048) {
+			oobregion->offset += 6;
+			oobregion->length -= 6;
+		} else {
+			oobregion->offset += 2;
+			oobregion->length -= 2;
+		}
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
+	.ecc = pxa3xx_ooblayout_ecc,
+	.free = pxa3xx_ooblayout_free,
+};
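+
+/*
+ * For instance (illustrative figures): a 4KiB page split into 2KiB chunks
+ * with spare_size = 32 and ecc_size = 32 yields ECC at OOB bytes 32..63 and
+ * 96..127 and free bytes at 6..31 and 64..95, matching the static
+ * ecc_layout_4KB_bch4bit table removed below.
+ */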
+
 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
 
@@ -347,41 +402,6 @@
 	.pattern = bbt_mirror_pattern
 };
 
-static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
-	.eccbytes = 32,
-	.eccpos = {
-		32, 33, 34, 35, 36, 37, 38, 39,
-		40, 41, 42, 43, 44, 45, 46, 47,
-		48, 49, 50, 51, 52, 53, 54, 55,
-		56, 57, 58, 59, 60, 61, 62, 63},
-	.oobfree = { {2, 30} }
-};
-
-static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
-	.eccbytes = 64,
-	.eccpos = {
-		32,  33,  34,  35,  36,  37,  38,  39,
-		40,  41,  42,  43,  44,  45,  46,  47,
-		48,  49,  50,  51,  52,  53,  54,  55,
-		56,  57,  58,  59,  60,  61,  62,  63,
-		96,  97,  98,  99,  100, 101, 102, 103,
-		104, 105, 106, 107, 108, 109, 110, 111,
-		112, 113, 114, 115, 116, 117, 118, 119,
-		120, 121, 122, 123, 124, 125, 126, 127},
-	/* Bootrom looks in bytes 0 & 5 for bad blocks */
-	.oobfree = { {6, 26}, { 64, 32} }
-};
-
-static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
-	.eccbytes = 128,
-	.eccpos = {
-		32,  33,  34,  35,  36,  37,  38,  39,
-		40,  41,  42,  43,  44,  45,  46,  47,
-		48,  49,  50,  51,  52,  53,  54,  55,
-		56,  57,  58,  59,  60,  61,  62,  63},
-	.oobfree = { }
-};
-
 #define NDTR0_tCH(c)	(min((c), 7) << 19)
 #define NDTR0_tCS(c)	(min((c), 7) << 16)
 #define NDTR0_tWH(c)	(min((c), 7) << 11)
@@ -1546,9 +1566,12 @@
 }
 
 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
-			struct nand_ecc_ctrl *ecc,
+			struct mtd_info *mtd,
 			int strength, int ecc_stepsize, int page_size)
 {
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
 	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
 		info->nfullchunks = 1;
 		info->ntotalchunks = 1;
@@ -1582,7 +1605,7 @@
 		info->ecc_size = 32;
 		ecc->mode = NAND_ECC_HW;
 		ecc->size = info->chunk_size;
-		ecc->layout = &ecc_layout_2KB_bch4bit;
+		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
 		ecc->strength = 16;
 
 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
@@ -1594,7 +1617,7 @@
 		info->ecc_size = 32;
 		ecc->mode = NAND_ECC_HW;
 		ecc->size = info->chunk_size;
-		ecc->layout = &ecc_layout_4KB_bch4bit;
+		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
 		ecc->strength = 16;
 
 	/*
@@ -1612,7 +1635,7 @@
 		info->ecc_size = 32;
 		ecc->mode = NAND_ECC_HW;
 		ecc->size = info->chunk_size;
-		ecc->layout = &ecc_layout_4KB_bch8bit;
+		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
 		ecc->strength = 16;
 	} else {
 		dev_err(&info->pdev->dev,
@@ -1651,6 +1674,12 @@
 	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
 		nand_writel(info, NDECCCTRL, 0x0);
 
+	if (pdata->flash_bbt)
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	chip->ecc.strength = pdata->ecc_strength;
+	chip->ecc.size = pdata->ecc_step_size;
+
 	if (nand_scan_ident(mtd, 1, NULL))
 		return -ENODEV;
 
@@ -1663,13 +1692,12 @@
 		}
 	}
 
-	if (pdata->flash_bbt) {
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
 		/*
 		 * We'll use a bad block table stored in-flash and don't
 		 * allow writing the bad block marker to the flash.
 		 */
-		chip->bbt_options |= NAND_BBT_USE_FLASH |
-				     NAND_BBT_NO_OOB_BBM;
+		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
 		chip->bbt_td = &bbt_main_descr;
 		chip->bbt_md = &bbt_mirror_descr;
 	}
@@ -1689,10 +1717,9 @@
 		}
 	}
 
-	if (pdata->ecc_strength && pdata->ecc_step_size) {
-		ecc_strength = pdata->ecc_strength;
-		ecc_step = pdata->ecc_step_size;
-	} else {
+	ecc_strength = chip->ecc.strength;
+	ecc_step = chip->ecc.size;
+	if (!ecc_strength || !ecc_step) {
 		ecc_strength = chip->ecc_strength_ds;
 		ecc_step = chip->ecc_step_ds;
 	}
@@ -1703,7 +1730,7 @@
 		ecc_step = 512;
 	}
 
-	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+	ret = pxa_ecc_init(info, mtd, ecc_strength,
 			   ecc_step, mtd->writesize);
 	if (ret)
 		return ret;
@@ -1903,15 +1930,6 @@
 	if (of_get_property(np, "marvell,nand-keep-config", NULL))
 		pdata->keep_config = 1;
 	of_property_read_u32(np, "num-cs", &pdata->num_cs);
-	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
-
-	pdata->ecc_strength = of_get_nand_ecc_strength(np);
-	if (pdata->ecc_strength < 0)
-		pdata->ecc_strength = 0;
-
-	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
-	if (pdata->ecc_step_size < 0)
-		pdata->ecc_step_size = 0;
 
 	pdev->dev.platform_data = pdata;
 
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index f550a57..de7d28e 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -21,7 +21,6 @@
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_mtd.h>
 #include <linux/delay.h>
 
 /* NANDc reg offsets */
@@ -1437,7 +1436,6 @@
 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
 	u8 *oob = chip->oob_poi;
-	int free_boff;
 	int data_size, oob_size;
 	int ret, status = 0;
 
@@ -1451,12 +1449,11 @@
 
 	/* calculate the data and oob size for the last codeword/step */
 	data_size = ecc->size - ((ecc->steps - 1) << 2);
-	oob_size = ecc->steps << 2;
-
-	free_boff = ecc->layout->oobfree[0].offset;
+	oob_size = mtd->oobavail;
 
 	/* override new oob content to last codeword */
-	memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);
+	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
+				    0, mtd->oobavail);
 
 	set_address(host, host->cw_size * (ecc->steps - 1), page);
 	update_rw_regs(host, 1, false);
@@ -1710,61 +1707,52 @@
  * This layout is read as is when ECC is disabled. When ECC is enabled, the
  * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
  * and assumed as 0xffs when we read a page/oob. The ECC, unused and
- * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e,
- * ecc->bytes is the sum of the three).
+ * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
+ * the sum of the three).
  */
-
-static struct nand_ecclayout *
-qcom_nand_create_layout(struct qcom_nand_host *host)
+static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
 {
-	struct nand_chip *chip = &host->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
 	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	struct nand_ecclayout *layout;
-	int i, j, steps, pos = 0, shift = 0;
 
-	layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
-	if (!layout)
-		return NULL;
+	if (section > 1)
+		return -ERANGE;
 
-	steps = mtd->writesize / ecc->size;
-	layout->eccbytes = steps * ecc->bytes;
-
-	layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
-	layout->oobfree[0].length = steps << 2;
-
-	/*
-	 * the oob bytes in the first n - 1 codewords are all grouped together
-	 * in the format:
-	 * DUMMY_BBM + UNUSED + ECC
-	 */
-	for (i = 0; i < steps - 1; i++) {
-		for (j = 0; j < ecc->bytes; j++)
-			layout->eccpos[pos++] = i * ecc->bytes + j;
+	if (!section) {
+		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
+				    host->bbm_size;
+		oobregion->offset = 0;
+	} else {
+		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
+		oobregion->offset = mtd->oobsize - oobregion->length;
 	}
 
-	/*
-	 * the oob bytes in the last codeword are grouped in the format:
-	 * BBM + FREE OOB + UNUSED + ECC
-	 */
-
-	/* fill up the bbm positions */
-	for (j = 0; j < host->bbm_size; j++)
-		layout->eccpos[pos++] = i * ecc->bytes + j;
-
-	/*
-	 * fill up the ecc and reserved positions, their indices are offseted
-	 * by the free oob region
-	 */
-	shift = layout->oobfree[0].length + host->bbm_size;
-
-	for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
-		layout->eccpos[pos++] = i * ecc->bytes + shift + j;
-
-	return layout;
+	return 0;
 }
 
+static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = ecc->steps * 4;
+	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
+	.ecc = qcom_nand_ooblayout_ecc,
+	.free = qcom_nand_ooblayout_free,
+};
+
 static int qcom_nand_host_setup(struct qcom_nand_host *host)
 {
 	struct nand_chip *chip = &host->chip;
@@ -1851,9 +1839,7 @@
 
 	ecc->mode = NAND_ECC_HW;
 
-	ecc->layout = qcom_nand_create_layout(host);
-	if (!ecc->layout)
-		return -ENOMEM;
+	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
 
 	cwperpage = mtd->writesize / ecc->size;
 
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 9c9397b..d9309cf 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -84,11 +84,33 @@
 
 /* new oob placement block for use with hardware ecc generation
  */
+static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
 
-static struct nand_ecclayout nand_hw_eccoob = {
-	.eccbytes = 3,
-	.eccpos = {0, 1, 2},
-	.oobfree = {{8, 8}}
+	oobregion->offset = 0;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 8;
+	oobregion->length = 8;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
+	.ecc = s3c2410_ooblayout_ecc,
+	.free = s3c2410_ooblayout_free,
 };
 
 /* controller and mtd information */
@@ -542,7 +564,8 @@
 	diff0 |= (diff1 << 8);
 	diff0 |= (diff2 << 16);
 
-	if ((diff0 & ~(1<<fls(diff0))) == 0)
+	/* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
+	if ((diff0 & (diff0 - 1)) == 0)
 		return 1;
 
 	return -1;
@@ -859,6 +882,7 @@
 	}
 #else
 	chip->ecc.mode	    = NAND_ECC_SOFT;
+	chip->ecc.algo	= NAND_ECC_HAMMING;
 #endif
 
 	if (set->disable_ecc)
@@ -919,7 +943,7 @@
 	} else {
 		chip->ecc.size	    = 512;
 		chip->ecc.bytes	    = 3;
-		chip->ecc.layout    = &nand_hw_eccoob;
+		mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
 	}
 }
 
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 4814402..6fa3bcd 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -31,7 +31,6 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_mtd.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sh_dma.h>
@@ -43,26 +42,73 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/sh_flctl.h>
 
-static struct nand_ecclayout flctl_4secc_oob_16 = {
-	.eccbytes = 10,
-	.eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
-	.oobfree = {
-		{.offset = 12,
-		. length = 4} },
+static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 12;
+	oobregion->length = 4;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
+	.ecc = flctl_4secc_ooblayout_sp_ecc,
+	.free = flctl_4secc_ooblayout_sp_free,
 };
 
-static struct nand_ecclayout flctl_4secc_oob_64 = {
-	.eccbytes = 4 * 10,
-	.eccpos = {
-		 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
-		22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-		38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-		54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
-	.oobfree = {
-		{.offset =  2, .length = 4},
-		{.offset = 16, .length = 6},
-		{.offset = 32, .length = 6},
-		{.offset = 48, .length = 6} },
+static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 6;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = section * 16;
+	oobregion->length = 6;
+
+	if (!section) {
+		oobregion->offset += 2;
+		oobregion->length -= 2;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
+	.ecc = flctl_4secc_ooblayout_lp_ecc,
+	.free = flctl_4secc_ooblayout_lp_free,
 };
 
 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -987,10 +1033,10 @@
 
 	if (flctl->hwecc) {
 		if (mtd->writesize == 512) {
-			chip->ecc.layout = &flctl_4secc_oob_16;
+			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
 			chip->badblock_pattern = &flctl_4secc_smallpage;
 		} else {
-			chip->ecc.layout = &flctl_4secc_oob_64;
+			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
 			chip->badblock_pattern = &flctl_4secc_largepage;
 		}
 
@@ -1005,6 +1051,7 @@
 		flctl->flcmncr_base |= _4ECCEN;
 	} else {
 		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_HAMMING;
 	}
 
 	return 0;
@@ -1044,8 +1091,6 @@
 	const struct of_device_id *match;
 	struct flctl_soc_config *config;
 	struct sh_flctl_platform_data *pdata;
-	struct device_node *dn = dev->of_node;
-	int ret;
 
 	match = of_match_device(of_flctl_match, dev);
 	if (match)
@@ -1065,15 +1110,6 @@
 	pdata->has_hwecc = config->has_hwecc;
 	pdata->use_holden = config->use_holden;
 
-	/* parse user defined options */
-	ret = of_get_nand_bus_width(dn);
-	if (ret == 16)
-		pdata->flcmncr_val |= SEL_16BIT;
-	else if (ret != 8) {
-		dev_err(dev, "%s: invalid bus width\n", __func__);
-		return NULL;
-	}
-
 	return pdata;
 }
 
@@ -1136,15 +1172,14 @@
 	nand->chip_delay = 20;
 
 	nand->read_byte = flctl_read_byte;
+	nand->read_word = flctl_read_word;
 	nand->write_buf = flctl_write_buf;
 	nand->read_buf = flctl_read_buf;
 	nand->select_chip = flctl_select_chip;
 	nand->cmdfunc = flctl_cmdfunc;
 
-	if (pdata->flcmncr_val & SEL_16BIT) {
+	if (pdata->flcmncr_val & SEL_16BIT)
 		nand->options |= NAND_BUSWIDTH_16;
-		nand->read_word = flctl_read_word;
-	}
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
@@ -1155,6 +1190,16 @@
 	if (ret)
 		goto err_chip;
 
+	if (nand->options & NAND_BUSWIDTH_16) {
+		/*
+		 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+		 * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
+		 * flctl->flcmncr_base to pdata->flcmncr_val.
+		 */
+		pdata->flcmncr_val |= SEL_16BIT;
+		flctl->flcmncr_base = pdata->flcmncr_val;
+	}
+
 	ret = flctl_chip_init_tail(flctl_mtd);
 	if (ret)
 		goto err_chip;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index b7d1b55..064ca17 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -148,6 +148,7 @@
 	/* Link the private data with the MTD structure */
 	mtd = nand_to_mtd(this);
 	mtd->dev.parent = &pdev->dev;
+	mtd_set_ooblayout(mtd, data->ecc_layout);
 
 	platform_set_drvdata(pdev, sharpsl);
 
@@ -170,7 +171,6 @@
 	this->ecc.bytes = 3;
 	this->ecc.strength = 1;
 	this->badblock_pattern = data->badblock_pattern;
-	this->ecc.layout = data->ecc_layout;
 	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
 	this->ecc.calculate = sharpsl_nand_calculate_ecc;
 	this->ecc.correct = nand_correct_data;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index c514740..5939dff 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -12,14 +12,47 @@
 #include <linux/sizes.h>
 #include "sm_common.h"
 
-static struct nand_ecclayout nand_oob_sm = {
-	.eccbytes = 6,
-	.eccpos = {8, 9, 10, 13, 14, 15},
-	.oobfree = {
-		{.offset = 0 , .length = 4}, /* reserved */
-		{.offset = 6 , .length = 2}, /* LBA1 */
-		{.offset = 11, .length = 2}  /* LBA2 */
+static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	oobregion->length = 3;
+	oobregion->offset = ((section + 1) * 8) - 3;
+
+	return 0;
+}
+
+static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	switch (section) {
+	case 0:
+		/* reserved */
+		oobregion->offset = 0;
+		oobregion->length = 4;
+		break;
+	case 1:
+		/* LBA1 */
+		oobregion->offset = 6;
+		oobregion->length = 2;
+		break;
+	case 2:
+		/* LBA2 */
+		oobregion->offset = 11;
+		oobregion->length = 2;
+		break;
+	default:
+		return -ERANGE;
 	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_ops = {
+	.ecc = oob_sm_ooblayout_ecc,
+	.free = oob_sm_ooblayout_free,
 };
 
 /* NOTE: This layout is not compatible with SmartMedia, */
@@ -28,15 +61,43 @@
 /* If you use smftl, it will bypass this and work correctly */
 /* If you do not, then you break SmartMedia compliance anyway */
 
-static struct nand_ecclayout nand_oob_sm_small = {
-	.eccbytes = 3,
-	.eccpos = {0, 1, 2},
-	.oobfree = {
-		{.offset = 3 , .length = 2}, /* reserved */
-		{.offset = 6 , .length = 2}, /* LBA1 */
-	}
-};
+static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
 
+	oobregion->length = 3;
+	oobregion->offset = 0;
+
+	return 0;
+}
+
+static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	switch (section) {
+	case 0:
+		/* reserved */
+		oobregion->offset = 3;
+		oobregion->length = 2;
+		break;
+	case 1:
+		/* LBA1 */
+		oobregion->offset = 6;
+		oobregion->length = 2;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_small_ops = {
+	.ecc = oob_sm_small_ooblayout_ecc,
+	.free = oob_sm_small_ooblayout_free,
+};
 
 static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
@@ -121,9 +182,9 @@
 
 	/* ECC layout */
 	if (mtd->writesize == SM_SECTOR_SIZE)
-		chip->ecc.layout = &nand_oob_sm;
+		mtd_set_ooblayout(mtd, &oob_sm_ops);
 	else if (mtd->writesize == SM_SMALL_PAGE)
-		chip->ecc.layout = &nand_oob_sm_small;
+		mtd_set_ooblayout(mtd, &oob_sm_small_ops);
 	else
 		return -ENODEV;
 
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index e3305f9..888fd31 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -180,6 +180,7 @@
 	nand_chip->dev_ready = socrates_nand_device_ready;
 
 	nand_chip->ecc.mode = NAND_ECC_SOFT;	/* enable ECC */
+	nand_chip->ecc.algo = NAND_ECC_HAMMING;
 
 	/* TODO: I have no idea what real delay is. */
 	nand_chip->chip_delay = 20;		/* 20us command delay time */
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 1c03eee..a83a690 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -30,7 +30,6 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
-#include <linux/of_mtd.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
@@ -39,7 +38,7 @@
 #include <linux/dmaengine.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
 
 #define NFC_REG_CTL		0x0000
 #define NFC_REG_ST		0x0004
@@ -155,7 +154,7 @@
 /* define bit use in NFC_ECC_ST */
 #define NFC_ECC_ERR(x)		BIT(x)
 #define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
-#define NFC_ECC_ERR_CNT(b, x)	(((x) >> ((b) * 8)) & 0xff)
+#define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)
 
 #define NFC_DEFAULT_TIMEOUT_MS	1000
 
@@ -212,12 +211,9 @@
  * sunxi HW ECC infos: stores information related to HW ECC support
  *
  * @mode:	the sunxi ECC mode field deduced from ECC requirements
- * @layout:	the OOB layout depending on the ECC requirements and the
- *		selected ECC mode
  */
 struct sunxi_nand_hw_ecc {
 	int mode;
-	struct nand_ecclayout layout;
 };
 
 /*
@@ -239,6 +235,10 @@
 	u32 timing_cfg;
 	u32 timing_ctl;
 	int selected;
+	int addr_cycles;
+	u32 addr[2];
+	int cmd_cycles;
+	u8 cmd[2];
 	int nsels;
 	struct sunxi_nand_chip_sel sels[0];
 };
@@ -298,54 +298,71 @@
 	return IRQ_HANDLED;
 }
 
-static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags,
-			      unsigned int timeout_ms)
+static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
+				 bool use_polling, unsigned int timeout_ms)
 {
-	init_completion(&nfc->complete);
+	int ret;
 
-	writel(flags, nfc->regs + NFC_REG_INT);
+	if (events & ~NFC_INT_MASK)
+		return -EINVAL;
 
 	if (!timeout_ms)
 		timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
 
-	if (!wait_for_completion_timeout(&nfc->complete,
-					 msecs_to_jiffies(timeout_ms))) {
-		dev_err(nfc->dev, "wait interrupt timedout\n");
-		return -ETIMEDOUT;
+	if (!use_polling) {
+		init_completion(&nfc->complete);
+
+		writel(events, nfc->regs + NFC_REG_INT);
+
+		ret = wait_for_completion_timeout(&nfc->complete,
+						msecs_to_jiffies(timeout_ms));
+
+		writel(0, nfc->regs + NFC_REG_INT);
+	} else {
+		u32 status;
+
+		ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+					 (status & events) == events, 1,
+					 timeout_ms * 1000);
 	}
 
-	return 0;
+	writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+
+	if (ret)
+		dev_err(nfc->dev, "wait interrupt timed out\n");
+
+	return ret;
 }
 
 static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
 {
-	unsigned long timeout = jiffies +
-				msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+	u32 status;
+	int ret;
 
-	do {
-		if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS))
-			return 0;
-	} while (time_before(jiffies, timeout));
+	ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+				 !(status & NFC_CMD_FIFO_STATUS), 1,
+				 NFC_DEFAULT_TIMEOUT_MS * 1000);
+	if (ret)
+		dev_err(nfc->dev, "wait for empty cmd FIFO timed out\n");
 
-	dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
-	return -ETIMEDOUT;
+	return ret;
 }
 
 static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
 {
-	unsigned long timeout = jiffies +
-				msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+	u32 ctl;
+	int ret;
 
 	writel(0, nfc->regs + NFC_REG_ECC_CTL);
 	writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
 
-	do {
-		if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET))
-			return 0;
-	} while (time_before(jiffies, timeout));
+	ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
+				 !(ctl & NFC_RESET), 1,
+				 NFC_DEFAULT_TIMEOUT_MS * 1000);
+	if (ret)
+		dev_err(nfc->dev, "wait for NAND controller reset timed out\n");
 
-	dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
-	return -ETIMEDOUT;
+	return ret;
 }
 
 static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
@@ -354,7 +371,6 @@
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
 	struct sunxi_nand_rb *rb;
-	unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20);
 	int ret;
 
 	if (sunxi_nand->selected < 0)
@@ -366,12 +382,6 @@
 	case RB_NATIVE:
 		ret = !!(readl(nfc->regs + NFC_REG_ST) &
 			 NFC_RB_STATE(rb->info.nativeid));
-		if (ret)
-			break;
-
-		sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
-		ret = !!(readl(nfc->regs + NFC_REG_ST) &
-			 NFC_RB_STATE(rb->info.nativeid));
 		break;
 	case RB_GPIO:
 		ret = gpio_get_value(rb->info.gpio);
@@ -407,7 +417,7 @@
 		sel = &sunxi_nand->sels[chip];
 
 		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
-		       NFC_PAGE_SHIFT(nand->page_shift - 10);
+		       NFC_PAGE_SHIFT(nand->page_shift);
 		if (sel->rb.type == RB_NONE) {
 			nand->dev_ready = NULL;
 		} else {
@@ -452,7 +462,7 @@
 		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
 		writel(tmp, nfc->regs + NFC_REG_CMD);
 
-		ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
 		if (ret)
 			break;
 
@@ -487,7 +497,7 @@
 		      NFC_ACCESS_DIR;
 		writel(tmp, nfc->regs + NFC_REG_CMD);
 
-		ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
 		if (ret)
 			break;
 
@@ -511,32 +521,54 @@
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
 	int ret;
-	u32 tmp;
 
 	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
 	if (ret)
 		return;
 
-	if (ctrl & NAND_CTRL_CHANGE) {
-		tmp = readl(nfc->regs + NFC_REG_CTL);
-		if (ctrl & NAND_NCE)
-			tmp |= NFC_CE_CTL;
-		else
-			tmp &= ~NFC_CE_CTL;
-		writel(tmp, nfc->regs + NFC_REG_CTL);
-	}
+	if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
+	    !(ctrl & (NAND_CLE | NAND_ALE))) {
+		u32 cmd = 0;
 
-	if (dat == NAND_CMD_NONE)
-		return;
+		if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
+			return;
+
+		if (sunxi_nand->cmd_cycles--)
+			cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
+
+		if (sunxi_nand->cmd_cycles--) {
+			cmd |= NFC_SEND_CMD2;
+			writel(sunxi_nand->cmd[1],
+			       nfc->regs + NFC_REG_RCMD_SET);
+		}
+
+		sunxi_nand->cmd_cycles = 0;
+
+		if (sunxi_nand->addr_cycles) {
+			cmd |= NFC_SEND_ADR |
+			       NFC_ADR_NUM(sunxi_nand->addr_cycles);
+			writel(sunxi_nand->addr[0],
+			       nfc->regs + NFC_REG_ADDR_LOW);
+		}
+
+		if (sunxi_nand->addr_cycles > 4)
+			writel(sunxi_nand->addr[1],
+			       nfc->regs + NFC_REG_ADDR_HIGH);
+
+		writel(cmd, nfc->regs + NFC_REG_CMD);
+		sunxi_nand->addr[0] = 0;
+		sunxi_nand->addr[1] = 0;
+		sunxi_nand->addr_cycles = 0;
+		sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	}
 
 	if (ctrl & NAND_CLE) {
-		writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD);
-	} else {
-		writel(dat, nfc->regs + NFC_REG_ADDR_LOW);
-		writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD);
+		sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
+	} else if (ctrl & NAND_ALE) {
+		sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
+				dat << ((sunxi_nand->addr_cycles % 4) * 8);
+		sunxi_nand->addr_cycles++;
 	}
-
-	sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
 }
 
 /* These seed values have been extracted from Allwinner's BSP */
@@ -717,7 +749,8 @@
 	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
 	ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
 		     NFC_ECC_BLOCK_SIZE_MSK);
-	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION;
+	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
+		   NFC_ECC_PIPELINE;
 
 	writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
 }
@@ -739,18 +772,106 @@
 	buf[3] = user_data >> 24;
 }
 
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
+{
+	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
+						int step, bool bbm, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
+				   oob);
+
+	/* De-randomize the Bad Block Marker. */
+	if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
+		sunxi_nfc_randomize_bbm(mtd, page, oob);
+}
+
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
+						const u8 *oob, int step,
+						bool bbm, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u8 user_data[4];
+
+	/* Randomize the Bad Block Marker. */
+	if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
+		memcpy(user_data, oob, sizeof(user_data));
+		sunxi_nfc_randomize_bbm(mtd, page, user_data);
+		oob = user_data;
+	}
+
+	writel(sunxi_nfc_buf_to_user_data(oob),
+	       nfc->regs + NFC_REG_USER_DATA(step));
+}
+
+static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
+					  unsigned int *max_bitflips, int ret)
+{
+	if (ret < 0) {
+		mtd->ecc_stats.failed++;
+	} else {
+		mtd->ecc_stats.corrected += ret;
+		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+	}
+}
+
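+/*
+ * Decode the controller ECC status for @step: returns -EBADMSG for an
+ * uncorrectable chunk and the number of corrected bitflips otherwise. When
+ * the hardware reports an all-0x00 or all-0xff pattern, @data and @oob (if
+ * provided) are filled with that pattern and *erased is set for the 0xff
+ * (erased page) case.
+ */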
+static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
+				    int step, bool *erased)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	u32 status, tmp;
+
+	*erased = false;
+
+	status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+	if (status & NFC_ECC_ERR(step))
+		return -EBADMSG;
+
+	if (status & NFC_ECC_PAT_FOUND(step)) {
+		u8 pattern;
+
+		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
+			pattern = 0x0;
+		} else {
+			pattern = 0xff;
+			*erased = true;
+		}
+
+		if (data)
+			memset(data, pattern, ecc->size);
+
+		if (oob)
+			memset(oob, pattern, ecc->bytes + 4);
+
+		return 0;
+	}
+
+	tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
+
+	return NFC_ECC_ERR_CNT(step, tmp);
+}
+
 static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
 				       u8 *data, int data_off,
 				       u8 *oob, int oob_off,
 				       int *cur_off,
 				       unsigned int *max_bitflips,
-				       bool bbm, int page)
+				       bool bbm, bool oob_required, int page)
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
 	struct nand_ecc_ctrl *ecc = &nand->ecc;
 	int raw_mode = 0;
-	u32 status;
+	bool erased;
 	int ret;
 
 	if (*cur_off != data_off)
@@ -769,34 +890,19 @@
 	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
 	       nfc->regs + NFC_REG_CMD);
 
-	ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
 	sunxi_nfc_randomizer_disable(mtd);
 	if (ret)
 		return ret;
 
 	*cur_off = oob_off + ecc->bytes + 4;
 
-	status = readl(nfc->regs + NFC_REG_ECC_ST);
-	if (status & NFC_ECC_PAT_FOUND(0)) {
-		u8 pattern = 0xff;
-
-		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
-			pattern = 0x0;
-
-		memset(data, pattern, ecc->size);
-		memset(oob, pattern, ecc->bytes + 4);
-
+	ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+				       &erased);
+	if (erased)
 		return 1;
-	}
 
-	ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
-
-	memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
-
-	nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-	sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
-
-	if (status & NFC_ECC_ERR(0)) {
+	if (ret < 0) {
 		/*
 		 * Re-read the data with the randomizer disabled to identify
 		 * bitflips in erased pages.
@@ -804,34 +910,33 @@
 		if (nand->options & NAND_NEED_SCRAMBLING) {
 			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
 			nand->read_buf(mtd, data, ecc->size);
-			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-			nand->read_buf(mtd, oob, ecc->bytes + 4);
+		} else {
+			memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
+				      ecc->size);
 		}
 
+		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+		nand->read_buf(mtd, oob, ecc->bytes + 4);
+
 		ret = nand_check_erased_ecc_chunk(data,	ecc->size,
 						  oob, ecc->bytes + 4,
 						  NULL, 0, ecc->strength);
 		if (ret >= 0)
 			raw_mode = 1;
 	} else {
-		/*
-		 * The engine protects 4 bytes of OOB data per chunk.
-		 * Retrieve the corrected OOB bytes.
-		 */
-		sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
-					   oob);
+		memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
 
-		/* De-randomize the Bad Block Marker. */
-		if (bbm && nand->options & NAND_NEED_SCRAMBLING)
-			sunxi_nfc_randomize_bbm(mtd, page, oob);
+		if (oob_required) {
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+			sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
+						      true, page);
+
+			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
+							    bbm, page);
+		}
 	}
 
-	if (ret < 0) {
-		mtd->ecc_stats.failed++;
-	} else {
-		mtd->ecc_stats.corrected += ret;
-		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
-	}
+	sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
 
 	return raw_mode;
 }
@@ -848,7 +953,7 @@
 	if (len <= 0)
 		return;
 
-	if (*cur_off != offset)
+	if (!cur_off || *cur_off != offset)
 		nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
 			      offset + mtd->writesize, -1);
 
@@ -858,12 +963,8 @@
 		sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
 					      false, page);
 
-	*cur_off = mtd->oobsize + mtd->writesize;
-}
-
-static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
-{
-	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+	if (cur_off)
+		*cur_off = mtd->oobsize + mtd->writesize;
 }
 
 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
@@ -882,19 +983,6 @@
 
 	sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
 
-	/* Fill OOB data in */
-	if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
-		u8 user_data[4];
-
-		memcpy(user_data, oob, 4);
-		sunxi_nfc_randomize_bbm(mtd, page, user_data);
-		writel(sunxi_nfc_buf_to_user_data(user_data),
-		       nfc->regs + NFC_REG_USER_DATA(0));
-	} else {
-		writel(sunxi_nfc_buf_to_user_data(oob),
-		       nfc->regs + NFC_REG_USER_DATA(0));
-	}
-
 	if (data_off + ecc->size != oob_off)
 		nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
 
@@ -903,11 +991,13 @@
 		return ret;
 
 	sunxi_nfc_randomizer_enable(mtd);
+	sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
+
 	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
 	       NFC_ACCESS_DIR | NFC_ECC_OP,
 	       nfc->regs + NFC_REG_CMD);
 
-	ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
 	sunxi_nfc_randomizer_disable(mtd);
 	if (ret)
 		return ret;
@@ -929,13 +1019,14 @@
 	if (len <= 0)
 		return;
 
-	if (*cur_off != offset)
+	if (!cur_off || *cur_off != offset)
 		nand->cmdfunc(mtd, NAND_CMD_RNDIN,
 			      offset + mtd->writesize, -1);
 
 	sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
 
-	*cur_off = mtd->oobsize + mtd->writesize;
+	if (cur_off)
+		*cur_off = mtd->oobsize + mtd->writesize;
 }
 
 static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
@@ -958,7 +1049,7 @@
 		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
 						  oob_off + mtd->writesize,
 						  &cur_off, &max_bitflips,
-						  !i, page);
+						  !i, oob_required, page);
 		if (ret < 0)
 			return ret;
 		else if (ret)
@@ -974,6 +1065,39 @@
 	return max_bitflips;
 }
 
+static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
+					 struct nand_chip *chip,
+					 u32 data_offs, u32 readlen,
+					 u8 *bufpoi, int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ret, i, cur_off = 0;
+	unsigned int max_bitflips = 0;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+	for (i = data_offs / ecc->size;
+	     i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		u8 *data = bufpoi + data_off;
+		u8 *oob = chip->oob_poi + oob_off;
+
+		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
+						  oob,
+						  oob_off + mtd->writesize,
+						  &cur_off, &max_bitflips, !i,
+						  false, page);
+		if (ret < 0)
+			return ret;
+	}
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return max_bitflips;
+}
+
 static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
 				       struct nand_chip *chip,
 				       const uint8_t *buf, int oob_required,
@@ -1026,7 +1150,9 @@
 
 		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
 						  oob_off, &cur_off,
-						  &max_bitflips, !i, page);
+						  &max_bitflips, !i,
+						  oob_required,
+						  page);
 		if (ret < 0)
 			return ret;
 		else if (ret)
@@ -1074,6 +1200,40 @@
 	return 0;
 }
 
+static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
+					    struct nand_chip *chip,
+					    int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	chip->pagebuf = -1;
+
+	return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+}
+
+static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
+					     struct nand_chip *chip,
+					     int page)
+{
+	int ret, status;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+	chip->pagebuf = -1;
+
+	memset(chip->buffers->databuf, 0xff, mtd->writesize);
+	ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+	if (ret)
+		return ret;
+
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
 static const s32 tWB_lut[] = {6, 12, 16, 20};
 static const s32 tRHW_lut[] = {4, 8, 12, 20};
 
@@ -1101,6 +1261,7 @@
 	struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
 	u32 min_clk_period = 0;
 	s32 tWB, tADL, tWHR, tRHW, tCAD;
+	long real_clk_rate;
 
 	/* T1 <=> tCLS */
 	if (timings->tCLS_min > min_clk_period)
@@ -1163,6 +1324,18 @@
 		min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
 
 	/* T16 - T19 + tCAD */
+	if (timings->tWB_max > (min_clk_period * 20))
+		min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
+
+	if (timings->tADL_min > (min_clk_period * 32))
+		min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
+
+	if (timings->tWHR_min > (min_clk_period * 32))
+		min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
+
+	if (timings->tRHW_min > (min_clk_period * 20))
+		min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+
 	tWB  = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
 					min_clk_period);
 	if (tWB < 0) {
@@ -1198,23 +1371,26 @@
 	/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
 	chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
 
+	/* Convert min_clk_period from picoseconds to nanoseconds */
+	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+	/*
+	 * Unlike what is stated in Allwinner datasheet, the clk_rate should
+	 * Unlike what is stated in the Allwinner datasheet, the clk_rate should
+	 * This new formula was verified with a scope and validated by
+	 * Allwinner engineers.
+	 */
+	chip->clk_rate = NSEC_PER_SEC / min_clk_period;
+	real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+
 	/*
 	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
 	 * output cycle timings shall be used if the host drives tRC less than
 	 * 30 ns.
 	 */
-	chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0;
-
-	/* Convert min_clk_period from picoseconds to nanoseconds */
-	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
-
-	/*
-	 * Convert min_clk_period into a clk frequency, then get the
-	 * appropriate rate for the NAND controller IP given this formula
-	 * (specified in the datasheet):
-	 * nand clk_rate = 2 * min_clk_rate
-	 */
-	chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
+	min_clk_period = NSEC_PER_SEC / real_clk_rate;
+	chip->timing_ctl = ((min_clk_period * 2) < 30) ?
+			   NFC_TIMING_CTL_EDO : 0;
 
 	return 0;
 }
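
A standalone sketch (illustration only, outside the patch) of the timing math above: it converts a made-up min_clk_period of 25000 ps to nanoseconds, derives the clock rate with the 1 / min_clk_period formula, and checks the EDO condition against the rounded rate. NSEC_PER_SEC and DIV_ROUND_UP are redefined locally, and the clock-framework rounding is replaced by an identity stand-in for clk_round_rate().

#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long min_clk_period = 25000;	/* picoseconds, made-up value */
	unsigned long clk_rate, real_clk_rate, period_ns;

	/* Convert min_clk_period from picoseconds to nanoseconds */
	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);

	/* clk_rate = 1 / min_clk_period, not 2 / min_clk_period */
	clk_rate = NSEC_PER_SEC / min_clk_period;

	/* stand-in for clk_round_rate(); assume the exact rate is available */
	real_clk_rate = clk_rate;

	/* EDO is needed when the resulting tRC (two clock periods) is < 30 ns */
	period_ns = NSEC_PER_SEC / real_clk_rate;
	printf("clk_rate=%lu Hz, EDO=%s\n",
	       clk_rate, (period_ns * 2) < 30 ? "yes" : "no");
	return 0;
}
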
@@ -1257,6 +1433,57 @@
 	return sunxi_nand_chip_set_timings(chip, timings);
 }
 
+static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+	if (section >= ecc->steps)
+		return -ERANGE;
+
+	oobregion->offset = section * (ecc->bytes + 4) + 4;
+	oobregion->length = ecc->bytes;
+
+	return 0;
+}
+
+static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+	if (section > ecc->steps)
+		return -ERANGE;
+
+	/*
+	 * The first 2 bytes are used for BB markers, hence we
+	 * only have 2 bytes available in the first user data
+	 * section.
+	 */
+	if (!section && ecc->mode == NAND_ECC_HW) {
+		oobregion->offset = 2;
+		oobregion->length = 2;
+
+		return 0;
+	}
+
+	oobregion->offset = section * (ecc->bytes + 4);
+
+	if (section < ecc->steps)
+		oobregion->length = 4;
+	else
+		oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
+	.ecc = sunxi_nand_ooblayout_ecc,
+	.free = sunxi_nand_ooblayout_free,
+};
+
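
A standalone sketch (illustration only, outside the patch) that prints the OOB regions the two callbacks above describe, assuming made-up values of 4 ECC steps, 16 ECC bytes per step and a 128-byte OOB area; the section-0 special case corresponds to the HW-ECC path.

#include <stdio.h>

int main(void)
{
	int steps = 4, ecc_bytes = 16, oobsize = 128, section;

	for (section = 0; section < steps; section++)
		printf("ecc[%d]:  offset=%3d len=%d\n", section,
		       section * (ecc_bytes + 4) + 4, ecc_bytes);

	for (section = 0; section <= steps; section++) {
		int offset, len;

		if (section == 0) {
			/* first 2 OOB bytes hold the BB marker (HW ECC case) */
			offset = 2;
			len = 2;
		} else {
			offset = section * (ecc_bytes + 4);
			len = (section < steps) ? 4 : oobsize - offset;
		}
		printf("free[%d]: offset=%3d len=%d\n", section, offset, len);
	}
	return 0;
}
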
 static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
 					      struct nand_ecc_ctrl *ecc,
 					      struct device_node *np)
@@ -1266,7 +1493,6 @@
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
 	struct sunxi_nand_hw_ecc *data;
-	struct nand_ecclayout *layout;
 	int nsectors;
 	int ret;
 	int i;
@@ -1295,7 +1521,6 @@
 	/* HW ECC always work with even numbers of ECC bytes */
 	ecc->bytes = ALIGN(ecc->bytes, 2);
 
-	layout = &data->layout;
 	nsectors = mtd->writesize / ecc->size;
 
 	if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
@@ -1303,9 +1528,9 @@
 		goto err;
 	}
 
-	layout->eccbytes = (ecc->bytes * nsectors);
-
-	ecc->layout = layout;
+	ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
+	ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
+	mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
 	ecc->priv = data;
 
 	return 0;
@@ -1325,9 +1550,6 @@
 				       struct nand_ecc_ctrl *ecc,
 				       struct device_node *np)
 {
-	struct nand_ecclayout *layout;
-	int nsectors;
-	int i, j;
 	int ret;
 
 	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
@@ -1336,40 +1558,9 @@
 
 	ecc->read_page = sunxi_nfc_hw_ecc_read_page;
 	ecc->write_page = sunxi_nfc_hw_ecc_write_page;
-	layout = ecc->layout;
-	nsectors = mtd->writesize / ecc->size;
-
-	for (i = 0; i < nsectors; i++) {
-		if (i) {
-			layout->oobfree[i].offset =
-				layout->oobfree[i - 1].offset +
-				layout->oobfree[i - 1].length +
-				ecc->bytes;
-			layout->oobfree[i].length = 4;
-		} else {
-			/*
-			 * The first 2 bytes are used for BB markers, hence we
-			 * only have 2 bytes available in the first user data
-			 * section.
-			 */
-			layout->oobfree[i].length = 2;
-			layout->oobfree[i].offset = 2;
-		}
-
-		for (j = 0; j < ecc->bytes; j++)
-			layout->eccpos[(ecc->bytes * i) + j] =
-					layout->oobfree[i].offset +
-					layout->oobfree[i].length + j;
-	}
-
-	if (mtd->oobsize > (ecc->bytes + 4) * nsectors) {
-		layout->oobfree[nsectors].offset =
-				layout->oobfree[nsectors - 1].offset +
-				layout->oobfree[nsectors - 1].length +
-				ecc->bytes;
-		layout->oobfree[nsectors].length = mtd->oobsize -
-				((ecc->bytes + 4) * nsectors);
-	}
+	ecc->read_oob_raw = nand_read_oob_std;
+	ecc->write_oob_raw = nand_write_oob_std;
+	ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
 
 	return 0;
 }
@@ -1378,9 +1569,6 @@
 						struct nand_ecc_ctrl *ecc,
 						struct device_node *np)
 {
-	struct nand_ecclayout *layout;
-	int nsectors;
-	int i;
 	int ret;
 
 	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
@@ -1390,15 +1578,8 @@
 	ecc->prepad = 4;
 	ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
 	ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
-
-	layout = ecc->layout;
-	nsectors = mtd->writesize / ecc->size;
-
-	for (i = 0; i < (ecc->bytes * nsectors); i++)
-		layout->eccpos[i] = i;
-
-	layout->oobfree[0].length = mtd->oobsize - i;
-	layout->oobfree[0].offset = i;
+	ecc->read_oob_raw = nand_read_oob_syndrome;
+	ecc->write_oob_raw = nand_write_oob_syndrome;
 
 	return 0;
 }
@@ -1411,7 +1592,6 @@
 		sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
 		break;
 	case NAND_ECC_NONE:
-		kfree(ecc->layout);
 	default:
 		break;
 	}
@@ -1432,8 +1612,6 @@
 		return -EINVAL;
 
 	switch (ecc->mode) {
-	case NAND_ECC_SOFT_BCH:
-		break;
 	case NAND_ECC_HW:
 		ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
 		if (ret)
@@ -1445,10 +1623,6 @@
 			return ret;
 		break;
 	case NAND_ECC_NONE:
-		ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL);
-		if (!ecc->layout)
-			return -ENOMEM;
-		ecc->layout->oobfree[0].length = mtd->oobsize;
 	case NAND_ECC_SOFT:
 		break;
 	default:
@@ -1536,21 +1710,6 @@
 		}
 	}
 
-	timings = onfi_async_timing_mode_to_sdr_timings(0);
-	if (IS_ERR(timings)) {
-		ret = PTR_ERR(timings);
-		dev_err(dev,
-			"could not retrieve timings for ONFI mode 0: %d\n",
-			ret);
-		return ret;
-	}
-
-	ret = sunxi_nand_chip_set_timings(chip, timings);
-	if (ret) {
-		dev_err(dev, "could not configure chip timings: %d\n", ret);
-		return ret;
-	}
-
 	nand = &chip->nand;
 	/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
 	nand->chip_delay = 200;
@@ -1570,6 +1729,21 @@
 	mtd = nand_to_mtd(nand);
 	mtd->dev.parent = dev;
 
+	timings = onfi_async_timing_mode_to_sdr_timings(0);
+	if (IS_ERR(timings)) {
+		ret = PTR_ERR(timings);
+		dev_err(dev,
+			"could not retrieve timings for ONFI mode 0: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = sunxi_nand_chip_set_timings(chip, timings);
+	if (ret) {
+		dev_err(dev, "could not configure chip timings: %d\n", ret);
+		return ret;
+	}
+
 	ret = nand_scan_ident(mtd, nsels, NULL);
 	if (ret)
 		return ret;
@@ -1580,6 +1754,8 @@
 	if (nand->options & NAND_NEED_SCRAMBLING)
 		nand->options |= NAND_NO_SUBPAGE_WRITE;
 
+	nand->options |= NAND_SUBPAGE_READ;
+
 	ret = sunxi_nand_chip_init_timings(chip, np);
 	if (ret) {
 		dev_err(dev, "could not configure chip timings: %d\n", ret);
@@ -1728,6 +1904,8 @@
 	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
 
 	sunxi_nand_chips_cleanup(nfc);
+	clk_disable_unprepare(nfc->mod_clk);
+	clk_disable_unprepare(nfc->ahb_clk);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 293feb1..3ad514c 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -33,7 +33,6 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
-#include <linux/of_mtd.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
@@ -175,34 +174,6 @@
 	return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip);
 }
 
-static struct nand_ecclayout vf610_nfc_ecc45 = {
-	.eccbytes = 45,
-	.eccpos = {19, 20, 21, 22, 23,
-		   24, 25, 26, 27, 28, 29, 30, 31,
-		   32, 33, 34, 35, 36, 37, 38, 39,
-		   40, 41, 42, 43, 44, 45, 46, 47,
-		   48, 49, 50, 51, 52, 53, 54, 55,
-		   56, 57, 58, 59, 60, 61, 62, 63},
-	.oobfree = {
-		{.offset = 2,
-		 .length = 17} }
-};
-
-static struct nand_ecclayout vf610_nfc_ecc60 = {
-	.eccbytes = 60,
-	.eccpos = { 4,  5,  6,  7,  8,  9, 10, 11,
-		   12, 13, 14, 15, 16, 17, 18, 19,
-		   20, 21, 22, 23, 24, 25, 26, 27,
-		   28, 29, 30, 31, 32, 33, 34, 35,
-		   36, 37, 38, 39, 40, 41, 42, 43,
-		   44, 45, 46, 47, 48, 49, 50, 51,
-		   52, 53, 54, 55, 56, 57, 58, 59,
-		   60, 61, 62, 63 },
-	.oobfree = {
-		{.offset = 2,
-		 .length = 2} }
-};
-
 static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
 {
 	return readl(nfc->regs + reg);
@@ -781,14 +752,16 @@
 		if (mtd->oobsize > 64)
 			mtd->oobsize = 64;
 
+		/*
+		 * mtd->ecclayout is not specified here because we're using the
+		 * default large page ECC layout defined in NAND core.
+		 */
 		if (chip->ecc.strength == 32) {
 			nfc->ecc_mode = ECC_60_BYTE;
 			chip->ecc.bytes = 60;
-			chip->ecc.layout = &vf610_nfc_ecc60;
 		} else if (chip->ecc.strength == 24) {
 			nfc->ecc_mode = ECC_45_BYTE;
 			chip->ecc.bytes = 45;
-			chip->ecc.layout = &vf610_nfc_ecc45;
 		} else {
 			dev_err(nfc->dev, "Unsupported ECC strength\n");
 			err = -ENXIO;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index af28bb3..a4b029a 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -68,21 +68,33 @@
  * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
  * For now, we expose only 64 out of 80 ecc bytes
  */
-static struct nand_ecclayout flexonenand_oob_128 = {
-	.eccbytes	= 64,
-	.eccpos		= {
-		6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
-		22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
-		38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
-		54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
-		70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
-		86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
-		102, 103, 104, 105
-		},
-	.oobfree	= {
-		{2, 4}, {18, 4}, {34, 4}, {50, 4},
-		{66, 4}, {82, 4}, {98, 4}, {114, 4}
-	}
+static int flexonenand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 6;
+	oobregion->length = 10;
+
+	return 0;
+}
+
+static int flexonenand_ooblayout_free(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 2;
+	oobregion->length = 4;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops flexonenand_ooblayout_ops = {
+	.ecc = flexonenand_ooblayout_ecc,
+	.free = flexonenand_ooblayout_free,
 };
 
 /*
@@ -91,56 +103,77 @@
  * Based on specification:
  * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
  *
- * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
- *
- * oobfree uses the spare area fields marked as
- * "Managed by internal ECC logic for Logical Sector Number area"
  */
-static struct nand_ecclayout onenand_oob_128 = {
-	.eccbytes	= 64,
-	.eccpos		= {
-		7, 8, 9, 10, 11, 12, 13, 14, 15,
-		23, 24, 25, 26, 27, 28, 29, 30, 31,
-		39, 40, 41, 42, 43, 44, 45, 46, 47,
-		55, 56, 57, 58, 59, 60, 61, 62, 63,
-		71, 72, 73, 74, 75, 76, 77, 78, 79,
-		87, 88, 89, 90, 91, 92, 93, 94, 95,
-		103, 104, 105, 106, 107, 108, 109, 110, 111,
-		119
-	},
-	.oobfree	= {
-		{2, 3}, {18, 3}, {34, 3}, {50, 3},
-		{66, 3}, {82, 3}, {98, 3}, {114, 3}
-	}
+static int onenand_ooblayout_128_ecc(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 7;
+	oobregion->length = 9;
+
+	return 0;
+}
+
+static int onenand_ooblayout_128_free(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	if (section >= 8)
+		return -ERANGE;
+
+	/*
+	 * The free bytes use the spare area fields marked as
+	 * "Managed by internal ECC logic for Logical Sector Number area"
+	 */
+	oobregion->offset = (section * 16) + 2;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = {
+	.ecc = onenand_ooblayout_128_ecc,
+	.free = onenand_ooblayout_128_free,
 };
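
A standalone sketch (illustration only, outside the patch) that prints the eight ECC and free regions the two callbacks above describe for the 128-byte OOB case.

#include <stdio.h>

int main(void)
{
	int section;

	for (section = 0; section < 8; section++)
		printf("ecc[%d]: offset=%3d len=9  free[%d]: offset=%3d len=3\n",
		       section, section * 16 + 7, section, section * 16 + 2);
	return 0;
}
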
 
 /**
- * onenand_oob_64 - oob info for large (2KB) page
+ * onenand_oob_32_64 - oob info for middle (1KB) and large (2KB) page
  */
-static struct nand_ecclayout onenand_oob_64 = {
-	.eccbytes	= 20,
-	.eccpos		= {
-		8, 9, 10, 11, 12,
-		24, 25, 26, 27, 28,
-		40, 41, 42, 43, 44,
-		56, 57, 58, 59, 60,
-		},
-	.oobfree	= {
-		{2, 3}, {14, 2}, {18, 3}, {30, 2},
-		{34, 3}, {46, 2}, {50, 3}, {62, 2}
-	}
-};
+static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	if (section > 3)
+		return -ERANGE;
 
-/**
- * onenand_oob_32 - oob info for middle (1KB) page
- */
-static struct nand_ecclayout onenand_oob_32 = {
-	.eccbytes	= 10,
-	.eccpos		= {
-		8, 9, 10, 11, 12,
-		24, 25, 26, 27, 28,
-		},
-	.oobfree	= { {2, 3}, {14, 2}, {18, 3}, {30, 2} }
+	oobregion->offset = (section * 16) + 8;
+	oobregion->length = 5;
+
+	return 0;
+}
+
+static int onenand_ooblayout_32_64_free(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
+{
+	int sections = (mtd->oobsize / 32) * 2;
+
+	if (section >= sections)
+		return -ERANGE;
+
+	if (section & 1) {
+		oobregion->offset = ((section - 1) * 16) + 14;
+		oobregion->length = 2;
+	} else {
+		oobregion->offset = (section * 16) + 2;
+		oobregion->length = 3;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops onenand_oob_32_64_ooblayout_ops = {
+	.ecc = onenand_ooblayout_32_64_ecc,
+	.free = onenand_ooblayout_32_64_free,
 };
 
 static const unsigned char ffchars[] = {
@@ -1024,34 +1057,15 @@
 				int thislen)
 {
 	struct onenand_chip *this = mtd->priv;
-	struct nand_oobfree *free;
-	int readcol = column;
-	int readend = column + thislen;
-	int lastgap = 0;
-	unsigned int i;
-	uint8_t *oob_buf = this->oob_buf;
+	int ret;
 
-	free = this->ecclayout->oobfree;
-	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
-		if (readcol >= lastgap)
-			readcol += free->offset - lastgap;
-		if (readend >= lastgap)
-			readend += free->offset - lastgap;
-		lastgap = free->offset + free->length;
-	}
-	this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
-	free = this->ecclayout->oobfree;
-	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
-		int free_end = free->offset + free->length;
-		if (free->offset < readend && free_end > readcol) {
-			int st = max_t(int,free->offset,readcol);
-			int ed = min_t(int,free_end,readend);
-			int n = ed - st;
-			memcpy(buf, oob_buf + st, n);
-			buf += n;
-		} else if (column == 0)
-			break;
-	}
+	this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0,
+			     mtd->oobsize);
+	ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
+					  column, thislen);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -1808,34 +1822,7 @@
 static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
 				  const u_char *buf, int column, int thislen)
 {
-	struct onenand_chip *this = mtd->priv;
-	struct nand_oobfree *free;
-	int writecol = column;
-	int writeend = column + thislen;
-	int lastgap = 0;
-	unsigned int i;
-
-	free = this->ecclayout->oobfree;
-	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
-		if (writecol >= lastgap)
-			writecol += free->offset - lastgap;
-		if (writeend >= lastgap)
-			writeend += free->offset - lastgap;
-		lastgap = free->offset + free->length;
-	}
-	free = this->ecclayout->oobfree;
-	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
-		int free_end = free->offset + free->length;
-		if (free->offset < writeend && free_end > writecol) {
-			int st = max_t(int,free->offset,writecol);
-			int ed = min_t(int,free_end,writeend);
-			int n = ed - st;
-			memcpy(oob_buf + st, buf, n);
-			buf += n;
-		} else if (column == 0)
-			break;
-	}
-	return 0;
+	return mtd_ooblayout_set_databytes(mtd, buf, oob_buf, column, thislen);
 }
 
 /**
@@ -4003,22 +3990,22 @@
 	switch (mtd->oobsize) {
 	case 128:
 		if (FLEXONENAND(this)) {
-			this->ecclayout = &flexonenand_oob_128;
+			mtd_set_ooblayout(mtd, &flexonenand_ooblayout_ops);
 			mtd->subpage_sft = 0;
 		} else {
-			this->ecclayout = &onenand_oob_128;
+			mtd_set_ooblayout(mtd, &onenand_oob_128_ooblayout_ops);
 			mtd->subpage_sft = 2;
 		}
 		if (ONENAND_IS_NOP_1(this))
 			mtd->subpage_sft = 0;
 		break;
 	case 64:
-		this->ecclayout = &onenand_oob_64;
+		mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
 		mtd->subpage_sft = 2;
 		break;
 
 	case 32:
-		this->ecclayout = &onenand_oob_32;
+		mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
 		mtd->subpage_sft = 1;
 		break;
 
@@ -4027,7 +4014,7 @@
 			__func__, mtd->oobsize);
 		mtd->subpage_sft = 0;
 		/* To prevent kernel oops */
-		this->ecclayout = &onenand_oob_32;
+		mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
 		break;
 	}
 
@@ -4037,12 +4024,12 @@
 	 * The number of bytes available for a client to place data into
 	 * the out of band area
 	 */
-	mtd->oobavail = 0;
-	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
-	    this->ecclayout->oobfree[i].length; i++)
-		mtd->oobavail += this->ecclayout->oobfree[i].length;
+	ret = mtd_ooblayout_count_freebytes(mtd);
+	if (ret < 0)
+		ret = 0;
 
-	mtd->ecclayout = this->ecclayout;
+	mtd->oobavail = ret;
+
 	mtd->ecc_strength = 1;
 
 	/* Fill in remaining MTD driver data */
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 157841d..c52e455 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -832,6 +832,7 @@
 	/* GigaDevice */
 	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
 	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
 
 	/* Intel/Numonyx -- xxxs33b */
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a7d1feb..16baeb5 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -149,6 +149,8 @@
 	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
 static struct device_attribute dev_mtd_num =
 	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ro_mode =
+	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
 
 /**
  * ubi_volume_notify - send a volume change notification.
@@ -385,6 +387,8 @@
 		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
 	else if (attr == &dev_mtd_num)
 		ret = sprintf(buf, "%d\n", ubi->mtd->index);
+	else if (attr == &dev_ro_mode)
+		ret = sprintf(buf, "%d\n", ubi->ro_mode);
 	else
 		ret = -EINVAL;
 
@@ -404,6 +408,7 @@
 	&dev_min_io_size.attr,
 	&dev_bgt_enabled.attr,
 	&dev_mtd_num.attr,
+	&dev_ro_mode.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(ubi_dev);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c4cb15a..f101a49 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -352,7 +352,8 @@
 	} else if (dent == d->dfs_emulate_power_cut) {
 		if (kstrtoint(buf, 0, &val) != 0)
 			count = -EINVAL;
-		d->emulate_power_cut = val;
+		else
+			d->emulate_power_cut = val;
 		goto out;
 	}
 
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5b9834c..5780dd1 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -426,8 +426,25 @@
 						 pnum, vol_id, lnum);
 					err = -EBADMSG;
 				} else {
-					err = -EINVAL;
-					ubi_ro_mode(ubi);
+					/*
+					 * Ending up here in the non-Fastmap case
+					 * is a clear bug as the VID header had to
+					 * be present at scan time for it to be
+					 * referenced. With Fastmap the story is more
+					 * complicated: Fastmap has the mapping info
+					 * without the need for a full scan, so the
+					 * LEB could have been unmapped. Fastmap cannot
+					 * know this and keeps the LEB referenced.
+					 * This is valid and works, as the layer above
+					 * UBI has to do bookkeeping about
+					 * used/referenced LEBs in any case.
+					 */
+					if (ubi->fast_attach) {
+						err = -EBADMSG;
+					} else {
+						err = -EINVAL;
+						ubi_ro_mode(ubi);
+					}
 				}
 			}
 			goto out_free;
@@ -1202,32 +1219,6 @@
 		}
 
 		cond_resched();
-
-		/*
-		 * We've written the data and are going to read it back to make
-		 * sure it was written correctly.
-		 */
-		memset(ubi->peb_buf, 0xFF, aldata_size);
-		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
-		if (err) {
-			if (err != UBI_IO_BITFLIPS) {
-				ubi_warn(ubi, "error %d while reading data back from PEB %d",
-					 err, to);
-				if (is_error_sane(err))
-					err = MOVE_TARGET_RD_ERR;
-			} else
-				err = MOVE_TARGET_BITFLIPS;
-			goto out_unlock_buf;
-		}
-
-		cond_resched();
-
-		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
-			ubi_warn(ubi, "read data back from PEB %d and it is different",
-				 to);
-			err = -EINVAL;
-			goto out_unlock_buf;
-		}
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 263b439..990898b 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1058,6 +1058,7 @@
 	ubi_msg(ubi, "fastmap WL pool size: %d",
 		ubi->fm_wl_pool.max_size);
 	ubi->fm_disabled = 0;
+	ubi->fast_attach = 1;
 
 	ubi_free_vid_hdr(ubi, vh);
 	kfree(ech);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 437757c..348dbbc 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -705,7 +705,7 @@
 	struct ubi_volume *vol = desc->vol;
 	struct ubi_device *ubi = vol->ubi;
 
-	dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+	dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
 
 	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
 		return -EROFS;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dadc6a9..61d4e99 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -466,6 +466,7 @@
  * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
+ * @fast_attach: non-zero if UBI was attached by fastmap
  *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -574,6 +575,7 @@
 	size_t fm_size;
 	struct work_struct fm_work;
 	int fm_work_scheduled;
+	int fast_attach;
 
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb..10059df 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -405,7 +405,7 @@
 	if (!no_vtbl)
 		self_check_volumes(ubi);
 
-	return err;
+	return 0;
 
 out_err:
 	ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 17ec948..959c7b12 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1534,6 +1534,7 @@
 		INIT_LIST_HEAD(&ubi->pq[i]);
 	ubi->pq_head = 0;
 
+	ubi->free_count = 0;
 	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
 		cond_resched();
 
@@ -1552,7 +1553,6 @@
 		found_pebs++;
 	}
 
-	ubi->free_count = 0;
 	list_for_each_entry(aeb, &ai->free, u.list) {
 		cond_resched();
 
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d..8f5e93c 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@
 
 	/* upper group completed, look again in lower */
 	if (priv->rx_next > get_mb_rx_low_last(priv) &&
-	    quota > 0 && mb > get_mb_rx_last(priv)) {
+	    mb > get_mb_rx_last(priv)) {
 		priv->rx_next = get_mb_rx_first(priv);
-		goto again;
+		if (quota > 0)
+			goto again;
 	}
 
 	return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b094..e3dccd3 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@
 
 	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
-	for (i = 0; i < frame->can_dlc; i += 2) {
-		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
-				frame->data[i] | (frame->data[i + 1] << 8));
+	if (priv->type == BOSCH_D_CAN) {
+		u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+		for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+			data = (u32)frame->data[i];
+			data |= (u32)frame->data[i + 1] << 8;
+			data |= (u32)frame->data[i + 2] << 16;
+			data |= (u32)frame->data[i + 3] << 24;
+			priv->write_reg32(priv, dreg, data);
+		}
+	} else {
+		for (i = 0; i < frame->can_dlc; i += 2) {
+			priv->write_reg(priv,
+					C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+					frame->data[i] |
+					(frame->data[i + 1] << 8));
+		}
 	}
 }
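
A standalone sketch (illustration only, outside the patch) of the little-endian packing the D_CAN branch above uses when filling the 32-bit interface data registers, applied to a made-up 8-byte payload.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	int dlc = 8, i;

	/* one 32-bit register per 4 payload bytes, least significant byte first */
	for (i = 0; i < dlc; i += 4) {
		uint32_t word = (uint32_t)data[i] |
				(uint32_t)data[i + 1] << 8 |
				(uint32_t)data[i + 2] << 16 |
				(uint32_t)data[i + 3] << 24;

		printf("data reg %d: 0x%08" PRIx32 "\n", i / 4, word);
	}
	return 0;
}
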
 
@@ -402,10 +416,20 @@
 	} else {
 		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
 
-		for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
-			data = priv->read_reg(priv, dreg);
-			frame->data[i] = data;
-			frame->data[i + 1] = data >> 8;
+		if (priv->type == BOSCH_D_CAN) {
+			for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+				data = priv->read_reg32(priv, dreg);
+				frame->data[i] = data;
+				frame->data[i + 1] = data >> 8;
+				frame->data[i + 2] = data >> 16;
+				frame->data[i + 3] = data >> 24;
+			}
+		} else {
+			for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+				data = priv->read_reg(priv, dreg);
+				frame->data[i] = data;
+				frame->data[i + 1] = data >> 8;
+			}
 		}
 	}
 
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bcb272f..2ff0df3 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -16,7 +16,8 @@
 config CAN_GS_USB
 	tristate "Geschwister Schneider UG interfaces"
 	---help---
-	  This driver supports the Geschwister Schneider USB/CAN devices.
+	  This driver supports the Geschwister Schneider and bytewerk.org
+	  candleLight USB/CAN devices.
 	  If unsure choose N,
 	  choose Y for built in support,
 	  M to compile as module (module will be named: gs_usb).
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 1556d42..acb0c84 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -1,7 +1,9 @@
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
  *
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
  * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
  *
  * Many thanks to all socketcan devs!
  *
@@ -29,6 +31,9 @@
 #define USB_GSUSB_1_VENDOR_ID      0x1d50
 #define USB_GSUSB_1_PRODUCT_ID     0x606f
 
+#define USB_CANDLELIGHT_VENDOR_ID  0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
 #define GSUSB_ENDPOINT_IN          1
 #define GSUSB_ENDPOINT_OUT         2
 
@@ -952,6 +957,8 @@
 static const struct usb_device_id gs_usb_table[] = {
 	{ USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
 				      USB_GSUSB_1_PRODUCT_ID, 0) },
+	{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+				      USB_CANDLELIGHT_PRODUCT_ID, 0) },
 	{} /* Terminating entry */
 };
 
@@ -969,5 +976,6 @@
 MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
 MODULE_DESCRIPTION(
 "Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index e0fb0f1..20760e1 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@
 	 * on the current MAC's MII bus
 	 */
 	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
-		if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
-			phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+		if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+			phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
 			if (!aup->phy_search_highest_addr)
 				/* break out with first one found */
 				break;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d02c424..8fc93c5 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,10 +96,6 @@
 	unsigned int rx_ringsz;
 	unsigned int rxbuf_size;
 
-	struct page  *rx_page;
-	unsigned int rx_page_offset;
-	unsigned int rx_frag_size;
-
 	struct napi_struct napi;
 	struct alx_tx_queue txq;
 	struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c98acdc..e708e36 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,35 +70,6 @@
 	}
 }
 
-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
-	struct sk_buff *skb;
-	struct page *page;
-
-	if (alx->rx_frag_size > PAGE_SIZE)
-		return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
-	page = alx->rx_page;
-	if (!page) {
-		alx->rx_page = page = alloc_page(gfp);
-		if (unlikely(!page))
-			return NULL;
-		alx->rx_page_offset = 0;
-	}
-
-	skb = build_skb(page_address(page) + alx->rx_page_offset,
-			alx->rx_frag_size);
-	if (likely(skb)) {
-		alx->rx_page_offset += alx->rx_frag_size;
-		if (alx->rx_page_offset >= PAGE_SIZE)
-			alx->rx_page = NULL;
-		else
-			get_page(page);
-	}
-	return skb;
-}
-
-
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
 	struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = alx_alloc_skb(alx, gfp);
+		/*
+		 * When the DMA RX address is set to something like
+		 * 0x....fc0, it is very likely to cause a DMA RFD
+		 * overflow issue.
+		 *
+		 * To work around it, we allocate the rx skb with 64
+		 * bytes of extra space and offset the address whenever
+		 * 0x....fc0 is detected.
+		 */
+		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
 		if (!skb)
 			break;
+
+		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+			skb_reserve(skb, 64);
+
 		dma = dma_map_single(&alx->hw.pdev->dev,
 				     skb->data, alx->rxbuf_size,
 				     DMA_FROM_DEVICE);
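
A standalone sketch (illustration only, outside the patch) of the address test behind the workaround above, applied to two made-up buffer addresses.

#include <stdio.h>

static unsigned long maybe_offset(unsigned long addr)
{
	/* offset by 64 bytes whenever the address ends in 0x...fc0 */
	if ((addr & 0xfff) == 0xfc0)
		addr += 64;
	return addr;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x12345fc0UL, maybe_offset(0x12345fc0UL));
	printf("%#lx -> %#lx\n", 0x12345f80UL, maybe_offset(0x12345f80UL));
	return 0;
}
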
@@ -153,7 +137,6 @@
 		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
 	}
 
-
 	return count;
 }
 
@@ -622,11 +605,6 @@
 	kfree(alx->txq.bufs);
 	kfree(alx->rxq.bufs);
 
-	if (alx->rx_page) {
-		put_page(alx->rx_page);
-		alx->rx_page = NULL;
-	}
-
 	dma_free_coherent(&alx->hw.pdev->dev,
 			  alx->descmem.size,
 			  alx->descmem.virt,
@@ -681,7 +659,6 @@
 				  alx->dev->name, alx);
 		if (!err)
 			goto out;
-
 		/* fall back to legacy interrupt */
 		pci_disable_msi(alx->hw.pdev);
 	}
@@ -725,7 +702,6 @@
 	struct pci_dev *pdev = alx->hw.pdev;
 	struct alx_hw *hw = &alx->hw;
 	int err;
-	unsigned int head_size;
 
 	err = alx_identify_hw(alx);
 	if (err) {
@@ -741,12 +717,7 @@
 
 	hw->smb_timer = 400;
 	hw->mtu = alx->dev->mtu;
-
 	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
-	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	alx->rx_frag_size = roundup_pow_of_two(head_size);
-
 	alx->tx_ringsz = 256;
 	alx->rx_ringsz = 512;
 	hw->imt = 200;
@@ -848,7 +819,6 @@
 {
 	struct alx_priv *alx = netdev_priv(netdev);
 	int max_frame = ALX_MAX_FRAME_LEN(mtu);
-	unsigned int head_size;
 
 	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
 	    (max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@
 	netdev->mtu = mtu;
 	alx->hw.mtu = mtu;
 	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
-	head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	alx->rx_frag_size = roundup_pow_of_two(head_size);
 	netdev_update_features(netdev);
 	if (netif_running(netdev))
 		alx_reinit(alx);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0a5b770..a59d55e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12895,52 +12895,71 @@
 	return rc;
 }
 
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
 {
 	struct bnx2x_vlan_entry *vlan;
 	int rc = 0;
 
-	if (!bp->vlan_cnt) {
-		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
-		return 0;
-	}
-
+	/* Configure all non-configured entries */
 	list_for_each_entry(vlan, &bp->vlan_reg, link) {
-		/* Prepare for cleanup in case of errors */
-		if (rc) {
-			vlan->hw = false;
-			continue;
-		}
-
-		if (!vlan->hw)
+		if (vlan->hw)
 			continue;
 
-		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+		if (bp->vlan_cnt >= bp->vlan_credit)
+			return -ENOBUFS;
 
 		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
 		if (rc) {
-			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
-			vlan->hw = false;
-			rc = -EINVAL;
-			continue;
+			BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+			return rc;
 		}
+
+		DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+		vlan->hw = true;
+		bp->vlan_cnt++;
 	}
 
-	return rc;
+	return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+	bool need_accept_any_vlan;
+
+	need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+	if (bp->accept_any_vlan != need_accept_any_vlan) {
+		bp->accept_any_vlan = need_accept_any_vlan;
+		DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+		   bp->accept_any_vlan ? "raised" : "cleared");
+		if (set_rx_mode) {
+			if (IS_PF(bp))
+				bnx2x_set_rx_mode_inner(bp);
+			else
+				bnx2x_vfpf_storm_rx_mode(bp);
+		}
+	}
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+
+	/* The hw forgot all entries after reload */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+	bp->vlan_cnt = 0;
+
+	/* Don't set rx mode here. Our caller will do it. */
+	bnx2x_vlan_configure(bp, false);
+
+	return 0;
 }
 
 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_entry *vlan;
-	bool hw = false;
-	int rc = 0;
-
-	if (!netif_running(bp->dev)) {
-		DP(NETIF_MSG_IFUP,
-		   "Ignoring VLAN configuration the interface is down\n");
-		return -EFAULT;
-	}
 
 	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
 
@@ -12948,93 +12967,47 @@
 	if (!vlan)
 		return -ENOMEM;
 
-	bp->vlan_cnt++;
-	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
-		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
-		bp->accept_any_vlan = true;
-		if (IS_PF(bp))
-			bnx2x_set_rx_mode_inner(bp);
-		else
-			bnx2x_vfpf_storm_rx_mode(bp);
-	} else if (bp->vlan_cnt <= bp->vlan_credit) {
-		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
-		hw = true;
-	}
-
 	vlan->vid = vid;
-	vlan->hw = hw;
+	vlan->hw = false;
+	list_add_tail(&vlan->link, &bp->vlan_reg);
 
-	if (!rc) {
-		list_add(&vlan->link, &bp->vlan_reg);
-	} else {
-		bp->vlan_cnt--;
-		kfree(vlan);
-	}
+	if (netif_running(dev))
+		bnx2x_vlan_configure(bp, true);
 
-	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
-
-	return rc;
+	return 0;
 }
 
 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_entry *vlan;
+	bool found = false;
 	int rc = 0;
 
-	if (!netif_running(bp->dev)) {
-		DP(NETIF_MSG_IFUP,
-		   "Ignoring VLAN configuration the interface is down\n");
-		return -EFAULT;
-	}
-
 	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
 
-	if (!bp->vlan_cnt) {
-		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
-		return -EINVAL;
-	}
-
 	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		if (vlan->vid == vid)
+		if (vlan->vid == vid) {
+			found = true;
 			break;
+		}
 
-	if (vlan->vid != vid) {
+	if (!found) {
 		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
 		return -EINVAL;
 	}
 
-	if (vlan->hw)
+	if (netif_running(dev) && vlan->hw) {
 		rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+		DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+		bp->vlan_cnt--;
+	}
 
 	list_del(&vlan->link);
 	kfree(vlan);
 
-	bp->vlan_cnt--;
-
-	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
-		/* Configure all non-configured entries */
-		list_for_each_entry(vlan, &bp->vlan_reg, link) {
-			if (vlan->hw)
-				continue;
-
-			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
-			if (rc) {
-				BNX2X_ERR("Unable to config VLAN %d\n",
-					  vlan->vid);
-				continue;
-			}
-			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
-			   vlan->vid);
-			vlan->hw = true;
-		}
-		DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
-		bp->accept_any_vlan = false;
-		if (IS_PF(bp))
-			bnx2x_set_rx_mode_inner(bp);
-		else
-			bnx2x_vfpf_storm_rx_mode(bp);
-	}
+	if (netif_running(dev))
+		bnx2x_vlan_configure(bp, true);
 
 	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
 
@@ -13941,14 +13914,14 @@
 		bp->doorbells = bnx2x_vf_doorbells(bp);
 		rc = bnx2x_vf_pci_alloc(bp);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	} else {
 		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
 		if (doorbell_size > pci_resource_len(pdev, 2)) {
 			dev_err(&bp->pdev->dev,
 				"Cannot map doorbells, bar size too small, aborting\n");
 			rc = -ENOMEM;
-			goto init_one_exit;
+			goto init_one_freemem;
 		}
 		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 						doorbell_size);
@@ -13957,19 +13930,19 @@
 		dev_err(&bp->pdev->dev,
 			"Cannot map doorbell space, aborting\n");
 		rc = -ENOMEM;
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 
 	if (IS_VF(bp)) {
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
-			goto init_one_exit;
+			goto init_one_freemem;
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
 	rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
 	if (rc)
-		goto init_one_exit;
+		goto init_one_freemem;
 
 	/* calc qm_cid_count */
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13961,7 @@
 	rc = bnx2x_set_int_mode(bp);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot set interrupts\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("set interrupts successfully\n");
 
@@ -13996,7 +13969,7 @@
 	rc = register_netdev(dev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
-		goto init_one_exit;
+		goto init_one_freemem;
 	}
 	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 
@@ -14029,6 +14002,9 @@
 
 	return 0;
 
+init_one_freemem:
+	bnx2x_free_mem_bp(bp);
+
 init_one_exit:
 	bnx2x_disable_pcie_error_reporting(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72a2eff..c777cde 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -286,7 +286,9 @@
 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
 		txr->tx_prod = prod;
 
+		tx_buf->is_push = 1;
 		netdev_tx_sent_queue(txq, skb->len);
+		wmb();	/* Sync is_push and byte queue before pushing data */
 
 		push_len = (length + sizeof(*tx_push) + 7) / 8;
 		if (push_len > 16) {
@@ -298,7 +300,6 @@
 					 push_len);
 		}
 
-		tx_buf->is_push = 1;
 		goto tx_done;
 	}
 
@@ -1112,19 +1113,13 @@
 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
 
-	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
-		netdev_features_t features = skb->dev->features;
+	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		u16 vlan_proto = tpa_info->metadata >>
 			RX_CMP_FLAGS2_METADATA_TPID_SFT;
+		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
 
-		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-		     vlan_proto == ETH_P_8021Q) ||
-		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-		     vlan_proto == ETH_P_8021AD)) {
-			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-					       tpa_info->metadata &
-					       RX_CMP_FLAGS2_METADATA_VID_MASK);
-		}
+		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
 	}
 
 	skb_checksum_none_assert(skb);
@@ -1277,19 +1272,14 @@
 
 	skb->protocol = eth_type_trans(skb, dev);
 
-	if (rxcmp1->rx_cmp_flags2 &
-	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
-		netdev_features_t features = skb->dev->features;
+	if ((rxcmp1->rx_cmp_flags2 &
+	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
 		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
-		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-		     vlan_proto == ETH_P_8021Q) ||
-		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-		     vlan_proto == ETH_P_8021AD))
-			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-					       meta_data &
-					       RX_CMP_FLAGS2_METADATA_VID_MASK);
+		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
 	}
 
 	skb_checksum_none_assert(skb);
@@ -5466,6 +5456,20 @@
 
 	if (!bnxt_rfs_capable(bp))
 		features &= ~NETIF_F_NTUPLE;
+
+	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
+	 * turned on or off together.
+	 */
+	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+	    (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+				      NETIF_F_HW_VLAN_STAG_RX);
+		else
+			features |= NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_STAG_RX;
+	}
+
 	return features;
 }
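
A standalone sketch (illustration only, outside the patch) of the fixup logic above, using made-up bit flags in place of netdev_features_t.

#include <stdio.h>

#define VLAN_CTAG_RX (1u << 0)	/* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
#define VLAN_STAG_RX (1u << 1)	/* stand-in for NETIF_F_HW_VLAN_STAG_RX */

static unsigned int fix_features(unsigned int dev_features,
				 unsigned int requested)
{
	unsigned int both = VLAN_CTAG_RX | VLAN_STAG_RX;

	/* CTAG and STAG RX acceleration must be enabled or disabled together */
	if ((requested & both) != both) {
		/* inconsistent request: flip both, based on the current CTAG state */
		if (dev_features & VLAN_CTAG_RX)
			requested &= ~both;
		else
			requested |= both;
	}
	return requested;
}

int main(void)
{
	printf("%#x\n", fix_features(VLAN_CTAG_RX, VLAN_STAG_RX));	/* prints 0 */
	printf("%#x\n", fix_features(0, VLAN_CTAG_RX));			/* prints 0x3 */
	return 0;
}
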
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc1..50812a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5015),	/* T502-bt */
 	CH_PCI_ID_TABLE_FENTRY(0x5016),	/* T580-OCP-SO */
 	CH_PCI_ID_TABLE_FENTRY(0x5017),	/* T520-OCP-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5018),	/* T540-BT */
 	CH_PCI_ID_TABLE_FENTRY(0x5080),	/* Custom T540-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5081),	/* Custom T540-LL-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5082),	/* Custom T504-cr */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b01064..4edb98c 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1195,7 +1195,7 @@
 	priv->mdio = mdiobus_alloc();
 	if (!priv->mdio) {
 		ret = -ENOMEM;
-		goto free;
+		goto free2;
 	}
 
 	priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1208,7 @@
 	ret = mdiobus_register(priv->mdio);
 	if (ret) {
 		dev_err(&netdev->dev, "failed to register MDIO bus\n");
-		goto free;
+		goto free2;
 	}
 
 	ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1241,10 @@
 error:
 	mdiobus_unregister(priv->mdio);
 	mdiobus_free(priv->mdio);
-free:
+free2:
 	if (priv->clk)
 		clk_disable_unprepare(priv->clk);
+free:
 	free_netdev(netdev);
 out:
 	return ret;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 085f912..06f0317 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -205,8 +205,10 @@
 		 * re-adding ourselves to the poll list.
 		 */
 
-		if (priv->tx_skb && !tx_ctrl_ct)
+		if (priv->tx_skb && !tx_ctrl_ct) {
+			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
 			napi_reschedule(napi);
+		}
 	}
 
 	return work_done;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ca2cccc..fea0f33 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1197,10 +1197,8 @@
 					 fec16_to_cpu(bdp->cbd_datlen),
 					 DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb) {
-			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-			continue;
-		}
+		if (!skb)
+			goto skb_done;
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@
 
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
-
+skb_done:
 		/* Make sure the update to bdp and tx_skbuff are performed
 		 * before dirty_tx
 		 */
@@ -2418,24 +2416,24 @@
 		return -EOPNOTSUPP;
 
 	if (ec->rx_max_coalesced_frames > 255) {
-		pr_err("Rx coalesced frames exceed hardware limiation");
+		pr_err("Rx coalesced frames exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
 	if (ec->tx_max_coalesced_frames > 255) {
-		pr_err("Tx coalesced frame exceed hardware limiation");
+		pr_err("Tx coalesced frame exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
 	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
 	if (cycle > 0xFFFF) {
-		pr_err("Rx coalesed usec exceeed hardware limiation");
+		pr_err("Rx coalesced usec exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
 	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
 	if (cycle > 0xFFFF) {
-		pr_err("Rx coalesed usec exceeed hardware limiation");
+		pr_err("Rx coalesced usec exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index bcb9dcc..1de2e1e 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@
 	struct fman_cfg *cfg;
 	struct muram_info *muram;
 	/* cam section in muram */
-	int cam_offset;
+	unsigned long cam_offset;
 	size_t cam_size;
 	/* Fifo in MURAM */
 	int fifo_offset;
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9a..47394c4 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@
  *
  * Return: address of the allocated memory; NULL otherwise.
  */
-int fman_muram_alloc(struct muram_info *muram, size_t size)
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
 {
 	unsigned long vaddr;
 
@@ -150,7 +150,7 @@
  *
  * Free an allocated memory from FM-MURAM partition.
  */
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
 {
 	unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
 
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9..889649a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@
 unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
 					 unsigned long offset);
 
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
 
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
 
 #endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7615e06..2e6785b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2440,7 +2440,8 @@
 						 tx_queue->tx_ring_size);
 
 	if (likely(!nr_frags)) {
-		lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+		if (likely(!do_tstamp))
+			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
 	} else {
 		u32 lstatus_start = lstatus;
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c984462..d1cdc2d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@
 static void mtk_phy_link_adjust(struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
+	u16 lcl_adv = 0, rmt_adv = 0;
+	u8 flowctrl;
 	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
 		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
 		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@
 	if (mac->phy_dev->link)
 		mcr |= MAC_MCR_FORCE_LINK;
 
-	if (mac->phy_dev->duplex)
+	if (mac->phy_dev->duplex) {
 		mcr |= MAC_MCR_FORCE_DPX;
 
-	if (mac->phy_dev->pause)
-		mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+		if (mac->phy_dev->pause)
+			rmt_adv = LPA_PAUSE_CAP;
+		if (mac->phy_dev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+
+		if (mac->phy_dev->advertising & ADVERTISED_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_CAP;
+		if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+		if (flowctrl & FLOW_CTRL_TX)
+			mcr |= MAC_MCR_FORCE_TX_FC;
+		if (flowctrl & FLOW_CTRL_RX)
+			mcr |= MAC_MCR_FORCE_RX_FC;
+
+		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+	}
 
 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
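
A standalone sketch (illustration only, outside the patch) of the full-duplex pause resolution that mii_resolve_flowctrl_fdx() performs for the code above; the flag names are made up, and the table follows IEEE 802.3 Annex 28B as commonly implemented.

#include <stdio.h>

#define PAUSE_CAP	(1u << 0)	/* stand-in for ADVERTISE_PAUSE_CAP / LPA_PAUSE_CAP */
#define PAUSE_ASYM	(1u << 1)	/* stand-in for ADVERTISE_PAUSE_ASYM / LPA_PAUSE_ASYM */
#define FLOW_RX		(1u << 0)	/* stand-in for FLOW_CTRL_RX */
#define FLOW_TX		(1u << 1)	/* stand-in for FLOW_CTRL_TX */

/* full-duplex pause resolution, IEEE 802.3 Annex 28B style */
static unsigned int resolve_fdx(unsigned int lcl_adv, unsigned int rmt_adv)
{
	if (lcl_adv & rmt_adv & PAUSE_CAP)
		return FLOW_RX | FLOW_TX;

	if (lcl_adv & rmt_adv & PAUSE_ASYM) {
		if (lcl_adv & PAUSE_CAP)
			return FLOW_RX;
		if (rmt_adv & PAUSE_CAP)
			return FLOW_TX;
	}

	return 0;
}

int main(void)
{
	/* local advertises symmetric+asymmetric, partner only asymmetric */
	printf("%u\n", resolve_fdx(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM));	/* 1: RX only */
	/* both sides advertise symmetric pause */
	printf("%u\n", resolve_fdx(PAUSE_CAP, PAUSE_CAP));			/* 3: RX and TX */
	return 0;
}
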
 
@@ -208,10 +229,16 @@
 	u32 val, ge_mode;
 
 	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+	if (!np && of_phy_is_fixed_link(mac->of_node))
+		if (!of_phy_register_fixed_link(mac->of_node))
+			np = of_node_get(mac->of_node);
 	if (!np)
 		return -ENODEV;
 
 	switch (of_get_phy_mode(np)) {
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII:
 		ge_mode = 0;
 		break;
@@ -236,7 +263,8 @@
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
-	mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
 				    ADVERTISED_Autoneg;
 	phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@
 	return 0;
 
 err_free_bus:
-	kfree(eth->mii_bus);
+	mdiobus_free(eth->mii_bus);
 
 err_put_node:
 	of_node_put(mii_np);
@@ -295,7 +323,7 @@
 
 	mdiobus_unregister(eth->mii_bus);
 	of_node_put(eth->mii_bus->dev.of_node);
-	kfree(eth->mii_bus);
+	mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -453,20 +481,23 @@
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-	dma_addr_t phy_ring_head, phy_ring_tail;
+	dma_addr_t phy_ring_tail;
 	int cnt = MTK_DMA_SIZE;
 	dma_addr_t dma_addr;
 	int i;
 
 	eth->scratch_ring = dma_alloc_coherent(eth->dev,
 					       cnt * sizeof(struct mtk_tx_dma),
-					       &phy_ring_head,
+					       &eth->phy_scratch_ring,
 					       GFP_ATOMIC | __GFP_ZERO);
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
 	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
 				    GFP_KERNEL);
+	if (unlikely(!eth->scratch_head))
+		return -ENOMEM;
+
 	dma_addr = dma_map_single(eth->dev,
 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 				  DMA_FROM_DEVICE);
@@ -474,19 +505,19 @@
 		return -ENOMEM;
 
 	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-	phy_ring_tail = phy_ring_head +
+	phy_ring_tail = eth->phy_scratch_ring +
 			(sizeof(struct mtk_tx_dma) * (cnt - 1));
 
 	for (i = 0; i < cnt; i++) {
 		eth->scratch_ring[i].txd1 =
 					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
 		if (i < cnt - 1)
-			eth->scratch_ring[i].txd2 = (phy_ring_head +
+			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
 				((i + 1) * sizeof(struct mtk_tx_dma)));
 		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
 	}
 
-	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
 	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
 	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
 	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -643,7 +674,7 @@
 
 err_dma:
 	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, txd);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
 		mtk_tx_unmap(&dev->dev, tx_buf);
@@ -673,6 +704,20 @@
 	return nfrags;
 }
 
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		if (netif_queue_stopped(eth->netdev[i]))
+			return 1;
+	}
+
+	return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
 	int i;
@@ -738,12 +783,9 @@
 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
 		goto drop;
 
-	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
 		mtk_stop_queue(eth);
-		if (unlikely(atomic_read(&ring->free_count) >
-			     ring->thresh))
-			mtk_wake_queue(eth);
-	}
+
 	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
@@ -798,6 +840,7 @@
 					  DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
 			skb_free_frag(new_data);
+			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
 
@@ -805,6 +848,7 @@
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
 			put_page(virt_to_head_page(new_data));
+			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -893,7 +937,6 @@
 		}
 		mtk_tx_unmap(eth->dev, tx_buf);
 
-		ring->last_free->txd2 = next_cpu;
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
 
@@ -918,7 +961,8 @@
 	if (!total)
 		return 0;
 
-	if (atomic_read(&ring->free_count) > ring->thresh)
+	if (mtk_queue_stopped(eth) &&
+	    (atomic_read(&ring->free_count) > ring->thresh))
 		mtk_wake_queue(eth);
 
 	return total;
@@ -999,9 +1043,8 @@
 
 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
 	ring->next_free = &ring->dma[0];
-	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-			      MAX_SKB_FRAGS);
+	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+	ring->thresh = MAX_SKB_FRAGS;
 
 	/* make sure that all changes to the dma ring are flushed before we
 	 * continue
@@ -1179,6 +1222,14 @@
 	for (i = 0; i < MTK_MAC_COUNT; i++)
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
+	if (eth->scratch_ring) {
+		dma_free_coherent(eth->dev,
+				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+				  eth->scratch_ring,
+				  eth->phy_scratch_ring);
+		eth->scratch_ring = NULL;
+		eth->phy_scratch_ring = 0;
+	}
 	mtk_tx_clean(eth);
 	mtk_rx_clean(eth);
 	kfree(eth->scratch_head);
@@ -1241,7 +1292,7 @@
 	mtk_w32(eth,
 		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
 		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-		MTK_RX_BT_32DWORDS,
+		MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
 		MTK_QDMA_GLO_CFG);
 
 	return 0;
@@ -1355,7 +1406,7 @@
 
 	/* disable delay and normal interrupt */
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+	mtk_irq_disable(eth, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1669,7 +1720,7 @@
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->watchdog_timeo = HZ;
+	eth->netdev[id]->watchdog_timeo = 5 * HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
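
The mtk_eth_soc hunks above make the transmit path stop the queue as soon as the free descriptor count drops to the threshold, and let the TX completion path wake it again only when a queue is actually stopped and descriptors have been reclaimed. A minimal userspace sketch of that stop/wake pattern, assuming a simplified ring and an illustrative TX_THRESH constant rather than the driver's real structures:

#include <stdbool.h>

#define TX_THRESH 17	/* illustrative, roughly MAX_SKB_FRAGS + 1 */

struct model_ring {
	int free;	/* free TX descriptors */
	bool stopped;	/* models netif_queue_stopped() */
};

/* transmit path: stop the queue once another frame might not fit */
static void model_xmit(struct model_ring *r, int used)
{
	r->free -= used;
	if (r->free <= TX_THRESH)
		r->stopped = true;	/* netif_stop_queue() */
}

/* completion path: wake only if the queue was actually stopped */
static void model_tx_complete(struct model_ring *r, int reclaimed)
{
	r->free += reclaimed;
	if (r->stopped && r->free > TX_THRESH)
		r->stopped = false;	/* netif_wake_queue() */
}

Waking exclusively from the completion path avoids the removed pattern of stopping and then immediately re-waking the queue inside the xmit routine.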
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eed626d..a5eb7c6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -91,6 +91,7 @@
 #define MTK_QDMA_GLO_CFG	0x1A04
 #define MTK_RX_2B_OFFSET	BIT(31)
 #define MTK_RX_BT_32DWORDS	(3 << 11)
+#define MTK_NDP_CO_PRO		BIT(10)
 #define MTK_TX_WB_DDONE		BIT(6)
 #define MTK_DMA_SIZE_16DWORDS	(2 << 4)
 #define MTK_RX_DMA_BUSY		BIT(3)
@@ -357,6 +358,7 @@
  * @rx_ring:		Pointer to the memore holding info about the RX ring
  * @rx_napi:		The NAPI struct
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring:	physical address of scratch_ring
  * @scratch_head:	The scratch memory that scratch_ring points to.
  * @clk_ethif:		The ethif clock
  * @clk_esw:		The switch clock
@@ -384,6 +386,7 @@
 	struct mtk_rx_ring		rx_ring;
 	struct napi_struct		rx_napi;
 	struct mtk_tx_dma		*scratch_ring;
+	dma_addr_t			phy_scratch_ring;
 	void				*scratch_head;
 	struct clk			*clk_ethif;
 	struct clk			*clk_esw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c..f04a423 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@
 	priv->cmd.free_head = 0;
 
 	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
-	spin_lock_init(&priv->cmd.context_lock);
 
 	for (priv->cmd.token_mask = 1;
 	     priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 19ceced..0c0dfd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -406,14 +406,18 @@
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-		if (err)
+		if (err) {
 			en_err(priv, "Failed configuring VLAN filter\n");
+			goto out;
+		}
 	}
-	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
-		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
-	mutex_unlock(&mdev->state_lock);
+	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+	if (err)
+		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
 
-	return 0;
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
 }
 
 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +425,7 @@
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
+	int err = 0;
 
 	en_dbg(HW, priv, "Killing VID:%d\n", vid);
 
@@ -438,7 +442,7 @@
 	}
 	mutex_unlock(&mdev->state_lock);
 
-	return 0;
+	return err;
 }
 
 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -2032,11 +2036,20 @@
 	return -ENOMEM;
 }
 
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+	rtnl_lock();
+	netif_device_detach(dev);
+	mlx4_en_close(dev);
+	rtnl_unlock();
+}
 
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	bool shutdown = mdev->dev->persist->interface_state &
+					    MLX4_INTERFACE_STATE_SHUTDOWN;
 
 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2044,7 +2057,10 @@
 	if (priv->registered) {
 		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
 							      priv->port));
-		unregister_netdev(dev);
+		if (shutdown)
+			mlx4_en_shutdown(dev);
+		else
+			unregister_netdev(dev);
 	}
 
 	if (priv->allocated)
@@ -2069,7 +2085,8 @@
 	kfree(priv->tx_ring);
 	kfree(priv->tx_cq);
 
-	free_netdev(dev);
+	if (!shutdown)
+		free_netdev(dev);
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2447,9 +2464,14 @@
 	 * strip that feature if this is an IPv6 encapsulated frame.
 	 */
 	if (skb->encapsulation &&
-	    (skb->ip_summed == CHECKSUM_PARTIAL) &&
-	    (ip_hdr(skb)->version != 4))
-		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
+		struct mlx4_en_priv *priv = netdev_priv(dev);
+
+		if (!priv->vxlan_port ||
+		    (ip_hdr(skb)->version != 4) ||
+		    (udp_hdr(skb)->dest != priv->vxlan_port))
+			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+	}
 
 	return features;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12c77a7..546fab0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3222,6 +3222,7 @@
 
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	mutex_init(&priv->pgdir_mutex);
+	spin_lock_init(&priv->cmd.context_lock);
 
 	INIT_LIST_HEAD(&priv->bf_list);
 	mutex_init(&priv->bf_mutex);
@@ -4134,8 +4135,11 @@
 
 	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
 	mutex_lock(&persist->interface_state_mutex);
-	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+	if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+		/* Notify mlx4 clients that the kernel is being shut down */
+		persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
 		mlx4_unload_one(pdev);
+	}
 	mutex_unlock(&persist->interface_state_mutex);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index fd43929..f5c8d5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3192,10 +3192,7 @@
 	flush_workqueue(priv->wq);
 	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
 		netif_device_detach(netdev);
-		mutex_lock(&priv->state_lock);
-		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-			mlx5e_close_locked(netdev);
-		mutex_unlock(&priv->state_lock);
+		mlx5e_close(netdev);
 	} else {
 		unregister_netdev(netdev);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 229ab16..b000ddc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -317,7 +317,8 @@
 	while ((sq->pc & wq->sz_m1) > sq->edge)
 		mlx5e_send_nop(sq, false);
 
-	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+	if (bf)
+		sq->bf_budget--;
 
 	sq->stats.packets++;
 	sq->stats.bytes += num_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b84a691..aebbd6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -383,7 +383,7 @@
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				   0, &dest);
-	if (IS_ERR_OR_NULL(flow_rule)) {
+	if (IS_ERR(flow_rule)) {
 		pr_warn(
 			"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
 			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -457,7 +457,7 @@
 
 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
-	if (IS_ERR_OR_NULL(fdb)) {
+	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
 		goto out;
@@ -474,7 +474,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
 	eth_broadcast_addr(dmac);
 	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
 		goto out;
@@ -489,7 +489,7 @@
 	eth_zero_addr(dmac);
 	dmac[0] = 0x01;
 	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
 		goto out;
@@ -506,7 +506,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
 	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
 		goto out;
@@ -529,7 +529,7 @@
 		}
 	}
 
-	kfree(flow_group_in);
+	kvfree(flow_group_in);
 	return err;
 }
 
@@ -651,6 +651,7 @@
 					esw_fdb_set_vport_rule(esw,
 							       mac,
 							       vport_idx);
+			iter_vaddr->mc_promisc = true;
 			break;
 		case MLX5_ACTION_DEL:
 			if (!iter_vaddr)
@@ -1060,7 +1061,7 @@
 		return;
 
 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR_OR_NULL(acl)) {
+	if (IS_ERR(acl)) {
 		err = PTR_ERR(acl);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
 			 vport->vport, err);
@@ -1075,7 +1076,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
 	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(vlan_grp)) {
+	if (IS_ERR(vlan_grp)) {
 		err = PTR_ERR(vlan_grp);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1086,7 +1087,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(drop_grp)) {
+	if (IS_ERR(drop_grp)) {
 		err = PTR_ERR(drop_grp);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1097,7 +1098,7 @@
 	vport->egress.drop_grp = drop_grp;
 	vport->egress.allowed_vlans_grp = vlan_grp;
 out:
-	kfree(flow_group_in);
+	kvfree(flow_group_in);
 	if (err && !IS_ERR_OR_NULL(vlan_grp))
 		mlx5_destroy_flow_group(vlan_grp);
 	if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@
 		return;
 
 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR_OR_NULL(acl)) {
+	if (IS_ERR(acl)) {
 		err = PTR_ERR(acl);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
 			 vport->vport, err);
@@ -1192,7 +1193,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1207,7 +1208,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1223,7 +1224,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1236,7 +1237,7 @@
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1259,7 +1260,7 @@
 			mlx5_destroy_flow_table(vport->ingress.acl);
 	}
 
-	kfree(flow_group_in);
+	kvfree(flow_group_in);
 }
 
 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+	if (IS_ERR(vport->ingress.allow_rule)) {
 		err = PTR_ERR(vport->ingress.allow_rule);
 		pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
 			vport->vport, err);
@@ -1380,7 +1381,7 @@
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+	if (IS_ERR(vport->ingress.drop_rule)) {
 		err = PTR_ERR(vport->ingress.drop_rule);
 		pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
 			vport->vport, err);
@@ -1439,7 +1440,7 @@
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+	if (IS_ERR(vport->egress.allowed_vlan)) {
 		err = PTR_ERR(vport->egress.allowed_vlan);
 		pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
 			vport->vport, err);
@@ -1457,7 +1458,7 @@
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+	if (IS_ERR(vport->egress.drop_rule)) {
 		err = PTR_ERR(vport->egress.drop_rule);
 		pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
 			vport->vport, err);
@@ -1491,14 +1492,11 @@
 
 	/* Sync with current vport context */
 	vport->enabled_events = enable_events;
-	esw_vport_change_handle_locked(vport);
-
 	vport->enabled = true;
 
 	/* only PF is trusted by default */
 	vport->trusted = (vport_num) ? false : true;
-
-	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+	esw_vport_change_handle_locked(vport);
 
 	esw->enabled_vports++;
 	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@
 	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
 
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+	((u8 *)node_guid)[7] = mac[0];
+	((u8 *)node_guid)[6] = mac[1];
+	((u8 *)node_guid)[5] = mac[2];
+	((u8 *)node_guid)[4] = 0xff;
+	((u8 *)node_guid)[3] = 0xfe;
+	((u8 *)node_guid)[2] = mac[3];
+	((u8 *)node_guid)[1] = mac[4];
+	((u8 *)node_guid)[0] = mac[5];
+}
+
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       int vport, u8 mac[ETH_ALEN])
 {
-	int err = 0;
 	struct mlx5_vport *evport;
+	u64 node_guid;
+	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
@@ -1756,11 +1767,17 @@
 		return err;
 	}
 
+	node_guid_gen_from_mac(&node_guid, mac);
+	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+	if (err)
+		mlx5_core_warn(esw->dev,
+			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+			       vport, err);
+
 	mutex_lock(&esw->state_lock);
 	if (evport->enabled)
 		err = esw_vport_ingress_config(esw, evport);
 	mutex_unlock(&esw->state_lock);
-
 	return err;
 }
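
The node_guid_gen_from_mac() helper added above builds an EUI-64 style node GUID from the VF MAC by inserting the 0xff/0xfe filler between the two halves of the address; the reversed byte indices in the helper are consistent with the 64-bit value being converted to big endian before it is written to the device. A small userspace model of the resulting wire-order layout, with an illustrative mac_to_node_guid() helper and sample MAC:

#include <stdint.h>
#include <stdio.h>

static void mac_to_node_guid(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0];
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;		/* EUI-64 filler bytes */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t guid[8];
	int i;

	mac_to_node_guid(mac, guid);
	for (i = 0; i < 8; i++)
		printf("%02x%c", guid[i], i == 7 ? '\n' : ':');
	return 0;	/* prints 00:11:22:ff:fe:33:44:55 */
}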
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8b5f0b2..e912a3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1292,8 +1292,8 @@
 				       ft->id);
 			return err;
 		}
-		root->root_ft = new_root_ft;
 	}
+	root->root_ft = new_root_ft;
 	return 0;
 }
 
@@ -1767,6 +1767,9 @@
 
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return;
+
 	cleanup_root_ns(dev);
 	cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
 	cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@@ -1828,29 +1831,36 @@
 {
 	int err = 0;
 
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return 0;
+
 	err = mlx5_init_fc_stats(dev);
 	if (err)
 		return err;
 
-	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+	if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
 		err = init_root_ns(dev);
 		if (err)
 			goto err;
 	}
+
 	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-		err = init_fdb_root_ns(dev);
-		if (err)
-			goto err;
-	}
-	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-		err = init_egress_acl_root_ns(dev);
-		if (err)
-			goto err;
-	}
-	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-		err = init_ingress_acl_root_ns(dev);
-		if (err)
-			goto err;
+		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+			err = init_fdb_root_ns(dev);
+			if (err)
+				goto err;
+		}
+		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+			err = init_egress_acl_root_ns(dev);
+			if (err)
+				goto err;
+		}
+		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+			err = init_ingress_acl_root_ns(dev);
+			if (err)
+				goto err;
+		}
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b720a27..b82d658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@
 	if (out.hdr.status)
 		err = mlx5_cmd_status_to_err(&out.hdr);
 	else
-		*xrcdn = be32_to_cpu(out.xrcdn);
+		*xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
 
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadc..daf44cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -508,6 +508,44 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
 
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+				    u32 vport, u64 node_guid)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *nic_vport_context;
+	u8 *guid;
+	void *in;
+	int err;
+
+	if (!vport)
+		return -EINVAL;
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EACCES;
+	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+		return -ENOTSUPP;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.node_guid, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+					 in, nic_vport_context);
+	guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
+			    node_guid);
+	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
 					u16 *qkey_viol_cntr)
 {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4a72737..660429e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -247,13 +247,21 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 }
 
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				    u8 swid)
+{
+	char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char pspa_pl[MLXSW_REG_PSPA_LEN];
 
-	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+					swid);
 }
 
 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -305,9 +313,9 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 }
 
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-					   u8 local_port, u8 *p_module,
-					   u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+					 u8 local_port, u8 *p_module,
+					 u8 *p_width, u8 *p_lane)
 {
 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 	int err;
@@ -322,16 +330,6 @@
 	return 0;
 }
 
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-					 u8 local_port, u8 *p_module,
-					 u8 *p_width)
-{
-	u8 lane;
-
-	return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
-					       p_width, &lane);
-}
-
 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				    u8 module, u8 width, u8 lane)
 {
@@ -410,7 +408,11 @@
 	}
 
 	mlxsw_sp_txhdr_construct(skb, &tx_info);
-	len = skb->len;
+	/* TX header is consumed by the HW on the way, so we shouldn't count
+	 * its bytes as being sent.
+	 */
+	len = skb->len - MLXSW_TXHDR_LEN;
+
 	/* Due to a race we might fail here because of a full queue. In that
 	 * unlikely case we simply drop the packet.
 	 */
@@ -949,17 +951,11 @@
 					    size_t len)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-	u8 module, width, lane;
+	u8 module = mlxsw_sp_port->mapping.module;
+	u8 width = mlxsw_sp_port->mapping.width;
+	u8 lane = mlxsw_sp_port->mapping.lane;
 	int err;
 
-	err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
-					      mlxsw_sp_port->local_port,
-					      &module, &width, &lane);
-	if (err) {
-		netdev_err(dev, "Failed to retrieve module information\n");
-		return err;
-	}
-
 	if (!mlxsw_sp_port->split)
 		err = snprintf(name, len, "p%d", module + 1);
 	else
@@ -1681,8 +1677,8 @@
 	return 0;
 }
 
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				  bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				bool split, u8 module, u8 width, u8 lane)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct net_device *dev;
@@ -1697,6 +1693,9 @@
 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
 	mlxsw_sp_port->local_port = local_port;
 	mlxsw_sp_port->split = split;
+	mlxsw_sp_port->mapping.module = module;
+	mlxsw_sp_port->mapping.width = width;
+	mlxsw_sp_port->mapping.lane = lane;
 	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
 	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
 	if (!mlxsw_sp_port->active_vlans) {
@@ -1839,28 +1838,6 @@
 	return err;
 }
 
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				bool split, u8 module, u8 width, u8 lane)
-{
-	int err;
-
-	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
-				       lane);
-	if (err)
-		return err;
-
-	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
-				     width);
-	if (err)
-		goto err_port_create;
-
-	return 0;
-
-err_port_create:
-	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
-	return err;
-}
-
 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 {
 	struct net_device *dev = mlxsw_sp_port->dev;
@@ -1909,8 +1886,8 @@
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
+	u8 module, width, lane;
 	size_t alloc_size;
-	u8 module, width;
 	int i;
 	int err;
 
@@ -1921,13 +1898,14 @@
 
 	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
 		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
-						    &width);
+						    &width, &lane);
 		if (err)
 			goto err_port_module_info_get;
 		if (!width)
 			continue;
 		mlxsw_sp->port_to_module[i] = module;
-		err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+					   lane);
 		if (err)
 			goto err_port_create;
 	}
@@ -1948,12 +1926,85 @@
 	return local_port - offset;
 }
 
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+				      u8 module, unsigned int count)
+{
+	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+	int err, i;
+
+	for (i = 0; i < count; i++) {
+		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+					       width, i * width);
+		if (err)
+			goto err_port_module_map;
+	}
+
+	for (i = 0; i < count; i++) {
+		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+		if (err)
+			goto err_port_swid_set;
+	}
+
+	for (i = 0; i < count; i++) {
+		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+					   module, width, i * width);
+		if (err)
+			goto err_port_create;
+	}
+
+	return 0;
+
+err_port_create:
+	for (i--; i >= 0; i--)
+		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+	i = count;
+err_port_swid_set:
+	for (i--; i >= 0; i--)
+		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+					 MLXSW_PORT_SWID_DISABLED_PORT);
+	i = count;
+err_port_module_map:
+	for (i--; i >= 0; i--)
+		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+	return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+					 u8 base_port, unsigned int count)
+{
+	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+	int i;
+
+	/* Splitting by four means we need to re-create two ports, otherwise
+	 * only one.
+	 */
+	count = count / 2;
+
+	for (i = 0; i < count; i++) {
+		local_port = base_port + i * 2;
+		module = mlxsw_sp->port_to_module[local_port];
+
+		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+					 0);
+	}
+
+	for (i = 0; i < count; i++)
+		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+	for (i = 0; i < count; i++) {
+		local_port = base_port + i * 2;
+		module = mlxsw_sp->port_to_module[local_port];
+
+		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+				     width, 0);
+	}
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
 			       unsigned int count)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	struct mlxsw_sp_port *mlxsw_sp_port;
-	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
 	u8 module, cur_width, base_port;
 	int i;
 	int err;
@@ -1965,18 +2016,14 @@
 		return -EINVAL;
 	}
 
+	module = mlxsw_sp_port->mapping.module;
+	cur_width = mlxsw_sp_port->mapping.width;
+
 	if (count != 2 && count != 4) {
 		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
 		return -EINVAL;
 	}
 
-	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-					    &cur_width);
-	if (err) {
-		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-		return err;
-	}
-
 	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
 		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
 		return -EINVAL;
@@ -2001,25 +2048,16 @@
 	for (i = 0; i < count; i++)
 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-	for (i = 0; i < count; i++) {
-		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
-					   module, width, i * width);
-		if (err) {
-			dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
-			goto err_port_create;
-		}
+	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+		goto err_port_split_create;
 	}
 
 	return 0;
 
-err_port_create:
-	for (i--; i >= 0; i--)
-		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
-	for (i = 0; i < count / 2; i++) {
-		module = mlxsw_sp->port_to_module[base_port + i * 2];
-		mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-				     module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
-	}
+err_port_split_create:
+	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
 	return err;
 }
 
@@ -2027,10 +2065,9 @@
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	struct mlxsw_sp_port *mlxsw_sp_port;
-	u8 module, cur_width, base_port;
+	u8 cur_width, base_port;
 	unsigned int count;
 	int i;
-	int err;
 
 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
 	if (!mlxsw_sp_port) {
@@ -2044,12 +2081,7 @@
 		return -EINVAL;
 	}
 
-	err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-					    &cur_width);
-	if (err) {
-		netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-		return err;
-	}
+	cur_width = mlxsw_sp_port->mapping.width;
 	count = cur_width == 1 ? 4 : 2;
 
 	base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +2093,7 @@
 	for (i = 0; i < count; i++)
 		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-	for (i = 0; i < count / 2; i++) {
-		module = mlxsw_sp->port_to_module[base_port + i * 2];
-		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-					   module, MLXSW_PORT_MODULE_MAX_WIDTH,
-					   0);
-		if (err)
-			dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
-	}
+	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index e2c022d..13b30ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -229,6 +229,11 @@
 		struct ieee_maxrate *maxrate;
 		struct ieee_pfc *pfc;
 	} dcb;
+	struct {
+		u8 module;
+		u8 width;
+		u8 lane;
+	} mapping;
 	/* 802.1Q bridge VLANs */
 	unsigned long *active_vlans;
 	unsigned long *untagged_vlans;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3842eab..25f658b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -316,7 +316,10 @@
 		}
 	}
 	mlxsw_sx_txhdr_construct(skb, &tx_info);
-	len = skb->len;
+	/* TX header is consumed by the HW on the way, so we shouldn't count
+	 * its bytes as being sent.
+	 */
+	len = skb->len - MLXSW_TXHDR_LEN;
 	/* Due to a race we might fail here because of a full queue. In that
 	 * unlikely case we simply drop the packet.
 	 */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index fa47c14..ba26bb3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2015,7 +2015,7 @@
 
 	netif_tx_wake_all_queues(nn->netdev);
 
-	enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 	nfp_net_read_link_status(nn);
 }
 
@@ -2044,7 +2044,7 @@
 				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
 	if (err)
 		goto err_free_exn;
-	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 
 	nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
 			       GFP_KERNEL);
@@ -2133,7 +2133,7 @@
 {
 	unsigned int r;
 
-	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 	netif_carrier_off(nn->netdev);
 	nn->link_up = false;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 9afc15f..e29ed5a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -3700,6 +3700,7 @@
 #define MEDIA_DA_TWINAX         0x3
 #define MEDIA_BASE_T            0x4
 #define MEDIA_SFP_1G_FIBER      0x5
+#define MEDIA_MODULE_FIBER      0x6
 #define MEDIA_KR                0xf0
 #define MEDIA_NOT_PRESENT       0xff
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 8fba87dd..aada4c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -72,6 +72,7 @@
 	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
 	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
 	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;
+	p_ramrod->untagged		= p_params->only_untagged;
 
 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -247,10 +248,6 @@
 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
 			  !!(accept_filter & QED_ACCEPT_NONE));
 
-		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
-			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
-			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
 			  !!(accept_filter & QED_ACCEPT_NONE));
 
@@ -1748,7 +1745,8 @@
 			   start.vport_id, start.mtu);
 	}
 
-	qed_reset_vport_stats(cdev);
+	if (params->clear_stats)
+		qed_reset_vport_stats(cdev);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 7530646..c7e01b3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1085,6 +1085,7 @@
 	case MEDIA_SFPP_10G_FIBER:
 	case MEDIA_SFP_1G_FIBER:
 	case MEDIA_XFP_FIBER:
+	case MEDIA_MODULE_FIBER:
 	case MEDIA_KR:
 		port_type = PORT_FIBRE;
 		break;
@@ -1105,6 +1106,39 @@
 	return port_type;
 }
 
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+			     struct qed_mcp_link_params *params,
+			     struct qed_mcp_link_state *link,
+			     struct qed_mcp_link_capabilities *link_caps)
+{
+	void *p;
+
+	if (!IS_PF(hwfn->cdev)) {
+		qed_vf_get_link_params(hwfn, params);
+		qed_vf_get_link_state(hwfn, link);
+		qed_vf_get_link_caps(hwfn, link_caps);
+
+		return 0;
+	}
+
+	p = qed_mcp_get_link_params(hwfn);
+	if (!p)
+		return -ENXIO;
+	memcpy(params, p, sizeof(*params));
+
+	p = qed_mcp_get_link_state(hwfn);
+	if (!p)
+		return -ENXIO;
+	memcpy(link, p, sizeof(*link));
+
+	p = qed_mcp_get_link_capabilities(hwfn);
+	if (!p)
+		return -ENXIO;
+	memcpy(link_caps, p, sizeof(*link_caps));
+
+	return 0;
+}
+
 static void qed_fill_link(struct qed_hwfn *hwfn,
 			  struct qed_link_output *if_link)
 {
@@ -1116,15 +1150,9 @@
 	memset(if_link, 0, sizeof(*if_link));
 
 	/* Prepare source inputs */
-	if (IS_PF(hwfn->cdev)) {
-		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-		memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
-		       sizeof(link_caps));
-	} else {
-		qed_vf_get_link_params(hwfn, &params);
-		qed_vf_get_link_state(hwfn, &link);
-		qed_vf_get_link_caps(hwfn, &link_caps);
+	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+		return;
 	}
 
 	/* Set the link parameters to pass to protocol driver */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index acac662..67d9893 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -614,7 +614,9 @@
 
 			*p_en2 = *p_ent;
 
-			kfree(p_ent);
+			/* EBLOCK is responsible for freeing the allocated p_ent */
+			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+				kfree(p_ent);
 
 			p_ent = p_en2;
 		}
@@ -749,6 +751,15 @@
 		 * Thus, after gaining the answer perform the cleanup here.
 		 */
 		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+		if (p_ent->queue == &p_spq->unlimited_pending) {
+			/* This is an allocated p_ent which does not need to
+			 * return to pool.
+			 */
+			kfree(p_ent);
+			return rc;
+		}
+
 		if (rc)
 			goto spq_post_fail2;
 
@@ -844,8 +855,12 @@
 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 					fw_return_code);
 
-	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
-		/* EBLOCK is responsible for freeing its own entry */
+	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+	    (found->queue == &p_spq->unlimited_pending))
+		/* EBLOCK is responsible for returning its own entry into the
+		 * free list, unless it originally added the entry into the
+		 * unlimited pending list.
+		 */
 		qed_spq_return_entry(p_hwfn, found);
 
 	/* Attempt to post pending requests */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c8667c6..c90b2b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -12,11 +12,13 @@
 #include "qed_vf.h"
 #define QED_VF_ARRAY_LENGTH (3)
 
+#ifdef CONFIG_QED_SRIOV
 #define IS_VF(cdev)             ((cdev)->b_is_vf)
 #define IS_PF(cdev)             (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
 #define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
 #else
+#define IS_VF(cdev)             (0)
+#define IS_PF(cdev)             (1)
 #define IS_PF_SRIOV(p_hwfn)     (0)
 #endif
 #define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5d00d14..f8e11f9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -87,7 +87,9 @@
 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
 	{ 0 }
 };
 
@@ -3229,7 +3231,7 @@
 	return rc;
 }
 
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
 	int rc, tc, i;
 	int vlan_removal_en = 1;
@@ -3460,6 +3462,7 @@
 
 enum qede_load_mode {
 	QEDE_LOAD_NORMAL,
+	QEDE_LOAD_RELOAD,
 };
 
 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3498,7 +3501,7 @@
 		goto err3;
 	DP_INFO(edev, "Setup IRQs succeeded\n");
 
-	rc = qede_start_queues(edev);
+	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
 	if (rc)
 		goto err4;
 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3553,7 +3556,7 @@
 	if (func)
 		func(edev, args);
 
-	qede_load(edev, QEDE_LOAD_NORMAL);
+	qede_load(edev, QEDE_LOAD_RELOAD);
 
 	mutex_lock(&edev->qede_lock);
 	qede_config_rx_mode(edev->ndev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec86..097f363 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@
 
 #ifdef CONFIG_RFS_ACCEL
 	if (efx->type->offload_features & NETIF_F_NTUPLE) {
-		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
-					   sizeof(*efx->rps_flow_id),
-					   GFP_KERNEL);
-		if (!efx->rps_flow_id) {
+		struct efx_channel *channel;
+		int i, success = 1;
+
+		efx_for_each_channel(channel, efx) {
+			channel->rps_flow_id =
+				kcalloc(efx->type->max_rx_ip_filters,
+					sizeof(*channel->rps_flow_id),
+					GFP_KERNEL);
+			if (!channel->rps_flow_id)
+				success = 0;
+			else
+				for (i = 0;
+				     i < efx->type->max_rx_ip_filters;
+				     ++i)
+					channel->rps_flow_id[i] =
+						RPS_FLOW_ID_INVALID;
+		}
+
+		if (!success) {
+			efx_for_each_channel(channel, efx)
+				kfree(channel->rps_flow_id);
 			efx->type->filter_table_remove(efx);
 			rc = -ENOMEM;
 			goto out_unlock;
 		}
+
+		efx->rps_expire_index = efx->rps_expire_channel = 0;
 	}
 #endif
 out_unlock:
@@ -1744,7 +1763,10 @@
 static void efx_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
-	kfree(efx->rps_flow_id);
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		kfree(channel->rps_flow_id);
 #endif
 	down_write(&efx->filter_sem);
 	efx->type->filter_table_remove(efx);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e3..4c83739 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@
 			     const struct efx_farch_register_test *regs,
 			     size_t n_regs)
 {
-	unsigned address = 0, i, j;
+	unsigned address = 0;
+	int i, j;
 	efx_oword_t mask, imask, original, reg, buf;
 
 	for (i = 0; i < n_regs; ++i) {
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4..2a9228a 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@
 
 	case MC_CMD_MEDIA_XFP:
 	case MC_CMD_MEDIA_SFP_PLUS:
-		result |= SUPPORTED_FIBRE;
-		break;
-
 	case MC_CMD_MEDIA_QSFP_PLUS:
 		result |= SUPPORTED_FIBRE;
+		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+			result |= SUPPORTED_1000baseT_Full;
+		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+			result |= SUPPORTED_10000baseT_Full;
 		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
 			result |= SUPPORTED_40000baseCR4_Full;
 		break;
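
The mcdi_port.c change above lets the SFP+ and QSFP+ media cases share one block that turns PHY capability bits into ethtool supported-link flags, so lower speeds reported by the firmware are advertised as well. A userspace model of that translation, assuming illustrative CAP_*/SUP_* constants rather than the real MCDI and ethtool values:

#include <stdint.h>

#define CAP_1000FDX_BIT		4	/* illustrative bit positions */
#define CAP_10000FDX_BIT	5
#define CAP_40000FDX_BIT	8

#define SUP_FIBRE		(1u << 0)	/* illustrative flag values */
#define SUP_1000_FULL		(1u << 1)
#define SUP_10000_FULL		(1u << 2)
#define SUP_40000_CR4		(1u << 3)

static uint32_t sfp_supported_from_caps(uint32_t cap)
{
	uint32_t result = SUP_FIBRE;	/* fibre media is always reported */

	if (cap & (1u << CAP_1000FDX_BIT))
		result |= SUP_1000_FULL;
	if (cap & (1u << CAP_10000FDX_BIT))
		result |= SUP_10000_FULL;
	if (cap & (1u << CAP_40000FDX_BIT))
		result |= SUP_40000_CR4;
	return result;
}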
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c4223..d13ddf9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *      indexed by filter ID
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@
 	unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
 	unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+	u32 *rps_flow_id;
 #endif
 
 	unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@
  * @filter_sem: Filter table rw_semaphore, for freeing the table
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- *	indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ *	@rps_expire_channel's @rps_flow_id
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of receive queues that need to be flushed.
  *	Decremented when efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@
 	spinlock_t filter_lock;
 	void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
-	u32 *rps_flow_id;
+	unsigned int rps_expire_channel;
 	unsigned int rps_expire_index;
 #endif
 
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995..02b0b527 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_channel *channel;
 	struct efx_filter_spec spec;
-	const __be16 *ports;
-	__be16 ether_type;
-	int nhoff;
+	struct flow_keys fk;
 	int rc;
 
-	/* The core RPS/RFS code has already parsed and validated
-	 * VLAN, IP and transport headers.  We assume they are in the
-	 * header area.
-	 */
+	if (flow_id == RPS_FLOW_ID_INVALID)
+		return -EINVAL;
 
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		const struct vlan_hdr *vh =
-			(const struct vlan_hdr *)skb->data;
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
 
-		/* We can't filter on the IP 5-tuple and the vlan
-		 * together, so just strip the vlan header and filter
-		 * on the IP part.
-		 */
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
-		ether_type = vh->h_vlan_encapsulated_proto;
-		nhoff = sizeof(struct vlan_hdr);
-	} else {
-		ether_type = skb->protocol;
-		nhoff = 0;
-	}
-
-	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+		return -EPROTONOSUPPORT;
+	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
 		return -EPROTONOSUPPORT;
 
 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@
 		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
 		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
 		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
-	spec.ether_type = ether_type;
+	spec.ether_type = fk.basic.n_proto;
+	spec.ip_proto = fk.basic.ip_proto;
 
-	if (ether_type == htons(ETH_P_IP)) {
-		const struct iphdr *ip =
-			(const struct iphdr *)(skb->data + nhoff);
-
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-		if (ip_is_fragment(ip))
-			return -EPROTONOSUPPORT;
-		spec.ip_proto = ip->protocol;
-		spec.rem_host[0] = ip->saddr;
-		spec.loc_host[0] = ip->daddr;
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+	if (fk.basic.n_proto == htons(ETH_P_IP)) {
+		spec.rem_host[0] = fk.addrs.v4addrs.src;
+		spec.loc_host[0] = fk.addrs.v4addrs.dst;
 	} else {
-		const struct ipv6hdr *ip6 =
-			(const struct ipv6hdr *)(skb->data + nhoff);
-
-		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-				    nhoff + sizeof(*ip6) + 4);
-		spec.ip_proto = ip6->nexthdr;
-		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
-		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
-		ports = (const __be16 *)(ip6 + 1);
+		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
 	}
 
-	spec.rem_port = ports[0];
-	spec.loc_port = ports[1];
+	spec.rem_port = fk.ports.src;
+	spec.loc_port = fk.ports.dst;
 
 	rc = efx->type->filter_rfs_insert(efx, &spec);
 	if (rc < 0)
 		return rc;
 
 	/* Remember this so we can check whether to expire the filter later */
-	efx->rps_flow_id[rc] = flow_id;
-	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+	channel = efx_get_channel(efx, rxq_index);
+	channel->rps_flow_id[rc] = flow_id;
 	++channel->rfs_filters_added;
 
-	if (ether_type == htons(ETH_P_IP))
+	if (spec.ether_type == htons(ETH_P_IP))
 		netif_info(efx, rx_status, efx->net_dev,
 			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
-			   ntohs(ports[1]), rxq_index, flow_id, rc);
+			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
 	else
 		netif_info(efx, rx_status, efx->net_dev,
 			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
-			   ntohs(ports[1]), rxq_index, flow_id, rc);
+			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
 
 	return rc;
 }
@@ -935,24 +905,34 @@
 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
 {
 	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-	unsigned int index, size;
+	unsigned int channel_idx, index, size;
 	u32 flow_id;
 
 	if (!spin_trylock_bh(&efx->filter_lock))
 		return false;
 
 	expire_one = efx->type->filter_rfs_expire_one;
+	channel_idx = efx->rps_expire_channel;
 	index = efx->rps_expire_index;
 	size = efx->type->max_rx_ip_filters;
 	while (quota--) {
-		flow_id = efx->rps_flow_id[index];
-		if (expire_one(efx, flow_id, index))
+		struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+		flow_id = channel->rps_flow_id[index];
+
+		if (flow_id != RPS_FLOW_ID_INVALID &&
+		    expire_one(efx, flow_id, index)) {
 			netif_info(efx, rx_status, efx->net_dev,
-				   "expired filter %d [flow %u]\n",
-				   index, flow_id);
-		if (++index == size)
+				   "expired filter %d [queue %u flow %u]\n",
+				   index, channel_idx, flow_id);
+			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+		}
+		if (++index == size) {
+			if (++channel_idx == efx->n_channels)
+				channel_idx = 0;
 			index = 0;
+		}
 	}
+	efx->rps_expire_channel = channel_idx;
 	efx->rps_expire_index = index;
 
 	spin_unlock_bh(&efx->filter_lock);
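
The sfc rx.c and net_driver.h changes above move the aRFS flow-ID table into each channel and make the expiry scan walk one channel's table at a time, wrapping to the next channel when the index wraps. A minimal userspace model of that bookkeeping; N_CHANNELS, TABLE_SIZE and the unconditional slot invalidation are illustrative (the driver only clears a slot when the hardware reports the flow idle):

#include <stdint.h>

#define N_CHANNELS	4	/* illustrative sizes */
#define TABLE_SIZE	8
#define FLOW_ID_INVALID	0xFFFFFFFFu

struct model {
	uint32_t flow_id[N_CHANNELS][TABLE_SIZE];
	unsigned int expire_channel;	/* next channel to scan */
	unsigned int expire_index;	/* next slot in that channel */
};

/* Scan up to "quota" slots, resuming where the previous scan stopped. */
static void model_expire(struct model *m, unsigned int quota)
{
	while (quota--) {
		uint32_t id = m->flow_id[m->expire_channel][m->expire_index];

		if (id != FLOW_ID_INVALID)
			m->flow_id[m->expire_channel][m->expire_index] =
				FLOW_ID_INVALID;

		if (++m->expire_index == TABLE_SIZE) {
			m->expire_index = 0;
			if (++m->expire_channel == N_CHANNELS)
				m->expire_channel = 0;
		}
	}
}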
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 4f7283d..44da877 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -156,7 +156,7 @@
 		struct netdev_hw_addr *ha;
 
 		netdev_for_each_uc_addr(ha, dev) {
-			dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+			dwmac4_set_umac_addr(hw, ha->addr, reg);
 			reg++;
 		}
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index eac45d0..a473c18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3450,8 +3450,6 @@
 	if (!netif_running(ndev))
 		return 0;
 
-	spin_lock_irqsave(&priv->lock, flags);
-
 	/* Power Down bit, into the PM register, is cleared
 	 * automatically as soon as a magic packet or a Wake-up frame
 	 * is received. Anyway, it's better to manually clear
@@ -3459,7 +3457,9 @@
 	 * from another devices (e.g. serial console).
 	 */
 	if (device_may_wakeup(priv->device)) {
+		spin_lock_irqsave(&priv->lock, flags);
 		priv->hw->mac->pmt(priv->hw, 0);
+		spin_unlock_irqrestore(&priv->lock, flags);
 		priv->irq_wake = 0;
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3473,8 @@
 
 	netif_device_attach(ndev);
 
+	spin_lock_irqsave(&priv->lock, flags);
+
 	priv->cur_rx = 0;
 	priv->dirty_rx = 0;
 	priv->dirty_tx = 0;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4b08a2f..5319089 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1339,7 +1339,7 @@
 	if (priv->coal_intvl != 0) {
 		struct ethtool_coalesce coal;
 
-		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+		coal.rx_coalesce_usecs = priv->coal_intvl;
 		cpsw_set_coalesce(ndev, &coal);
 	}
 
@@ -2505,8 +2505,6 @@
 clean_ale_ret:
 	cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
-	cpdma_chan_destroy(priv->txch);
-	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
 clean_runtime_disable_ret:
 	pm_runtime_disable(&pdev->dev);
@@ -2534,8 +2532,6 @@
 	unregister_netdev(ndev);
 
 	cpsw_ale_destroy(priv->ale);
-	cpdma_chan_destroy(priv->txch);
-	cpdma_chan_destroy(priv->rxch);
 	cpdma_ctlr_destroy(priv->dma);
 	pm_runtime_disable(&pdev->dev);
 	device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc..11213a3 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@
 	if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
 		struct mpipe_data *md = &mpipe_data[instance];
 		struct skb_shared_hwtstamps shhwtstamps;
-		struct timespec ts;
+		struct timespec64 ts;
 
 		shtx->tx_flags |= SKBTX_IN_PROGRESS;
 		gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@
 /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
 {
-	struct timespec ts;
+	struct timespec64 ts;
 
-	getnstimeofday(&ts);
+	ktime_get_ts64(&ts);
 	gxio_mpipe_set_timestamp(&md->context, &ts);
 
 	mutex_init(&md->ptp_lock);
diff --git a/drivers/net/fddi/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index b0be023..a957a1c 100644
--- a/drivers/net/fddi/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
@@ -17,4 +17,4 @@
 #   projects. To keep the source common for all those drivers (and
 #   thus simplify fixes to it), please do not clean it up!
 
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cadefe4..cc39cef 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -958,8 +958,8 @@
 		dev->stats.collisions++;
 	else if (err == -ENETUNREACH)
 		dev->stats.tx_carrier_errors++;
-	else
-		dev->stats.tx_errors++;
+
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
@@ -1048,8 +1048,8 @@
 		dev->stats.collisions++;
 	else if (err == -ENETUNREACH)
 		dev->stats.tx_carrier_errors++;
-	else
-		dev->stats.tx_errors++;
+
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 #endif
@@ -1508,6 +1508,7 @@
 {
 	struct nlattr *tb[IFLA_MAX + 1];
 	struct net_device *dev;
+	LIST_HEAD(list_kill);
 	int err;
 
 	memset(tb, 0, sizeof(tb));
@@ -1519,8 +1520,10 @@
 	err = geneve_configure(net, dev, &geneve_remote_unspec,
 			       0, 0, 0, 0, htons(dst_port), true,
 			       GENEVE_F_UDP_ZERO_CSUM6_RX);
-	if (err)
-		goto err;
+	if (err) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
 
 	/* openvswitch users expect packet sizes to be unrestricted,
 	 * so set the largest MTU we can.
@@ -1529,10 +1532,15 @@
 	if (err)
 		goto err;
 
+	err = rtnl_configure_link(dev, NULL);
+	if (err < 0)
+		goto err;
+
 	return dev;
 
  err:
-	free_netdev(dev);
+	geneve_dellink(dev, &list_kill);
+	unregister_netdevice_many(&list_kill);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 47ee2c8..0e7eff7 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -605,12 +605,41 @@
 	dev_put(dev);
 }
 
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+					     unsigned char **iv,
+					     struct scatterlist **sg)
+{
+	size_t size, iv_offset, sg_offset;
+	struct aead_request *req;
+	void *tmp;
+
+	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+	iv_offset = size;
+	size += GCM_AES_IV_LEN;
+
+	size = ALIGN(size, __alignof__(struct scatterlist));
+	sg_offset = size;
+	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+	tmp = kmalloc(size, GFP_ATOMIC);
+	if (!tmp)
+		return NULL;
+
+	*iv = (unsigned char *)(tmp + iv_offset);
+	*sg = (struct scatterlist *)(tmp + sg_offset);
+	req = tmp;
+
+	aead_request_set_tfm(req, tfm);
+
+	return req;
+}
+
 static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 				      struct net_device *dev)
 {
 	int ret;
-	struct scatterlist sg[MAX_SKB_FRAGS + 1];
-	unsigned char iv[GCM_AES_IV_LEN];
+	struct scatterlist *sg;
+	unsigned char *iv;
 	struct ethhdr *eth;
 	struct macsec_eth_header *hh;
 	size_t unprotected_len;
@@ -668,8 +697,6 @@
 	macsec_fill_sectag(hh, secy, pn);
 	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 
-	macsec_fill_iv(iv, secy->sci, pn);
-
 	skb_put(skb, secy->icv_len);
 
 	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
 	if (!req) {
 		macsec_txsa_put(tx_sa);
 		kfree_skb(skb);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	macsec_fill_iv(iv, secy->sci, pn);
+
 	sg_init_table(sg, MAX_SKB_FRAGS + 1);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
@@ -861,7 +890,6 @@
 out:
 	macsec_rxsa_put(rx_sa);
 	dev_put(dev);
-	return;
 }
 
 static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@
 				      struct macsec_secy *secy)
 {
 	int ret;
-	struct scatterlist sg[MAX_SKB_FRAGS + 1];
-	unsigned char iv[GCM_AES_IV_LEN];
+	struct scatterlist *sg;
+	unsigned char *iv;
 	struct aead_request *req;
 	struct macsec_eth_header *hdr;
 	u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
 	if (!req) {
 		kfree_skb(skb);
 		return ERR_PTR(-ENOMEM);
@@ -1234,7 +1262,7 @@
 	struct crypto_aead *tfm;
 	int ret;
 
-	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 	if (!tfm || IS_ERR(tfm))
 		return NULL;
 
@@ -3361,6 +3389,7 @@
 	genl_unregister_family(&macsec_fam);
 	rtnl_link_unregister(&macsec_link_ops);
 	unregister_netdevice_notifier(&macsec_notifier);
+	rcu_barrier();
 }
 
 module_init(macsec_init);
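
macsec_alloc_req() above moves the IV and the scatterlist off the stack and into a single kmalloc() that also carries the aead_request, so the buffers stay valid if the AEAD implementation completes asynchronously (note that crypto_alloc_aead() above now passes a mask of 0 instead of CRYPTO_ALG_ASYNC, allowing async tfms). The standalone sketch below only reproduces the offset and alignment arithmetic of that co-allocation; REQ_PRIV, IV_LEN, NSG, struct fake_req and struct fake_sgent are illustrative stand-ins for crypto_aead_reqsize(), GCM_AES_IV_LEN, MAX_SKB_FRAGS + 1 and the kernel structures.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct fake_req   { uint64_t hdr[4]; };		/* stand-in for struct aead_request */
struct fake_sgent { void *page; size_t len; };	/* stand-in for struct scatterlist */

#define REQ_PRIV 64	/* stand-in for crypto_aead_reqsize(tfm) */
#define IV_LEN   12	/* stand-in for GCM_AES_IV_LEN */
#define NSG      17	/* stand-in for MAX_SKB_FRAGS + 1 */

int main(void)
{
	size_t size, iv_off, sg_off;
	struct fake_sgent *sg;
	struct fake_req *req;
	unsigned char *iv;
	void *tmp;

	/* Same layout computation as macsec_alloc_req() above. */
	size = sizeof(struct fake_req) + REQ_PRIV;
	iv_off = size;
	size += IV_LEN;

	size = ALIGN_UP(size, __alignof__(struct fake_sgent));
	sg_off = size;
	size += sizeof(struct fake_sgent) * NSG;

	tmp = malloc(size);
	if (!tmp)
		return 1;

	req = tmp;
	iv = (unsigned char *)tmp + iv_off;
	sg = (struct fake_sgent *)((char *)tmp + sg_off);

	printf("one allocation of %zu bytes: req@0, iv@%zu, sg@%zu\n",
	       size, iv_off, sg_off);

	(void)req; (void)iv; (void)sg;
	free(tmp);
	return 0;
}
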
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 280e879..79ccc11 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -407,15 +407,7 @@
 	if (err < 0)
 		return err;
 
-	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
-	phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
-	phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-	phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
-	err = genphy_config_aneg(phydev);
-
-	return err;
+	return genphy_config_aneg(phydev);
 }
 
 static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +628,28 @@
 	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+	int err, oldpage;
+
+	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+	if (err < 0)
+		return err;
+
+	/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+	err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+			MII_88E1121_PHY_LED_DEF);
+	if (err < 0)
+		return err;
+
+	phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+	/* Set marvell,reg-init configuration from device tree */
+	return marvell_config_init(phydev);
+}
+
 static int m88e1510_config_init(struct phy_device *phydev)
 {
 	int err;
@@ -668,7 +682,7 @@
 			return err;
 	}
 
-	return marvell_config_init(phydev);
+	return m88e1121_config_init(phydev);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1196,7 +1210,7 @@
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
-		.config_init = &marvell_config_init,
+		.config_init = &m88e1121_config_init,
 		.config_aneg = &m88e1121_config_aneg,
 		.read_status = &marvell_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1229,7 @@
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
-		.config_init = &marvell_config_init,
+		.config_init = &m88e1121_config_init,
 		.config_aneg = &m88e1318_config_aneg,
 		.read_status = &marvell_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2e21e93..b62c4aa 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -75,22 +75,13 @@
 	 * in all capable mode before using it.
 	 */
 	if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
-		int timeout = 50000;
-
-		/* set "all capable" mode and reset the phy */
+		/* set "all capable" mode */
 		rc |= MII_LAN83C185_MODE_ALL;
 		phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
-		phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-		/* wait end of reset (max 500 ms) */
-		do {
-			udelay(10);
-			if (timeout-- == 0)
-				return -1;
-			rc = phy_read(phydev, MII_BMCR);
-		} while (rc & BMCR_RESET);
 	}
-	return 0;
+
+	/* reset the phy */
+	return genphy_soft_reset(phydev);
 }
 
 static int lan911x_config_init(struct phy_device *phydev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2ace126..fdee772 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1203,8 +1203,10 @@
 		goto err_dev_open;
 	}
 
+	netif_addr_lock_bh(dev);
 	dev_uc_sync_multiple(port_dev, dev);
 	dev_mc_sync_multiple(port_dev, dev);
+	netif_addr_unlock_bh(dev);
 
 	err = vlan_vids_add_by_dev(port_dev, dev);
 	if (err) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3f9f6ed..4e257b8 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"3"
+#define NET_VERSION		"4"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -116,6 +116,7 @@
 #define USB_TX_DMA		0xd434
 #define USB_TOLERANCE		0xd490
 #define USB_LPM_CTRL		0xd41a
+#define USB_BMU_RESET		0xd4b0
 #define USB_UPS_CTRL		0xd800
 #define USB_MISC_0		0xd81a
 #define USB_POWER_CUT		0xd80a
@@ -338,6 +339,10 @@
 #define TEST_MODE_DISABLE	0x00000001
 #define TX_SIZE_ADJUST1		0x00000100
 
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN		0x01
+#define BMU_RESET_EP_OUT	0x02
+
 /* USB_UPS_CTRL */
 #define POWER_CUT		0x0100
 
@@ -2169,7 +2174,7 @@
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
 	u32 mtu = tp->netdev->mtu;
-	u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+	u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
 
 	ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -2456,6 +2461,17 @@
 	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
 }
 
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+	u32 ocp_data;
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+	ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+	ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+	ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+	ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
 static void r8152_aldps_en(struct r8152 *tp, bool enable)
 {
 	if (enable) {
@@ -2681,6 +2697,7 @@
 	r8153_hw_phy_cfg(tp);
 
 	rtl8152_nic_reset(tp);
+	rtl_reset_bmu(tp);
 
 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
 	ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2759,7 @@
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
 	rtl_disable(tp);
+	rtl_reset_bmu(tp);
 
 	for (i = 0; i < 1000; i++) {
 		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2821,7 @@
 {
 	r8153_aldps_en(tp, false);
 	rtl_disable(tp);
+	rtl_reset_bmu(tp);
 	r8153_aldps_en(tp, true);
 	usb_enable_lpm(tp->udev);
 }
@@ -3382,15 +3401,11 @@
 	r8153_power_cut_en(tp, false);
 	r8153_u1u2en(tp, true);
 
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
-		       PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
-		       U1U2_SPDWN_EN | L1_SPDWN_EN);
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
-		       PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
-		       TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
-		       EEE_SPDWN_EN);
+	/* MAC clock speed down */
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
 
 	r8153_enable_eee(tp);
 	r8153_aldps_en(tp, true);
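
rtl_reset_bmu() above pulses the buffer management unit's per-endpoint reset bits: it reads USB_BMU_RESET, writes the value back with BMU_RESET_EP_IN/OUT cleared, then writes it again with both bits set. The fragment below is a hypothetical userspace model of that clear-then-set pulse on a register image; reg_read()/reg_write() stand in for the ocp_read_byte()/ocp_write_byte() accessors and are not real APIs.

#include <stdio.h>
#include <stdint.h>

#define BMU_RESET_EP_IN		0x01
#define BMU_RESET_EP_OUT	0x02

static uint8_t fake_reg = 0xff;		/* pretend USB_BMU_RESET image */

static uint8_t reg_read(void)
{
	return fake_reg;
}

static void reg_write(uint8_t val)
{
	fake_reg = val;
	printf("write 0x%02x\n", val);
}

/* Clear-then-set pulse, mirroring the shape of rtl_reset_bmu() above. */
static void reset_bmu(void)
{
	uint8_t val = reg_read();

	val &= (uint8_t)~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
	reg_write(val);

	val |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
	reg_write(val);
}

int main(void)
{
	reset_bmu();
	return 0;
}
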
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d9d2806..dc989a8 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
 #define SUSPEND_ALLMODES		(SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
 					 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
 
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
 struct smsc95xx_priv {
 	u32 mac_cr;
 	u32 hash_hi;
@@ -69,6 +71,9 @@
 	spinlock_t mac_cr_lock;
 	u8 features;
 	u8 suspend_flags;
+	bool link_ok;
+	struct delayed_work carrier_check;
+	struct usbnet *dev;
 };
 
 static bool turbo_mode = true;
@@ -624,6 +629,44 @@
 			    intdata);
 }
 
+static void set_carrier(struct usbnet *dev, bool link)
+{
+	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+	if (pdata->link_ok == link)
+		return;
+
+	pdata->link_ok = link;
+
+	if (link)
+		usbnet_link_change(dev, 1, 0);
+	else
+		usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+	struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+						carrier_check.work);
+	struct usbnet *dev = pdata->dev;
+	int ret;
+
+	if (pdata->suspend_flags != 0)
+		return;
+
+	ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+	if (ret < 0) {
+		netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+		return;
+	}
+	if (ret & BMSR_LSTATUS)
+		set_carrier(dev, 1);
+	else
+		set_carrier(dev, 0);
+
+	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
 /* Enable or disable Tx & Rx checksum offload engines */
 static int smsc95xx_set_features(struct net_device *netdev,
 	netdev_features_t features)
@@ -1165,13 +1208,20 @@
 	dev->net->flags |= IFF_MULTICAST;
 	dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+	pdata->dev = dev;
+	INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
 	return 0;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
 	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
 	if (pdata) {
+		cancel_delayed_work(&pdata->carrier_check);
 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
 		kfree(pdata);
 		pdata = NULL;
@@ -1695,6 +1745,7 @@
 
 	/* do this first to ensure it's cleared even in error case */
 	pdata->suspend_flags = 0;
+	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
 	if (suspend_flags & SUSPEND_ALLMODES) {
 		/* clear wake-up sources */
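
The smsc95xx hunks above add a self-rescheduling delayed work (check_carrier(), every CARRIER_CHECK_DELAY = 2*HZ) that reads MII_BMSR and feeds the result to set_carrier(), which calls usbnet_link_change() only when the cached pdata->link_ok value actually changes. The plain-C sketch below models just that edge detection; read_link() is an invented stand-in for the MDIO read and the loop stands in for the rescheduled work.

#include <stdio.h>
#include <stdbool.h>

static bool link_ok;		/* last reported state, like pdata->link_ok */

/* Invented stand-in for reading BMSR_LSTATUS over MDIO. */
static bool read_link(int tick)
{
	return tick >= 3;	/* pretend the link comes up on the 4th poll */
}

/* Report only transitions, as set_carrier() does above. */
static void set_carrier(bool link)
{
	if (link_ok == link)
		return;
	link_ok = link;
	printf("link %s\n", link ? "up" : "down");
}

int main(void)
{
	/* Stand-in for the delayed work rescheduling itself every 2 seconds. */
	for (int tick = 0; tick < 6; tick++)
		set_carrier(read_link(tick));
	return 0;
}
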
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e5..e0638e5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@
 
 	virtio_device_ready(vdev);
 
-	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
-		/* If we didn't even get one input buffer, we're useless. */
-		if (vi->rq[i].vq->num_free ==
-		    virtqueue_get_vring_size(vi->rq[i].vq)) {
-			free_unused_bufs(vi);
-			err = -ENOMEM;
-			goto free_recv_bufs;
-		}
-	}
-
 	vi->nb.notifier_call = &virtnet_cpu_callback;
 	err = register_hotcpu_notifier(&vi->nb);
 	if (err) {
 		pr_debug("virtio_net: registering cpu notifier failed\n");
-		goto free_recv_bufs;
+		goto free_unregister_netdev;
 	}
 
 	/* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@
 
 	return 0;
 
-free_recv_bufs:
+free_unregister_netdev:
 	vi->vdev->config->reset(vdev);
 
-	free_receive_bufs(vi);
 	unregister_netdev(dev);
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index db8022a..08885bc 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1369,7 +1369,7 @@
 				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
 
 				segCnt = rcdlro->segCnt;
-				BUG_ON(segCnt <= 1);
+				WARN_ON_ONCE(segCnt == 0);
 				mss = rcdlro->mss;
 				if (unlikely(segCnt <= 1))
 					segCnt = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c482539..3d2b64e 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.8.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040800
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index dff0884..8bd8c7e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -304,7 +304,7 @@
 	dst_hold(&rt6->dst);
 
 	rt6->rt6i_table = rt6i_table;
-	rt6->dst.output	= vrf_output6;
+	rt6->dst.output = vrf_output6;
 	rcu_assign_pointer(vrf->rt6, rt6);
 
 	rc = 0;
@@ -403,7 +403,7 @@
 	if (!rth)
 		return -ENOMEM;
 
-	rth->dst.output	= vrf_output;
+	rth->dst.output = vrf_output;
 	rth->rt_table_id = vrf->tb_id;
 
 	rcu_assign_pointer(vrf->rth, rth);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8ff30c3..b3b9db6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2952,30 +2952,6 @@
 	return 0;
 }
 
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
-				    u8 name_assign_type, struct vxlan_config *conf)
-{
-	struct nlattr *tb[IFLA_MAX+1];
-	struct net_device *dev;
-	int err;
-
-	memset(&tb, 0, sizeof(tb));
-
-	dev = rtnl_create_link(net, name, name_assign_type,
-			       &vxlan_link_ops, tb);
-	if (IS_ERR(dev))
-		return dev;
-
-	err = vxlan_dev_configure(net, dev, conf);
-	if (err < 0) {
-		free_netdev(dev);
-		return ERR_PTR(err);
-	}
-
-	return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[])
 {
@@ -3086,6 +3062,9 @@
 	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
 		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
+	if (tb[IFLA_MTU])
+		conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
 	err = vxlan_dev_configure(src_net, dev, &conf);
 	switch (err) {
 	case -ENODEV:
@@ -3265,6 +3244,40 @@
 	.get_link_net	= vxlan_get_link_net,
 };
 
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+				    u8 name_assign_type,
+				    struct vxlan_config *conf)
+{
+	struct nlattr *tb[IFLA_MAX + 1];
+	struct net_device *dev;
+	int err;
+
+	memset(&tb, 0, sizeof(tb));
+
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &vxlan_link_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	err = vxlan_dev_configure(net, dev, conf);
+	if (err < 0) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
+
+	err = rtnl_configure_link(dev, NULL);
+	if (err < 0) {
+		LIST_HEAD(list_kill);
+
+		vxlan_dellink(dev, &list_kill);
+		unregister_netdevice_many(&list_kill);
+		return ERR_PTR(err);
+	}
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
 					     struct net_device *dev)
 {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9ed0ed1..4dd5adc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2776,6 +2776,7 @@
 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
 	    !info->attrs[HWSIM_ATTR_FLAGS] ||
 	    !info->attrs[HWSIM_ATTR_COOKIE] ||
+	    !info->attrs[HWSIM_ATTR_SIGNAL] ||
 	    !info->attrs[HWSIM_ATTR_TX_INFO])
 		goto out;
 
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 020ac1a..cea9443 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -382,7 +382,7 @@
 
 	ret = of_property_read_u32(dt_node, "ref-clock-frequency",
 				   &pdev_data->ref_clock_freq);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret) {
 		dev_err(glue->dev,
 			"can't get reference clock frequency (%d)\n", ret);
 		return ret;
@@ -425,7 +425,7 @@
 	}
 
 	ret = wlcore_probe_of(spi, glue, &pdev_data);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret) {
 		dev_err(glue->dev,
 			"can't get device tree parameters (%d)\n", ret);
 		return ret;
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 53c1162..7c8a3bf 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -88,4 +88,17 @@
 
 	  Select Y if unsure
 
+config NVDIMM_DAX
+	bool "NVDIMM DAX: Raw access to persistent memory"
+	default LIBNVDIMM
+	depends on NVDIMM_PFN
+	help
+	  Support raw device dax access to a persistent memory
+	  namespace.  For environments that want to hard partition
+	  persistent memory, this capability provides a mechanism to
+	  sub-divide a namespace into character devices that can only be
+	  accessed via DAX (mmap(2)).
+
+	  Select Y if unsure
+
 endif
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index ea84d3c..909554c 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -23,3 +23,4 @@
 libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
 libnvdimm-$(CONFIG_BTT) += btt_devs.o
 libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
+libnvdimm-$(CONFIG_NVDIMM_DAX) += dax_devs.o
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index e9ff922..495e06d9 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -21,19 +21,19 @@
 #include <linux/sizes.h>
 #include "nd.h"
 
-struct nd_blk_device {
-	struct request_queue *queue;
-	struct gendisk *disk;
-	struct nd_namespace_blk *nsblk;
-	struct nd_blk_region *ndbr;
-	size_t disk_size;
-	u32 sector_size;
-	u32 internal_lbasize;
-};
-
-static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
+static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
 {
-	return blk_dev->nsblk->lbasize - blk_dev->sector_size;
+	return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
+}
+
+static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
+{
+	return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
+}
+
+static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
+{
+	return nsblk->lbasize - nsblk_meta_size(nsblk);
 }
 
 static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
@@ -57,20 +57,29 @@
 	return SIZE_MAX;
 }
 
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
-				struct bio_integrity_payload *bip, u64 lba,
-				int rw)
+static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
 {
-	unsigned int len = nd_blk_meta_size(blk_dev);
+	struct nd_region *nd_region;
+	struct device *parent;
+
+	parent = nsblk->common.dev.parent;
+	nd_region = container_of(parent, struct nd_region, dev);
+	return container_of(nd_region, struct nd_blk_region, nd_region);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+		struct bio_integrity_payload *bip, u64 lba, int rw)
+{
+	struct nd_blk_region *ndbr = to_ndbr(nsblk);
+	unsigned int len = nsblk_meta_size(nsblk);
 	resource_size_t	dev_offset, ns_offset;
-	struct nd_namespace_blk *nsblk;
-	struct nd_blk_region *ndbr;
+	u32 internal_lbasize, sector_size;
 	int err = 0;
 
-	nsblk = blk_dev->nsblk;
-	ndbr = blk_dev->ndbr;
-	ns_offset = lba * blk_dev->internal_lbasize + blk_dev->sector_size;
+	internal_lbasize = nsblk_internal_lbasize(nsblk);
+	sector_size = nsblk_sector_size(nsblk);
+	ns_offset = lba * internal_lbasize + sector_size;
 	dev_offset = to_dev_offset(nsblk, ns_offset, len);
 	if (dev_offset == SIZE_MAX)
 		return -EIO;
@@ -104,25 +113,26 @@
 }
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
-				struct bio_integrity_payload *bip, u64 lba,
-				int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+		struct bio_integrity_payload *bip, u64 lba, int rw)
 {
 	return 0;
 }
 #endif
 
-static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
-			struct bio_integrity_payload *bip, struct page *page,
-			unsigned int len, unsigned int off, int rw,
-			sector_t sector)
+static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
+		struct bio_integrity_payload *bip, struct page *page,
+		unsigned int len, unsigned int off, int rw, sector_t sector)
 {
-	struct nd_blk_region *ndbr = blk_dev->ndbr;
+	struct nd_blk_region *ndbr = to_ndbr(nsblk);
 	resource_size_t	dev_offset, ns_offset;
+	u32 internal_lbasize, sector_size;
 	int err = 0;
 	void *iobuf;
 	u64 lba;
 
+	internal_lbasize = nsblk_internal_lbasize(nsblk);
+	sector_size = nsblk_sector_size(nsblk);
 	while (len) {
 		unsigned int cur_len;
 
@@ -132,11 +142,11 @@
 		 * Block Window setup/move steps. the do_io routine is capable
 		 * of handling len <= PAGE_SIZE.
 		 */
-		cur_len = bip ? min(len, blk_dev->sector_size) : len;
+		cur_len = bip ? min(len, sector_size) : len;
 
-		lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);
-		ns_offset = lba * blk_dev->internal_lbasize;
-		dev_offset = to_dev_offset(blk_dev->nsblk, ns_offset, cur_len);
+		lba = div_u64(sector << SECTOR_SHIFT, sector_size);
+		ns_offset = lba * internal_lbasize;
+		dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
 		if (dev_offset == SIZE_MAX)
 			return -EIO;
 
@@ -147,13 +157,13 @@
 			return err;
 
 		if (bip) {
-			err = nd_blk_rw_integrity(blk_dev, bip, lba, rw);
+			err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
 			if (err)
 				return err;
 		}
 		len -= cur_len;
 		off += cur_len;
-		sector += blk_dev->sector_size >> SECTOR_SHIFT;
+		sector += sector_size >> SECTOR_SHIFT;
 	}
 
 	return err;
@@ -161,10 +171,8 @@
 
 static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 {
-	struct block_device *bdev = bio->bi_bdev;
-	struct gendisk *disk = bdev->bd_disk;
 	struct bio_integrity_payload *bip;
-	struct nd_blk_device *blk_dev;
+	struct nd_namespace_blk *nsblk;
 	struct bvec_iter iter;
 	unsigned long start;
 	struct bio_vec bvec;
@@ -183,17 +191,17 @@
 	}
 
 	bip = bio_integrity(bio);
-	blk_dev = disk->private_data;
+	nsblk = q->queuedata;
 	rw = bio_data_dir(bio);
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
 
 		BUG_ON(len > PAGE_SIZE);
-		err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len,
-					bvec.bv_offset, rw, iter.bi_sector);
+		err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
+				bvec.bv_offset, rw, iter.bi_sector);
 		if (err) {
-			dev_info(&blk_dev->nsblk->common.dev,
+			dev_dbg(&nsblk->common.dev,
 					"io error in %s sector %lld, len %d,\n",
 					(rw == READ) ? "READ" : "WRITE",
 					(unsigned long long) iter.bi_sector, len);
@@ -209,17 +217,16 @@
 	return BLK_QC_T_NONE;
 }
 
-static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
+static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
 		resource_size_t offset, void *iobuf, size_t n, int rw)
 {
-	struct nd_blk_device *blk_dev = dev_get_drvdata(ndns->claim);
-	struct nd_namespace_blk *nsblk = blk_dev->nsblk;
-	struct nd_blk_region *ndbr = blk_dev->ndbr;
+	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
+	struct nd_blk_region *ndbr = to_ndbr(nsblk);
 	resource_size_t	dev_offset;
 
 	dev_offset = to_dev_offset(nsblk, offset, n);
 
-	if (unlikely(offset + n > blk_dev->disk_size)) {
+	if (unlikely(offset + n > nsblk->size)) {
 		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
 		return -EFAULT;
 	}
@@ -235,51 +242,65 @@
 	.revalidate_disk = nvdimm_revalidate_disk,
 };
 
-static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
-		struct nd_blk_device *blk_dev)
+static void nd_blk_release_queue(void *q)
 {
+	blk_cleanup_queue(q);
+}
+
+static void nd_blk_release_disk(void *disk)
+{
+	del_gendisk(disk);
+	put_disk(disk);
+}
+
+static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
+{
+	struct device *dev = &nsblk->common.dev;
 	resource_size_t available_disk_size;
+	struct request_queue *q;
 	struct gendisk *disk;
 	u64 internal_nlba;
 
-	internal_nlba = div_u64(blk_dev->disk_size, blk_dev->internal_lbasize);
-	available_disk_size = internal_nlba * blk_dev->sector_size;
+	internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
+	available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
 
-	blk_dev->queue = blk_alloc_queue(GFP_KERNEL);
-	if (!blk_dev->queue)
+	q = blk_alloc_queue(GFP_KERNEL);
+	if (!q)
 		return -ENOMEM;
-
-	blk_queue_make_request(blk_dev->queue, nd_blk_make_request);
-	blk_queue_max_hw_sectors(blk_dev->queue, UINT_MAX);
-	blk_queue_bounce_limit(blk_dev->queue, BLK_BOUNCE_ANY);
-	blk_queue_logical_block_size(blk_dev->queue, blk_dev->sector_size);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, blk_dev->queue);
-
-	disk = blk_dev->disk = alloc_disk(0);
-	if (!disk) {
-		blk_cleanup_queue(blk_dev->queue);
+	if (devm_add_action(dev, nd_blk_release_queue, q)) {
+		blk_cleanup_queue(q);
 		return -ENOMEM;
 	}
 
-	disk->driverfs_dev	= &ndns->dev;
+	blk_queue_make_request(q, nd_blk_make_request);
+	blk_queue_max_hw_sectors(q, UINT_MAX);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	q->queuedata = nsblk;
+
+	disk = alloc_disk(0);
+	if (!disk)
+		return -ENOMEM;
+	if (devm_add_action(dev, nd_blk_release_disk, disk)) {
+		put_disk(disk);
+		return -ENOMEM;
+	}
+
+	disk->driverfs_dev	= dev;
 	disk->first_minor	= 0;
 	disk->fops		= &nd_blk_fops;
-	disk->private_data	= blk_dev;
-	disk->queue		= blk_dev->queue;
+	disk->queue		= q;
 	disk->flags		= GENHD_FL_EXT_DEVT;
-	nvdimm_namespace_disk_name(ndns, disk->disk_name);
+	nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
 	set_capacity(disk, 0);
 	add_disk(disk);
 
-	if (nd_blk_meta_size(blk_dev)) {
-		int rc = nd_integrity_init(disk, nd_blk_meta_size(blk_dev));
+	if (nsblk_meta_size(nsblk)) {
+		int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
 
-		if (rc) {
-			del_gendisk(disk);
-			put_disk(disk);
-			blk_cleanup_queue(blk_dev->queue);
+		if (rc)
 			return rc;
-		}
 	}
 
 	set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
@@ -291,56 +312,29 @@
 {
 	struct nd_namespace_common *ndns;
 	struct nd_namespace_blk *nsblk;
-	struct nd_blk_device *blk_dev;
-	int rc;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 
-	blk_dev = kzalloc(sizeof(*blk_dev), GFP_KERNEL);
-	if (!blk_dev)
-		return -ENOMEM;
-
 	nsblk = to_nd_namespace_blk(&ndns->dev);
-	blk_dev->disk_size = nvdimm_namespace_capacity(ndns);
-	blk_dev->ndbr = to_nd_blk_region(dev->parent);
-	blk_dev->nsblk = to_nd_namespace_blk(&ndns->dev);
-	blk_dev->internal_lbasize = roundup(nsblk->lbasize,
-						INT_LBASIZE_ALIGNMENT);
-	blk_dev->sector_size = ((nsblk->lbasize >= 4096) ? 4096 : 512);
-	dev_set_drvdata(dev, blk_dev);
+	nsblk->size = nvdimm_namespace_capacity(ndns);
+	dev_set_drvdata(dev, nsblk);
 
-	ndns->rw_bytes = nd_blk_rw_bytes;
+	ndns->rw_bytes = nsblk_rw_bytes;
 	if (is_nd_btt(dev))
-		rc = nvdimm_namespace_attach_btt(ndns);
-	else if (nd_btt_probe(ndns, blk_dev) == 0) {
+		return nvdimm_namespace_attach_btt(ndns);
+	else if (nd_btt_probe(dev, ndns) == 0) {
 		/* we'll come back as btt-blk */
-		rc = -ENXIO;
+		return -ENXIO;
 	} else
-		rc = nd_blk_attach_disk(ndns, blk_dev);
-	if (rc)
-		kfree(blk_dev);
-	return rc;
-}
-
-static void nd_blk_detach_disk(struct nd_blk_device *blk_dev)
-{
-	del_gendisk(blk_dev->disk);
-	put_disk(blk_dev->disk);
-	blk_cleanup_queue(blk_dev->queue);
+		return nsblk_attach_disk(nsblk);
 }
 
 static int nd_blk_remove(struct device *dev)
 {
-	struct nd_blk_device *blk_dev = dev_get_drvdata(dev);
-
 	if (is_nd_btt(dev))
-		nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
-	else
-		nd_blk_detach_disk(blk_dev);
-	kfree(blk_dev);
-
+		nvdimm_namespace_detach_btt(to_nd_btt(dev));
 	return 0;
 }
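
The blk.c rework above drops the private struct nd_blk_device and derives the geometry from the namespace on demand: nsblk_meta_size() is whatever the LBA size carries beyond a 512- or 4096-byte data sector, nsblk_internal_lbasize() rounds the LBA size up to INT_LBASIZE_ALIGNMENT, and nsblk_sector_size() is the LBA size minus the metadata; it also hands queue and disk teardown to devm_add_action() so the error paths need no manual cleanup. The standalone arithmetic below evaluates those three helpers for a few example LBA sizes; the alignment value of 64 is an assumption used only for illustration.

#include <stdio.h>
#include <stdint.h>

#define INT_LBASIZE_ALIGNMENT	64	/* assumed value, for illustration only */

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

static uint32_t meta_size(uint32_t lbasize)
{
	return lbasize - (lbasize >= 4096 ? 4096 : 512);
}

static uint32_t internal_lbasize(uint32_t lbasize)
{
	return ROUNDUP(lbasize, INT_LBASIZE_ALIGNMENT);
}

static uint32_t sector_size(uint32_t lbasize)
{
	return lbasize - meta_size(lbasize);
}

int main(void)
{
	uint32_t sizes[] = { 512, 520, 528, 4096, 4104, 4160 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint32_t lba = sizes[i];

		printf("lbasize %4u -> meta %3u, internal %4u, sector %4u\n",
		       lba, meta_size(lba), internal_lbasize(lba),
		       sector_size(lba));
	}
	return 0;
}
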
 
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index f068b65..68a7c3c 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1306,7 +1306,7 @@
 	struct btt *btt;
 	struct device *dev = &nd_btt->dev;
 
-	btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
+	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
 	if (!btt)
 		return NULL;
 
@@ -1321,13 +1321,13 @@
 	ret = discover_arenas(btt);
 	if (ret) {
 		dev_err(dev, "init: error in arena_discover: %d\n", ret);
-		goto out_free;
+		return NULL;
 	}
 
 	if (btt->init_state != INIT_READY && nd_region->ro) {
 		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
 				dev_name(&nd_region->dev));
-		goto out_free;
+		return NULL;
 	} else if (btt->init_state != INIT_READY) {
 		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
 			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
@@ -1337,29 +1337,25 @@
 		ret = create_arenas(btt);
 		if (ret) {
 			dev_info(dev, "init: create_arenas: %d\n", ret);
-			goto out_free;
+			return NULL;
 		}
 
 		ret = btt_meta_init(btt);
 		if (ret) {
 			dev_err(dev, "init: error in meta_init: %d\n", ret);
-			goto out_free;
+			return NULL;
 		}
 	}
 
 	ret = btt_blk_init(btt);
 	if (ret) {
 		dev_err(dev, "init: error in blk_init: %d\n", ret);
-		goto out_free;
+		return NULL;
 	}
 
 	btt_debugfs_init(btt);
 
 	return btt;
-
- out_free:
-	kfree(btt);
-	return NULL;
 }
 
 /**
@@ -1377,7 +1373,6 @@
 		btt_blk_cleanup(btt);
 		free_arenas(btt);
 		debugfs_remove_recursive(btt->debugfs_dir);
-		kfree(btt);
 	}
 }
 
@@ -1388,11 +1383,15 @@
 	struct btt *btt;
 	size_t rawsize;
 
-	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
+	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
+		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
 		return -ENODEV;
+	}
 
 	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
 	if (rawsize < ARENA_MIN_SIZE) {
+		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
+				dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
 		return -ENXIO;
 	}
 	nd_region = to_nd_region(nd_btt->dev.parent);
@@ -1406,9 +1405,8 @@
 }
 EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
 
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
 {
-	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
 	struct btt *btt = nd_btt->btt;
 
 	btt_fini(btt);
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index cb47751..816d0da 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -273,10 +273,10 @@
 	return 0;
 }
 
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
 	int rc;
-	struct device *dev;
+	struct device *btt_dev;
 	struct btt_sb *btt_sb;
 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
 
@@ -284,21 +284,19 @@
 		return -ENODEV;
 
 	nvdimm_bus_lock(&ndns->dev);
-	dev = __nd_btt_create(nd_region, 0, NULL, ndns);
+	btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
 	nvdimm_bus_unlock(&ndns->dev);
-	if (!dev)
+	if (!btt_dev)
 		return -ENOMEM;
-	dev_set_drvdata(dev, drvdata);
-	btt_sb = kzalloc(sizeof(*btt_sb), GFP_KERNEL);
-	rc = __nd_btt_probe(to_nd_btt(dev), ndns, btt_sb);
-	kfree(btt_sb);
-	dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
-			rc == 0 ? dev_name(dev) : "<none>");
+	btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
+	rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
+	dev_dbg(dev, "%s: btt: %s\n", __func__,
+			rc == 0 ? dev_name(btt_dev) : "<none>");
 	if (rc < 0) {
-		struct nd_btt *nd_btt = to_nd_btt(dev);
+		struct nd_btt *nd_btt = to_nd_btt(btt_dev);
 
-		__nd_detach_ndns(dev, &nd_btt->ndns);
-		put_device(dev);
+		__nd_detach_ndns(btt_dev, &nd_btt->ndns);
+		put_device(btt_dev);
 	}
 
 	return rc;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 19f822d..f085f8b 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -40,6 +40,8 @@
 		return ND_DEVICE_REGION_PMEM;
 	else if (is_nd_blk(dev))
 		return ND_DEVICE_REGION_BLK;
+	else if (is_nd_dax(dev))
+		return ND_DEVICE_DAX_PMEM;
 	else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
 		return nd_region_to_nstype(to_nd_region(dev->parent));
 
@@ -122,9 +124,10 @@
 	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
 	struct module *provider = to_bus_provider(dev);
 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-	int rc;
+	int rc = 0;
 
-	rc = nd_drv->remove(dev);
+	if (nd_drv->remove)
+		rc = nd_drv->remove(dev);
 	nd_region_disable(nvdimm_bus, dev);
 
 	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
@@ -246,6 +249,8 @@
 
 void __nd_device_register(struct device *dev)
 {
+	if (!dev)
+		return;
 	dev->bus = &nvdimm_bus_type;
 	get_device(dev);
 	async_schedule_domain(nd_async_device_register, dev,
@@ -292,8 +297,8 @@
 		return -EINVAL;
 	}
 
-	if (!nd_drv->probe || !nd_drv->remove) {
-		pr_debug("->probe() and ->remove() must be specified\n");
+	if (!nd_drv->probe) {
+		pr_debug("%s ->probe() must be specified\n", mod_name);
 		return -EINVAL;
 	}
 
@@ -439,6 +444,12 @@
 		.out_num = 3,
 		.out_sizes = { 4, 4, UINT_MAX, },
 	},
+	[ND_CMD_CALL] = {
+		.in_num = 2,
+		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
+		.out_num = 1,
+		.out_sizes = { UINT_MAX, },
+	},
 };
 
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
@@ -473,6 +484,12 @@
 		.out_num = 3,
 		.out_sizes = { 4, 4, 8, },
 	},
+	[ND_CMD_CALL] = {
+		.in_num = 2,
+		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
+		.out_num = 1,
+		.out_sizes = { UINT_MAX, },
+	},
 };
 
 const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
@@ -500,6 +517,10 @@
 		struct nd_cmd_vendor_hdr *hdr = buf;
 
 		return hdr->in_length;
+	} else if (cmd == ND_CMD_CALL) {
+		struct nd_cmd_pkg *pkg = buf;
+
+		return pkg->nd_size_in;
 	}
 
 	return UINT_MAX;
@@ -522,6 +543,12 @@
 		return out_field[1];
 	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
 		return out_field[1] - 8;
+	else if (cmd == ND_CMD_CALL) {
+		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
+
+		return pkg->nd_size_out;
+	}
+
 
 	return UINT_MAX;
 }
@@ -588,25 +615,31 @@
 	unsigned int cmd = _IOC_NR(ioctl_cmd);
 	void __user *p = (void __user *) arg;
 	struct device *dev = &nvdimm_bus->dev;
+	struct nd_cmd_pkg pkg;
 	const char *cmd_name, *dimm_name;
-	unsigned long dsm_mask;
+	unsigned long cmd_mask;
 	void *buf;
 	int rc, i;
 
 	if (nvdimm) {
 		desc = nd_cmd_dimm_desc(cmd);
 		cmd_name = nvdimm_cmd_name(cmd);
-		dsm_mask = nvdimm->dsm_mask ? *(nvdimm->dsm_mask) : 0;
+		cmd_mask = nvdimm->cmd_mask;
 		dimm_name = dev_name(&nvdimm->dev);
 	} else {
 		desc = nd_cmd_bus_desc(cmd);
 		cmd_name = nvdimm_bus_cmd_name(cmd);
-		dsm_mask = nd_desc->dsm_mask;
+		cmd_mask = nd_desc->cmd_mask;
 		dimm_name = "bus";
 	}
 
+	if (cmd == ND_CMD_CALL) {
+		if (copy_from_user(&pkg, p, sizeof(pkg)))
+			return -EFAULT;
+	}
+
 	if (!desc || (desc->out_num + desc->in_num == 0) ||
-			!test_bit(cmd, &dsm_mask))
+			!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
 
 	/* fail write commands (when read-only) */
@@ -616,6 +649,7 @@
 		case ND_CMD_SET_CONFIG_DATA:
 		case ND_CMD_ARS_START:
 		case ND_CMD_CLEAR_ERROR:
+		case ND_CMD_CALL:
 			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
 					nvdimm ? nvdimm_cmd_name(cmd)
 					: nvdimm_bus_cmd_name(cmd));
@@ -643,6 +677,16 @@
 		in_len += in_size;
 	}
 
+	if (cmd == ND_CMD_CALL) {
+		dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
+				__func__, dimm_name, pkg.nd_command,
+				in_len, out_len, buf_len);
+
+		for (i = 0; i < ARRAY_SIZE(pkg.nd_reserved2); i++)
+			if (pkg.nd_reserved2[i])
+				return -EINVAL;
+	}
+
 	/* process an output envelope */
 	for (i = 0; i < desc->out_num; i++) {
 		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
@@ -783,6 +827,9 @@
 {
 	int rc;
 
+	BUILD_BUG_ON(sizeof(struct nd_smart_payload) != 128);
+	BUILD_BUG_ON(sizeof(struct nd_smart_threshold_payload) != 8);
+
 	rc = bus_register(&nvdimm_bus_type);
 	if (rc)
 		return rc;
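
The bus.c hunks above introduce ND_CMD_CALL, a passthrough command whose payload sizes are not fixed in the descriptor tables but come from a struct nd_cmd_pkg envelope copied in from userspace: nd_size_in and nd_size_out supply the variable in/out lengths and the nd_reserved2 words must be zero. The snippet below sketches only that envelope check; struct pkg_hdr lists just the fields this patch touches (with an illustrative array length) and is not the real UAPI definition of struct nd_cmd_pkg.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Reduced stand-in for struct nd_cmd_pkg: only the fields used by the
 * checks above are shown; array length and layout are illustrative. */
struct pkg_hdr {
	uint64_t nd_command;
	uint32_t nd_size_in;
	uint32_t nd_size_out;
	uint32_t nd_reserved2[9];
};

/* Mirror of the reserved-field validation added to the ioctl path above. */
static int validate_pkg(const struct pkg_hdr *pkg)
{
	size_t i;

	for (i = 0; i < sizeof(pkg->nd_reserved2) / sizeof(pkg->nd_reserved2[0]); i++)
		if (pkg->nd_reserved2[i])
			return -1;
	return 0;
}

int main(void)
{
	struct pkg_hdr pkg;

	memset(&pkg, 0, sizeof(pkg));
	pkg.nd_command = 1;
	pkg.nd_size_in = 128;	/* caller-supplied input payload length */
	pkg.nd_size_out = 256;	/* caller-supplied output payload length */

	printf("cmd %llu: in %u, out %u -> %s\n",
	       (unsigned long long)pkg.nd_command,
	       pkg.nd_size_in, pkg.nd_size_out,
	       validate_pkg(&pkg) ? "rejected" : "accepted");
	return 0;
}
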
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index e8f03b0..8b2e3c4 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,6 +12,7 @@
  */
 #include <linux/device.h>
 #include <linux/sizes.h>
+#include <linux/pmem.h>
 #include "nd-core.h"
 #include "pfn.h"
 #include "btt.h"
@@ -84,12 +85,33 @@
 		seed = nd_region->btt_seed;
 	else if (is_nd_pfn(dev))
 		seed = nd_region->pfn_seed;
+	else if (is_nd_dax(dev))
+		seed = nd_region->dax_seed;
 
 	if (seed == dev || ndns || dev->driver)
 		return false;
 	return true;
 }
 
+struct nd_pfn *to_nd_pfn_safe(struct device *dev)
+{
+	/*
+	 * pfn device attributes are re-used by dax device instances, so we
+	 * need to be careful to do the correct device-to-nd_pfn conversion.
+	 */
+	if (is_nd_pfn(dev))
+		return to_nd_pfn(dev);
+
+	if (is_nd_dax(dev)) {
+		struct nd_dax *nd_dax = to_nd_dax(dev);
+
+		return &nd_dax->nd_pfn;
+	}
+
+	WARN_ON(1);
+	return NULL;
+}
+
 static void nd_detach_and_reset(struct device *dev,
 		struct nd_namespace_common **_ndns)
 {
@@ -103,8 +125,8 @@
 		nd_btt->lbasize = 0;
 		kfree(nd_btt->uuid);
 		nd_btt->uuid = NULL;
-	} else if (is_nd_pfn(dev)) {
-		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 		kfree(nd_pfn->uuid);
 		nd_pfn->uuid = NULL;
@@ -199,3 +221,63 @@
 	return sum;
 }
 EXPORT_SYMBOL(nd_sb_checksum);
+
+static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+		resource_size_t offset, void *buf, size_t size, int rw)
+{
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+	if (unlikely(offset + size > nsio->size)) {
+		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
+		return -EFAULT;
+	}
+
+	if (rw == READ) {
+		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+		if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+			return -EIO;
+		return memcpy_from_pmem(buf, nsio->addr + offset, size);
+	} else {
+		memcpy_to_pmem(nsio->addr + offset, buf, size);
+		wmb_pmem();
+	}
+
+	return 0;
+}
+
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+{
+	struct resource *res = &nsio->res;
+	struct nd_namespace_common *ndns = &nsio->common;
+
+	nsio->size = resource_size(res);
+	if (!devm_request_mem_region(dev, res->start, resource_size(res),
+				dev_name(dev))) {
+		dev_warn(dev, "could not reserve region %pR\n", res);
+		return -EBUSY;
+	}
+
+	ndns->rw_bytes = nsio_rw_bytes;
+	if (devm_init_badblocks(dev, &nsio->bb))
+		return -ENOMEM;
+	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
+			&nsio->res);
+
+	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
+			ARCH_MEMREMAP_PMEM);
+	if (IS_ERR(nsio->addr))
+		return PTR_ERR(nsio->addr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_nsio_enable);
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+	struct resource *res = &nsio->res;
+
+	devm_memunmap(dev, nsio->addr);
+	devm_exit_badblocks(dev, &nsio->bb);
+	devm_release_mem_region(dev, res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(devm_nsio_disable);
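
nsio_rw_bytes() above consults the badblocks list before a read, and because bad ranges are tracked in 512-byte sectors it widens the request first: sz_align = ALIGN(size + (offset & 511), 512) rounds the span out to whole sectors and offset / 512 gives the starting sector. The plain-C arithmetic below just demonstrates that widening for a few unaligned requests; it is illustrative, not the kernel helper.

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	struct { size_t offset, size; } reqs[] = {
		{ 0, 512 }, { 100, 8 }, { 500, 100 }, { 4090, 4100 },
	};
	size_t i;

	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		size_t offset = reqs[i].offset, size = reqs[i].size;
		/* Same widening as nsio_rw_bytes(): pull the start back to a
		 * sector boundary and round the length up to whole sectors. */
		size_t sector = offset / 512;
		size_t sz_align = ALIGN_UP(size + (offset & 511), 512);

		printf("read off=%4zu len=%4zu -> sector %zu, check %zu bytes (%zu sectors)\n",
		       offset, size, sector, sz_align, sz_align / 512);
	}
	return 0;
}
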
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 182a93f..be89764 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -251,7 +251,7 @@
 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
 
-	for_each_set_bit(cmd, &nd_desc->dsm_mask, BITS_PER_LONG)
+	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
 		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
 	len += sprintf(buf + len, "\n");
 	return len;
@@ -648,6 +648,9 @@
 	nd_region_exit();
 	nvdimm_exit();
 	nvdimm_bus_exit();
+	nd_region_devs_exit();
+	nvdimm_devs_exit();
+	ida_destroy(&nd_ida);
 }
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
new file mode 100644
index 0000000..45fa82c
--- /dev/null
+++ b/drivers/nvdimm/dax_devs.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "pfn.h"
+#include "nd.h"
+
+static void nd_dax_release(struct device *dev)
+{
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_dax *nd_dax = to_nd_dax(dev);
+	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+
+	dev_dbg(dev, "%s\n", __func__);
+	nd_detach_ndns(dev, &nd_pfn->ndns);
+	ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
+	kfree(nd_pfn->uuid);
+	kfree(nd_dax);
+}
+
+static struct device_type nd_dax_device_type = {
+	.name = "nd_dax",
+	.release = nd_dax_release,
+};
+
+bool is_nd_dax(struct device *dev)
+{
+	return dev ? dev->type == &nd_dax_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_dax);
+
+struct nd_dax *to_nd_dax(struct device *dev)
+{
+	struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
+
+	WARN_ON(!is_nd_dax(dev));
+	return nd_dax;
+}
+EXPORT_SYMBOL(to_nd_dax);
+
+static const struct attribute_group *nd_dax_attribute_groups[] = {
+	&nd_pfn_attribute_group,
+	&nd_device_attribute_group,
+	&nd_numa_attribute_group,
+	NULL,
+};
+
+static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
+{
+	struct nd_pfn *nd_pfn;
+	struct nd_dax *nd_dax;
+	struct device *dev;
+
+	nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
+	if (!nd_dax)
+		return NULL;
+
+	nd_pfn = &nd_dax->nd_pfn;
+	nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
+	if (nd_pfn->id < 0) {
+		kfree(nd_dax);
+		return NULL;
+	}
+
+	dev = &nd_pfn->dev;
+	dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
+	dev->groups = nd_dax_attribute_groups;
+	dev->type = &nd_dax_device_type;
+	dev->parent = &nd_region->dev;
+
+	return nd_dax;
+}
+
+struct device *nd_dax_create(struct nd_region *nd_region)
+{
+	struct device *dev = NULL;
+	struct nd_dax *nd_dax;
+
+	if (!is_nd_pmem(&nd_region->dev))
+		return NULL;
+
+	nd_dax = nd_dax_alloc(nd_region);
+	if (nd_dax)
+		dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
+	__nd_device_register(dev);
+	return dev;
+}
+
+int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
+{
+	int rc;
+	struct nd_dax *nd_dax;
+	struct device *dax_dev;
+	struct nd_pfn *nd_pfn;
+	struct nd_pfn_sb *pfn_sb;
+	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
+
+	if (ndns->force_raw)
+		return -ENODEV;
+
+	nvdimm_bus_lock(&ndns->dev);
+	nd_dax = nd_dax_alloc(nd_region);
+	nd_pfn = &nd_dax->nd_pfn;
+	dax_dev = nd_pfn_devinit(nd_pfn, ndns);
+	nvdimm_bus_unlock(&ndns->dev);
+	if (!dax_dev)
+		return -ENOMEM;
+	pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+	nd_pfn->pfn_sb = pfn_sb;
+	rc = nd_pfn_validate(nd_pfn, DAX_SIG);
+	dev_dbg(dev, "%s: dax: %s\n", __func__,
+			rc == 0 ? dev_name(dax_dev) : "<none>");
+	if (rc < 0) {
+		__nd_detach_ndns(dax_dev, &nd_pfn->ndns);
+		put_device(dax_dev);
+	} else
+		__nd_device_register(dax_dev);
+
+	return rc;
+}
+EXPORT_SYMBOL(nd_dax_probe);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index c56f882..bbde28d 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -37,9 +37,9 @@
 
 	nvdimm = to_nvdimm(ndd->dev);
 
-	if (!nvdimm->dsm_mask)
+	if (!nvdimm->cmd_mask)
 		return -ENXIO;
-	if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask))
+	if (!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask))
 		return -ENXIO;
 
 	return 0;
@@ -263,6 +263,12 @@
 }
 EXPORT_SYMBOL_GPL(nvdimm_name);
 
+unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
+{
+	return nvdimm->cmd_mask;
+}
+EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);
+
 void *nvdimm_provider_data(struct nvdimm *nvdimm)
 {
 	if (nvdimm)
@@ -277,10 +283,10 @@
 	struct nvdimm *nvdimm = to_nvdimm(dev);
 	int cmd, len = 0;
 
-	if (!nvdimm->dsm_mask)
+	if (!nvdimm->cmd_mask)
 		return sprintf(buf, "\n");
 
-	for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG)
+	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
 		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
 	len += sprintf(buf + len, "\n");
 	return len;
@@ -340,7 +346,7 @@
 
 struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
 		const struct attribute_group **groups, unsigned long flags,
-		unsigned long *dsm_mask)
+		unsigned long cmd_mask)
 {
 	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
 	struct device *dev;
@@ -355,7 +361,7 @@
 	}
 	nvdimm->provider_data = provider_data;
 	nvdimm->flags = flags;
-	nvdimm->dsm_mask = dsm_mask;
+	nvdimm->cmd_mask = cmd_mask;
 	atomic_set(&nvdimm->busy, 0);
 	dev = &nvdimm->dev;
 	dev_set_name(dev, "nmem%d", nvdimm->id);
@@ -546,3 +552,8 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
+
+void __exit nvdimm_devs_exit(void)
+{
+	ida_destroy(&dimm_ida);
+}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index f5cb886..c5e3196 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1288,6 +1288,8 @@
 		mode = "safe";
 	else if (claim && is_nd_pfn(claim))
 		mode = "memory";
+	else if (claim && is_nd_dax(claim))
+		mode = "dax";
 	else if (!claim && pmem_should_map_pages(dev))
 		mode = "memory";
 	else
@@ -1379,21 +1381,19 @@
 {
 	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
 	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
-	struct nd_namespace_common *ndns;
+	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
+	struct nd_namespace_common *ndns = NULL;
 	resource_size_t size;
 
-	if (nd_btt || nd_pfn) {
-		struct device *host = NULL;
-
-		if (nd_btt) {
-			host = &nd_btt->dev;
+	if (nd_btt || nd_pfn || nd_dax) {
+		if (nd_btt)
 			ndns = nd_btt->ndns;
-		} else if (nd_pfn) {
-			host = &nd_pfn->dev;
+		else if (nd_pfn)
 			ndns = nd_pfn->ndns;
-		}
+		else if (nd_dax)
+			ndns = nd_dax->nd_pfn.ndns;
 
-		if (!ndns || !host)
+		if (!ndns)
 			return ERR_PTR(-ENODEV);
 
 		/*
@@ -1404,12 +1404,12 @@
 		device_unlock(&ndns->dev);
 		if (ndns->dev.driver) {
 			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
-					dev_name(host));
+					dev_name(dev));
 			return ERR_PTR(-EBUSY);
 		}
-		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
+		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
 					"host (%s) vs claim (%s) mismatch\n",
-					dev_name(host),
+					dev_name(dev),
 					dev_name(ndns->claim)))
 			return ERR_PTR(-ENXIO);
 	} else {
@@ -1784,6 +1784,18 @@
 		nd_device_register(nd_region->ns_seed);
 }
 
+void nd_region_create_dax_seed(struct nd_region *nd_region)
+{
+	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+	nd_region->dax_seed = nd_dax_create(nd_region);
+	/*
+	 * Seed creation failures are not fatal; provisioning is simply
+	 * disabled until memory becomes available
+	 */
+	if (!nd_region->dax_seed)
+		dev_err(&nd_region->dev, "failed to create dax namespace\n");
+}
+
 void nd_region_create_pfn_seed(struct nd_region *nd_region)
 {
 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 1d1500f..284cdaa 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -37,7 +37,7 @@
 struct nvdimm {
 	unsigned long flags;
 	void *provider_data;
-	unsigned long *dsm_mask;
+	unsigned long cmd_mask;
 	struct device dev;
 	atomic_t busy;
 	int id;
@@ -49,11 +49,14 @@
 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
 int __init nvdimm_bus_init(void);
 void nvdimm_bus_exit(void);
+void nvdimm_devs_exit(void);
+void nd_region_devs_exit(void);
 void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 struct nd_region;
 void nd_region_create_blk_seed(struct nd_region *nd_region);
 void nd_region_create_btt_seed(struct nd_region *nd_region);
 void nd_region_create_pfn_seed(struct nd_region *nd_region);
+void nd_region_create_dax_seed(struct nd_region *nd_region);
 void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
 void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
@@ -91,4 +94,5 @@
 ssize_t nd_namespace_store(struct device *dev,
 		struct nd_namespace_common **_ndns, const char *buf,
 		size_t len);
+struct nd_pfn *to_nd_pfn_safe(struct device *dev);
 #endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 875c524..d0ac93c 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -13,6 +13,7 @@
 #ifndef __ND_H__
 #define __ND_H__
 #include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
@@ -100,10 +101,12 @@
 	struct ida ns_ida;
 	struct ida btt_ida;
 	struct ida pfn_ida;
+	struct ida dax_ida;
 	unsigned long flags;
 	struct device *ns_seed;
 	struct device *btt_seed;
 	struct device *pfn_seed;
+	struct device *dax_seed;
 	u16 ndr_mappings;
 	u64 ndr_size;
 	u64 ndr_start;
@@ -160,6 +163,10 @@
 	struct nd_namespace_common *ndns;
 };
 
+struct nd_dax {
+	struct nd_pfn nd_pfn;
+};
+
 enum nd_async_mode {
 	ND_SYNC,
 	ND_ASYNC,
@@ -197,11 +204,12 @@
 
 u64 nd_sb_checksum(struct nd_gen_sb *sb);
 #if IS_ENABLED(CONFIG_BTT)
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_btt(struct device *dev);
 struct device *nd_btt_create(struct nd_region *nd_region);
 #else
-static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_btt_probe(struct device *dev,
+		struct nd_namespace_common *ndns)
 {
 	return -ENODEV;
 }
@@ -219,12 +227,16 @@
 
 struct nd_pfn *to_nd_pfn(struct device *dev);
 #if IS_ENABLED(CONFIG_NVDIMM_PFN)
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_pfn(struct device *dev);
 struct device *nd_pfn_create(struct nd_region *nd_region);
-int nd_pfn_validate(struct nd_pfn *nd_pfn);
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
+		struct nd_namespace_common *ndns);
+int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
+extern struct attribute_group nd_pfn_attribute_group;
 #else
-static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_pfn_probe(struct device *dev,
+		struct nd_namespace_common *ndns)
 {
 	return -ENODEV;
 }
@@ -239,12 +251,35 @@
 	return NULL;
 }
 
-static inline int nd_pfn_validate(struct nd_pfn *nd_pfn)
+static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	return -ENODEV;
 }
 #endif
 
+struct nd_dax *to_nd_dax(struct device *dev);
+#if IS_ENABLED(CONFIG_NVDIMM_DAX)
+int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
+bool is_nd_dax(struct device *dev);
+struct device *nd_dax_create(struct nd_region *nd_region);
+#else
+static inline int nd_dax_probe(struct device *dev,
+		struct nd_namespace_common *ndns)
+{
+	return -ENODEV;
+}
+
+static inline bool is_nd_dax(struct device *dev)
+{
+	return false;
+}
+
+static inline struct device *nd_dax_create(struct nd_region *nd_region)
+{
+	return NULL;
+}
+#endif
+
 struct nd_region *to_nd_region(struct device *dev);
 int nd_region_to_nstype(struct nd_region *nd_region);
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
@@ -263,11 +298,32 @@
 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 		char *name);
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
 		struct badblocks *bb, const struct resource *res);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap);
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	return ERR_PTR(-ENXIO);
+}
+static inline int devm_nsio_enable(struct device *dev,
+		struct nd_namespace_io *nsio)
+{
+	return -ENXIO;
+}
+static inline void devm_nsio_disable(struct device *dev,
+		struct nd_namespace_io *nsio)
+{
+}
+#endif
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
@@ -281,6 +337,19 @@
 	return true;
 }
 void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
+		unsigned int len)
+{
+	if (bb->count) {
+		sector_t first_bad;
+		int num_bad;
+
+		return !!badblocks_check(bb, sector, len / 512, &first_bad,
+				&num_bad);
+	}
+
+	return false;
+}
 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
 const u8 *nd_dev_to_uuid(struct device *dev);
 bool pmem_should_map_pages(struct device *dev);
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index 8e343a3..dde9853 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -19,6 +19,7 @@
 
 #define PFN_SIG_LEN 16
 #define PFN_SIG "NVDIMM_PFN_INFO\0"
+#define DAX_SIG "NVDIMM_DAX_INFO\0"
 
 struct nd_pfn_sb {
 	u8 signature[PFN_SIG_LEN];
@@ -33,7 +34,9 @@
 	/* minor-version-1 additions for section alignment */
 	__le32 start_pad;
 	__le32 end_trunc;
-	u8 padding[4004];
+	/* minor-version-2 record the base alignment of the mapping */
+	__le32 align;
+	u8 padding[4000];
 	__le64 checksum;
 };
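
The pfn.h change above adds the minor-version-2 align field by shrinking the padding array from 4004 to 4000 bytes, so the info block keeps its overall size and the trailing checksum stays where older kernels expect it. The fragment below shows the same bookkeeping style with a compile-time size check on an invented header; struct demo_info_block and its 4096-byte target are illustrative and do not reproduce the real nd_pfn_sb layout.

#include <stdint.h>
#include <stdio.h>

/* Invented on-media header, for illustration only: every new field must
 * be paid for out of the padding so the total size stays fixed. */
struct demo_info_block {
	uint8_t  signature[16];
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t flags;
	uint32_t start_pad;
	uint32_t end_trunc;
	uint32_t align;			/* new in minor version 2 */
	uint8_t  padding[4052];		/* was 4056 before 'align' was taken out of it */
	uint64_t checksum;
};

_Static_assert(sizeof(struct demo_info_block) == 4096,
	       "info block must stay exactly 4096 bytes");

int main(void)
{
	printf("demo info block: %zu bytes\n", sizeof(struct demo_info_block));
	return 0;
}
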
 
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index e071e21..f7718ec 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
+#include <linux/memremap.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/genhd.h>
@@ -56,7 +57,7 @@
 static ssize_t mode_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 	switch (nd_pfn->mode) {
 	case PFN_MODE_RAM:
@@ -71,7 +72,7 @@
 static ssize_t mode_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc = 0;
 
 	device_lock(dev);
@@ -105,7 +106,7 @@
 static ssize_t align_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 	return sprintf(buf, "%lx\n", nd_pfn->align);
 }
@@ -133,7 +134,7 @@
 static ssize_t align_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -151,7 +152,7 @@
 static ssize_t uuid_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 	if (nd_pfn->uuid)
 		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
@@ -161,7 +162,7 @@
 static ssize_t uuid_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -177,7 +178,7 @@
 static ssize_t namespace_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	nvdimm_bus_lock(dev);
@@ -190,7 +191,7 @@
 static ssize_t namespace_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -208,7 +209,7 @@
 static ssize_t resource_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -234,7 +235,7 @@
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -269,7 +270,7 @@
 	NULL,
 };
 
-static struct attribute_group nd_pfn_attribute_group = {
+struct attribute_group nd_pfn_attribute_group = {
 	.attrs = nd_pfn_attributes,
 };
 
@@ -280,16 +281,32 @@
 	NULL,
 };
 
-static struct device *__nd_pfn_create(struct nd_region *nd_region,
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
 		struct nd_namespace_common *ndns)
 {
+	struct device *dev;
+
+	if (!nd_pfn)
+		return NULL;
+
+	nd_pfn->mode = PFN_MODE_NONE;
+	nd_pfn->align = HPAGE_SIZE;
+	dev = &nd_pfn->dev;
+	device_initialize(&nd_pfn->dev);
+	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+				__func__, dev_name(ndns->claim));
+		put_device(dev);
+		return NULL;
+	}
+	return dev;
+}
+
+static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
+{
 	struct nd_pfn *nd_pfn;
 	struct device *dev;
 
-	/* we can only create pages for contiguous ranged of pmem */
-	if (!is_nd_pmem(&nd_region->dev))
-		return NULL;
-
 	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
 	if (!nd_pfn)
 		return NULL;
@@ -300,33 +317,31 @@
 		return NULL;
 	}
 
-	nd_pfn->mode = PFN_MODE_NONE;
-	nd_pfn->align = HPAGE_SIZE;
 	dev = &nd_pfn->dev;
 	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
-	dev->parent = &nd_region->dev;
-	dev->type = &nd_pfn_device_type;
 	dev->groups = nd_pfn_attribute_groups;
-	device_initialize(&nd_pfn->dev);
-	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
-		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
-				__func__, dev_name(ndns->claim));
-		put_device(dev);
-		return NULL;
-	}
-	return dev;
+	dev->type = &nd_pfn_device_type;
+	dev->parent = &nd_region->dev;
+
+	return nd_pfn;
 }
 
 struct device *nd_pfn_create(struct nd_region *nd_region)
 {
-	struct device *dev = __nd_pfn_create(nd_region, NULL);
+	struct nd_pfn *nd_pfn;
+	struct device *dev;
 
-	if (dev)
-		__nd_device_register(dev);
+	if (!is_nd_pmem(&nd_region->dev))
+		return NULL;
+
+	nd_pfn = nd_pfn_alloc(nd_region);
+	dev = nd_pfn_devinit(nd_pfn, NULL);
+
+	__nd_device_register(dev);
 	return dev;
 }
 
-int nd_pfn_validate(struct nd_pfn *nd_pfn)
+int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
 	struct nd_namespace_io *nsio;
@@ -343,7 +358,7 @@
 	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
 		return -ENXIO;
 
-	if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0)
+	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
 		return -ENODEV;
 
 	checksum = le64_to_cpu(pfn_sb->checksum);
@@ -360,6 +375,9 @@
 		pfn_sb->end_trunc = 0;
 	}
 
+	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
+		pfn_sb->align = 0;
+
 	switch (le32_to_cpu(pfn_sb->mode)) {
 	case PFN_MODE_RAM:
 	case PFN_MODE_PMEM:
@@ -379,6 +397,8 @@
 			return -ENODEV;
 	}
 
+	if (nd_pfn->align == 0)
+		nd_pfn->align = le32_to_cpu(pfn_sb->align);
 	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
 		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
 				nd_pfn->align, nvdimm_namespace_capacity(ndns));
@@ -399,8 +419,8 @@
 		return -EBUSY;
 	}
 
-	nd_pfn->align = 1UL << ilog2(offset);
-	if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
+	if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
 		dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
 				offset);
 		return -ENXIO;
@@ -410,11 +430,11 @@
 }
 EXPORT_SYMBOL(nd_pfn_validate);
 
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
 	int rc;
-	struct device *dev;
 	struct nd_pfn *nd_pfn;
+	struct device *pfn_dev;
 	struct nd_pfn_sb *pfn_sb;
 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
 
@@ -422,25 +442,218 @@
 		return -ENODEV;
 
 	nvdimm_bus_lock(&ndns->dev);
-	dev = __nd_pfn_create(nd_region, ndns);
+	nd_pfn = nd_pfn_alloc(nd_region);
+	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
 	nvdimm_bus_unlock(&ndns->dev);
-	if (!dev)
+	if (!pfn_dev)
 		return -ENOMEM;
-	dev_set_drvdata(dev, drvdata);
-	pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
-	nd_pfn = to_nd_pfn(dev);
+	pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+	nd_pfn = to_nd_pfn(pfn_dev);
 	nd_pfn->pfn_sb = pfn_sb;
-	rc = nd_pfn_validate(nd_pfn);
-	nd_pfn->pfn_sb = NULL;
-	kfree(pfn_sb);
-	dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
-			rc == 0 ? dev_name(dev) : "<none>");
+	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
+	dev_dbg(dev, "%s: pfn: %s\n", __func__,
+			rc == 0 ? dev_name(pfn_dev) : "<none>");
 	if (rc < 0) {
-		__nd_detach_ndns(dev, &nd_pfn->ndns);
-		put_device(dev);
+		__nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+		put_device(pfn_dev);
 	} else
-		__nd_device_register(&nd_pfn->dev);
+		__nd_device_register(pfn_dev);
 
 	return rc;
 }
 EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity; pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	u64 offset = le64_to_cpu(pfn_sb->dataoff);
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
+	struct vmem_altmap __altmap = {
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
+	};
+
+	memcpy(res, &nsio->res, sizeof(*res));
+	res->start += start_pad;
+	res->end -= end_trunc;
+
+	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+	if (nd_pfn->mode == PFN_MODE_RAM) {
+		if (offset < SZ_8K)
+			return ERR_PTR(-EINVAL);
+		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+		altmap = NULL;
+	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
+		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+			dev_info(&nd_pfn->dev,
+					"number of pfns truncated from %lld to %ld\n",
+					le64_to_cpu(nd_pfn->pfn_sb->npfns),
+					nd_pfn->npfns);
+		memcpy(altmap, &__altmap, sizeof(*altmap));
+		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->alloc = 0;
+	} else
+		return ERR_PTR(-ENXIO);
+
+	return altmap;
+}
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
+	struct nd_region *nd_region;
+	struct nd_pfn_sb *pfn_sb;
+	unsigned long npfns;
+	phys_addr_t offset;
+	const char *sig;
+	u64 checksum;
+	int rc;
+
+	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+	if (!pfn_sb)
+		return -ENOMEM;
+
+	nd_pfn->pfn_sb = pfn_sb;
+	if (is_nd_dax(&nd_pfn->dev))
+		sig = DAX_SIG;
+	else
+		sig = PFN_SIG;
+	rc = nd_pfn_validate(nd_pfn, sig);
+	if (rc != -ENODEV)
+		return rc;
+
+	/* no info block, do init */
+	nd_region = to_nd_region(nd_pfn->dev.parent);
+	if (nd_region->ro) {
+		dev_info(&nd_pfn->dev,
+				"%s is read-only, unable to init metadata\n",
+				dev_name(&nd_region->dev));
+		return -ENXIO;
+	}
+
+	memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
+	/*
+	 * Note: we use 64 here for the standard size of struct page;
+	 * debugging options may cause it to be larger, in which case the
+	 * implementation will limit the pfns advertised through
+	 * ->direct_access() to those that are included in the memmap.
+	 */
+	start += start_pad;
+	size = resource_size(&nsio->res);
+	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+	if (nd_pfn->mode == PFN_MODE_PMEM) {
+		unsigned long memmap_size;
+
+		/*
+		 * vmemmap_populate_hugepages() allocates the memmap array in
+		 * HPAGE_SIZE chunks.
+		 */
+		memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+		offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
+				nd_pfn->align) - start;
+	} else if (nd_pfn->mode == PFN_MODE_RAM)
+		offset = ALIGN(start + SZ_8K + dax_label_reserve,
+				nd_pfn->align) - start;
+	else
+		return -ENXIO;
+
+	if (offset + start_pad + end_trunc >= size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		return -ENXIO;
+	}
+
+	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+	pfn_sb->dataoff = cpu_to_le64(offset);
+	pfn_sb->npfns = cpu_to_le64(npfns);
+	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
+	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(2);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+	pfn_sb->align = cpu_to_le32(nd_pfn->align);
+	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+	pfn_sb->checksum = cpu_to_le64(checksum);
+
+	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return ERR_PTR(-ENODEV);
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
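
As a rough illustration of the layout arithmetic in the nd_pfn_init() hunk
above, the stand-alone C sketch below mirrors the PFN_MODE_PMEM calculation:
pad/truncate to section boundaries, reserve room for the memmap in pmem, then
recompute the advertised pfn count. The constants (128 MiB sections, 2 MiB
alignment, 64-byte struct page) and the unconditional padding are assumptions
for the example, not the driver's exact behaviour.

/* Illustrative user-space sketch of the PFN_MODE_PMEM layout arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K           4096ULL
#define SZ_8K           8192ULL
#define SECTION_SIZE    (128ULL << 20)  /* assumed memory hotplug granularity */
#define ALIGN_HPAGE     (2ULL << 20)    /* assumed nd_pfn->align */
#define STRUCT_PAGE_SZ  64ULL           /* "standard" sizeof(struct page) */

static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) / a * a; }
static uint64_t align_down(uint64_t x, uint64_t a) { return (x / a) * a; }

int main(void)
{
	uint64_t start = 0x240080000ULL;    /* arbitrary, not section aligned */
	uint64_t size  = 4ULL << 30;        /* 4 GiB namespace */

	/* Trim so the range handed to memory hotplug stays section aligned. */
	uint64_t start_pad = align_up(start, SECTION_SIZE) - start;
	uint64_t end_trunc = (start + size) - align_down(start + size, SECTION_SIZE);

	/* First-pass pfn count, then reserve room for the memmap in pmem. */
	uint64_t npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	uint64_t memmap_size = align_up(STRUCT_PAGE_SZ * npfns, ALIGN_HPAGE);
	uint64_t data_start = start + start_pad;
	uint64_t offset = align_up(data_start + SZ_8K + memmap_size, ALIGN_HPAGE)
			  - data_start;

	/* Final pfn count that the info block would advertise. */
	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;

	printf("start_pad=%llu end_trunc=%llu dataoff=%llu npfns=%llu\n",
	       (unsigned long long)start_pad, (unsigned long long)end_trunc,
	       (unsigned long long)offset, (unsigned long long)npfns);
	return 0;
}
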
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 92f5365..608fc44 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -33,10 +33,6 @@
 #include "nd.h"
 
 struct pmem_device {
-	struct request_queue	*pmem_queue;
-	struct gendisk		*pmem_disk;
-	struct nd_namespace_common *ndns;
-
 	/* One contiguous memory region per device */
 	phys_addr_t		phys_addr;
 	/* when non-zero this device is hosting a 'pfn' instance */
@@ -50,23 +46,10 @@
 	struct badblocks	bb;
 };
 
-static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
-{
-	if (bb->count) {
-		sector_t first_bad;
-		int num_bad;
-
-		return !!badblocks_check(bb, sector, len / 512, &first_bad,
-				&num_bad);
-	}
-
-	return false;
-}
-
 static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 		unsigned int len)
 {
-	struct device *dev = disk_to_dev(pmem->pmem_disk);
+	struct device *dev = pmem->bb.dev;
 	sector_t sector;
 	long cleared;
 
@@ -136,8 +119,7 @@
 	unsigned long start;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
-	struct block_device *bdev = bio->bi_bdev;
-	struct pmem_device *pmem = bdev->bd_disk->private_data;
+	struct pmem_device *pmem = q->queuedata;
 
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
@@ -162,7 +144,7 @@
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 		       struct page *page, int rw)
 {
-	struct pmem_device *pmem = bdev->bd_disk->private_data;
+	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	int rc;
 
 	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
@@ -182,14 +164,22 @@
 }
 
 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
-		      void __pmem **kaddr, pfn_t *pfn)
+		      void __pmem **kaddr, pfn_t *pfn, long size)
 {
-	struct pmem_device *pmem = bdev->bd_disk->private_data;
+	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	resource_size_t offset = sector * 512 + pmem->data_offset;
 
+	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+		return -EIO;
 	*kaddr = pmem->virt_addr + offset;
 	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
+	/*
+	 * If badblocks are present, limit known good range to the
+	 * requested range.
+	 */
+	if (unlikely(pmem->bb.count))
+		return size;
 	return pmem->size - pmem->pfn_pad - offset;
 }
 
@@ -200,104 +190,119 @@
 	.revalidate_disk =	nvdimm_revalidate_disk,
 };
 
-static struct pmem_device *pmem_alloc(struct device *dev,
-		struct resource *res, int id)
+static void pmem_release_queue(void *q)
 {
+	blk_cleanup_queue(q);
+}
+
+static void pmem_release_disk(void *disk)
+{
+	del_gendisk(disk);
+	put_disk(disk);
+}
+
+static int pmem_attach_disk(struct device *dev,
+		struct nd_namespace_common *ndns)
+{
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct vmem_altmap __altmap, *altmap = NULL;
+	struct resource *res = &nsio->res;
+	struct nd_pfn *nd_pfn = NULL;
+	int nid = dev_to_node(dev);
+	struct nd_pfn_sb *pfn_sb;
 	struct pmem_device *pmem;
+	struct resource pfn_res;
 	struct request_queue *q;
+	struct gendisk *disk;
+	void *addr;
+
+	/* while nsio_rw_bytes is active, parse a pfn info block if present */
+	if (is_nd_pfn(dev)) {
+		nd_pfn = to_nd_pfn(dev);
+		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
+		if (IS_ERR(altmap))
+			return PTR_ERR(altmap);
+	}
+
+	/* we're attaching a block device, disable raw namespace access */
+	devm_nsio_disable(dev, nsio);
 
 	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 	if (!pmem)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
+	dev_set_drvdata(dev, pmem);
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
 	if (!arch_has_wmb_pmem())
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
-	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
-			dev_name(dev))) {
-		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
-				&pmem->phys_addr, pmem->size);
-		return ERR_PTR(-EBUSY);
+	if (!devm_request_mem_region(dev, res->start, resource_size(res),
+				dev_name(dev))) {
+		dev_warn(dev, "could not reserve region %pR\n", res);
+		return -EBUSY;
 	}
 
 	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
 	if (!q)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	pmem->pfn_flags = PFN_DEV;
-	if (pmem_should_map_pages(dev)) {
-		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
+	if (is_nd_pfn(dev)) {
+		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
+				altmap);
+		pfn_sb = nd_pfn->pfn_sb;
+		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+		pmem->pfn_flags |= PFN_MAP;
+		res = &pfn_res; /* for badblocks populate */
+		res->start += pmem->data_offset;
+	} else if (pmem_should_map_pages(dev)) {
+		addr = devm_memremap_pages(dev, &nsio->res,
 				&q->q_usage_counter, NULL);
 		pmem->pfn_flags |= PFN_MAP;
 	} else
-		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
-				pmem->phys_addr, pmem->size,
-				ARCH_MEMREMAP_PMEM);
+		addr = devm_memremap(dev, pmem->phys_addr,
+				pmem->size, ARCH_MEMREMAP_PMEM);
 
-	if (IS_ERR(pmem->virt_addr)) {
+	/*
+	 * At release time the queue must be dead before
+	 * devm_memremap_pages is unwound
+	 */
+	if (devm_add_action(dev, pmem_release_queue, q)) {
 		blk_cleanup_queue(q);
-		return (void __force *) pmem->virt_addr;
+		return -ENOMEM;
 	}
 
-	pmem->pmem_queue = q;
-	return pmem;
-}
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+	pmem->virt_addr = (void __pmem *) addr;
 
-static void pmem_detach_disk(struct pmem_device *pmem)
-{
-	if (!pmem->pmem_disk)
-		return;
-
-	del_gendisk(pmem->pmem_disk);
-	put_disk(pmem->pmem_disk);
-	blk_cleanup_queue(pmem->pmem_queue);
-}
-
-static int pmem_attach_disk(struct device *dev,
-		struct nd_namespace_common *ndns, struct pmem_device *pmem)
-{
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	int nid = dev_to_node(dev);
-	struct resource bb_res;
-	struct gendisk *disk;
-
-	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
-	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
-	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
-	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
+	blk_queue_make_request(q, pmem_make_request);
+	blk_queue_physical_block_size(q, PAGE_SIZE);
+	blk_queue_max_hw_sectors(q, UINT_MAX);
+	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	q->queuedata = pmem;
 
 	disk = alloc_disk_node(0, nid);
-	if (!disk) {
-		blk_cleanup_queue(pmem->pmem_queue);
+	if (!disk)
+		return -ENOMEM;
+	if (devm_add_action(dev, pmem_release_disk, disk)) {
+		put_disk(disk);
 		return -ENOMEM;
 	}
 
 	disk->fops		= &pmem_fops;
-	disk->private_data	= pmem;
-	disk->queue		= pmem->pmem_queue;
+	disk->queue		= q;
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	nvdimm_namespace_disk_name(ndns, disk->disk_name);
 	disk->driverfs_dev = dev;
 	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
 			/ 512);
-	pmem->pmem_disk = disk;
-	devm_exit_badblocks(dev, &pmem->bb);
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	bb_res.start = nsio->res.start + pmem->data_offset;
-	bb_res.end = nsio->res.end;
-	if (is_nd_pfn(dev)) {
-		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-
-		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
-		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
-	}
-	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
-			&bb_res);
+	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
 	disk->bb = &pmem->bb;
 	add_disk(disk);
 	revalidate_disk(disk);
@@ -305,346 +310,68 @@
 	return 0;
 }
 
-static int pmem_rw_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size, int rw)
-{
-	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);
-
-	if (unlikely(offset + size > pmem->size)) {
-		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
-		return -EFAULT;
-	}
-
-	if (rw == READ) {
-		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
-
-		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
-			return -EIO;
-		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
-	} else {
-		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
-		wmb_pmem();
-	}
-
-	return 0;
-}
-
-static int nd_pfn_init(struct nd_pfn *nd_pfn)
-{
-	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
-	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	u32 start_pad = 0, end_trunc = 0;
-	resource_size_t start, size;
-	struct nd_namespace_io *nsio;
-	struct nd_region *nd_region;
-	unsigned long npfns;
-	phys_addr_t offset;
-	u64 checksum;
-	int rc;
-
-	if (!pfn_sb)
-		return -ENOMEM;
-
-	nd_pfn->pfn_sb = pfn_sb;
-	rc = nd_pfn_validate(nd_pfn);
-	if (rc == -ENODEV)
-		/* no info block, do init */;
-	else
-		return rc;
-
-	nd_region = to_nd_region(nd_pfn->dev.parent);
-	if (nd_region->ro) {
-		dev_info(&nd_pfn->dev,
-				"%s is read-only, unable to init metadata\n",
-				dev_name(&nd_region->dev));
-		goto err;
-	}
-
-	memset(pfn_sb, 0, sizeof(*pfn_sb));
-
-	/*
-	 * Check if pmem collides with 'System RAM' when section aligned and
-	 * trim it accordingly
-	 */
-	nsio = to_nd_namespace_io(&ndns->dev);
-	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-	size = resource_size(&nsio->res);
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED) {
-
-		start = nsio->res.start;
-		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-	}
-
-	start = nsio->res.start;
-	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED) {
-		size = resource_size(&nsio->res);
-		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
-	}
-
-	if (start_pad + end_trunc)
-		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
-				dev_name(&ndns->dev), start_pad + end_trunc);
-
-	/*
-	 * Note, we use 64 here for the standard size of struct page,
-	 * debugging options may cause it to be larger in which case the
-	 * implementation will limit the pfns advertised through
-	 * ->direct_access() to those that are included in the memmap.
-	 */
-	start += start_pad;
-	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
-	if (nd_pfn->mode == PFN_MODE_PMEM) {
-		unsigned long memmap_size;
-
-		/*
-		 * vmemmap_populate_hugepages() allocates the memmap array in
-		 * PMD_SIZE chunks.
-		 */
-		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
-		offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
-			- start;
-	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
-	else
-		goto err;
-
-	if (offset + start_pad + end_trunc >= pmem->size) {
-		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
-				dev_name(&ndns->dev));
-		goto err;
-	}
-
-	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
-	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
-	pfn_sb->dataoff = cpu_to_le64(offset);
-	pfn_sb->npfns = cpu_to_le64(npfns);
-	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
-	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
-	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
-	pfn_sb->version_major = cpu_to_le16(1);
-	pfn_sb->version_minor = cpu_to_le16(1);
-	pfn_sb->start_pad = cpu_to_le32(start_pad);
-	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
-	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
-	pfn_sb->checksum = cpu_to_le64(checksum);
-
-	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
-	if (rc)
-		goto err;
-
-	return 0;
- err:
-	nd_pfn->pfn_sb = NULL;
-	kfree(pfn_sb);
-	return -ENXIO;
-}
-
-static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
-{
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
-	struct pmem_device *pmem;
-
-	/* free pmem disk */
-	pmem = dev_get_drvdata(&nd_pfn->dev);
-	pmem_detach_disk(pmem);
-
-	/* release nd_pfn resources */
-	kfree(nd_pfn->pfn_sb);
-	nd_pfn->pfn_sb = NULL;
-
-	return 0;
-}
-
-/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
- */
-static unsigned long init_altmap_base(resource_size_t base)
-{
-	unsigned long base_pfn = PHYS_PFN(base);
-
-	return PFN_SECTION_ALIGN_DOWN(base_pfn);
-}
-
-static unsigned long init_altmap_reserve(resource_size_t base)
-{
-	unsigned long reserve = PHYS_PFN(SZ_8K);
-	unsigned long base_pfn = PHYS_PFN(base);
-
-	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
-	return reserve;
-}
-
-static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
-{
-	int rc;
-	struct resource res;
-	struct request_queue *q;
-	struct pmem_device *pmem;
-	struct vmem_altmap *altmap;
-	struct device *dev = &nd_pfn->dev;
-	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-	struct nd_namespace_common *ndns = nd_pfn->ndns;
-	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
-	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	resource_size_t base = nsio->res.start + start_pad;
-	struct vmem_altmap __altmap = {
-		.base_pfn = init_altmap_base(base),
-		.reserve = init_altmap_reserve(base),
-	};
-
-	pmem = dev_get_drvdata(dev);
-	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
-	pmem->pfn_pad = start_pad + end_trunc;
-	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
-	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (pmem->data_offset < SZ_8K)
-			return -EINVAL;
-		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
-		altmap = NULL;
-	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
-			/ PAGE_SIZE;
-		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
-			dev_info(&nd_pfn->dev,
-					"number of pfns truncated from %lld to %ld\n",
-					le64_to_cpu(nd_pfn->pfn_sb->npfns),
-					nd_pfn->npfns);
-		altmap = & __altmap;
-		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
-		altmap->alloc = 0;
-	} else {
-		rc = -ENXIO;
-		goto err;
-	}
-
-	/* establish pfn range for lookup, and switch to direct map */
-	q = pmem->pmem_queue;
-	memcpy(&res, &nsio->res, sizeof(res));
-	res.start += start_pad;
-	res.end -= end_trunc;
-	devm_memunmap(dev, (void __force *) pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
-			&q->q_usage_counter, altmap);
-	pmem->pfn_flags |= PFN_MAP;
-	if (IS_ERR(pmem->virt_addr)) {
-		rc = PTR_ERR(pmem->virt_addr);
-		goto err;
-	}
-
-	/* attach pmem disk in "pfn-mode" */
-	rc = pmem_attach_disk(dev, ndns, pmem);
-	if (rc)
-		goto err;
-
-	return rc;
- err:
-	nvdimm_namespace_detach_pfn(ndns);
-	return rc;
-
-}
-
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
-{
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
-	int rc;
-
-	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return -ENODEV;
-
-	rc = nd_pfn_init(nd_pfn);
-	if (rc)
-		return rc;
-	/* we need a valid pfn_sb before we can init a vmem_altmap */
-	return __nvdimm_namespace_attach_pfn(nd_pfn);
-}
-
 static int nd_pmem_probe(struct device *dev)
 {
-	struct nd_region *nd_region = to_nd_region(dev->parent);
 	struct nd_namespace_common *ndns;
-	struct nd_namespace_io *nsio;
-	struct pmem_device *pmem;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 
-	nsio = to_nd_namespace_io(&ndns->dev);
-	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
-	if (IS_ERR(pmem))
-		return PTR_ERR(pmem);
+	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
+		return -ENXIO;
 
-	pmem->ndns = ndns;
-	dev_set_drvdata(dev, pmem);
-	ndns->rw_bytes = pmem_rw_bytes;
-	if (devm_init_badblocks(dev, &pmem->bb))
-		return -ENOMEM;
-	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
-
-	if (is_nd_btt(dev)) {
-		/* btt allocates its own request_queue */
-		blk_cleanup_queue(pmem->pmem_queue);
-		pmem->pmem_queue = NULL;
+	if (is_nd_btt(dev))
 		return nvdimm_namespace_attach_btt(ndns);
-	}
 
 	if (is_nd_pfn(dev))
-		return nvdimm_namespace_attach_pfn(ndns);
+		return pmem_attach_disk(dev, ndns);
 
-	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
-		/*
-		 * We'll come back as either btt-pmem, or pfn-pmem, so
-		 * drop the queue allocation for now.
-		 */
-		blk_cleanup_queue(pmem->pmem_queue);
+	/* if we find a valid info-block we'll come back as that personality */
+	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
+			|| nd_dax_probe(dev, ndns) == 0)
 		return -ENXIO;
-	}
 
-	return pmem_attach_disk(dev, ndns, pmem);
+	/* ...otherwise we're just a raw pmem device */
+	return pmem_attach_disk(dev, ndns);
 }
 
 static int nd_pmem_remove(struct device *dev)
 {
-	struct pmem_device *pmem = dev_get_drvdata(dev);
-
 	if (is_nd_btt(dev))
-		nvdimm_namespace_detach_btt(pmem->ndns);
-	else if (is_nd_pfn(dev))
-		nvdimm_namespace_detach_pfn(pmem->ndns);
-	else
-		pmem_detach_disk(pmem);
-
+		nvdimm_namespace_detach_btt(to_nd_btt(dev));
 	return 0;
 }
 
 static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
-	struct pmem_device *pmem = dev_get_drvdata(dev);
-	struct nd_namespace_common *ndns = pmem->ndns;
 	struct nd_region *nd_region = to_nd_region(dev->parent);
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct resource res = {
-		.start = nsio->res.start + pmem->data_offset,
-		.end = nsio->res.end,
-	};
+	struct pmem_device *pmem = dev_get_drvdata(dev);
+	resource_size_t offset = 0, end_trunc = 0;
+	struct nd_namespace_common *ndns;
+	struct nd_namespace_io *nsio;
+	struct resource res;
 
 	if (event != NVDIMM_REVALIDATE_POISON)
 		return;
 
-	if (is_nd_pfn(dev)) {
+	if (is_nd_btt(dev)) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		ndns = nd_btt->ndns;
+	} else if (is_nd_pfn(dev)) {
 		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
 		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 
-		res.start += __le32_to_cpu(pfn_sb->start_pad);
-		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
-	}
+		ndns = nd_pfn->ndns;
+		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
+		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	} else
+		ndns = to_ndns(dev);
 
+	nsio = to_nd_namespace_io(&ndns->dev);
+	res.start = nsio->res.start + offset;
+	res.end = nsio->res.end - end_trunc;
 	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 }
 
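
The reworked pmem_direct_access() above only vouches for the caller's
requested length once any bad blocks are known on the device, and otherwise
reports the remaining capacity past the data offset. A hypothetical
user-space model of that decision (the struct, field names and badblock
callback are invented for illustration) looks like this:

/* Hypothetical model of the length clamping in pmem_direct_access(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pmem {
	uint64_t size;          /* total bytes backing the device */
	uint64_t pfn_pad;       /* bytes lost to section padding */
	uint64_t data_offset;   /* info block + memmap reservation */
	bool     has_badblocks; /* any entries in the badblocks list? */
	bool   (*is_bad)(uint64_t sector, uint64_t len); /* range check */
};

/* Returns mappable bytes starting at 'sector', or -1 for a bad range. */
static int64_t dax_map_len(const struct fake_pmem *p, uint64_t sector,
			   uint64_t req_len)
{
	uint64_t offset = sector * 512 + p->data_offset;

	if (p->is_bad && p->is_bad(sector, req_len))
		return -1;                      /* caller turns this into -EIO */
	if (p->has_badblocks)
		return (int64_t)req_len;        /* only vouch for the request */
	return (int64_t)(p->size - p->pfn_pad - offset); /* rest of the device */
}

int main(void)
{
	struct fake_pmem p = {
		.size = 16ULL << 30, .pfn_pad = 0, .data_offset = 2ULL << 20,
		.has_badblocks = false, .is_bad = NULL,
	};

	printf("clean device:   %lld bytes\n", (long long)dax_map_len(&p, 0, 4096));
	p.has_badblocks = true;
	printf("with badblocks: %lld bytes\n", (long long)dax_map_len(&p, 0, 4096));
	return 0;
}
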
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 4b7715e..05a9123 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -54,6 +54,7 @@
 
 	nd_region->btt_seed = nd_btt_create(nd_region);
 	nd_region->pfn_seed = nd_pfn_create(nd_region);
+	nd_region->dax_seed = nd_dax_create(nd_region);
 	if (err == 0)
 		return 0;
 
@@ -86,6 +87,7 @@
 	nd_region->ns_seed = NULL;
 	nd_region->btt_seed = NULL;
 	nd_region->pfn_seed = NULL;
+	nd_region->dax_seed = NULL;
 	dev_set_drvdata(dev, NULL);
 	nvdimm_bus_unlock(dev);
 
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 139bf71..40fcfea 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -306,6 +306,23 @@
 }
 static DEVICE_ATTR_RO(pfn_seed);
 
+static ssize_t dax_seed_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	ssize_t rc;
+
+	nvdimm_bus_lock(dev);
+	if (nd_region->dax_seed)
+		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
+	else
+		rc = sprintf(buf, "\n");
+	nvdimm_bus_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(dax_seed);
+
 static ssize_t read_only_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -335,6 +352,7 @@
 	&dev_attr_mappings.attr,
 	&dev_attr_btt_seed.attr,
 	&dev_attr_pfn_seed.attr,
+	&dev_attr_dax_seed.attr,
 	&dev_attr_read_only.attr,
 	&dev_attr_set_cookie.attr,
 	&dev_attr_available_size.attr,
@@ -353,6 +371,9 @@
 	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
 		return 0;
 
+	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
+		return 0;
+
 	if (a != &dev_attr_set_cookie.attr
 			&& a != &dev_attr_available_size.attr)
 		return a->mode;
@@ -441,6 +462,13 @@
 			nd_region_create_pfn_seed(nd_region);
 		nvdimm_bus_unlock(dev);
 	}
+	if (is_nd_dax(dev) && probe) {
+		nd_region = to_nd_region(dev->parent);
+		nvdimm_bus_lock(dev);
+		if (nd_region->dax_seed == dev)
+			nd_region_create_dax_seed(nd_region);
+		nvdimm_bus_unlock(dev);
+	}
 }
 
 void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
@@ -718,6 +746,7 @@
 	ida_init(&nd_region->ns_ida);
 	ida_init(&nd_region->btt_ida);
 	ida_init(&nd_region->pfn_ida);
+	ida_init(&nd_region->dax_ida);
 	dev = &nd_region->dev;
 	dev_set_name(dev, "region%d", nd_region->id);
 	dev->parent = &nvdimm_bus->dev;
@@ -764,3 +793,8 @@
 			__func__);
 }
 EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+
+void __exit nd_region_devs_exit(void)
+{
+	ida_destroy(&region_ida);
+}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2de248b..1a51584 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -95,6 +95,15 @@
 			break;
 		}
 		break;
+	case NVME_CTRL_DEAD:
+		switch (old_state) {
+		case NVME_CTRL_DELETING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
 	default:
 		break;
 	}
@@ -720,10 +729,14 @@
 	switch (ns->pi_type) {
 	case NVME_NS_DPS_PI_TYPE3:
 		integrity.profile = &t10_pi_type3_crc;
+		integrity.tag_size = sizeof(u16) + sizeof(u32);
+		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 		break;
 	case NVME_NS_DPS_PI_TYPE1:
 	case NVME_NS_DPS_PI_TYPE2:
 		integrity.profile = &t10_pi_type1_crc;
+		integrity.tag_size = sizeof(u16);
+		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 		break;
 	default:
 		integrity.profile = NULL;
@@ -1212,6 +1225,9 @@
 		return ctrl->ops->reset_ctrl(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
 		return nvme_reset_subsystem(ctrl);
+	case NVME_IOCTL_RESCAN:
+		nvme_queue_scan(ctrl);
+		return 0;
 	default:
 		return -ENOTTY;
 	}
@@ -1239,6 +1255,17 @@
 }
 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
 
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+	nvme_queue_scan(ctrl);
+	return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
@@ -1342,6 +1369,7 @@
 
 static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_reset_controller.attr,
+	&dev_attr_rescan_controller.attr,
 	&dev_attr_model.attr,
 	&dev_attr_serial.attr,
 	&dev_attr_firmware_rev.attr,
@@ -1580,6 +1608,15 @@
 {
 	struct nvme_ns *ns, *next;
 
+	/*
+	 * The dead state indicates the controller was not gracefully
+	 * disconnected. In that case, we won't be able to flush any data while
+	 * removing the namespaces' disks; fail all the queues now to avoid
+	 * potentially having to clean up the failed sync later.
+	 */
+	if (ctrl->state == NVME_CTRL_DEAD)
+		nvme_kill_queues(ctrl);
+
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
 		nvme_ns_remove(ns);
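
The controller state-machine hunk above only lets a controller enter the new
NVME_CTRL_DEAD state from NVME_CTRL_DELETING. A simplified stand-alone model
of that nested-switch transition check (state names shortened, rules for the
other target states elided) is sketched below:

/* Stand-alone model of the "is this old->new transition allowed?" check. */
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING, CTRL_DEAD };

static bool transition_allowed(enum ctrl_state old_state, enum ctrl_state new_state)
{
	switch (new_state) {
	case CTRL_DEAD:
		/* A controller may only be marked dead while being deleted. */
		return old_state == CTRL_DELETING;
	/* ... rules for the other target states elided ... */
	default:
		return false;
	}
}

int main(void)
{
	printf("DELETING -> DEAD: %s\n",
	       transition_allowed(CTRL_DELETING, CTRL_DEAD) ? "allowed" : "rejected");
	printf("LIVE     -> DEAD: %s\n",
	       transition_allowed(CTRL_LIVE, CTRL_DEAD) ? "allowed" : "rejected");
	return 0;
}
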
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 114b928..1daa048 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@
 	NVME_CTRL_LIVE,
 	NVME_CTRL_RESETTING,
 	NVME_CTRL_DELETING,
+	NVME_CTRL_DEAD,
 };
 
 struct nvme_ctrl {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0f093f1..78dca31 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1394,7 +1394,7 @@
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, i, vecs, nr_io_queues, size;
 
-	nr_io_queues = num_possible_cpus();
+	nr_io_queues = num_online_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
@@ -1551,12 +1551,12 @@
 
 static void nvme_disable_io_queues(struct nvme_dev *dev)
 {
-	int pass;
+	int pass, queues = dev->online_queues - 1;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
 
 	for (pass = 0; pass < 2; pass++) {
-		int sent = 0, i = dev->queue_count - 1;
+		int sent = 0, i = queues;
 
 		reinit_completion(&dev->ioq_wait);
  retry:
@@ -1857,7 +1857,7 @@
 
 	nvme_kill_queues(&dev->ctrl);
 	if (pci_get_drvdata(pdev))
-		pci_stop_and_remove_bus_device_locked(pdev);
+		device_release_driver(&pdev->dev);
 	nvme_put_ctrl(&dev->ctrl);
 }
 
@@ -2017,6 +2017,10 @@
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
 	pci_set_drvdata(pdev, NULL);
+
+	if (!pci_device_is_present(pdev))
+		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+
 	flush_work(&dev->reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_dev_disable(dev, true);
@@ -2060,14 +2064,17 @@
 	 * shutdown the controller to quiesce. The controller will be restarted
 	 * after the slot reset through driver's slot_reset callback.
 	 */
-	dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
 	switch (state) {
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
+		dev_warn(dev->ctrl.device,
+			"frozen state error detected, reset controller\n");
 		nvme_dev_disable(dev, false);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
+		dev_warn(dev->ctrl.device,
+			"failure state error detected, request disconnect\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2102,6 +2109,12 @@
 	{ PCI_VDEVICE(INTEL, 0x0953),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 				NVME_QUIRK_DISCARD_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a53),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a54),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index bb4ea12..965911d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -113,7 +113,7 @@
 
 	rc = nvmem_reg_read(nvmem, pos, buf, count);
 
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	return count;
@@ -147,7 +147,7 @@
 
 	rc = nvmem_reg_write(nvmem, pos, buf, count);
 
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	return count;
@@ -366,7 +366,7 @@
 		}
 
 		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
-		if (IS_ERR_VALUE(rval)) {
+		if (rval) {
 			kfree(cells[i]);
 			goto err;
 		}
@@ -963,7 +963,7 @@
 
 	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
 
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	/* shift bits in-place */
@@ -998,7 +998,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	rc = __nvmem_cell_read(nvmem, cell, buf, len);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc) {
 		kfree(buf);
 		return ERR_PTR(rc);
 	}
@@ -1083,7 +1083,7 @@
 	if (cell->bit_offset || cell->nbits)
 		kfree(buf);
 
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	return len;
@@ -1111,11 +1111,11 @@
 		return -EINVAL;
 
 	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	return len;
@@ -1141,7 +1141,7 @@
 		return -EINVAL;
 
 	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	return nvmem_cell_write(&cell, buf, cell.bytes);
@@ -1170,7 +1170,7 @@
 
 	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
 
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 	return bytes;
@@ -1198,7 +1198,7 @@
 
 	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
 
-	if (IS_ERR_VALUE(rc))
+	if (rc)
 		return rc;
 
 
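
The IS_ERR_VALUE() conversions above reflect that these nvmem helpers return
either 0 or a negative errno in a plain int, for which "if (rc)" is the
simpler and always-correct test; IS_ERR_VALUE() is meant for unsigned long
values in the top error range and can misfire once unsigned 32-bit
intermediates get involved. A small user-space illustration of the pitfall
(simplified macro, invented values):

/* Why a plain "if (rc)" beats IS_ERR_VALUE() for int error returns. */
#include <stdio.h>

#define MAX_ERRNO 4095UL
/* Simplified kernel-style definition, intended for unsigned long inputs. */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int rc = -22;                         /* e.g. -EINVAL from a read hook */
	unsigned int urc = (unsigned int)rc;  /* same value via a u32 temporary */

	/* Sign extension makes the int case work on 64-bit, but the u32
	 * round trip zero-extends and the error is no longer detected. */
	printf("int rc: IS_ERR_VALUE=%d (rc != 0)=%d\n",
	       (int)IS_ERR_VALUE(rc), rc != 0);
	printf("u32 rc: IS_ERR_VALUE=%d (rc != 0)=%d\n",
	       (int)IS_ERR_VALUE(urc), urc != 0);
	return 0;
}
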
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index bee3fa9..d7efd9d 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -10,7 +10,6 @@
 obj-$(CONFIG_OF_MDIO)	+= of_mdio.o
 obj-$(CONFIG_OF_PCI)	+= of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
-obj-$(CONFIG_OF_MTD)	+= of_mtd.o
 obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
 obj-$(CONFIG_OF_RESOLVE)  += resolver.o
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
deleted file mode 100644
index b7361ed..0000000
--- a/drivers/of/of_mtd.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * OF helpers for mtd.
- *
- * This file is released under the GPLv2
- *
- */
-#include <linux/kernel.h>
-#include <linux/of_mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/export.h>
-
-/**
- * It maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h
- * into the device tree binding of 'nand-ecc', so that MTD
- * device driver can get nand ecc from device tree.
- */
-static const char *nand_ecc_modes[] = {
-	[NAND_ECC_NONE]		= "none",
-	[NAND_ECC_SOFT]		= "soft",
-	[NAND_ECC_HW]		= "hw",
-	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
-	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
-	[NAND_ECC_SOFT_BCH]	= "soft_bch",
-};
-
-/**
- * of_get_nand_ecc_mode - Get nand ecc mode for given device_node
- * @np:	Pointer to the given device_node
- *
- * The function gets ecc mode string from property 'nand-ecc-mode',
- * and return its index in nand_ecc_modes table, or errno in error case.
- */
-int of_get_nand_ecc_mode(struct device_node *np)
-{
-	const char *pm;
-	int err, i;
-
-	err = of_property_read_string(np, "nand-ecc-mode", &pm);
-	if (err < 0)
-		return err;
-
-	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
-		if (!strcasecmp(pm, nand_ecc_modes[i]))
-			return i;
-
-	return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode);
-
-/**
- * of_get_nand_ecc_step_size - Get ECC step size associated to
- * the required ECC strength (see below).
- * @np:	Pointer to the given device_node
- *
- * return the ECC step size, or errno in error case.
- */
-int of_get_nand_ecc_step_size(struct device_node *np)
-{
-	int ret;
-	u32 val;
-
-	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
-	return ret ? ret : val;
-}
-EXPORT_SYMBOL_GPL(of_get_nand_ecc_step_size);
-
-/**
- * of_get_nand_ecc_strength - Get required ECC strength over the
- * correspnding step size as defined by 'nand-ecc-size'
- * @np:	Pointer to the given device_node
- *
- * return the ECC strength, or errno in error case.
- */
-int of_get_nand_ecc_strength(struct device_node *np)
-{
-	int ret;
-	u32 val;
-
-	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
-	return ret ? ret : val;
-}
-EXPORT_SYMBOL_GPL(of_get_nand_ecc_strength);
-
-/**
- * of_get_nand_bus_width - Get nand bus witdh for given device_node
- * @np:	Pointer to the given device_node
- *
- * return bus width option, or errno in error case.
- */
-int of_get_nand_bus_width(struct device_node *np)
-{
-	u32 val;
-
-	if (of_property_read_u32(np, "nand-bus-width", &val))
-		return 8;
-
-	switch(val) {
-	case 8:
-	case 16:
-		return val;
-	default:
-		return -EIO;
-	}
-}
-EXPORT_SYMBOL_GPL(of_get_nand_bus_width);
-
-/**
- * of_get_nand_on_flash_bbt - Get nand on flash bbt for given device_node
- * @np:	Pointer to the given device_node
- *
- * return true if present false other wise
- */
-bool of_get_nand_on_flash_bbt(struct device_node *np)
-{
-	return of_property_read_bool(np, "nand-on-flash-bbt");
-}
-EXPORT_SYMBOL_GPL(of_get_nand_on_flash_bbt);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f2d01d4..1b8304e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -950,17 +950,14 @@
 
 		/* For SPIs, we need to track the affinity per IRQ */
 		if (using_spi) {
-			if (i >= pdev->num_resources) {
-				of_node_put(dn);
+			if (i >= pdev->num_resources)
 				break;
-			}
 
 			irqs[i] = cpu;
 		}
 
 		/* Keep track of the CPUs containing this PMU type */
 		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-		of_node_put(dn);
 		i++;
 	} while (1);
 
@@ -995,9 +992,6 @@
 
 	armpmu_init(pmu);
 
-	if (!__oprofile_cpu_pmu)
-		__oprofile_cpu_pmu = pmu;
-
 	pmu->plat_device = pdev;
 
 	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
@@ -1033,6 +1027,9 @@
 	if (ret)
 		goto out_destroy;
 
+	if (!__oprofile_cpu_pmu)
+		__oprofile_cpu_pmu = pmu;
+
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 			pmu->name, pmu->num_events);
 
@@ -1043,6 +1040,7 @@
 out_free:
 	pr_info("%s: failed to register PMU devices!\n",
 		of_node_full_name(node));
+	kfree(pmu->irq_affinity);
 	kfree(pmu);
 	return ret;
 }
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55182fc..677a811 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -153,8 +153,10 @@
 		.name			= (n),			\
 		.pins			= (p),			\
 		.npins			= ARRAY_SIZE((p)),	\
-		.has_simple_funcs	= 1,		\
-		.simple_funcs		= (f),			\
+		.has_simple_funcs	= 1,			\
+		{						\
+			.simple_funcs		= (f),		\
+		},						\
 		.nfuncs			= ARRAY_SIZE((f)),	\
 	}
 #define PIN_GROUP_MIXED(n, p, f)				\
@@ -163,7 +165,9 @@
 		.pins			= (p),			\
 		.npins			= ARRAY_SIZE((p)),	\
 		.has_simple_funcs	= 0,			\
-		.mixed_funcs		= (f),			\
+		{						\
+			.mixed_funcs		= (f),		\
+		},						\
 		.nfuncs			= ARRAY_SIZE((f)),	\
 	}
 
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 207b13b..a607655 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1256,9 +1256,10 @@
 	const struct mtk_desc_pin *pin;
 
 	chained_irq_enter(chip, desc);
-	for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+	for (eint_num = 0;
+	     eint_num < pctl->devdata->ap_num;
+	     eint_num += 32, reg += 4) {
 		status = readl(reg);
-		reg += 4;
 		while (status) {
 			offset = __ffs(status);
 			index = eint_num + offset;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ccbfc32..38facef 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -854,7 +854,7 @@
 
 	clk_enable(nmk_chip->clk);
 
-	dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+	dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
 
 	clk_disable(nmk_chip->clk);
 
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index d03df4a..76bdae1 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -64,4 +64,14 @@
         help
           ChromeOS EC communication protocol helpers.
 
+config CROS_KBD_LED_BACKLIGHT
+	tristate "Backlight LED support for Chrome OS keyboards"
+	depends on LEDS_CLASS && ACPI
+	help
+	  This option enables support for the keyboard backlight LEDs on
+	  select Chrome OS systems.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cros_kbd_led_backlight.
+
 endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index bc498bd..4f34627 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,8 +1,9 @@
 
-obj-$(CONFIG_CHROMEOS_LAPTOP)	+= chromeos_laptop.o
-obj-$(CONFIG_CHROMEOS_PSTORE)	+= chromeos_pstore.o
-cros_ec_devs-objs		:= cros_ec_dev.o cros_ec_sysfs.o \
-				   cros_ec_lightbar.o cros_ec_vbc.o
-obj-$(CONFIG_CROS_EC_CHARDEV)   += cros_ec_devs.o
-obj-$(CONFIG_CROS_EC_LPC)       += cros_ec_lpc.o
-obj-$(CONFIG_CROS_EC_PROTO)	+= cros_ec_proto.o
+obj-$(CONFIG_CHROMEOS_LAPTOP)		+= chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE)		+= chromeos_pstore.o
+cros_ec_devs-objs			:= cros_ec_dev.o cros_ec_sysfs.o \
+					   cros_ec_lightbar.o cros_ec_vbc.o
+obj-$(CONFIG_CROS_EC_CHARDEV)		+= cros_ec_devs.o
+obj-$(CONFIG_CROS_EC_LPC)		+= cros_ec_lpc.o
+obj-$(CONFIG_CROS_EC_PROTO)		+= cros_ec_proto.o
+obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT)	+= cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 2b441e9..e8a44a9 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -34,6 +34,7 @@
 #define ATMEL_TS_I2C_ADDR	0x4a
 #define ATMEL_TS_I2C_BL_ADDR	0x26
 #define CYAPA_TP_I2C_ADDR	0x67
+#define ELAN_TP_I2C_ADDR	0x15
 #define ISL_ALS_I2C_ADDR	0x44
 #define TAOS_ALS_I2C_ADDR	0x29
 
@@ -73,7 +74,7 @@
 	int tries;
 };
 
-#define MAX_I2C_PERIPHERALS 3
+#define MAX_I2C_PERIPHERALS 4
 
 struct chromeos_laptop {
 	struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
@@ -86,6 +87,11 @@
 	.flags		= I2C_CLIENT_WAKE,
 };
 
+static struct i2c_board_info elantech_device = {
+	I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+	.flags		= I2C_CLIENT_WAKE,
+};
+
 static struct i2c_board_info isl_als_device = {
 	I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
 };
@@ -306,6 +312,16 @@
 	return (!tp) ? -EAGAIN : 0;
 }
 
+static int setup_elantech_tp(enum i2c_adapter_type type)
+{
+	if (tp)
+		return 0;
+
+	/* add elantech touchpad */
+	tp = add_i2c_device("trackpad", type, &elantech_device);
+	return (!tp) ? -EAGAIN : 0;
+}
+
 static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
 {
 	const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
@@ -445,6 +461,8 @@
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+		/* Elan Touchpad option. */
+		{ .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
 	},
 };
 
@@ -475,6 +493,8 @@
 		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+		/* Elan Touchpad option. */
+		{ .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
 		/* Light Sensor. */
 		{ .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
 	},
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index 3474920..308a853 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -8,6 +8,7 @@
  *  the Free Software Foundation, version 2 of the License.
  */
 
+#include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -58,7 +59,7 @@
 static struct ramoops_platform_data chromeos_ramoops_data = {
 	.mem_size	= 0x100000,
 	.mem_address	= 0xf00000,
-	.record_size	= 0x20000,
+	.record_size	= 0x40000,
 	.console_size	= 0x20000,
 	.ftrace_size	= 0x20000,
 	.dump_oops	= 1,
@@ -71,9 +72,59 @@
 	},
 };
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ramoops_acpi_match[] = {
+	{ "GOOG9999", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match);
+
+static struct platform_driver chromeos_ramoops_acpi = {
+	.driver		= {
+		.name	= "chromeos_pstore",
+		.acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match),
+	},
+};
+
+static int __init chromeos_probe_acpi(struct platform_device *pdev)
+{
+	struct resource *res;
+	resource_size_t len;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENOMEM;
+
+	len = resource_size(res);
+	if (!res->start || !len)
+		return -ENOMEM;
+
+	pr_info("chromeos ramoops using acpi device.\n");
+
+	chromeos_ramoops_data.mem_size = len;
+	chromeos_ramoops_data.mem_address = res->start;
+
+	return 0;
+}
+
+static bool __init chromeos_check_acpi(void)
+{
+	if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi))
+		return true;
+	return false;
+}
+#else
+static inline bool chromeos_check_acpi(void) { return false; }
+#endif
+
 static int __init chromeos_pstore_init(void)
 {
-	if (dmi_check_system(chromeos_pstore_dmi_table))
+	bool acpi_dev_found;
+
+	/* First check ACPI for non-hardcoded values from firmware. */
+	acpi_dev_found = chromeos_check_acpi();
+
+	if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table))
 		return platform_device_register(&chromeos_ramoops);
 
 	return -ENODEV;
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index d45cd25..6d8ee3b 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -137,6 +137,10 @@
 	if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
 		return -EFAULT;
 
+	if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
+	    (u_cmd.insize > EC_MAX_MSG_BYTES))
+		return -EINVAL;
+
 	s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
 			GFP_KERNEL);
 	if (!s_cmd)
@@ -208,6 +212,9 @@
 	.release = ec_device_release,
 	.read = ec_device_read,
 	.unlocked_ioctl = ec_device_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = ec_device_ioctl,
+#endif
 };
 
 static void __remove(struct device *dev)
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index ff76405..8df3d44 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -412,9 +412,13 @@
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct cros_ec_dev *ec = container_of(dev,
 					      struct cros_ec_dev, class_dev);
-	struct platform_device *pdev = container_of(ec->dev,
-						   struct platform_device, dev);
-	if (pdev->id != 0)
+	struct platform_device *pdev = to_platform_device(ec->dev);
+	struct cros_ec_platform *pdata = pdev->dev.platform_data;
+	int is_cros_ec;
+
+	is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME);
+
+	if (is_cros_ec != 0)
 		return 0;
 
 	/* Only instantiate this stuff if the EC has a lightbar */
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 990308c..b6e161f 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -298,8 +298,8 @@
 			ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
 			ec_dev->max_passthru = 0;
 			ec_dev->pkt_xfer = NULL;
-			ec_dev->din_size = EC_MSG_BYTES;
-			ec_dev->dout_size = EC_MSG_BYTES;
+			ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+			ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
 		} else {
 			/*
 			 * It's possible for a test to occur too early when
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
new file mode 100644
index 0000000..ca3e4da
--- /dev/null
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -0,0 +1,122 @@
+/*
+ *  Keyboard backlight LED driver for Chrome OS.
+ *
+ *  Copyright (C) 2012 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Keyboard LED ACPI Device must be defined in firmware */
+#define ACPI_KEYBOARD_BACKLIGHT_DEVICE	"\\_SB.KBLT"
+#define ACPI_KEYBOARD_BACKLIGHT_READ	ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
+#define ACPI_KEYBOARD_BACKLIGHT_WRITE	ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
+
+#define ACPI_KEYBOARD_BACKLIGHT_MAX		100
+
+static void keyboard_led_set_brightness(struct led_classdev *cdev,
+					enum led_brightness brightness)
+{
+	union acpi_object param;
+	struct acpi_object_list input;
+	acpi_status status;
+
+	param.type = ACPI_TYPE_INTEGER;
+	param.integer.value = brightness;
+	input.count = 1;
+	input.pointer = &param;
+
+	status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE,
+				      &input, NULL);
+	if (ACPI_FAILURE(status))
+		dev_err(cdev->dev, "Error setting keyboard LED value: %d\n",
+			status);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness(struct led_classdev *cdev)
+{
+	unsigned long long brightness;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ,
+				       NULL, &brightness);
+	if (ACPI_FAILURE(status)) {
+		dev_err(cdev->dev, "Error getting keyboard LED value: %d\n",
+			status);
+		return -EIO;
+	}
+
+	return brightness;
+}
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+	struct led_classdev *cdev;
+	acpi_handle handle;
+	acpi_status status;
+	int error;
+
+	/* Look for the keyboard LED ACPI Device */
+	status = acpi_get_handle(ACPI_ROOT_OBJECT,
+				 ACPI_KEYBOARD_BACKLIGHT_DEVICE,
+				 &handle);
+	if (ACPI_FAILURE(status)) {
+		dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n",
+			ACPI_KEYBOARD_BACKLIGHT_DEVICE, status);
+		return -ENXIO;
+	}
+
+	cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
+	if (!cdev)
+		return -ENOMEM;
+
+	cdev->name = "chromeos::kbd_backlight";
+	cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
+	cdev->flags |= LED_CORE_SUSPENDRESUME;
+	cdev->brightness_set = keyboard_led_set_brightness;
+	cdev->brightness_get = keyboard_led_get_brightness;
+
+	error = devm_led_classdev_register(&pdev->dev, cdev);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+static const struct acpi_device_id keyboard_led_id[] = {
+	{ "GOOG0002", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
+
+static struct platform_driver keyboard_led_driver = {
+	.driver		= {
+		.name	= "chromeos-keyboard-leds",
+		.acpi_match_table = ACPI_PTR(keyboard_led_id),
+	},
+	.probe		= keyboard_led_probe,
+};
+module_platform_driver(keyboard_led_driver);
+
+MODULE_AUTHOR("Simon Que <sque@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:chromeos-keyboard-leds");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ed2004b..c06bb85 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -846,6 +846,18 @@
 
 	  If you are running on a Galileo/Quark say Y here.
 
+config INTEL_PMC_CORE
+	bool "Intel PMC Core driver"
+	depends on X86 && PCI
+	---help---
+	  The Intel Platform Controller Hub for Intel Core SoCs provides access
+	  to Power Management Controller registers via a PCI interface. This
+	  driver exposes the debugging capabilities and supported features of
+	  the Power Management Controller.
+
+	  Supported features:
+		- SLP_S0_RESIDENCY counter.
+
 config IBM_RTL
 	tristate "Device driver to enable PRTL support"
 	depends on X86 && PCI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 448443c..9b11b40 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -69,3 +69,4 @@
 obj-$(CONFIG_INTEL_TELEMETRY)	+= intel_telemetry_core.o \
 				   intel_telemetry_pltdrv.o \
 				   intel_telemetry_debugfs.o
+obj-$(CONFIG_INTEL_PMC_CORE)    += intel_pmc_core.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f2b5d0a..15f1311 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -771,12 +771,14 @@
 {
 	struct asus_laptop *asus = bl_get_data(bd);
 	unsigned long long value;
-	acpi_status rv = AE_OK;
+	acpi_status rv;
 
 	rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
 				   NULL, &value);
-	if (ACPI_FAILURE(rv))
+	if (ACPI_FAILURE(rv)) {
 		pr_warn("Error reading brightness\n");
+		return 0;
+	}
 
 	return value;
 }
@@ -865,7 +867,7 @@
 	int len = 0;
 	unsigned long long temp;
 	char buf[16];		/* enough for all info */
-	acpi_status rv = AE_OK;
+	acpi_status rv;
 
 	/*
 	 * We use the easy way, we don't care of off and count,
@@ -946,11 +948,10 @@
 			      const char *method)
 {
 	int rv, value;
-	int out = 0;
 
 	rv = parse_arg(buf, count, &value);
-	if (rv > 0)
-		out = value ? 1 : 0;
+	if (rv <= 0)
+		return rv;
 
 	if (write_acpi_int(asus->handle, method, value))
 		return -ENODEV;
@@ -1265,7 +1266,7 @@
 static int asus_gps_status(struct asus_laptop *asus)
 {
 	unsigned long long status;
-	acpi_status rv = AE_OK;
+	acpi_status rv;
 
 	rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
 				   NULL, &status);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a96630d..a26dca3 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -114,6 +114,7 @@
 #define ASUS_WMI_DEVID_LED6		0x00020016
 
 /* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE	0x00050001 /* Ambient Light Sensor */
 #define ASUS_WMI_DEVID_BACKLIGHT	0x00050011
 #define ASUS_WMI_DEVID_BRIGHTNESS	0x00050012
 #define ASUS_WMI_DEVID_KBD_BACKLIGHT	0x00050021
@@ -1730,6 +1731,7 @@
 ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
 ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
 ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
+ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
 
 static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
@@ -1756,6 +1758,7 @@
 	&dev_attr_cardr.attr,
 	&dev_attr_touchpad.attr,
 	&dev_attr_lid_resume.attr,
+	&dev_attr_als_enable.attr,
 	NULL
 };
 
@@ -1776,6 +1779,8 @@
 		devid = ASUS_WMI_DEVID_TOUCHPAD;
 	else if (attr == &dev_attr_lid_resume.attr)
 		devid = ASUS_WMI_DEVID_LID_RESUME;
+	else if (attr == &dev_attr_als_enable.attr)
+		devid = ASUS_WMI_DEVID_ALS_ENABLE;
 
 	if (devid != -1)
 		ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index b51a200..dcd9f40 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -28,6 +28,7 @@
 	enum rbtn_type type;
 	struct rfkill *rfkill;
 	struct input_dev *input_dev;
+	bool suspended;
 };
 
 
@@ -235,9 +236,55 @@
 	{ "", 0 },
 };
 
+#ifdef CONFIG_PM_SLEEP
+static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
+{
+	struct rbtn_data *rbtn_data = context;
+
+	rbtn_data->suspended = false;
+}
+
+static int rbtn_suspend(struct device *dev)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	struct rbtn_data *rbtn_data = acpi_driver_data(device);
+
+	rbtn_data->suspended = true;
+
+	return 0;
+}
+
+static int rbtn_resume(struct device *dev)
+{
+	struct acpi_device *device = to_acpi_device(dev);
+	struct rbtn_data *rbtn_data = acpi_driver_data(device);
+	acpi_status status;
+
+	/*
+	 * Upon resume, some BIOSes send an ACPI notification that triggers
+	 * an unwanted input event. In order to ignore it, we use a flag
+	 * that we set at suspend and clear once we have received the extra
+	 * ACPI notification. Since ACPI notifications are delivered
+	 * asynchronously to drivers, we clear the flag from the workqueue
+	 * used to deliver the notifications. This should be enough
+	 * to have the flag cleared only after we received the extra
+	 * notification, if any.
+	 */
+	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+			 rbtn_clear_suspended_flag, rbtn_data);
+	if (ACPI_FAILURE(status))
+		rbtn_clear_suspended_flag(rbtn_data);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
+
 static struct acpi_driver rbtn_driver = {
 	.name = "dell-rbtn",
 	.ids = rbtn_ids,
+	.drv.pm = &rbtn_pm_ops,
 	.ops = {
 		.add = rbtn_add,
 		.remove = rbtn_remove,
@@ -399,6 +446,15 @@
 {
 	struct rbtn_data *rbtn_data = device->driver_data;
 
+	/*
+	 * Some BIOSes send a notification at resume.
+	 * Ignore it to prevent unwanted input events.
+	 */
+	if (rbtn_data->suspended) {
+		dev_dbg(&device->dev, "ACPI notification ignored\n");
+		return;
+	}
+
 	if (event != 0x80) {
 		dev_info(&device->dev, "Received unknown event (0x%x)\n",
 			 event);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ffc84cc..ce41bc3 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -69,7 +69,7 @@
 #include <linux/kfifo.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 #include <linux/leds.h>
 #endif
 #include <acpi/video.h>
@@ -100,13 +100,14 @@
 /* FUNC interface - responses */
 #define UNSUPPORTED_CMD 0x80000000
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 /* FUNC interface - LED control */
 #define FUNC_LED_OFF	0x1
 #define FUNC_LED_ON	0x30001
 #define KEYBOARD_LAMPS	0x100
 #define LOGOLAMP_POWERON 0x2000
 #define LOGOLAMP_ALWAYS  0x4000
+#define RADIO_LED_ON	0x20
 #endif
 
 /* Hotkey details */
@@ -174,13 +175,14 @@
 	int rfkill_state;
 	int logolamp_registered;
 	int kblamps_registered;
+	int radio_led_registered;
 };
 
 static struct fujitsu_hotkey_t *fujitsu_hotkey;
 
 static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 static enum led_brightness logolamp_get(struct led_classdev *cdev);
 static void logolamp_set(struct led_classdev *cdev,
 			       enum led_brightness brightness);
@@ -200,6 +202,16 @@
  .brightness_get = kblamps_get,
  .brightness_set = kblamps_set
 };
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev);
+static void radio_led_set(struct led_classdev *cdev,
+			       enum led_brightness brightness);
+
+static struct led_classdev radio_led = {
+ .name = "fujitsu::radio_led",
+ .brightness_get = radio_led_get,
+ .brightness_set = radio_led_set
+};
 #endif
 
 #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -249,7 +261,7 @@
 	return value;
 }
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 /* LED class callbacks */
 
 static void logolamp_set(struct led_classdev *cdev,
@@ -275,6 +287,15 @@
 		call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
 }
 
+static void radio_led_set(struct led_classdev *cdev,
+				enum led_brightness brightness)
+{
+	if (brightness >= LED_FULL)
+		call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+	else
+		call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+}
+
 static enum led_brightness logolamp_get(struct led_classdev *cdev)
 {
 	enum led_brightness brightness = LED_OFF;
@@ -299,6 +320,16 @@
 
 	return brightness;
 }
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev)
+{
+	enum led_brightness brightness = LED_OFF;
+
+	if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+		brightness = LED_FULL;
+
+	return brightness;
+}
 #endif
 
 /* Hardware access for LCD brightness control */
@@ -872,7 +903,7 @@
 	/* Suspect this is a keymap of the application panel, print it */
 	pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 	if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
 		result = led_classdev_register(&fujitsu->pf_device->dev,
 						&logolamp_led);
@@ -895,6 +926,23 @@
 			       result);
 		}
 	}
+
+	/*
+	 * BTNI bit 24 seems to indicate the presence of a radio toggle
+	 * button in place of a slide switch, and all such machines appear
+	 * to also have an RF LED.  Therefore use bit 24 as an indicator
+	 * that an RF LED is present.
+	 */
+	if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+		result = led_classdev_register(&fujitsu->pf_device->dev,
+						&radio_led);
+		if (result == 0) {
+			fujitsu_hotkey->radio_led_registered = 1;
+		} else {
+			pr_err("Could not register LED handler for radio LED, error %i\n",
+			       result);
+		}
+	}
 #endif
 
 	return result;
@@ -915,12 +963,15 @@
 	struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
 	struct input_dev *input = fujitsu_hotkey->input;
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 	if (fujitsu_hotkey->logolamp_registered)
 		led_classdev_unregister(&logolamp_led);
 
 	if (fujitsu_hotkey->kblamps_registered)
 		led_classdev_unregister(&kblamps_led);
+
+	if (fujitsu_hotkey->radio_led_registered)
+		led_classdev_unregister(&radio_led);
 #endif
 
 	input_unregister_device(input);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f..4a23fbc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -48,7 +48,10 @@
 #define CFG_CAMERA_BIT	(19)
 
 #if IS_ENABLED(CONFIG_ACPI_WMI)
-static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6";
+static const char *const ideapad_wmi_fnesc_events[] = {
+	"26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
+	"56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
+};
 #endif
 
 enum {
@@ -93,6 +96,7 @@
 	struct dentry *debug;
 	unsigned long cfg;
 	bool has_hw_rfkill_switch;
+	const char *fnesc_guid;
 };
 
 static bool no_bt_rfkill;
@@ -989,8 +993,16 @@
 		ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
 	if (ret)
 		goto notification_failed;
+
 #if IS_ENABLED(CONFIG_ACPI_WMI)
-	ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv);
+	for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
+		ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
+						 ideapad_wmi_notify, priv);
+		if (ret == AE_OK) {
+			priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
+			break;
+		}
+	}
 	if (ret != AE_OK && ret != AE_NOT_EXIST)
 		goto notification_failed_wmi;
 #endif
@@ -1020,7 +1032,8 @@
 	int i;
 
 #if IS_ENABLED(CONFIG_ACPI_WMI)
-	wmi_remove_notify_handler(ideapad_wmi_fnesc_event);
+	if (priv->fnesc_guid)
+		wmi_remove_notify_handler(priv->fnesc_guid);
 #endif
 	acpi_remove_notify_handler(priv->adev->handle,
 		ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 0a919d8..cbe0102 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -306,66 +306,61 @@
 #define to_intel_menlow_attr(_attr)	\
 	container_of(_attr, struct intel_menlow_attribute, attr)
 
-static ssize_t aux0_show(struct device *dev,
-			 struct device_attribute *dev_attr, char *buf)
+static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
+			char *buf, int idx)
 {
 	struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
 	unsigned long long value;
 	int result;
 
-	result = sensor_get_auxtrip(attr->handle, 0, &value);
+	result = sensor_get_auxtrip(attr->handle, idx, &value);
 
 	return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
 }
 
+static ssize_t aux0_show(struct device *dev,
+			 struct device_attribute *dev_attr, char *buf)
+{
+	return aux_show(dev, dev_attr, buf, 0);
+}
+
 static ssize_t aux1_show(struct device *dev,
 			 struct device_attribute *dev_attr, char *buf)
 {
+	return aux_show(dev, dev_attr, buf, 1);
+}
+
+static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
+			 const char *buf, size_t count, int idx)
+{
 	struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
-	unsigned long long value;
+	int value;
 	int result;
 
-	result = sensor_get_auxtrip(attr->handle, 1, &value);
+	/* Sanity check; should be a positive integer */
+	if (!sscanf(buf, "%d", &value))
+		return -EINVAL;
 
-	return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+	if (value < 0)
+		return -EINVAL;
+
+	result = sensor_set_auxtrip(attr->handle, idx,
+				    CELSIUS_TO_DECI_KELVIN(value));
+	return result ? result : count;
 }
 
 static ssize_t aux0_store(struct device *dev,
 			  struct device_attribute *dev_attr,
 			  const char *buf, size_t count)
 {
-	struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
-	int value;
-	int result;
-
-	/*Sanity check; should be a positive integer */
-	if (!sscanf(buf, "%d", &value))
-		return -EINVAL;
-
-	if (value < 0)
-		return -EINVAL;
-
-	result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value));
-	return result ? result : count;
+	return aux_store(dev, dev_attr, buf, count, 0);
 }
 
 static ssize_t aux1_store(struct device *dev,
 			  struct device_attribute *dev_attr,
 			  const char *buf, size_t count)
 {
-	struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
-	int value;
-	int result;
-
-	/*Sanity check; should be a positive integer */
-	if (!sscanf(buf, "%d", &value))
-		return -EINVAL;
-
-	if (value < 0)
-		return -EINVAL;
-
-	result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value));
-	return result ? result : count;
+	return aux_store(dev, dev_attr, buf, count, 1);
 }
 
 /* BIOS can enable/disable the thermal user application in dabney platform */
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
new file mode 100644
index 0000000..2776bec
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -0,0 +1,200 @@
+/*
+ * Intel Core SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/pmc_core.h>
+
+#include "intel_pmc_core.h"
+
+static struct pmc_dev pmc;
+
+static const struct pci_device_id pmc_pci_ids[] = {
+	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+	{ 0, },
+};
+
+static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
+{
+	return readl(pmcdev->regbase + reg_offset);
+}
+
+static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
+{
+	return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+}
+
+/**
+ * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency.
+ * @data: Out param that contains current SLP_S0 count.
+ *
+ * This API currently supports Intel Skylake SoC and Sunrise
+ * Point Platform Controller Hub. Future platform support
+ * should be added for platforms that support low power modes
+ * beyond Package C10 state.
+ *
+ * The SLP_S0_RESIDENCY counter counts in steps of 100 us, so this
+ * function populates the out parameter @data with the multiplied
+ * (microsecond) value.
+ *
+ * Return: an error code or 0 on success.
+ */
+int intel_pmc_slp_s0_counter_read(u32 *data)
+{
+	struct pmc_dev *pmcdev = &pmc;
+	u32 value;
+
+	if (!pmcdev->has_slp_s0_res)
+		return -EACCES;
+
+	value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+	*data = pmc_core_adjust_slp_s0_step(value);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmcdev = s->private;
+	u32 counter_val;
+
+	counter_val = pmc_core_reg_read(pmcdev,
+					SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+	seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
+
+	return 0;
+}
+
+static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_core_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_dev_state_ops = {
+	.open           = pmc_core_dev_state_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+	debugfs_remove_recursive(pmcdev->dbgfs_dir);
+}
+
+static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+	struct dentry *dir, *file;
+
+	dir = debugfs_create_dir("pmc_core", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	pmcdev->dbgfs_dir = dir;
+	file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
+				   dir, pmcdev, &pmc_core_dev_state_ops);
+
+	if (!file) {
+		pmc_core_dbgfs_unregister(pmcdev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+	return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct x86_cpu_id intel_pmc_core_ids[] = {
+	{ X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
+		(kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+	{ X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
+		(kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+	{}
+};
+
+static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct device *ptr_dev = &dev->dev;
+	struct pmc_dev *pmcdev = &pmc;
+	const struct x86_cpu_id *cpu_id;
+	int err;
+
+	cpu_id = x86_match_cpu(intel_pmc_core_ids);
+	if (!cpu_id) {
+		dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
+		return -EINVAL;
+	}
+
+	err = pcim_enable_device(dev);
+	if (err < 0) {
+		dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
+		return err;
+	}
+
+	err = pci_read_config_dword(dev,
+				    SPT_PMC_BASE_ADDR_OFFSET,
+				    &pmcdev->base_addr);
+	if (err < 0) {
+		dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
+		return err;
+	}
+	dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
+
+	pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
+					      pmcdev->base_addr,
+					      SPT_PMC_MMIO_REG_LEN);
+	if (!pmcdev->regbase) {
+		dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
+		return -ENOMEM;
+	}
+
+	err = pmc_core_dbgfs_register(pmcdev);
+	if (err < 0) {
+		dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
+		return err;
+	}
+
+	pmc.has_slp_s0_res = true;
+	return 0;
+}
+
+static struct pci_driver intel_pmc_core_driver = {
+	.name = "intel_pmc_core",
+	.id_table = pmc_pci_ids,
+	.probe = pmc_core_probe,
+};
+
+builtin_pci_driver(intel_pmc_core_driver);
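
SPT_PMC_SLP_S0_RES_COUNTER_STEP is 0x64 (decimal 100) and each counter step represents 100 us, so a raw register value of N is reported as N * 100 microseconds, both through the slp_s0_residency_usec debugfs file and through the exported helper. A hypothetical in-kernel caller might look like the sketch below (assuming intel_pmc_slp_s0_counter_read() is declared in <asm/pmc_core.h>, as included above):

#include <linux/kernel.h>
#include <asm/pmc_core.h>

/* Illustrative only: log SLP_S0 residency if the PMC core driver probed. */
static void example_log_slp_s0_residency(void)
{
	u32 residency_us;

	if (intel_pmc_slp_s0_counter_read(&residency_us))
		pr_debug("SLP_S0 residency not available\n");
	else
		pr_info("SLP_S0 residency: %u us\n", residency_us);
}
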
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
new file mode 100644
index 0000000..a9dadaf
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -0,0 +1,51 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef PMC_CORE_H
+#define PMC_CORE_H
+
+/* Sunrise Point Power Management Controller PCI Device ID */
+#define SPT_PMC_PCI_DEVICE_ID			0x9d21
+#define SPT_PMC_BASE_ADDR_OFFSET		0x48
+#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET	0x13c
+#define SPT_PMC_MMIO_REG_LEN			0x100
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP		0x64
+
+/**
+ * struct pmc_dev - pmc device structure
+ * @base_addr:		contains pmc base address
+ * @regbase:		pointer to io-remapped memory location
+ * @dbgfs_dir:		path to debug fs interface
+ * @has_slp_s0_res:	flag indicating whether the SLP_S0_RESIDENCY
+ *			counter is available on a particular platform.
+ *
+ * pmc_dev contains info about power management controller device.
+ */
+struct pmc_dev {
+	u32 base_addr;
+	void __iomem *regbase;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+	bool has_slp_s0_res;
+};
+
+#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index a695a43..0d4c380 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -25,7 +25,7 @@
 
 struct telemetry_core_config {
 	struct telemetry_plt_config *plt_config;
-	struct telemetry_core_ops *telem_ops;
+	const struct telemetry_core_ops *telem_ops;
 };
 
 static struct telemetry_core_config telm_core_conf;
@@ -95,7 +95,7 @@
 	return 0;
 }
 
-static struct telemetry_core_ops telm_defpltops = {
+static const struct telemetry_core_ops telm_defpltops = {
 	.set_sampling_period = telemetry_def_set_sampling_period,
 	.get_sampling_period = telemetry_def_get_sampling_period,
 	.get_trace_verbosity = telemetry_def_get_trace_verbosity,
@@ -332,7 +332,7 @@
  *
  * Return: 0 success, < 0 for failure
  */
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
 			  struct telemetry_plt_config *pltconfig)
 {
 	if (ops)
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 781bd10..09c84a2 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1081,7 +1081,7 @@
 	return ret;
 }
 
-static struct telemetry_core_ops telm_pltops = {
+static const struct telemetry_core_ops telm_pltops = {
 	.get_trace_verbosity = telemetry_plt_get_trace_verbosity,
 	.set_trace_verbosity = telemetry_plt_set_trace_verbosity,
 	.set_sampling_period = telemetry_plt_set_sampling_period,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e9caa34..1dba359 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1446,6 +1446,9 @@
 {
 	unsigned int i, result, bitmask, handle;
 
+	if (!handles)
+		return;
+
 	/* get enabled events and disable them */
 	sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
 	sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 700e0fa..6505c97 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -24,6 +24,8 @@
 #define SURFACE_BUTTON_OBJ_NAME		"VGBI"
 #define SURFACE_BUTTON_DEVICE_NAME	"Surface Pro 3/4 Buttons"
 
+#define SURFACE_BUTTON_NOTIFY_TABLET_MODE	0xc8
+
 #define SURFACE_BUTTON_NOTIFY_PRESS_POWER	0xc6
 #define SURFACE_BUTTON_NOTIFY_RELEASE_POWER	0xc7
 
@@ -33,7 +35,7 @@
 #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP	0xc0
 #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP	0xc1
 
-#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN	0xc2
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN		0xc2
 #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN	0xc3
 
 ACPI_MODULE_NAME("surface pro 3 button");
@@ -105,9 +107,12 @@
 	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
 		key_code = KEY_VOLUMEDOWN;
 		break;
+	case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
+		dev_warn_once(&device->dev, "Tablet mode is not supported\n");
+		break;
 	default:
 		dev_info_ratelimited(&device->dev,
-				  "Unsupported event [0x%x]\n", event);
+				     "Unsupported event [0x%x]\n", event);
 		break;
 	}
 	input = button->input;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9255ff3..c3bfa1fe 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5001,6 +5001,8 @@
 	return 0;
 }
 
+static int kbdlight_set_level_and_update(int level);
+
 static int kbdlight_get_level(void)
 {
 	int status = 0;
@@ -5068,7 +5070,7 @@
 			container_of(work, struct tpacpi_led_classdev, work);
 
 	if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
-		kbdlight_set_level(data->new_state);
+		kbdlight_set_level_and_update(data->new_state);
 }
 
 static void kbdlight_sysfs_set(struct led_classdev *led_cdev,
@@ -5099,7 +5101,6 @@
 		.max_brightness	= 2,
 		.brightness_set	= &kbdlight_sysfs_set,
 		.brightness_get	= &kbdlight_sysfs_get,
-		.flags		= LED_CORE_SUSPENDRESUME,
 	}
 };
 
@@ -5137,6 +5138,20 @@
 	flush_workqueue(tpacpi_wq);
 }
 
+static int kbdlight_set_level_and_update(int level)
+{
+	int ret;
+	struct led_classdev *led_cdev;
+
+	ret = kbdlight_set_level(level);
+	led_cdev = &tpacpi_led_kbdlight.led_classdev;
+
+	if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
+		led_cdev->brightness = level;
+
+	return ret;
+}
+
 static int kbdlight_read(struct seq_file *m)
 {
 	int level;
@@ -5177,13 +5192,35 @@
 	if (level == -1)
 		return -EINVAL;
 
-	return kbdlight_set_level(level);
+	return kbdlight_set_level_and_update(level);
+}
+
+static void kbdlight_suspend(void)
+{
+	struct led_classdev *led_cdev;
+
+	if (!tp_features.kbdlight)
+		return;
+
+	led_cdev = &tpacpi_led_kbdlight.led_classdev;
+	led_update_brightness(led_cdev);
+	led_classdev_suspend(led_cdev);
+}
+
+static void kbdlight_resume(void)
+{
+	if (!tp_features.kbdlight)
+		return;
+
+	led_classdev_resume(&tpacpi_led_kbdlight.led_classdev);
 }
 
 static struct ibm_struct kbdlight_driver_data = {
 	.name = "kbdlight",
 	.read = kbdlight_read,
 	.write = kbdlight_write,
+	.suspend = kbdlight_suspend,
+	.resume = kbdlight_resume,
 	.exit = kbdlight_exit,
 };
 
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 0b1ac6b..d637c93 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -211,6 +211,7 @@
 		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
 		if (IS_ERR(sysoff)) {
 			err = PTR_ERR(sysoff);
+			sysoff = NULL;
 			break;
 		}
 		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 680fbc7..dba3843 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -75,6 +75,7 @@
 
 	for (i = 0; i < chip->npwm; i++) {
 		struct pwm_device *pwm = &chip->pwms[i];
+
 		radix_tree_delete(&pwm_tree, pwm->pwm);
 	}
 
@@ -128,13 +129,6 @@
 	set_bit(PWMF_REQUESTED, &pwm->flags);
 	pwm->label = label;
 
-	/*
-	 * FIXME: This should be removed once all PWM users properly make use
-	 * of struct pwm_args to initialize the PWM device. As long as this is
-	 * here, the PWM state and hardware state can get out of sync.
-	 */
-	pwm_apply_args(pwm);
-
 	return 0;
 }
 
@@ -233,6 +227,19 @@
 }
 EXPORT_SYMBOL_GPL(pwm_get_chip_data);
 
+static bool pwm_ops_check(const struct pwm_ops *ops)
+{
+	/* driver supports legacy, non-atomic operation */
+	if (ops->config && ops->enable && ops->disable)
+		return true;
+
+	/* driver supports atomic operation */
+	if (ops->apply)
+		return true;
+
+	return false;
+}
+
 /**
  * pwmchip_add_with_polarity() - register a new PWM chip
  * @chip: the PWM chip to add
@@ -251,8 +258,10 @@
 	unsigned int i;
 	int ret;
 
-	if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
-	    !chip->ops->enable || !chip->ops->disable || !chip->npwm)
+	if (!chip || !chip->dev || !chip->ops || !chip->npwm)
+		return -EINVAL;
+
+	if (!pwm_ops_check(chip->ops))
 		return -EINVAL;
 
 	mutex_lock(&pwm_lock);
@@ -261,7 +270,7 @@
 	if (ret < 0)
 		goto out;
 
-	chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL);
+	chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
 	if (!chip->pwms) {
 		ret = -ENOMEM;
 		goto out;
@@ -275,8 +284,10 @@
 		pwm->chip = chip;
 		pwm->pwm = chip->base + i;
 		pwm->hwpwm = i;
-		pwm->polarity = polarity;
-		mutex_init(&pwm->lock);
+		pwm->state.polarity = polarity;
+
+		if (chip->ops->get_state)
+			chip->ops->get_state(chip, pwm, &pwm->state);
 
 		radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
 	}
@@ -436,107 +447,138 @@
 EXPORT_SYMBOL_GPL(pwm_free);
 
 /**
- * pwm_config() - change a PWM device configuration
+ * pwm_apply_state() - atomically apply a new state to a PWM device
  * @pwm: PWM device
- * @duty_ns: "on" time (in nanoseconds)
- * @period_ns: duration (in nanoseconds) of one cycle
- *
- * Returns: 0 on success or a negative error code on failure.
+ * @state: new state to apply. This can be adjusted by the PWM driver
+ *	   if the requested config is not achievable, for example,
+ *	   ->duty_cycle and ->period might be approximated.
  */
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
 {
 	int err;
 
-	if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
-		return -EINVAL;
-
-	err = pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
-	if (err)
-		return err;
-
-	pwm->duty_cycle = duty_ns;
-	pwm->period = period_ns;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_config);
-
-/**
- * pwm_set_polarity() - configure the polarity of a PWM signal
- * @pwm: PWM device
- * @polarity: new polarity of the PWM signal
- *
- * Note that the polarity cannot be configured while the PWM device is
- * enabled.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
-{
-	int err;
-
-	if (!pwm || !pwm->chip->ops)
-		return -EINVAL;
-
-	if (!pwm->chip->ops->set_polarity)
-		return -ENOSYS;
-
-	mutex_lock(&pwm->lock);
-
-	if (pwm_is_enabled(pwm)) {
-		err = -EBUSY;
-		goto unlock;
-	}
-
-	err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
-	if (err)
-		goto unlock;
-
-	pwm->polarity = polarity;
-
-unlock:
-	mutex_unlock(&pwm->lock);
-	return err;
-}
-EXPORT_SYMBOL_GPL(pwm_set_polarity);
-
-/**
- * pwm_enable() - start a PWM output toggling
- * @pwm: PWM device
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_enable(struct pwm_device *pwm)
-{
-	int err = 0;
-
 	if (!pwm)
 		return -EINVAL;
 
-	mutex_lock(&pwm->lock);
+	if (!memcmp(state, &pwm->state, sizeof(*state)))
+		return 0;
 
-	if (!test_and_set_bit(PWMF_ENABLED, &pwm->flags)) {
-		err = pwm->chip->ops->enable(pwm->chip, pwm);
+	if (pwm->chip->ops->apply) {
+		err = pwm->chip->ops->apply(pwm->chip, pwm, state);
 		if (err)
-			clear_bit(PWMF_ENABLED, &pwm->flags);
+			return err;
+
+		pwm->state = *state;
+	} else {
+		/*
+		 * FIXME: restore the initial state in case of error.
+		 */
+		if (state->polarity != pwm->state.polarity) {
+			if (!pwm->chip->ops->set_polarity)
+				return -ENOTSUPP;
+
+			/*
+			 * Changing the polarity of a running PWM is
+			 * only allowed when the PWM driver implements
+			 * ->apply().
+			 */
+			if (pwm->state.enabled) {
+				pwm->chip->ops->disable(pwm->chip, pwm);
+				pwm->state.enabled = false;
+			}
+
+			err = pwm->chip->ops->set_polarity(pwm->chip, pwm,
+							   state->polarity);
+			if (err)
+				return err;
+
+			pwm->state.polarity = state->polarity;
+		}
+
+		if (state->period != pwm->state.period ||
+		    state->duty_cycle != pwm->state.duty_cycle) {
+			err = pwm->chip->ops->config(pwm->chip, pwm,
+						     state->duty_cycle,
+						     state->period);
+			if (err)
+				return err;
+
+			pwm->state.duty_cycle = state->duty_cycle;
+			pwm->state.period = state->period;
+		}
+
+		if (state->enabled != pwm->state.enabled) {
+			if (state->enabled) {
+				err = pwm->chip->ops->enable(pwm->chip, pwm);
+				if (err)
+					return err;
+			} else {
+				pwm->chip->ops->disable(pwm->chip, pwm);
+			}
+
+			pwm->state.enabled = state->enabled;
+		}
 	}
 
-	mutex_unlock(&pwm->lock);
-
-	return err;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(pwm_enable);
+EXPORT_SYMBOL_GPL(pwm_apply_state);
 
 /**
- * pwm_disable() - stop a PWM output toggling
+ * pwm_adjust_config() - adjust the current PWM config to the PWM arguments
  * @pwm: PWM device
+ *
+ * This function will adjust the PWM config to the PWM arguments provided
+ * by the DT or PWM lookup table. This is particularly useful to adapt
+ * the bootloader config to the Linux one.
  */
-void pwm_disable(struct pwm_device *pwm)
+int pwm_adjust_config(struct pwm_device *pwm)
 {
-	if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags))
-		pwm->chip->ops->disable(pwm->chip, pwm);
+	struct pwm_state state;
+	struct pwm_args pargs;
+
+	pwm_get_args(pwm, &pargs);
+	pwm_get_state(pwm, &state);
+
+	/*
+	 * If the current period is zero it means that either the PWM driver
+	 * does not support initial state retrieval or the PWM has not yet
+	 * been configured.
+	 *
+	 * In either case, we setup the new period and polarity, and assign a
+	 * duty cycle of 0.
+	 */
+	if (!state.period) {
+		state.duty_cycle = 0;
+		state.period = pargs.period;
+		state.polarity = pargs.polarity;
+
+		return pwm_apply_state(pwm, &state);
+	}
+
+	/*
+	 * Adjust the PWM duty cycle/period based on the period value provided
+	 * in PWM args.
+	 */
+	if (pargs.period != state.period) {
+		u64 dutycycle = (u64)state.duty_cycle * pargs.period;
+
+		do_div(dutycycle, state.period);
+		state.duty_cycle = dutycycle;
+		state.period = pargs.period;
+	}
+
+	/*
+	 * If the polarity changed, we should also change the duty cycle.
+	 */
+	if (pargs.polarity != state.polarity) {
+		state.polarity = pargs.polarity;
+		state.duty_cycle = state.period - state.duty_cycle;
+	}
+
+	return pwm_apply_state(pwm, &state);
 }
-EXPORT_SYMBOL_GPL(pwm_disable);
+EXPORT_SYMBOL_GPL(pwm_adjust_config);
 
 static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
 {
@@ -754,13 +796,13 @@
 	if (!chip)
 		goto out;
 
-	pwm->args.period = chosen->period;
-	pwm->args.polarity = chosen->polarity;
-
 	pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id);
 	if (IS_ERR(pwm))
 		goto out;
 
+	pwm->args.period = chosen->period;
+	pwm->args.polarity = chosen->polarity;
+
 out:
 	mutex_unlock(&pwm_lookup_lock);
 	return pwm;
@@ -907,15 +949,23 @@
 
 	for (i = 0; i < chip->npwm; i++) {
 		struct pwm_device *pwm = &chip->pwms[i];
+		struct pwm_state state;
+
+		pwm_get_state(pwm, &state);
 
 		seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
 
 		if (test_bit(PWMF_REQUESTED, &pwm->flags))
 			seq_puts(s, " requested");
 
-		if (pwm_is_enabled(pwm))
+		if (state.enabled)
 			seq_puts(s, " enabled");
 
+		seq_printf(s, " period: %u ns", state.period);
+		seq_printf(s, " duty: %u ns", state.duty_cycle);
+		seq_printf(s, " polarity: %s",
+			   state.polarity ? "inverse" : "normal");
+
 		seq_puts(s, "\n");
 	}
 }
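
A minimal consumer sketch of the atomic API introduced above (illustrative only; the 1 ms period and 50% duty cycle are arbitrary): read the current state, adjust it, and hand it back to the core in one pwm_apply_state() call.

#include <linux/pwm.h>

/* Assumes "pwm" was obtained via pwm_get()/devm_pwm_get(). */
static int example_pwm_50_percent(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */
	state.period = 1000000;		/* 1 ms, in nanoseconds */
	state.duty_cycle = 500000;	/* 50% duty, in nanoseconds */
	state.polarity = PWM_POLARITY_NORMAL;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}

Consumers that want the DT or lookup-table arguments folded into the current state can call pwm_adjust_config(), added above, after requesting the PWM.
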
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 7101c70..bd0ebd0 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -75,7 +75,7 @@
 		return -EINVAL;
 	}
 
-	if (pwm->period != period_ns) {
+	if (pwm_get_period(pwm) != period_ns) {
 		int clk_div;
 
 		/* changing the clk divisor, need to disable first */
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 9861fed..19dc64c 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -249,7 +249,7 @@
 			   LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
 			   LPC18XX_PWM_EVSTATEMSK_ALL);
 
-	if (pwm->polarity == PWM_POLARITY_NORMAL) {
+	if (pwm_get_polarity(pwm) == PWM_POLARITY_NORMAL) {
 		set_event = lpc18xx_pwm->period_event;
 		clear_event = lpc18xx_data->duty_event;
 		res_action = LPC18XX_PWM_RES_SET;
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index b7e6ecb..3e95090 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -192,7 +192,7 @@
 		load_value, load_value,	match_value, match_value);
 
 	omap->pdata->set_pwm(omap->dm_timer,
-			      pwm->polarity == PWM_POLARITY_INVERSED,
+			      pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED,
 			      true,
 			      PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 7b8ac06..1c85ecc 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -157,7 +157,7 @@
 		return div;
 
 	/* Let the core driver set pwm->period if disabled and duty_ns == 0 */
-	if (!test_bit(PWMF_ENABLED, &pwm->flags) && !duty_ns)
+	if (!pwm_is_enabled(pwm) && !duty_ns)
 		return 0;
 
 	rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 67af9f6..03a99a5 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -354,7 +354,8 @@
 	val = sun4i_pwm_readl(pwm, PWM_CTRL_REG);
 	for (i = 0; i < pwm->chip.npwm; i++)
 		if (!(val & BIT_CH(PWM_ACT_STATE, i)))
-			pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED;
+			pwm_set_polarity(&pwm->chip.pwms[i],
+					 PWM_POLARITY_INVERSED);
 	clk_disable_unprepare(pwm->clk);
 
 	return 0;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 9c90886..d985992 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -26,6 +26,7 @@
 struct pwm_export {
 	struct device child;
 	struct pwm_device *pwm;
+	struct mutex lock;
 };
 
 static struct pwm_export *child_to_pwm_export(struct device *child)
@@ -45,15 +46,20 @@
 			   char *buf)
 {
 	const struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_state state;
 
-	return sprintf(buf, "%u\n", pwm_get_period(pwm));
+	pwm_get_state(pwm, &state);
+
+	return sprintf(buf, "%u\n", state.period);
 }
 
 static ssize_t period_store(struct device *child,
 			    struct device_attribute *attr,
 			    const char *buf, size_t size)
 {
-	struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_export *export = child_to_pwm_export(child);
+	struct pwm_device *pwm = export->pwm;
+	struct pwm_state state;
 	unsigned int val;
 	int ret;
 
@@ -61,7 +67,11 @@
 	if (ret)
 		return ret;
 
-	ret = pwm_config(pwm, pwm_get_duty_cycle(pwm), val);
+	mutex_lock(&export->lock);
+	pwm_get_state(pwm, &state);
+	state.period = val;
+	ret = pwm_apply_state(pwm, &state);
+	mutex_unlock(&export->lock);
 
 	return ret ? : size;
 }
@@ -71,15 +81,20 @@
 			       char *buf)
 {
 	const struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_state state;
 
-	return sprintf(buf, "%u\n", pwm_get_duty_cycle(pwm));
+	pwm_get_state(pwm, &state);
+
+	return sprintf(buf, "%u\n", state.duty_cycle);
 }
 
 static ssize_t duty_cycle_store(struct device *child,
 				struct device_attribute *attr,
 				const char *buf, size_t size)
 {
-	struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_export *export = child_to_pwm_export(child);
+	struct pwm_device *pwm = export->pwm;
+	struct pwm_state state;
 	unsigned int val;
 	int ret;
 
@@ -87,7 +102,11 @@
 	if (ret)
 		return ret;
 
-	ret = pwm_config(pwm, val, pwm_get_period(pwm));
+	mutex_lock(&export->lock);
+	pwm_get_state(pwm, &state);
+	state.duty_cycle = val;
+	ret = pwm_apply_state(pwm, &state);
+	mutex_unlock(&export->lock);
 
 	return ret ? : size;
 }
@@ -97,33 +116,46 @@
 			   char *buf)
 {
 	const struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_state state;
 
-	return sprintf(buf, "%d\n", pwm_is_enabled(pwm));
+	pwm_get_state(pwm, &state);
+
+	return sprintf(buf, "%d\n", state.enabled);
 }
 
 static ssize_t enable_store(struct device *child,
 			    struct device_attribute *attr,
 			    const char *buf, size_t size)
 {
-	struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_export *export = child_to_pwm_export(child);
+	struct pwm_device *pwm = export->pwm;
+	struct pwm_state state;
 	int val, ret;
 
 	ret = kstrtoint(buf, 0, &val);
 	if (ret)
 		return ret;
 
+	mutex_lock(&export->lock);
+
+	pwm_get_state(pwm, &state);
+
 	switch (val) {
 	case 0:
-		pwm_disable(pwm);
+		state.enabled = false;
 		break;
 	case 1:
-		ret = pwm_enable(pwm);
+		state.enabled = true;
 		break;
 	default:
 		ret = -EINVAL;
-		break;
+		goto unlock;
 	}
 
+	pwm_apply_state(pwm, &state);
+
+unlock:
+	mutex_unlock(&export->lock);
 	return ret ? : size;
 }
 
@@ -133,8 +165,11 @@
 {
 	const struct pwm_device *pwm = child_to_pwm_device(child);
 	const char *polarity = "unknown";
+	struct pwm_state state;
 
-	switch (pwm_get_polarity(pwm)) {
+	pwm_get_state(pwm, &state);
+
+	switch (state.polarity) {
 	case PWM_POLARITY_NORMAL:
 		polarity = "normal";
 		break;
@@ -151,8 +186,10 @@
 			      struct device_attribute *attr,
 			      const char *buf, size_t size)
 {
-	struct pwm_device *pwm = child_to_pwm_device(child);
+	struct pwm_export *export = child_to_pwm_export(child);
+	struct pwm_device *pwm = export->pwm;
 	enum pwm_polarity polarity;
+	struct pwm_state state;
 	int ret;
 
 	if (sysfs_streq(buf, "normal"))
@@ -162,7 +199,11 @@
 	else
 		return -EINVAL;
 
-	ret = pwm_set_polarity(pwm, polarity);
+	mutex_lock(&export->lock);
+	pwm_get_state(pwm, &state);
+	state.polarity = polarity;
+	ret = pwm_apply_state(pwm, &state);
+	mutex_unlock(&export->lock);
 
 	return ret ? : size;
 }
@@ -203,6 +244,7 @@
 	}
 
 	export->pwm = pwm;
+	mutex_init(&export->lock);
 
 	export->child.release = pwm_export_release;
 	export->child.parent = parent;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b839086..bed53c4 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,7 @@
 static blk_qc_t dcssblk_make_request(struct request_queue *q,
 						struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-			 void __pmem **kaddr, pfn_t *pfn);
+			 void __pmem **kaddr, pfn_t *pfn, long size);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -884,7 +884,7 @@
 
 static long
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-			void __pmem **kaddr, pfn_t *pfn)
+			void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct dcssblk_dev_info *dev_info;
 	unsigned long offset, dev_sz;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8f90d9e..969c312 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -621,6 +621,11 @@
 #define AAC_QUIRK_SCSI_32	0x0020
 
 /*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
+/*
  *	The adapter interface specs all queues to be located in the same
  *	physically contiguous block. The host structure that defines the
 *	communication queues will assume they are each a separate physically
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a943bd2..79871f3 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@
 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
 	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
-	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
-	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 7 (Denali) */
-	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 8 */
-	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Series 9 */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
 };
 
 /**
@@ -1299,7 +1299,8 @@
 	else
 		shost->this_id = shost->max_id;
 
-	aac_intr_normal(aac, 0, 2, 0, NULL);
+	if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+		aac_intr_normal(aac, 0, 2, 0, NULL);
 
 	/*
 	 * dmb - we may need to move the setting of these parms somewhere else once
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index bbe98ec..bd20c54 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1630,6 +1630,9 @@
 	switch (cmd) {
 	case ATA_CMD_FPDMA_WRITE:
 	case ATA_CMD_FPDMA_READ:
+	case ATA_CMD_FPDMA_RECV:
+	case ATA_CMD_FPDMA_SEND:
+	case ATA_CMD_NCQ_NON_DATA:
 	return SATA_PROTOCOL_FPDMA;
 
 	case ATA_CMD_ID_ATA:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index cfd0084..b709d2b 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3169,7 +3169,10 @@
 	status = sci_io_request_construct_basic_sata(ireq);
 
 	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
+		   qc->tf.command == ATA_CMD_FPDMA_READ ||
+		   qc->tf.command == ATA_CMD_FPDMA_RECV ||
+		   qc->tf.command == ATA_CMD_FPDMA_SEND ||
+		   qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
 		fis->sector_count = qc->tag << 3;
 		ireq->tc->type.stp.ncq_tag = qc->tag;
 	}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 9c706d8..935c430 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -205,7 +205,10 @@
 	task->task_done = sas_ata_task_done;
 
 	if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-	    qc->tf.command == ATA_CMD_FPDMA_READ) {
+	    qc->tf.command == ATA_CMD_FPDMA_READ ||
+	    qc->tf.command == ATA_CMD_FPDMA_RECV ||
+	    qc->tf.command == ATA_CMD_FPDMA_SEND ||
+	    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
 		/* Need to zero out the tag libata assigned us */
 		qc->tf.nsect = 0;
 	}
@@ -548,7 +551,7 @@
 
 static struct ata_port_info sata_port_info = {
 	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
-		 ATA_FLAG_SAS_HOST,
+		 ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX,
 	.pio_mask = ATA_PIO4,
 	.mwdma_mask = ATA_MWDMA2,
 	.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6a4df5a..6bff13e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7975,13 +7975,14 @@
 		ActiveCableEventData =
 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
 		if (ActiveCableEventData->ReasonCode ==
-				MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+				MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
 			pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
 			    ioc->name, ActiveCableEventData->ReceptacleID);
 			pr_info("cannot be powered and devices connected to this active cable");
 			pr_info("will not be seen. This active cable");
 			pr_info("requires %d mW of power",
 			    ActiveCableEventData->ActiveCablePowerRequirement);
+		}
 		break;
 
 	default: /* ignore the rest */
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 83cd3ea..5b9fcff 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -429,7 +429,10 @@
 
 	if (qc) {
 		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-			qc->tf.command == ATA_CMD_FPDMA_READ) {
+		    qc->tf.command == ATA_CMD_FPDMA_READ ||
+		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
+		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
+		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
 			*tag = qc->tag;
 			return 1;
 		}
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 949198c..dc33dfa 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -280,7 +280,10 @@
 	struct ata_queued_cmd *qc = task->uldd_task;
 	if (qc) {
 		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-			qc->tf.command == ATA_CMD_FPDMA_READ) {
+		    qc->tf.command == ATA_CMD_FPDMA_READ ||
+		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
+		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
+		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
 			*tag = qc->tag;
 			return 1;
 		}
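
The same five-command test is repeated in hisi_sas, isci, libsas, mvsas and pm8001 above. Purely as an illustration (not part of this patch), the check could be factored into one helper:

#include <linux/ata.h>

/* Illustrative helper: true for tagged NCQ/FPDMA commands, matching the
 * checks added in the drivers above. */
static inline bool example_is_fpdma_cmd(u8 command)
{
	return command == ATA_CMD_FPDMA_WRITE ||
	       command == ATA_CMD_FPDMA_READ  ||
	       command == ATA_CMD_FPDMA_RECV  ||
	       command == ATA_CMD_FPDMA_SEND  ||
	       command == ATA_CMD_NCQ_NON_DATA;
}
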
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 10aa18b..67c0d5a 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -36,3 +36,12 @@
 	default n
 	---help---
 	Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
+
+if TCM_QLA2XXX
+config TCM_QLA2XXX_DEBUG
+	bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
+	default n
+	---help---
+	Say Y here to enable DEBUG mode for the TCM_QLA2XXX fabric module for
+	QLogic 24xx+ series target mode HBAs.
+	This will include code to enable the SCSI command jammer.
+endif
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8a44d15..ca39deb 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -637,8 +637,10 @@
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_unreg_sess(struct qla_tgt_sess *sess)
+static void qlt_release_session(struct kref *kref)
 {
+	struct qla_tgt_sess *sess =
+		container_of(kref, struct qla_tgt_sess, sess_kref);
 	struct scsi_qla_host *vha = sess->vha;
 
 	if (sess->se_sess)
@@ -651,8 +653,16 @@
 	INIT_WORK(&sess->free_work, qlt_free_session_done);
 	schedule_work(&sess->free_work);
 }
-EXPORT_SYMBOL(qlt_unreg_sess);
 
+void qlt_put_sess(struct qla_tgt_sess *sess)
+{
+	if (!sess)
+		return;
+
+	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+	kref_put(&sess->sess_kref, qlt_release_session);
+}
+EXPORT_SYMBOL(qlt_put_sess);
 
 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 {
@@ -857,12 +867,9 @@
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
 			    "Timeout: sess %p about to be deleted\n",
 			    sess);
-			if (sess->se_sess) {
+			if (sess->se_sess)
 				ha->tgt.tgt_ops->shutdown_sess(sess);
-				ha->tgt.tgt_ops->put_sess(sess);
-			} else {
-				qlt_unreg_sess(sess);
-			}
+			qlt_put_sess(sess);
 		} else {
 			schedule_delayed_work(&tgt->sess_del_work,
 			    sess->expires - elapsed);
@@ -917,7 +924,7 @@
 				}
 			}
 
-			kref_get(&sess->se_sess->sess_kref);
+			kref_get(&sess->sess_kref);
 			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
 						(fcport->flags & FCF_CONF_COMP_SUPPORTED));
 
@@ -947,6 +954,7 @@
 	sess->s_id = fcport->d_id;
 	sess->loop_id = fcport->loop_id;
 	sess->local = local;
+	kref_init(&sess->sess_kref);
 	INIT_LIST_HEAD(&sess->del_list_entry);
 
 	/* Under normal circumstances we want to logout from firmware when
@@ -991,7 +999,7 @@
 		 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
 		 * access across ->tgt.sess_lock reaquire.
 		 */
-		kref_get(&sess->se_sess->sess_kref);
+		kref_get(&sess->sess_kref);
 	}
 
 	return sess;
@@ -1035,7 +1043,7 @@
 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 		return;
 	} else {
-		kref_get(&sess->se_sess->sess_kref);
+		kref_get(&sess->sess_kref);
 
 		if (sess->deleted) {
 			qlt_undelete_sess(sess);
@@ -1060,7 +1068,7 @@
 		    fcport->port_name, sess->loop_id);
 		sess->local = 0;
 	}
-	ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
@@ -3817,7 +3825,7 @@
 	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
 	 */
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-	ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	return;
 
@@ -3836,7 +3844,7 @@
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-	ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
@@ -3936,13 +3944,13 @@
 	if (!cmd) {
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
-		ha->tgt.tgt_ops->put_sess(sess);
+		qlt_put_sess(sess);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		kfree(op);
 		return;
 	}
 	/*
-	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+	 * __qlt_do_work() will call qlt_put_sess() to release
 	 * the extra reference taken above by qlt_make_local_sess()
 	 */
 	__qlt_do_work(cmd);
@@ -4003,13 +4011,13 @@
 	/*
 	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
 	 */
-	kref_get(&sess->se_sess->sess_kref);
+	kref_get(&sess->sess_kref);
 
 	cmd = qlt_get_tag(vha, sess, atio);
 	if (!cmd) {
 		ql_dbg(ql_dbg_io, vha, 0x3062,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
-		ha->tgt.tgt_ops->put_sess(sess);
+		qlt_put_sess(sess);
 		return -ENOMEM;
 	}
 
@@ -5911,7 +5919,7 @@
 			goto out_term2;
 		}
 
-		kref_get(&sess->se_sess->sess_kref);
+		kref_get(&sess->sess_kref);
 	}
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5924,7 +5932,7 @@
 		goto out_term;
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 	return;
 
@@ -5935,8 +5943,7 @@
 	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	if (sess)
-		ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 }
 
@@ -5976,7 +5983,7 @@
 			goto out_term;
 		}
 
-		kref_get(&sess->se_sess->sess_kref);
+		kref_get(&sess->sess_kref);
 	}
 
 	iocb = a;
@@ -5988,14 +5995,13 @@
 	if (rc != 0)
 		goto out_term;
 
-	ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	return;
 
 out_term:
 	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
-	if (sess)
-		ha->tgt.tgt_ops->put_sess(sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d857fee..f26c5f6 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -738,7 +738,6 @@
 	struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
 						const uint8_t *);
 	void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
-	void (*put_sess)(struct qla_tgt_sess *);
 	void (*shutdown_sess)(struct qla_tgt_sess *);
 };
 
@@ -930,6 +929,7 @@
 	int generation;
 
 	struct se_session *se_sess;
+	struct kref sess_kref;
 	struct scsi_qla_host *vha;
 	struct qla_tgt *tgt;
 
@@ -1101,7 +1101,7 @@
 extern int qlt_lport_register(void *, u64, u64, u64,
 			int (*callback)(struct scsi_qla_host *, void *, u64, u64));
 extern void qlt_lport_deregister(struct scsi_qla_host *);
-extern void qlt_unreg_sess(struct qla_tgt_sess *);
+void qlt_put_sess(struct qla_tgt_sess *sess);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
 extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
 extern int __init qlt_init(void);
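
The qlt_put_sess() conversion above moves session lifetime onto a kref embedded in struct qla_tgt_sess instead of piggy-backing on se_sess->sess_kref. Reduced to a minimal illustrative sketch (the type and function names below are made up, not the driver's), the pattern is:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_sess {
	struct kref	kref;
	/* ... payload ... */
};

static void example_sess_release(struct kref *kref)
{
	struct example_sess *sess = container_of(kref, struct example_sess, kref);

	kfree(sess);	/* runs only when the last reference is dropped */
}

/* kref_init() at creation, kref_get() per additional user,
 * kref_put() when each user is done. */
static void example_sess_put(struct example_sess *sess)
{
	if (sess)
		kref_put(&sess->kref, example_sess_release);
}
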
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index c1461d2..6643f6f 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -339,22 +339,6 @@
 	qlt_free_cmd(cmd);
 }
 
-static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
-{
-	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
-	struct scsi_qla_host *vha;
-	unsigned long flags;
-
-	BUG_ON(!sess);
-	vha = sess->vha;
-
-	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
-	target_sess_cmd_list_set_waiting(se_sess);
-	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
-	return 1;
-}
-
 static void tcm_qla2xxx_close_session(struct se_session *se_sess)
 {
 	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
@@ -365,7 +349,8 @@
 	vha = sess->vha;
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
-	qlt_unreg_sess(sess);
+	target_sess_cmd_list_set_waiting(se_sess);
+	qlt_put_sess(sess);
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 }
 
@@ -457,6 +442,10 @@
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct se_session *se_sess;
 	struct qla_tgt_sess *sess;
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+	struct se_portal_group *se_tpg;
+	struct tcm_qla2xxx_tpg *tpg;
+#endif
 	int flags = TARGET_SCF_ACK_KREF;
 
 	if (bidi)
@@ -477,6 +466,15 @@
 		return -EINVAL;
 	}
 
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+	se_tpg = se_sess->se_tpg;
+	tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
+	if (unlikely(tpg->tpg_attrib.jam_host)) {
+	/* return, and don't run target_submit_cmd, discarding the command */
+		return 0;
+	}
+#endif
+
 	cmd->vha->tgt_counters.qla_core_sbt_cmd++;
 	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
 				cmd->unpacked_lun, data_length, fcp_task_attr,
@@ -758,23 +756,6 @@
 	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
 }
 
-static void tcm_qla2xxx_release_session(struct kref *kref)
-{
-	struct se_session *se_sess = container_of(kref,
-			struct se_session, sess_kref);
-
-	qlt_unreg_sess(se_sess->fabric_sess_ptr);
-}
-
-static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
-{
-	if (!sess)
-		return;
-
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
-	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
-}
-
 static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
 {
 	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
@@ -844,6 +825,9 @@
 DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
 DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
 DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+DEF_QLA_TPG_ATTRIB(jam_host);
+#endif
 
 static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
 	&tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
@@ -851,6 +835,9 @@
 	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
 	&tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
 	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+	&tcm_qla2xxx_tpg_attrib_attr_jam_host,
+#endif
 	NULL,
 };
 
@@ -1023,6 +1010,7 @@
 	tpg->tpg_attrib.demo_mode_write_protect = 1;
 	tpg->tpg_attrib.cache_dynamic_acls = 1;
 	tpg->tpg_attrib.demo_mode_login_only = 1;
+	tpg->tpg_attrib.jam_host = 0;
 
 	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
 	if (ret < 0) {
@@ -1579,7 +1567,6 @@
 	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
 	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
 	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
-	.put_sess		= tcm_qla2xxx_put_sess,
 	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
 };
 
@@ -1847,7 +1834,6 @@
 	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
 	.check_stop_free		= tcm_qla2xxx_check_stop_free,
 	.release_cmd			= tcm_qla2xxx_release_cmd,
-	.shutdown_session		= tcm_qla2xxx_shutdown_session,
 	.close_session			= tcm_qla2xxx_close_session,
 	.sess_get_index			= tcm_qla2xxx_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
@@ -1890,7 +1876,6 @@
 	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
 	.check_stop_free                = tcm_qla2xxx_check_stop_free,
 	.release_cmd			= tcm_qla2xxx_release_cmd,
-	.shutdown_session		= tcm_qla2xxx_shutdown_session,
 	.close_session			= tcm_qla2xxx_close_session,
 	.sess_get_index			= tcm_qla2xxx_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 3bbf4cb..37e026a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -34,6 +34,7 @@
 	int prod_mode_write_protect;
 	int demo_mode_login_only;
 	int fabric_prot_type;
+	int jam_host;
 };
 
 struct tcm_qla2xxx_tpg {
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index ce79de8..b1383a7 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -293,3 +293,56 @@
 	return 0;
 }
 EXPORT_SYMBOL(scsi_set_sense_information);
+
+/**
+ * scsi_set_sense_field_pointer - set the field pointer sense key
+ *		specific information in a formatted sense data buffer
+ * @buf:	Where to build sense data
+ * @buf_len:    buffer length
+ * @fp:		field pointer to be set
+ * @bp:		bit pointer to be set
+ * @cd:		command/data bit
+ *
+ * Return value:
+ *	0 on success or -EINVAL for an invalid sense buffer length
+ */
+int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
+{
+	u8 *ucp, len;
+
+	if ((buf[0] & 0x7f) == 0x72) {
+		len = buf[7];
+		ucp = (char *)scsi_sense_desc_find(buf, len + 8, 2);
+		if (!ucp) {
+			buf[7] = len + 8;
+			ucp = buf + 8 + len;
+		}
+
+		if (buf_len < len + 8)
+			/* Not enough room for info */
+			return -EINVAL;
+
+		ucp[0] = 2;
+		ucp[1] = 6;
+		ucp[4] = 0x80; /* Valid bit */
+		if (cd)
+			ucp[4] |= 0x40;
+		if (bp < 0x8)
+			ucp[4] |= 0x8 | bp;
+		put_unaligned_be16(fp, &ucp[5]);
+	} else if ((buf[0] & 0x7f) == 0x70) {
+		len = buf[7];
+		if (len < 18)
+			buf[7] = 18;
+
+		buf[15] = 0x80;
+		if (cd)
+			buf[15] |= 0x40;
+		if (bp < 0x8)
+			buf[15] |= 0x8 | bp;
+		put_unaligned_be16(fp, &buf[16]);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(scsi_set_sense_field_pointer);
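
A minimal usage sketch of the new helper (a hypothetical caller, not part of this patch; it assumes the buffer was first filled by scsi_build_sense_buffer() and that the usual scsi headers are included; the field/bit values are purely illustrative):

	u8 sense[SCSI_SENSE_BUFFERSIZE];

	/* Build ILLEGAL REQUEST / INVALID FIELD IN CDB sense data... */
	scsi_build_sense_buffer(0, sense, ILLEGAL_REQUEST, 0x24, 0x00);
	/* ...then point at byte 2, bit 5 of the CDB (cd = true => CDB error). */
	scsi_set_sense_field_pointer(sense, sizeof(sense), 2, 5, true);
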
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 984ddcb..a8b610e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -452,7 +452,7 @@
  *	When a deferred error is detected the current command has
  *	not been executed and needs retrying.
  */
-static int scsi_check_sense(struct scsi_cmnd *scmd)
+int scsi_check_sense(struct scsi_cmnd *scmd)
 {
 	struct scsi_device *sdev = scmd->device;
 	struct scsi_sense_hdr sshdr;
@@ -602,6 +602,7 @@
 		return SUCCESS;
 	}
 }
+EXPORT_SYMBOL_GPL(scsi_check_sense);
 
 static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
 {
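
With scsi_check_sense() exported, code outside the midlayer error handler can reuse its sense-key policy. A hedged sketch of a hypothetical caller (scmd is an already-completed struct scsi_cmnd *; the surrounding requeue/recovery logic is assumed, not shown here):

	switch (scsi_check_sense(scmd)) {
	case NEEDS_RETRY:
	case ADD_TO_MLQUEUE:
		/* transient condition: requeue and retry later */
		break;
	case SUCCESS:
		/* sense data says the command can be completed as-is */
		break;
	default:
		/* FAILED: fall back to the driver's own recovery */
		break;
	}
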
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2e332a..c71344a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -821,9 +821,12 @@
 	}
 
 	/*
-	 * If we finished all bytes in the request we are done now.
+	 * special case: failed zero length commands always need to
+	 * drop down into the retry code. Otherwise, if we finished
+	 * all bytes in the request we are done now.
 	 */
-	if (!scsi_end_request(req, error, good_bytes, 0))
+	if (!(blk_rq_bytes(req) == 0 && error) &&
+	    !scsi_end_request(req, error, good_bytes, 0))
 		return;
 
 	/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 428c03e..f459dff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1398,11 +1398,15 @@
  **/
 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
 {
-	struct scsi_disk *sdkp = scsi_disk(disk);
-	struct scsi_device *sdp = sdkp->device;
+	struct scsi_disk *sdkp = scsi_disk_get(disk);
+	struct scsi_device *sdp;
 	struct scsi_sense_hdr *sshdr = NULL;
 	int retval;
 
+	if (!sdkp)
+		return 0;
+
+	sdp = sdkp->device;
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
 
 	/*
@@ -1459,6 +1463,7 @@
 	kfree(sshdr);
 	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
 	sdp->changed = 0;
+	scsi_disk_put(sdkp);
 	return retval;
 }
 
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 3c3e56d..a003ba2 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -1059,7 +1059,7 @@
 	.regs = mt2701_regs,
 	.type = PWRAP_MT2701,
 	.arb_en_all = 0x3f,
-	.int_en_all = ~(BIT(31) | BIT(2)),
+	.int_en_all = ~(u32)(BIT(31) | BIT(2)),
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
 	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
 	.has_bridge = 0,
@@ -1071,7 +1071,7 @@
 	.regs = mt8135_regs,
 	.type = PWRAP_MT8135,
 	.arb_en_all = 0x1ff,
-	.int_en_all = ~(BIT(31) | BIT(1)),
+	.int_en_all = ~(u32)(BIT(31) | BIT(1)),
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
 	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
 	.has_bridge = 1,
@@ -1083,7 +1083,7 @@
 	.regs = mt8173_regs,
 	.type = PWRAP_MT8173,
 	.arb_en_all = 0x3f,
-	.int_en_all = ~(BIT(31) | BIT(1)),
+	.int_en_all = ~(u32)(BIT(31) | BIT(1)),
 	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
 	.wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
 	.has_bridge = 0,
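
The added (u32) casts keep the complement from being computed in unsigned long: BIT() expands to 1UL << n, so on 64-bit builds ~(BIT(31) | BIT(2)) has bits 32-63 set and is truncated (with a compiler warning) when stored in the u32 int_en_all field. A minimal illustration, assuming a 64-bit unsigned long:

	unsigned long wide   = ~(BIT(31) | BIT(2));       /* 0xffffffff7ffffffb */
	u32           narrow = ~(u32)(BIT(31) | BIT(2));  /* 0x7ffffffb */
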
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 9d8c84b..4b931ec 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -410,7 +410,6 @@
 config SPI_OMAP24XX
 	tristate "McSPI driver for OMAP"
 	depends on HAS_DMA
-	depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	help
 	  SPI master controller for OMAP24XX and later Multichannel SPI
@@ -432,10 +431,23 @@
 
 config SPI_ORION
 	tristate "Orion SPI master"
-	depends on PLAT_ORION || COMPILE_TEST
+	depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
 	help
 	  This enables using the SPI master controller on the Orion chips.
 
+config SPI_PIC32
+	tristate "Microchip PIC32 series SPI"
+	depends on MACH_PIC32 || COMPILE_TEST
+	help
+	  SPI driver for Microchip PIC32 SPI master controller.
+
+config SPI_PIC32_SQI
+	tristate "Microchip PIC32 Quad SPI driver"
+	depends on MACH_PIC32 || COMPILE_TEST
+	depends on HAS_DMA
+	help
+	  SPI driver for PIC32 Quad SPI controller.
+
 config SPI_PL022
 	tristate "ARM AMBA PL022 SSP controller"
 	depends on ARM_AMBA
@@ -469,7 +481,6 @@
 
 config SPI_ROCKCHIP
 	tristate "Rockchip SPI controller driver"
-	depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
 	help
 	  This selects a driver for Rockchip SPI controller.
 
@@ -569,7 +580,7 @@
 
 config SPI_ST_SSC4
 	tristate "STMicroelectronics SPI SSC-based driver"
-	depends on ARCH_STI
+	depends on ARCH_STI || COMPILE_TEST
 	help
 	  STMicroelectronics SoCs support for SPI. If you say yes to
 	  this option, support will be included for the SSC driven SPI.
@@ -656,7 +667,7 @@
 
 config SPI_XLP
 	tristate "Netlogic XLP SPI controller driver"
-	depends on CPU_XLP || COMPILE_TEST
+	depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
 	help
 	  Enable support for the SPI controller on the Netlogic XLP SoCs.
 	  Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, XLP9XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fbb255c..3c74d00 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -62,6 +62,8 @@
 obj-$(CONFIG_SPI_OMAP24XX)		+= spi-omap2-mcspi.o
 obj-$(CONFIG_SPI_TI_QSPI)		+= spi-ti-qspi.o
 obj-$(CONFIG_SPI_ORION)			+= spi-orion.o
+obj-$(CONFIG_SPI_PIC32)			+= spi-pic32.o
+obj-$(CONFIG_SPI_PIC32_SQI)		+= spi-pic32-sqi.o
 obj-$(CONFIG_SPI_PL022)			+= spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx)		+= spi-ppc4xx.o
 spi-pxa2xx-platform-objs		:= spi-pxa2xx.o spi-pxa2xx-dma.o
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index c968ab2..2b1456e 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -525,7 +525,6 @@
 	if (ret)
 		goto err_ref_clk_disable;
 
-	master->dev.parent = &pdev->dev;
 	master->dev.of_node = pdev->dev.of_node;
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
 	master->bits_per_word_mask = SPI_BPW_MASK(8);
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index cc3f938..afb5169 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -10,6 +10,7 @@
 #include "spi-bcm53xx.h"
 
 #define BCM53XXSPI_MAX_SPI_BAUD	13500000	/* 216 MHz? */
+#define BCM53XXSPI_FLASH_WINDOW	SZ_32M
 
 /* The longest observed required wait was 19 ms */
 #define BCM53XXSPI_SPE_TIMEOUT_MS	80
@@ -17,8 +18,10 @@
 struct bcm53xxspi {
 	struct bcma_device *core;
 	struct spi_master *master;
+	void __iomem *mmio_base;
 
 	size_t read_offset;
+	bool bspi;				/* Boot SPI mode with memory mapping */
 };
 
 static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset)
@@ -32,6 +35,50 @@
 	bcma_write32(b53spi->core, offset, value);
 }
 
+static void bcm53xxspi_disable_bspi(struct bcm53xxspi *b53spi)
+{
+	struct device *dev = &b53spi->core->dev;
+	unsigned long deadline;
+	u32 tmp;
+
+	if (!b53spi->bspi)
+		return;
+
+	tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL);
+	if (tmp & 0x1)
+		return;
+
+	deadline = jiffies + usecs_to_jiffies(200);
+	do {
+		tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_BUSY_STATUS);
+		if (!(tmp & 0x1)) {
+			bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL,
+					 0x1);
+			ndelay(200);
+			b53spi->bspi = false;
+			return;
+		}
+		udelay(1);
+	} while (!time_after_eq(jiffies, deadline));
+
+	dev_warn(dev, "Timeout disabling BSPI\n");
+}
+
+static void bcm53xxspi_enable_bspi(struct bcm53xxspi *b53spi)
+{
+	u32 tmp;
+
+	if (b53spi->bspi)
+		return;
+
+	tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL);
+	if (!(tmp & 0x1))
+		return;
+
+	bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL, 0x0);
+	b53spi->bspi = true;
+}
+
 static inline unsigned int bcm53xxspi_calc_timeout(size_t len)
 {
 	/* Do some magic calculation based on length and baud. Add 10% and 1. */
@@ -176,6 +223,8 @@
 	u8 *buf;
 	size_t left;
 
+	bcm53xxspi_disable_bspi(b53spi);
+
 	if (t->tx_buf) {
 		buf = (u8 *)t->tx_buf;
 		left = t->len;
@@ -206,6 +255,22 @@
 	return 0;
 }
 
+static int bcm53xxspi_flash_read(struct spi_device *spi,
+				 struct spi_flash_read_message *msg)
+{
+	struct bcm53xxspi *b53spi = spi_master_get_devdata(spi->master);
+	int ret = 0;
+
+	if (msg->from + msg->len > BCM53XXSPI_FLASH_WINDOW)
+		return -EINVAL;
+
+	bcm53xxspi_enable_bspi(b53spi);
+	memcpy_fromio(msg->buf, b53spi->mmio_base + msg->from, msg->len);
+	msg->retlen = msg->len;
+
+	return ret;
+}
+
 /**************************************************
  * BCMA
  **************************************************/
@@ -222,6 +287,7 @@
 
 static int bcm53xxspi_bcma_probe(struct bcma_device *core)
 {
+	struct device *dev = &core->dev;
 	struct bcm53xxspi *b53spi;
 	struct spi_master *master;
 	int err;
@@ -231,7 +297,7 @@
 		return -ENOTSUPP;
 	}
 
-	master = spi_alloc_master(&core->dev, sizeof(*b53spi));
+	master = spi_alloc_master(dev, sizeof(*b53spi));
 	if (!master)
 		return -ENOMEM;
 
@@ -239,11 +305,19 @@
 	b53spi->master = master;
 	b53spi->core = core;
 
+	if (core->addr_s[0])
+		b53spi->mmio_base = devm_ioremap(dev, core->addr_s[0],
+						 BCM53XXSPI_FLASH_WINDOW);
+	b53spi->bspi = true;
+	bcm53xxspi_disable_bspi(b53spi);
+
 	master->transfer_one = bcm53xxspi_transfer_one;
+	if (b53spi->mmio_base)
+		master->spi_flash_read = bcm53xxspi_flash_read;
 
 	bcma_set_drvdata(core, b53spi);
 
-	err = devm_spi_register_master(&core->dev, master);
+	err = devm_spi_register_master(dev, master);
 	if (err) {
 		spi_master_put(master);
 		bcma_set_drvdata(core, NULL);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 121a413..1c57ce6 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -19,44 +19,46 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
 
 /* Name of this driver */
 #define CDNS_SPI_NAME		"cdns-spi"
 
 /* Register offset definitions */
-#define CDNS_SPI_CR_OFFSET	0x00 /* Configuration  Register, RW */
-#define CDNS_SPI_ISR_OFFSET	0x04 /* Interrupt Status Register, RO */
-#define CDNS_SPI_IER_OFFSET	0x08 /* Interrupt Enable Register, WO */
-#define CDNS_SPI_IDR_OFFSET	0x0c /* Interrupt Disable Register, WO */
-#define CDNS_SPI_IMR_OFFSET	0x10 /* Interrupt Enabled Mask Register, RO */
-#define CDNS_SPI_ER_OFFSET	0x14 /* Enable/Disable Register, RW */
-#define CDNS_SPI_DR_OFFSET	0x18 /* Delay Register, RW */
-#define CDNS_SPI_TXD_OFFSET	0x1C /* Data Transmit Register, WO */
-#define CDNS_SPI_RXD_OFFSET	0x20 /* Data Receive Register, RO */
-#define CDNS_SPI_SICR_OFFSET	0x24 /* Slave Idle Count Register, RW */
-#define CDNS_SPI_THLD_OFFSET	0x28 /* Transmit FIFO Watermark Register,RW */
+#define CDNS_SPI_CR	0x00 /* Configuration  Register, RW */
+#define CDNS_SPI_ISR	0x04 /* Interrupt Status Register, RO */
+#define CDNS_SPI_IER	0x08 /* Interrupt Enable Register, WO */
+#define CDNS_SPI_IDR	0x0c /* Interrupt Disable Register, WO */
+#define CDNS_SPI_IMR	0x10 /* Interrupt Enabled Mask Register, RO */
+#define CDNS_SPI_ER	0x14 /* Enable/Disable Register, RW */
+#define CDNS_SPI_DR	0x18 /* Delay Register, RW */
+#define CDNS_SPI_TXD	0x1C /* Data Transmit Register, WO */
+#define CDNS_SPI_RXD	0x20 /* Data Receive Register, RO */
+#define CDNS_SPI_SICR	0x24 /* Slave Idle Count Register, RW */
+#define CDNS_SPI_THLD	0x28 /* Transmit FIFO Watermark Register,RW */
 
+#define SPI_AUTOSUSPEND_TIMEOUT		3000
 /*
  * SPI Configuration Register bit Masks
  *
  * This register contains various control bits that affect the operation
  * of the SPI controller
  */
-#define CDNS_SPI_CR_MANSTRT_MASK	0x00010000 /* Manual TX Start */
-#define CDNS_SPI_CR_CPHA_MASK		0x00000004 /* Clock Phase Control */
-#define CDNS_SPI_CR_CPOL_MASK		0x00000002 /* Clock Polarity Control */
-#define CDNS_SPI_CR_SSCTRL_MASK		0x00003C00 /* Slave Select Mask */
-#define CDNS_SPI_CR_PERI_SEL_MASK	0x00000200 /* Peripheral Select Decode */
-#define CDNS_SPI_CR_BAUD_DIV_MASK	0x00000038 /* Baud Rate Divisor Mask */
-#define CDNS_SPI_CR_MSTREN_MASK		0x00000001 /* Master Enable Mask */
-#define CDNS_SPI_CR_MANSTRTEN_MASK	0x00008000 /* Manual TX Enable Mask */
-#define CDNS_SPI_CR_SSFORCE_MASK	0x00004000 /* Manual SS Enable Mask */
-#define CDNS_SPI_CR_BAUD_DIV_4_MASK	0x00000008 /* Default Baud Div Mask */
-#define CDNS_SPI_CR_DEFAULT_MASK	(CDNS_SPI_CR_MSTREN_MASK | \
-					CDNS_SPI_CR_SSCTRL_MASK | \
-					CDNS_SPI_CR_SSFORCE_MASK | \
-					CDNS_SPI_CR_BAUD_DIV_4_MASK)
+#define CDNS_SPI_CR_MANSTRT	0x00010000 /* Manual TX Start */
+#define CDNS_SPI_CR_CPHA		0x00000004 /* Clock Phase Control */
+#define CDNS_SPI_CR_CPOL		0x00000002 /* Clock Polarity Control */
+#define CDNS_SPI_CR_SSCTRL		0x00003C00 /* Slave Select Mask */
+#define CDNS_SPI_CR_PERI_SEL	0x00000200 /* Peripheral Select Decode */
+#define CDNS_SPI_CR_BAUD_DIV	0x00000038 /* Baud Rate Divisor Mask */
+#define CDNS_SPI_CR_MSTREN		0x00000001 /* Master Enable Mask */
+#define CDNS_SPI_CR_MANSTRTEN	0x00008000 /* Manual TX Enable Mask */
+#define CDNS_SPI_CR_SSFORCE	0x00004000 /* Manual SS Enable Mask */
+#define CDNS_SPI_CR_BAUD_DIV_4	0x00000008 /* Default Baud Div Mask */
+#define CDNS_SPI_CR_DEFAULT	(CDNS_SPI_CR_MSTREN | \
+					CDNS_SPI_CR_SSCTRL | \
+					CDNS_SPI_CR_SSFORCE | \
+					CDNS_SPI_CR_BAUD_DIV_4)
 
 /*
  * SPI Configuration Register - Baud rate and slave select
@@ -77,21 +79,21 @@
  * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
  * bit definitions.
  */
-#define CDNS_SPI_IXR_TXOW_MASK	0x00000004 /* SPI TX FIFO Overwater */
-#define CDNS_SPI_IXR_MODF_MASK	0x00000002 /* SPI Mode Fault */
-#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
-#define CDNS_SPI_IXR_DEFAULT_MASK	(CDNS_SPI_IXR_TXOW_MASK | \
-					CDNS_SPI_IXR_MODF_MASK)
-#define CDNS_SPI_IXR_TXFULL_MASK	0x00000008 /* SPI TX Full */
-#define CDNS_SPI_IXR_ALL_MASK	0x0000007F /* SPI all interrupts */
+#define CDNS_SPI_IXR_TXOW	0x00000004 /* SPI TX FIFO Overwater */
+#define CDNS_SPI_IXR_MODF	0x00000002 /* SPI Mode Fault */
+#define CDNS_SPI_IXR_RXNEMTY 0x00000010 /* SPI RX FIFO Not Empty */
+#define CDNS_SPI_IXR_DEFAULT	(CDNS_SPI_IXR_TXOW | \
+					CDNS_SPI_IXR_MODF)
+#define CDNS_SPI_IXR_TXFULL	0x00000008 /* SPI TX Full */
+#define CDNS_SPI_IXR_ALL	0x0000007F /* SPI all interrupts */
 
 /*
  * SPI Enable Register bit Masks
  *
  * This register is used to enable or disable the SPI controller
  */
-#define CDNS_SPI_ER_ENABLE_MASK	0x00000001 /* SPI Enable Bit Mask */
-#define CDNS_SPI_ER_DISABLE_MASK	0x0 /* SPI Disable Bit Mask */
+#define CDNS_SPI_ER_ENABLE	0x00000001 /* SPI Enable Bit Mask */
+#define CDNS_SPI_ER_DISABLE	0x0 /* SPI Disable Bit Mask */
 
 /* SPI FIFO depth in bytes */
 #define CDNS_SPI_FIFO_DEPTH	128
@@ -149,56 +151,51 @@
  */
 static void cdns_spi_init_hw(struct cdns_spi *xspi)
 {
-	u32 ctrl_reg = CDNS_SPI_CR_DEFAULT_MASK;
+	u32 ctrl_reg = CDNS_SPI_CR_DEFAULT;
 
 	if (xspi->is_decoded_cs)
-		ctrl_reg |= CDNS_SPI_CR_PERI_SEL_MASK;
+		ctrl_reg |= CDNS_SPI_CR_PERI_SEL;
 
-	cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-		       CDNS_SPI_ER_DISABLE_MASK);
-	cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
-		       CDNS_SPI_IXR_ALL_MASK);
+	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+	cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_ALL);
 
 	/* Clear the RX FIFO */
-	while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
-	       CDNS_SPI_IXR_RXNEMTY_MASK)
-		cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+	while (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_RXNEMTY)
+		cdns_spi_read(xspi, CDNS_SPI_RXD);
 
-	cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
-		       CDNS_SPI_IXR_ALL_MASK);
-	cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
-	cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-		       CDNS_SPI_ER_ENABLE_MASK);
+	cdns_spi_write(xspi, CDNS_SPI_ISR, CDNS_SPI_IXR_ALL);
+	cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
+	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
 }
 
 /**
  * cdns_spi_chipselect - Select or deselect the chip select line
  * @spi:	Pointer to the spi_device structure
- * @is_on:	Select(0) or deselect (1) the chip select line
+ * @is_high:	Select(0) or deselect (1) the chip select line
  */
 static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
 {
 	struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
 	u32 ctrl_reg;
 
-	ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+	ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
 
 	if (is_high) {
 		/* Deselect the slave */
-		ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
+		ctrl_reg |= CDNS_SPI_CR_SSCTRL;
 	} else {
 		/* Select the slave */
-		ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
+		ctrl_reg &= ~CDNS_SPI_CR_SSCTRL;
 		if (!(xspi->is_decoded_cs))
 			ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
 				     CDNS_SPI_SS_SHIFT) &
-				     CDNS_SPI_CR_SSCTRL_MASK;
+				     CDNS_SPI_CR_SSCTRL;
 		else
 			ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
-				     CDNS_SPI_CR_SSCTRL_MASK;
+				     CDNS_SPI_CR_SSCTRL;
 	}
 
-	cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+	cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
 }
 
 /**
@@ -212,14 +209,15 @@
 	struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
 	u32 ctrl_reg, new_ctrl_reg;
 
-	new_ctrl_reg = ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+	new_ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+	ctrl_reg = new_ctrl_reg;
 
 	/* Set the SPI clock phase and clock polarity */
-	new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
+	new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA | CDNS_SPI_CR_CPOL);
 	if (spi->mode & SPI_CPHA)
-		new_ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
+		new_ctrl_reg |= CDNS_SPI_CR_CPHA;
 	if (spi->mode & SPI_CPOL)
-		new_ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
+		new_ctrl_reg |= CDNS_SPI_CR_CPOL;
 
 	if (new_ctrl_reg != ctrl_reg) {
 		/*
@@ -228,11 +226,9 @@
 		 * polarity as it will cause the SPI slave to see spurious clock
 		 * transitions. To workaround the issue toggle the ER register.
 		 */
-		cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-				   CDNS_SPI_ER_DISABLE_MASK);
-		cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, new_ctrl_reg);
-		cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-				   CDNS_SPI_ER_ENABLE_MASK);
+		cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+		cdns_spi_write(xspi, CDNS_SPI_CR, new_ctrl_reg);
+		cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
 	}
 }
 
@@ -251,7 +247,7 @@
  * controller.
  */
 static void cdns_spi_config_clock_freq(struct spi_device *spi,
-				  struct spi_transfer *transfer)
+				       struct spi_transfer *transfer)
 {
 	struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
 	u32 ctrl_reg, baud_rate_val;
@@ -259,7 +255,7 @@
 
 	frequency = clk_get_rate(xspi->ref_clk);
 
-	ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+	ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
 
 	/* Set the clock frequency */
 	if (xspi->speed_hz != transfer->speed_hz) {
@@ -269,12 +265,12 @@
 		       (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
 			baud_rate_val++;
 
-		ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
+		ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV;
 		ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
 
 		xspi->speed_hz = frequency / (2 << baud_rate_val);
 	}
-	cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+	cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
 }
 
 /**
@@ -313,10 +309,9 @@
 	while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
 	       (xspi->tx_bytes > 0)) {
 		if (xspi->txbuf)
-			cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
-				       *xspi->txbuf++);
+			cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
 		else
-			cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
+			cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
 
 		xspi->tx_bytes--;
 		trans_cnt++;
@@ -344,19 +339,18 @@
 	u32 intr_status, status;
 
 	status = IRQ_NONE;
-	intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
-	cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
+	intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
+	cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status);
 
-	if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
+	if (intr_status & CDNS_SPI_IXR_MODF) {
 		/* Indicate that transfer is completed, the SPI subsystem will
 		 * identify the error as the remaining bytes to be
 		 * transferred is non-zero
 		 */
-		cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
-			       CDNS_SPI_IXR_DEFAULT_MASK);
+		cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
 		spi_finalize_current_transfer(master);
 		status = IRQ_HANDLED;
-	} else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
+	} else if (intr_status & CDNS_SPI_IXR_TXOW) {
 		unsigned long trans_cnt;
 
 		trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
@@ -365,7 +359,7 @@
 		while (trans_cnt) {
 			u8 data;
 
-			data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+			data = cdns_spi_read(xspi, CDNS_SPI_RXD);
 			if (xspi->rxbuf)
 				*xspi->rxbuf++ = data;
 
@@ -378,8 +372,8 @@
 			cdns_spi_fill_tx_fifo(xspi);
 		} else {
 			/* Transfer is completed */
-			cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
-				       CDNS_SPI_IXR_DEFAULT_MASK);
+			cdns_spi_write(xspi, CDNS_SPI_IDR,
+				       CDNS_SPI_IXR_DEFAULT);
 			spi_finalize_current_transfer(master);
 		}
 		status = IRQ_HANDLED;
@@ -387,6 +381,7 @@
 
 	return status;
 }
+
 static int cdns_prepare_message(struct spi_master *master,
 				struct spi_message *msg)
 {
@@ -421,8 +416,7 @@
 
 	cdns_spi_fill_tx_fifo(xspi);
 
-	cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
-		       CDNS_SPI_IXR_DEFAULT_MASK);
+	cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
 	return transfer->len;
 }
 
@@ -439,8 +433,7 @@
 {
 	struct cdns_spi *xspi = spi_master_get_devdata(master);
 
-	cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-		       CDNS_SPI_ER_ENABLE_MASK);
+	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
 
 	return 0;
 }
@@ -458,8 +451,7 @@
 {
 	struct cdns_spi *xspi = spi_master_get_devdata(master);
 
-	cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-		       CDNS_SPI_ER_DISABLE_MASK);
+	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
 
 	return 0;
 }
@@ -481,7 +473,7 @@
 	u32 num_cs;
 
 	master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
-	if (master == NULL)
+	if (!master)
 		return -ENOMEM;
 
 	xspi = spi_master_get_devdata(master);
@@ -521,6 +513,11 @@
 		goto clk_dis_apb;
 	}
 
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_set_active(&pdev->dev);
+
 	ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
 	if (ret < 0)
 		master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -535,11 +532,14 @@
 	/* SPI controller initializations */
 	cdns_spi_init_hw(xspi);
 
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
 		ret = -ENXIO;
 		dev_err(&pdev->dev, "irq number is invalid\n");
-		goto remove_master;
+		goto clk_dis_all;
 	}
 
 	ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
@@ -547,7 +547,7 @@
 	if (ret != 0) {
 		ret = -ENXIO;
 		dev_err(&pdev->dev, "request_irq failed\n");
-		goto remove_master;
+		goto clk_dis_all;
 	}
 
 	master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
@@ -555,6 +555,7 @@
 	master->transfer_one = cdns_transfer_one;
 	master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
 	master->set_cs = cdns_spi_chipselect;
+	master->auto_runtime_pm = true;
 	master->mode_bits = SPI_CPOL | SPI_CPHA;
 
 	/* Set to default valid value */
@@ -572,6 +573,8 @@
 	return ret;
 
 clk_dis_all:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 	clk_disable_unprepare(xspi->ref_clk);
 clk_dis_apb:
 	clk_disable_unprepare(xspi->pclk);
@@ -595,11 +598,12 @@
 	struct spi_master *master = platform_get_drvdata(pdev);
 	struct cdns_spi *xspi = spi_master_get_devdata(master);
 
-	cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
-		       CDNS_SPI_ER_DISABLE_MASK);
+	cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
 
 	clk_disable_unprepare(xspi->ref_clk);
 	clk_disable_unprepare(xspi->pclk);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	spi_unregister_master(master);
 
@@ -613,21 +617,14 @@
  * This function disables the SPI controller and
  * changes the driver state to "suspend"
  *
- * Return:	Always 0
+ * Return:	0 on success and error value on error
  */
 static int __maybe_unused cdns_spi_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct spi_master *master = platform_get_drvdata(pdev);
-	struct cdns_spi *xspi = spi_master_get_devdata(master);
 
-	spi_master_suspend(master);
-
-	clk_disable_unprepare(xspi->ref_clk);
-
-	clk_disable_unprepare(xspi->pclk);
-
-	return 0;
+	return spi_master_suspend(master);
 }
 
 /**
@@ -642,8 +639,23 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct spi_master *master = platform_get_drvdata(pdev);
+
+	return spi_master_resume(master);
+}
+
+/**
+ * cdns_spi_runtime_resume - Runtime resume method for the SPI driver
+ * @dev:	Address of the platform_device structure
+ *
+ * This function enables the clocks
+ *
+ * Return:	0 on success and error value on error
+ */
+static int __maybe_unused cnds_runtime_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
 	struct cdns_spi *xspi = spi_master_get_devdata(master);
-	int ret = 0;
+	int ret;
 
 	ret = clk_prepare_enable(xspi->pclk);
 	if (ret) {
@@ -657,13 +669,33 @@
 		clk_disable(xspi->pclk);
 		return ret;
 	}
-	spi_master_resume(master);
+	return 0;
+}
+
+/**
+ * cdns_spi_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev:	Address of the platform_device structure
+ *
+ * This function disables the clocks
+ *
+ * Return:	Always 0
+ */
+static int __maybe_unused cnds_runtime_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+	clk_disable_unprepare(xspi->ref_clk);
+	clk_disable_unprepare(xspi->pclk);
 
 	return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
-			 cdns_spi_resume);
+static const struct dev_pm_ops cdns_spi_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(cnds_runtime_suspend,
+			   cnds_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume)
+};
 
 static const struct of_device_id cdns_spi_of_match[] = {
 	{ .compatible = "xlnx,zynq-spi-r1p6" },
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index fddb7a3..d36c11b 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -23,7 +23,6 @@
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
-#include <linux/edma.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
@@ -33,8 +32,6 @@
 
 #include <linux/platform_data/spi-davinci.h>
 
-#define SPI_NO_RESOURCE		((resource_size_t)-1)
-
 #define CS_DEFAULT	0xFF
 
 #define SPIFMT_PHASE_MASK	BIT(16)
@@ -130,8 +127,6 @@
 
 	struct dma_chan		*dma_rx;
 	struct dma_chan		*dma_tx;
-	int			dma_rx_chnum;
-	int			dma_tx_chnum;
 
 	struct davinci_spi_platform_data pdata;
 
@@ -797,35 +792,19 @@
 
 static int davinci_spi_request_dma(struct davinci_spi *dspi)
 {
-	dma_cap_mask_t mask;
 	struct device *sdev = dspi->bitbang.master->dev.parent;
-	int r;
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
+	dspi->dma_rx = dma_request_chan(sdev, "rx");
+	if (IS_ERR(dspi->dma_rx))
+		return PTR_ERR(dspi->dma_rx);
 
-	dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
-					   &dspi->dma_rx_chnum);
-	if (!dspi->dma_rx) {
-		dev_err(sdev, "request RX DMA channel failed\n");
-		r = -ENODEV;
-		goto rx_dma_failed;
-	}
-
-	dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
-					   &dspi->dma_tx_chnum);
-	if (!dspi->dma_tx) {
-		dev_err(sdev, "request TX DMA channel failed\n");
-		r = -ENODEV;
-		goto tx_dma_failed;
+	dspi->dma_tx = dma_request_chan(sdev, "tx");
+	if (IS_ERR(dspi->dma_tx)) {
+		dma_release_channel(dspi->dma_rx);
+		return PTR_ERR(dspi->dma_tx);
 	}
 
 	return 0;
-
-tx_dma_failed:
-	dma_release_channel(dspi->dma_rx);
-rx_dma_failed:
-	return r;
 }
 
 #if defined(CONFIG_OF)
@@ -936,8 +915,6 @@
 	struct davinci_spi *dspi;
 	struct davinci_spi_platform_data *pdata;
 	struct resource *r;
-	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
-	resource_size_t	dma_tx_chan = SPI_NO_RESOURCE;
 	int ret = 0;
 	u32 spipc0;
 
@@ -1044,27 +1021,15 @@
 		}
 	}
 
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (r)
-		dma_rx_chan = r->start;
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (r)
-		dma_tx_chan = r->start;
-
 	dspi->bitbang.txrx_bufs = davinci_spi_bufs;
-	if (dma_rx_chan != SPI_NO_RESOURCE &&
-	    dma_tx_chan != SPI_NO_RESOURCE) {
-		dspi->dma_rx_chnum = dma_rx_chan;
-		dspi->dma_tx_chnum = dma_tx_chan;
 
-		ret = davinci_spi_request_dma(dspi);
-		if (ret)
-			goto free_clk;
-
-		dev_info(&pdev->dev, "DMA: supported\n");
-		dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n",
-				&dma_rx_chan, &dma_tx_chan,
-				pdata->dma_event_q);
+	ret = davinci_spi_request_dma(dspi);
+	if (ret == -EPROBE_DEFER) {
+		goto free_clk;
+	} else if (ret) {
+		dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
+		dspi->dma_rx = NULL;
+		dspi->dma_tx = NULL;
 	}
 
 	dspi->get_rx = davinci_spi_rx_buf_u8;
@@ -1102,8 +1067,10 @@
 	return ret;
 
 free_dma:
-	dma_release_channel(dspi->dma_rx);
-	dma_release_channel(dspi->dma_tx);
+	if (dspi->dma_rx) {
+		dma_release_channel(dspi->dma_rx);
+		dma_release_channel(dspi->dma_tx);
+	}
 free_clk:
 	clk_disable_unprepare(dspi->clk);
 free_master:
@@ -1134,6 +1101,11 @@
 	clk_disable_unprepare(dspi->clk);
 	spi_master_put(master);
 
+	if (dspi->dma_rx) {
+		dma_release_channel(dspi->dma_rx);
+		dma_release_channel(dspi->dma_tx);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
index 3b7d91d..b62a99c 100644
--- a/drivers/spi/spi-dln2.c
+++ b/drivers/spi/spi-dln2.c
@@ -683,6 +683,7 @@
 	struct spi_master *master;
 	struct dln2_spi *dln2;
 	struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device *dev = &pdev->dev;
 	int ret;
 
 	master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
@@ -700,6 +701,7 @@
 	}
 
 	dln2->master = master;
+	dln2->master->dev.of_node = dev->of_node;
 	dln2->pdev = pdev;
 	dln2->port = pdata->port;
 	/* cs/mode can never be 0xff, so the first transfer will set them */
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 332ccb0..ef7db75 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -67,7 +67,7 @@
 	dws->irq = pdev->irq;
 
 	/*
-	 * Specific handling for paltforms, like dma setup,
+	 * Specific handling for platforms, like dma setup,
 	 * clock rate, FIFO depth.
 	 */
 	if (desc) {
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index bb00be8..17a6387 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -567,7 +567,7 @@
 	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
 	if (IS_ERR(txd)) {
 		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
-		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
+		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
 		msg->status = PTR_ERR(txd);
 		return;
 	}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index c1a2d74..9e9dadb 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -121,18 +121,22 @@
 
 struct fsl_dspi_devtype_data {
 	enum dspi_trans_mode trans_mode;
+	u8 max_clock_factor;
 };
 
 static const struct fsl_dspi_devtype_data vf610_data = {
 	.trans_mode = DSPI_EOQ_MODE,
+	.max_clock_factor = 2,
 };
 
 static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
 	.trans_mode = DSPI_TCFQ_MODE,
+	.max_clock_factor = 8,
 };
 
 static const struct fsl_dspi_devtype_data ls2085a_data = {
 	.trans_mode = DSPI_TCFQ_MODE,
+	.max_clock_factor = 8,
 };
 
 struct fsl_dspi {
@@ -726,6 +730,9 @@
 	}
 	clk_prepare_enable(dspi->clk);
 
+	master->max_speed_hz =
+		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+
 	init_waitqueue_head(&dspi->waitq);
 	platform_set_drvdata(pdev, master);
 
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7cb0c19..8d85a3c 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -245,7 +245,12 @@
 	if (ret)
 		return ret;
 
-	wait_for_completion(&mpc8xxx_spi->done);
+	/* Don't wait forever: the SPI bus sometimes loses interrupts. */
+	ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ);
+	if (ret == 0)
+		dev_err(mpc8xxx_spi->dev,
+			"Transaction hanging up (left %d bytes)\n",
+			mpc8xxx_spi->count);
 
 	/* disable rx ints */
 	mpc8xxx_spi_write_reg(&reg_base->mask, 0);
@@ -539,16 +544,31 @@
 	if (events & SPIE_NE) {
 		u32 rx_data, tmp;
 		u8 rx_data_8;
+		int rx_nr_bytes = 4;
+		int ret;
 
 		/* Spin until RX is done */
-		while (SPIE_RXCNT(events) < min(4, mspi->len)) {
-			cpu_relax();
-			events = mpc8xxx_spi_read_reg(&reg_base->event);
+		if (SPIE_RXCNT(events) < min(4, mspi->len)) {
+			ret = spin_event_timeout(
+				!(SPIE_RXCNT(events =
+				mpc8xxx_spi_read_reg(&reg_base->event)) <
+						min(4, mspi->len)),
+						10000, 0); /* 10 msec */
+			if (!ret)
+				dev_err(mspi->dev,
+					 "tired waiting for SPIE_RXCNT\n");
 		}
 
 		if (mspi->len >= 4) {
 			rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+		} else if (mspi->len <= 0) {
+			dev_err(mspi->dev,
+				"unexpected RX(SPIE_NE) interrupt occurred,\n"
+				"(local rxlen %d bytes, reg rxlen %d bytes)\n",
+				min(4, mspi->len), SPIE_RXCNT(events));
+			rx_nr_bytes = 0;
 		} else {
+			rx_nr_bytes = mspi->len;
 			tmp = mspi->len;
 			rx_data = 0;
 			while (tmp--) {
@@ -559,7 +579,7 @@
 			rx_data <<= (4 - mspi->len) * 8;
 		}
 
-		mspi->len -= 4;
+		mspi->len -= rx_nr_bytes;
 
 		if (mspi->rx)
 			mspi->get_rx(rx_data, mspi);
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 07e4ce8..3b17009 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -175,6 +175,7 @@
 static int octeon_spi_probe(struct platform_device *pdev)
 {
 	struct resource *res_mem;
+	void __iomem *reg_base;
 	struct spi_master *master;
 	struct octeon_spi *p;
 	int err = -ENOENT;
@@ -186,19 +187,13 @@
 	platform_set_drvdata(pdev, master);
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+	if (IS_ERR(reg_base)) {
+		err = PTR_ERR(reg_base);
+		goto fail;
+	}
 
-	if (res_mem == NULL) {
-		dev_err(&pdev->dev, "found no memory resource\n");
-		err = -ENXIO;
-		goto fail;
-	}
-	if (!devm_request_mem_region(&pdev->dev, res_mem->start,
-				     resource_size(res_mem), res_mem->name)) {
-		dev_err(&pdev->dev, "request_mem_region failed\n");
-		goto fail;
-	}
-	p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
-					     resource_size(res_mem));
+	p->register_base = (u64)reg_base;
 
 	master->num_chipselect = 4;
 	master->mode_bits = SPI_CPHA |
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8..1d237e9 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -23,7 +23,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
@@ -103,9 +102,6 @@
 	struct dma_chan *dma_tx;
 	struct dma_chan *dma_rx;
 
-	int dma_tx_sync_dev;
-	int dma_rx_sync_dev;
-
 	struct completion dma_tx_completion;
 	struct completion dma_rx_completion;
 
@@ -964,8 +960,7 @@
 	struct spi_master	*master = spi->master;
 	struct omap2_mcspi	*mcspi;
 	struct omap2_mcspi_dma	*mcspi_dma;
-	dma_cap_mask_t mask;
-	unsigned sig;
+	int ret = 0;
 
 	mcspi = spi_master_get_devdata(master);
 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
@@ -973,34 +968,25 @@
 	init_completion(&mcspi_dma->dma_rx_completion);
 	init_completion(&mcspi_dma->dma_tx_completion);
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	sig = mcspi_dma->dma_rx_sync_dev;
-
-	mcspi_dma->dma_rx =
-		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
-						 &sig, &master->dev,
-						 mcspi_dma->dma_rx_ch_name);
-	if (!mcspi_dma->dma_rx)
-		goto no_dma;
-
-	sig = mcspi_dma->dma_tx_sync_dev;
-	mcspi_dma->dma_tx =
-		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
-						 &sig, &master->dev,
-						 mcspi_dma->dma_tx_ch_name);
-
-	if (!mcspi_dma->dma_tx) {
-		dma_release_channel(mcspi_dma->dma_rx);
+	mcspi_dma->dma_rx = dma_request_chan(&master->dev,
+					     mcspi_dma->dma_rx_ch_name);
+	if (IS_ERR(mcspi_dma->dma_rx)) {
+		ret = PTR_ERR(mcspi_dma->dma_rx);
 		mcspi_dma->dma_rx = NULL;
 		goto no_dma;
 	}
 
-	return 0;
+	mcspi_dma->dma_tx = dma_request_chan(&master->dev,
+					     mcspi_dma->dma_tx_ch_name);
+	if (IS_ERR(mcspi_dma->dma_tx)) {
+		ret = PTR_ERR(mcspi_dma->dma_tx);
+		mcspi_dma->dma_tx = NULL;
+		dma_release_channel(mcspi_dma->dma_rx);
+		mcspi_dma->dma_rx = NULL;
+	}
 
 no_dma:
-	dev_warn(&spi->dev, "not using DMA for McSPI\n");
-	return -EAGAIN;
+	return ret;
 }
 
 static int omap2_mcspi_setup(struct spi_device *spi)
@@ -1039,8 +1025,9 @@
 
 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
 		ret = omap2_mcspi_request_dma(spi);
-		if (ret < 0 && ret != -EAGAIN)
-			return ret;
+		if (ret)
+			dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n",
+				 ret);
 	}
 
 	ret = pm_runtime_get_sync(mcspi->dev);
@@ -1434,42 +1421,8 @@
 	}
 
 	for (i = 0; i < master->num_chipselect; i++) {
-		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
-		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
-		struct resource *dma_res;
-
-		sprintf(dma_rx_ch_name, "rx%d", i);
-		if (!pdev->dev.of_node) {
-			dma_res =
-				platform_get_resource_byname(pdev,
-							     IORESOURCE_DMA,
-							     dma_rx_ch_name);
-			if (!dma_res) {
-				dev_dbg(&pdev->dev,
-					"cannot get DMA RX channel\n");
-				status = -ENODEV;
-				break;
-			}
-
-			mcspi->dma_channels[i].dma_rx_sync_dev =
-				dma_res->start;
-		}
-		sprintf(dma_tx_ch_name, "tx%d", i);
-		if (!pdev->dev.of_node) {
-			dma_res =
-				platform_get_resource_byname(pdev,
-							     IORESOURCE_DMA,
-							     dma_tx_ch_name);
-			if (!dma_res) {
-				dev_dbg(&pdev->dev,
-					"cannot get DMA TX channel\n");
-				status = -ENODEV;
-				break;
-			}
-
-			mcspi->dma_channels[i].dma_tx_sync_dev =
-				dma_res->start;
-		}
+		sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
+		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
 	}
 
 	if (status < 0)
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
new file mode 100644
index 0000000..ca3c8d9
--- /dev/null
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -0,0 +1,727 @@
+/*
+ * PIC32 Quad SPI controller driver.
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+/* SQI registers */
+#define PESQI_XIP_CONF1_REG	0x00
+#define PESQI_XIP_CONF2_REG	0x04
+#define PESQI_CONF_REG		0x08
+#define PESQI_CTRL_REG		0x0C
+#define PESQI_CLK_CTRL_REG	0x10
+#define PESQI_CMD_THRES_REG	0x14
+#define PESQI_INT_THRES_REG	0x18
+#define PESQI_INT_ENABLE_REG	0x1C
+#define PESQI_INT_STAT_REG	0x20
+#define PESQI_TX_DATA_REG	0x24
+#define PESQI_RX_DATA_REG	0x28
+#define PESQI_STAT1_REG		0x2C
+#define PESQI_STAT2_REG		0x30
+#define PESQI_BD_CTRL_REG	0x34
+#define PESQI_BD_CUR_ADDR_REG	0x38
+#define PESQI_BD_BASE_ADDR_REG	0x40
+#define PESQI_BD_STAT_REG	0x44
+#define PESQI_BD_POLL_CTRL_REG	0x48
+#define PESQI_BD_TX_DMA_STAT_REG	0x4C
+#define PESQI_BD_RX_DMA_STAT_REG	0x50
+#define PESQI_THRES_REG		0x54
+#define PESQI_INT_SIGEN_REG	0x58
+
+/* PESQI_CONF_REG fields */
+#define PESQI_MODE		0x7
+#define  PESQI_MODE_BOOT	0
+#define  PESQI_MODE_PIO		1
+#define  PESQI_MODE_DMA		2
+#define  PESQI_MODE_XIP		3
+#define PESQI_MODE_SHIFT	0
+#define PESQI_CPHA		BIT(3)
+#define PESQI_CPOL		BIT(4)
+#define PESQI_LSBF		BIT(5)
+#define PESQI_RXLATCH		BIT(7)
+#define PESQI_SERMODE		BIT(8)
+#define PESQI_WP_EN		BIT(9)
+#define PESQI_HOLD_EN		BIT(10)
+#define PESQI_BURST_EN		BIT(12)
+#define PESQI_CS_CTRL_HW	BIT(15)
+#define PESQI_SOFT_RESET	BIT(16)
+#define PESQI_LANES_SHIFT	20
+#define  PESQI_SINGLE_LANE	0
+#define  PESQI_DUAL_LANE	1
+#define  PESQI_QUAD_LANE	2
+#define PESQI_CSEN_SHIFT	24
+#define PESQI_EN		BIT(23)
+
+/* PESQI_CLK_CTRL_REG fields */
+#define PESQI_CLK_EN		BIT(0)
+#define PESQI_CLK_STABLE	BIT(1)
+#define PESQI_CLKDIV_SHIFT	8
+#define PESQI_CLKDIV		0xff
+
+/* PESQI_INT_THR/CMD_THR_REG */
+#define PESQI_TXTHR_MASK	0x1f
+#define PESQI_TXTHR_SHIFT	8
+#define PESQI_RXTHR_MASK	0x1f
+#define PESQI_RXTHR_SHIFT	0
+
+/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
+#define PESQI_TXEMPTY		BIT(0)
+#define PESQI_TXFULL		BIT(1)
+#define PESQI_TXTHR		BIT(2)
+#define PESQI_RXEMPTY		BIT(3)
+#define PESQI_RXFULL		BIT(4)
+#define PESQI_RXTHR		BIT(5)
+#define PESQI_BDDONE		BIT(9)  /* BD processing complete */
+#define PESQI_PKTCOMP		BIT(10) /* packet processing complete */
+#define PESQI_DMAERR		BIT(11) /* error */
+
+/* PESQI_BD_CTRL_REG */
+#define PESQI_DMA_EN		BIT(0) /* enable DMA engine */
+#define PESQI_POLL_EN		BIT(1) /* enable polling */
+#define PESQI_BDP_START		BIT(2) /* start BD processor */
+
+/* PESQI controller buffer descriptor */
+struct buf_desc {
+	u32 bd_ctrl;	/* control */
+	u32 bd_status;	/* reserved */
+	u32 bd_addr;	/* DMA buffer addr */
+	u32 bd_nextp;	/* next item in chain */
+};
+
+/* bd_ctrl */
+#define BD_BUFLEN		0x1ff
+#define BD_CBD_INT_EN		BIT(16)	/* Current BD is processed */
+#define BD_PKT_INT_EN		BIT(17) /* All BDs of PKT processed */
+#define BD_LIFM			BIT(18) /* last data of pkt */
+#define BD_LAST			BIT(19) /* end of list */
+#define BD_DATA_RECV		BIT(20) /* receive data */
+#define BD_DDR			BIT(21) /* DDR mode */
+#define BD_DUAL			BIT(22)	/* Dual SPI */
+#define BD_QUAD			BIT(23) /* Quad SPI */
+#define BD_LSBF			BIT(25)	/* LSB First */
+#define BD_STAT_CHECK		BIT(27) /* Status poll */
+#define BD_DEVSEL_SHIFT		28	/* CS */
+#define BD_CS_DEASSERT		BIT(30) /* de-assert CS after current BD */
+#define BD_EN			BIT(31) /* BD owned by H/W */
+
+/**
+ * struct ring_desc - Representation of SQI ring descriptor
+ * @list:	list element to add to free or used list.
+ * @bd:		PESQI controller buffer descriptor
+ * @bd_dma:	DMA address of PESQI controller buffer descriptor
+ * @xfer_len:	transfer length
+ */
+struct ring_desc {
+	struct list_head list;
+	struct buf_desc *bd;
+	dma_addr_t bd_dma;
+	u32 xfer_len;
+};
+
+/* Global constants */
+#define PESQI_BD_BUF_LEN_MAX	256
+#define PESQI_BD_COUNT		256 /* max 64KB data per spi message */
+
+struct pic32_sqi {
+	void __iomem		*regs;
+	struct clk		*sys_clk;
+	struct clk		*base_clk; /* drives spi clock */
+	struct spi_master	*master;
+	int			irq;
+	struct completion	xfer_done;
+	struct ring_desc	*ring;
+	void			*bd;
+	dma_addr_t		bd_dma;
+	struct list_head	bd_list_free; /* free */
+	struct list_head	bd_list_used; /* allocated */
+	struct spi_device	*cur_spi;
+	u32			cur_speed;
+	u8			cur_mode;
+};
+
+static inline void pic32_setbits(void __iomem *reg, u32 set)
+{
+	writel(readl(reg) | set, reg);
+}
+
+static inline void pic32_clrbits(void __iomem *reg, u32 clr)
+{
+	writel(readl(reg) & ~clr, reg);
+}
+
+static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
+{
+	u32 val, div;
+
+	/* div = base_clk / (2 * spi_clk) */
+	div = clk_get_rate(sqi->base_clk) / (2 * sck);
+	div &= PESQI_CLKDIV;
+
+	val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
+	/* apply new divider */
+	val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
+	val |= div << PESQI_CLKDIV_SHIFT;
+	writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
+
+	/* wait for stability */
+	return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
+				  val & PESQI_CLK_STABLE, 1, 5000);
+}
+
+static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
+{
+	u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
+
+	writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
+	/* INT_SIGEN works as interrupt-gate to INTR line */
+	writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
+{
+	writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
+	writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
+{
+	struct pic32_sqi *sqi = dev_id;
+	u32 enable, status;
+
+	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
+	status = readl(sqi->regs + PESQI_INT_STAT_REG);
+
+	/* check spurious interrupt */
+	if (!status)
+		return IRQ_NONE;
+
+	if (status & PESQI_DMAERR) {
+		enable = 0;
+		goto irq_done;
+	}
+
+	if (status & PESQI_TXTHR)
+		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);
+
+	if (status & PESQI_RXTHR)
+		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);
+
+	if (status & PESQI_BDDONE)
+		enable &= ~PESQI_BDDONE;
+
+	/* packet processing completed */
+	if (status & PESQI_PKTCOMP) {
+		/* mask all interrupts */
+		enable = 0;
+		/* complete the transaction */
+		complete(&sqi->xfer_done);
+	}
+
+irq_done:
+	/* interrupts are sticky, so mask when handled */
+	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);
+
+	return IRQ_HANDLED;
+}
+
+static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
+{
+	struct ring_desc *rdesc;
+
+	if (list_empty(&sqi->bd_list_free))
+		return NULL;
+
+	rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
+	list_del(&rdesc->list);
+	list_add_tail(&rdesc->list, &sqi->bd_list_used);
+	return rdesc;
+}
+
+static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
+{
+	list_del(&rdesc->list);
+	list_add(&rdesc->list, &sqi->bd_list_free);
+}
+
+static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
+				  struct spi_message *mesg,
+				  struct spi_transfer *xfer)
+{
+	struct spi_device *spi = mesg->spi;
+	struct scatterlist *sg, *sgl;
+	struct ring_desc *rdesc;
+	struct buf_desc *bd;
+	int nents, i;
+	u32 bd_ctrl;
+	u32 nbits;
+
+	/* Device selection */
+	bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
+
+	/* half-duplex: select transfer buffer, direction and lane */
+	if (xfer->rx_buf) {
+		bd_ctrl |= BD_DATA_RECV;
+		nbits = xfer->rx_nbits;
+		sgl = xfer->rx_sg.sgl;
+		nents = xfer->rx_sg.nents;
+	} else {
+		nbits = xfer->tx_nbits;
+		sgl = xfer->tx_sg.sgl;
+		nents = xfer->tx_sg.nents;
+	}
+
+	if (nbits & SPI_NBITS_QUAD)
+		bd_ctrl |= BD_QUAD;
+	else if (nbits & SPI_NBITS_DUAL)
+		bd_ctrl |= BD_DUAL;
+
+	/* LSB first */
+	if (spi->mode & SPI_LSB_FIRST)
+		bd_ctrl |= BD_LSBF;
+
+	/* ownership to hardware */
+	bd_ctrl |= BD_EN;
+
+	for_each_sg(sgl, sg, nents, i) {
+		/* get ring descriptor */
+		rdesc = ring_desc_get(sqi);
+		if (!rdesc)
+			break;
+
+		bd = rdesc->bd;
+
+		/* BD CTRL: length */
+		rdesc->xfer_len = sg_dma_len(sg);
+		bd->bd_ctrl = bd_ctrl;
+		bd->bd_ctrl |= rdesc->xfer_len;
+
+		/* BD STAT */
+		bd->bd_status = 0;
+
+		/* BD BUFFER ADDRESS */
+		bd->bd_addr = sg->dma_address;
+	}
+
+	return 0;
+}
+
+static int pic32_sqi_prepare_hardware(struct spi_master *master)
+{
+	struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+	/* enable spi interface */
+	pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+	/* enable spi clk */
+	pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+
+	return 0;
+}
+
+static bool pic32_sqi_can_dma(struct spi_master *master,
+			      struct spi_device *spi,
+			      struct spi_transfer *x)
+{
+	/* Do DMA irrespective of transfer size */
+	return true;
+}
+
+static int pic32_sqi_one_message(struct spi_master *master,
+				 struct spi_message *msg)
+{
+	struct spi_device *spi = msg->spi;
+	struct ring_desc *rdesc, *next;
+	struct spi_transfer *xfer;
+	struct pic32_sqi *sqi;
+	int ret = 0, mode;
+	u32 val;
+
+	sqi = spi_master_get_devdata(master);
+
+	reinit_completion(&sqi->xfer_done);
+	msg->actual_length = 0;
+
+	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
+	 * and "delay_usecs". spi_device specific speed and mode changes are
+	 * best handled at the spi chip-select switch.
+	 */
+	if (sqi->cur_spi != spi) {
+		/* set spi speed */
+		if (sqi->cur_speed != spi->max_speed_hz) {
+			sqi->cur_speed = spi->max_speed_hz;
+			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
+			if (ret)
+				dev_warn(&spi->dev, "set_clk, %d\n", ret);
+		}
+
+		/* set spi mode */
+		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
+		if (sqi->cur_mode != mode) {
+			val = readl(sqi->regs + PESQI_CONF_REG);
+			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
+			if (mode & SPI_CPOL)
+				val |= PESQI_CPOL;
+			if (mode & SPI_LSB_FIRST)
+				val |= PESQI_LSBF;
+			val |= PESQI_CPHA;
+			writel(val, sqi->regs + PESQI_CONF_REG);
+
+			sqi->cur_mode = mode;
+		}
+		sqi->cur_spi = spi;
+	}
+
+	/* prepare hardware desc-list(BD) for transfer(s) */
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
+		if (ret) {
+			dev_err(&spi->dev, "xfer %p err\n", xfer);
+			goto xfer_out;
+		}
+	}
+
+	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT on the
+	 * last element of the list.
+	 */
+	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
+	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
+			      BD_LIFM | BD_PKT_INT_EN;
+
+	/* set base address BD list for DMA engine */
+	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
+	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);
+
+	/* enable interrupt */
+	pic32_sqi_enable_int(sqi);
+
+	/* enable DMA engine */
+	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
+	writel(val, sqi->regs + PESQI_BD_CTRL_REG);
+
+	/* wait for xfer completion */
+	ret = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
+	if (ret <= 0) {
+		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
+		ret = -EIO;
+		msg->status = ret;
+	} else {
+		/* success */
+		msg->status = 0;
+		ret = 0;
+	}
+
+	/* disable DMA */
+	writel(0, sqi->regs + PESQI_BD_CTRL_REG);
+
+	pic32_sqi_disable_int(sqi);
+
+xfer_out:
+	list_for_each_entry_safe_reverse(rdesc, next,
+					 &sqi->bd_list_used, list) {
+		/* Update total byte transferred */
+		msg->actual_length += rdesc->xfer_len;
+		/* release ring descr */
+		ring_desc_put(sqi, rdesc);
+	}
+	spi_finalize_current_message(spi->master);
+
+	return ret;
+}
+
+static int pic32_sqi_unprepare_hardware(struct spi_master *master)
+{
+	struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+	/* disable clk */
+	pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+	/* disable spi */
+	pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+
+	return 0;
+}
+
+static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
+{
+	struct ring_desc *rdesc;
+	struct buf_desc *bd;
+	int i;
+
+	/* allocate coherent DMAable memory for hardware buffer descriptors. */
+	sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
+				      sizeof(*bd) * PESQI_BD_COUNT,
+				      &sqi->bd_dma, GFP_DMA32);
+	if (!sqi->bd) {
+		dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
+		return -ENOMEM;
+	}
+
+	/* allocate software ring descriptors */
+	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
+	if (!sqi->ring) {
+		dma_free_coherent(&sqi->master->dev,
+				  sizeof(*bd) * PESQI_BD_COUNT,
+				  sqi->bd, sqi->bd_dma);
+		return -ENOMEM;
+	}
+
+	bd = (struct buf_desc *)sqi->bd;
+
+	INIT_LIST_HEAD(&sqi->bd_list_free);
+	INIT_LIST_HEAD(&sqi->bd_list_used);
+
+	/* initialize ring-desc */
+	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
+		INIT_LIST_HEAD(&rdesc->list);
+		rdesc->bd = &bd[i];
+		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
+		list_add_tail(&rdesc->list, &sqi->bd_list_free);
+	}
+
+	/* Prepare BD: chain to next BD(s) */
+	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
+		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
+	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
+
+	return 0;
+}
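+
+/*
+ * Layout sketch: sqi->bd is a single coherent block of PESQI_BD_COUNT
+ * hardware buf_desc entries chained through bd_nextp (the last entry
+ * terminates the chain with 0), while sqi->ring holds the matching
+ * software ring_desc entries, all initially parked on bd_list_free.
+ */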
+
+static void ring_desc_ring_free(struct pic32_sqi *sqi)
+{
+	dma_free_coherent(&sqi->master->dev,
+			  sizeof(struct buf_desc) * PESQI_BD_COUNT,
+			  sqi->bd, sqi->bd_dma);
+	kfree(sqi->ring);
+}
+
+static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
+{
+	unsigned long flags;
+	u32 val;
+
+	/* A soft-reset of the PESQI controller triggers interrupts.
+	 * We are not yet ready to handle them, so disable CPU
+	 * interrupts for the time being.
+	 */
+	local_irq_save(flags);
+
+	/* assert soft-reset */
+	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);
+
+	/* wait until clear */
+	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
+				  !(val & PESQI_SOFT_RESET), 1, 5000);
+
+	/* disable all interrupts */
+	pic32_sqi_disable_int(sqi);
+
+	/* Now it is safe to re-enable the CPU interrupt */
+	local_irq_restore(flags);
+
+	/* tx and rx fifo interrupt threshold */
+	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
+	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+	writel(val, sqi->regs + PESQI_CMD_THRES_REG);
+
+	val = readl(sqi->regs + PESQI_INT_THRES_REG);
+	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+	writel(val, sqi->regs + PESQI_INT_THRES_REG);
+
+	/* default configuration */
+	val = readl(sqi->regs + PESQI_CONF_REG);
+
+	/* set mode: DMA */
+	val &= ~PESQI_MODE;
+	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
+	writel(val, sqi->regs + PESQI_CONF_REG);
+
+	/* DATAEN - SQIID0-ID3 */
+	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;
+
+	/* burst/INCR4 enable */
+	val |= PESQI_BURST_EN;
+
+	/* CSEN - all CS */
+	val |= 3U << PESQI_CSEN_SHIFT;
+	writel(val, sqi->regs + PESQI_CONF_REG);
+
+	/* write poll count */
+	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);
+
+	sqi->cur_speed = 0;
+	sqi->cur_mode = -1;
+}
+
+static int pic32_sqi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct pic32_sqi *sqi;
+	struct resource *reg;
+	int ret;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
+	if (!master)
+		return -ENOMEM;
+
+	sqi = spi_master_get_devdata(master);
+	sqi->master = master;
+
+	reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
+	if (IS_ERR(sqi->regs)) {
+		ret = PTR_ERR(sqi->regs);
+		goto err_free_master;
+	}
+
+	/* irq */
+	sqi->irq = platform_get_irq(pdev, 0);
+	if (sqi->irq < 0) {
+		dev_err(&pdev->dev, "no irq found\n");
+		ret = sqi->irq;
+		goto err_free_master;
+	}
+
+	/* clocks */
+	sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
+	if (IS_ERR(sqi->sys_clk)) {
+		ret = PTR_ERR(sqi->sys_clk);
+		dev_err(&pdev->dev, "no sys_clk ?\n");
+		goto err_free_master;
+	}
+
+	sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
+	if (IS_ERR(sqi->base_clk)) {
+		ret = PTR_ERR(sqi->base_clk);
+		dev_err(&pdev->dev, "no base clk ?\n");
+		goto err_free_master;
+	}
+
+	ret = clk_prepare_enable(sqi->sys_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "sys clk enable failed\n");
+		goto err_free_master;
+	}
+
+	ret = clk_prepare_enable(sqi->base_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "base clk enable failed\n");
+		clk_disable_unprepare(sqi->sys_clk);
+		goto err_free_master;
+	}
+
+	init_completion(&sqi->xfer_done);
+
+	/* initialize hardware */
+	pic32_sqi_hw_init(sqi);
+
+	/* allocate buffers & descriptors */
+	ret = ring_desc_ring_alloc(sqi);
+	if (ret) {
+		dev_err(&pdev->dev, "ring alloc failed\n");
+		goto err_disable_clk;
+	}
+
+	/* install irq handlers */
+	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
+			  dev_name(&pdev->dev), sqi);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request_irq(%d) failed\n", sqi->irq);
+		goto err_free_ring;
+	}
+
+	/* register master */
+	master->num_chipselect	= 2;
+	master->max_speed_hz	= clk_get_rate(sqi->base_clk);
+	master->dma_alignment	= 32;
+	master->max_dma_len	= PESQI_BD_BUF_LEN_MAX;
+	master->dev.of_node	= of_node_get(pdev->dev.of_node);
+	master->mode_bits	= SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
+				  SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+	master->flags		= SPI_MASTER_HALF_DUPLEX;
+	master->can_dma		= pic32_sqi_can_dma;
+	master->bits_per_word_mask	= SPI_BPW_RANGE_MASK(8, 32);
+	master->transfer_one_message	= pic32_sqi_one_message;
+	master->prepare_transfer_hardware	= pic32_sqi_prepare_hardware;
+	master->unprepare_transfer_hardware	= pic32_sqi_unprepare_hardware;
+
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret) {
+		dev_err(&master->dev, "failed registering spi master\n");
+		free_irq(sqi->irq, sqi);
+		goto err_free_ring;
+	}
+
+	platform_set_drvdata(pdev, sqi);
+
+	return 0;
+
+err_free_ring:
+	ring_desc_ring_free(sqi);
+
+err_disable_clk:
+	clk_disable_unprepare(sqi->base_clk);
+	clk_disable_unprepare(sqi->sys_clk);
+
+err_free_master:
+	spi_master_put(master);
+	return ret;
+}
+
+static int pic32_sqi_remove(struct platform_device *pdev)
+{
+	struct pic32_sqi *sqi = platform_get_drvdata(pdev);
+
+	/* release resources */
+	free_irq(sqi->irq, sqi);
+	ring_desc_ring_free(sqi);
+
+	/* disable clk */
+	clk_disable_unprepare(sqi->base_clk);
+	clk_disable_unprepare(sqi->sys_clk);
+
+	return 0;
+}
+
+static const struct of_device_id pic32_sqi_of_ids[] = {
+	{.compatible = "microchip,pic32mzda-sqi",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
+
+static struct platform_driver pic32_sqi_driver = {
+	.driver = {
+		.name = "sqi-pic32",
+		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
+	},
+	.probe = pic32_sqi_probe,
+	.remove = pic32_sqi_remove,
+};
+
+module_platform_driver(pic32_sqi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
new file mode 100644
index 0000000..73db87f
--- /dev/null
+++ b/drivers/spi/spi-pic32.c
@@ -0,0 +1,878 @@
+/*
+ * Microchip PIC32 SPI controller driver.
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI controller registers */
+struct pic32_spi_regs {
+	u32 ctrl;
+	u32 ctrl_clr;
+	u32 ctrl_set;
+	u32 ctrl_inv;
+	u32 status;
+	u32 status_clr;
+	u32 status_set;
+	u32 status_inv;
+	u32 buf;
+	u32 dontuse[3];
+	u32 baud;
+	u32 dontuse2[3];
+	u32 ctrl2;
+	u32 ctrl2_clr;
+	u32 ctrl2_set;
+	u32 ctrl2_inv;
+};
+
+/* Bit fields of SPI Control Register */
+#define CTRL_RX_INT_SHIFT	0  /* Rx interrupt generation */
+#define  RX_FIFO_EMPTY		0 /* completely empty */
+#define  RX_FIFO_NOT_EMPTY	1 /* not empty */
+#define  RX_FIFO_HALF_FULL	2 /* full by half or more */
+#define  RX_FIFO_FULL		3 /* completely full */
+
+#define CTRL_TX_INT_SHIFT	2  /* TX interrupt generation */
+#define  TX_FIFO_ALL_EMPTY	0 /* completely empty */
+#define  TX_FIFO_EMPTY		1 /* empty */
+#define  TX_FIFO_HALF_EMPTY	2 /* empty by half or more */
+#define  TX_FIFO_NOT_FULL	3 /* at least one slot empty */
+
+#define CTRL_MSTEN	BIT(5) /* enable master mode */
+#define CTRL_CKP	BIT(6) /* active low */
+#define CTRL_CKE	BIT(8) /* Tx on falling edge */
+#define CTRL_SMP	BIT(9) /* Rx at middle or end of tx */
+#define CTRL_BPW_MASK	0x03   /* bits per word/sample */
+#define CTRL_BPW_SHIFT	10
+#define  PIC32_BPW_8	0
+#define  PIC32_BPW_16	1
+#define  PIC32_BPW_32	2
+#define CTRL_SIDL	BIT(13) /* sleep when idle */
+#define CTRL_ON		BIT(15) /* enable macro */
+#define CTRL_ENHBUF	BIT(16) /* enable enhanced buffering */
+#define CTRL_MCLKSEL	BIT(23) /* select clock source */
+#define CTRL_MSSEN	BIT(28) /* macro driven /SS */
+#define CTRL_FRMEN	BIT(31) /* enable framing mode */
+
+/* Bit fields of SPI Status Register */
+#define STAT_RF_EMPTY	BIT(5) /* RX Fifo empty */
+#define STAT_RX_OV	BIT(6) /* err, s/w needs to clear */
+#define STAT_TX_UR	BIT(8) /* UR in Framed SPI modes */
+#define STAT_FRM_ERR	BIT(12) /* Multiple Frame Sync pulse */
+#define STAT_TF_LVL_MASK	0x1F
+#define STAT_TF_LVL_SHIFT	16
+#define STAT_RF_LVL_MASK	0x1F
+#define STAT_RF_LVL_SHIFT	24
+
+/* Bit fields of SPI Baud Register */
+#define BAUD_MASK		0x1ff
+
+/* Bit fields of SPI Control2 Register */
+#define CTRL2_TX_UR_EN		BIT(10) /* Enable int on Tx under-run */
+#define CTRL2_RX_OV_EN		BIT(11) /* Enable int on Rx over-run */
+#define CTRL2_FRM_ERR_EN	BIT(12) /* Enable frame err int */
+
+/* Minimum DMA transfer size */
+#define PIC32_DMA_LEN_MIN	64
+
+struct pic32_spi {
+	dma_addr_t		dma_base;
+	struct pic32_spi_regs __iomem *regs;
+	int			fault_irq;
+	int			rx_irq;
+	int			tx_irq;
+	u32			fifo_n_byte; /* FIFO depth in bytes */
+	struct clk		*clk;
+	struct spi_master	*master;
+	/* Current controller setting */
+	u32			speed_hz; /* spi-clk rate */
+	u32			mode;
+	u32			bits_per_word;
+	u32			fifo_n_elm; /* FIFO depth in words */
+#define PIC32F_DMA_PREP		0 /* DMA chnls configured */
+	unsigned long		flags;
+	/* Current transfer state */
+	struct completion	xfer_done;
+	/* PIO transfer specific */
+	const void		*tx;
+	const void		*tx_end;
+	const void		*rx;
+	const void		*rx_end;
+	int			len;
+	void (*rx_fifo)(struct pic32_spi *);
+	void (*tx_fifo)(struct pic32_spi *);
+};
+
+static inline void pic32_spi_enable(struct pic32_spi *pic32s)
+{
+	writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set);
+}
+
+static inline void pic32_spi_disable(struct pic32_spi *pic32s)
+{
+	writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr);
+
+	/* avoid SPI register read/write on the immediately following CPU clock */
+	ndelay(20);
+}
+
+static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck)
+{
+	u32 div;
+
+	/* div = (clk_in / (2 * spi_ck)) - 1 */
+	div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1;
+
+	writel(div & BAUD_MASK, &pic32s->regs->baud);
+}
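+
+/*
+ * Illustrative example (assumed clock numbers, not taken from a datasheet):
+ * with clk_get_rate(pic32s->clk) = 40 MHz and a requested spi_ck of 10 MHz,
+ * DIV_ROUND_CLOSEST(40 MHz, 20 MHz) - 1 = 1 is written to the baud register,
+ * giving 40 MHz / (2 * (1 + 1)) = 10 MHz on SCK.
+ */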
+
+static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s)
+{
+	u32 sr = readl(&pic32s->regs->status);
+
+	return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK;
+}
+
+static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s)
+{
+	u32 sr = readl(&pic32s->regs->status);
+
+	return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK;
+}
+
+/* Return the max entries we can fill into tx fifo */
+static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes)
+{
+	u32 tx_left, tx_room, rxtx_gap;
+
+	tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes;
+	tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s);
+
+	/*
+	 * Another concern is the tx/rx mismatch: we considered using
+	 * (pic32s->fifo_n_byte - rxfl - txfl) as the upper bound for tx,
+	 * but that does not cover data which has already left the tx/rx
+	 * FIFOs and sits in the shift registers. So the limit is tracked
+	 * from the software side instead.
+	 */
+	rxtx_gap = ((pic32s->rx_end - pic32s->rx) -
+		    (pic32s->tx_end - pic32s->tx)) / n_bytes;
+	return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap));
+}
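+
+/*
+ * Illustrative numbers (assumed, not from the driver): with 8-bit words the
+ * FIFO holds fifo_n_elm = 16 entries. If 10 bytes are still to be sent
+ * (tx_left = 10), 12 FIFO slots are free (tx_room = 12) and rx lags tx by
+ * 6 entries (rxtx_gap = 6), pic32_tx_max() returns min3(10, 12, 16 - 6) = 10.
+ */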
+
+/* Return the max entries we should read out of rx fifo */
+static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes)
+{
+	u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes;
+
+	return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s));
+}
+
+#define BUILD_SPI_FIFO_RW(__name, __type, __bwl)		\
+static void pic32_spi_rx_##__name(struct pic32_spi *pic32s)	\
+{								\
+	__type v;						\
+	u32 mx = pic32_rx_max(pic32s, sizeof(__type));		\
+	for (; mx; mx--) {					\
+		v = read##__bwl(&pic32s->regs->buf);		\
+		if (pic32s->rx_end - pic32s->len)		\
+			*(__type *)(pic32s->rx) = v;		\
+		pic32s->rx += sizeof(__type);			\
+	}							\
+}								\
+								\
+static void pic32_spi_tx_##__name(struct pic32_spi *pic32s)	\
+{								\
+	__type v;						\
+	u32 mx = pic32_tx_max(pic32s, sizeof(__type));		\
+	for (; mx ; mx--) {					\
+		v = (__type)~0U;				\
+		if (pic32s->tx_end - pic32s->len)		\
+			v = *(__type *)(pic32s->tx);		\
+		write##__bwl(v, &pic32s->regs->buf);		\
+		pic32s->tx += sizeof(__type);			\
+	}							\
+}
+
+BUILD_SPI_FIFO_RW(byte, u8, b);
+BUILD_SPI_FIFO_RW(word, u16, w);
+BUILD_SPI_FIFO_RW(dword, u32, l);
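+
+/*
+ * The macro above expands into the FIFO helpers pic32_spi_rx_byte()/
+ * pic32_spi_tx_byte(), pic32_spi_rx_word()/pic32_spi_tx_word() and
+ * pic32_spi_rx_dword()/pic32_spi_tx_dword(), which are installed as the
+ * rx_fifo/tx_fifo callbacks by pic32_spi_set_word_size() according to the
+ * configured bits_per_word.
+ */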
+
+static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
+{
+	/* disable all interrupts */
+	disable_irq_nosync(pic32s->fault_irq);
+	disable_irq_nosync(pic32s->rx_irq);
+	disable_irq_nosync(pic32s->tx_irq);
+
+	/* Show err message and abort xfer with err */
+	dev_err(&pic32s->master->dev, "%s\n", msg);
+	if (pic32s->master->cur_msg)
+		pic32s->master->cur_msg->status = -EIO;
+	complete(&pic32s->xfer_done);
+}
+
+static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
+{
+	struct pic32_spi *pic32s = dev_id;
+	u32 status;
+
+	status = readl(&pic32s->regs->status);
+
+	/* Error handling */
+	if (status & (STAT_RX_OV | STAT_TX_UR)) {
+		writel(STAT_RX_OV, &pic32s->regs->status_clr);
+		writel(STAT_TX_UR, &pic32s->regs->status_clr);
+		pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run");
+		return IRQ_HANDLED;
+	}
+
+	if (status & STAT_FRM_ERR) {
+		pic32_err_stop(pic32s, "err_irq: frame error");
+		return IRQ_HANDLED;
+	}
+
+	if (!pic32s->master->cur_msg) {
+		pic32_err_stop(pic32s, "err_irq: no message");
+		return IRQ_NONE;
+	}
+
+	return IRQ_NONE;
+}
+
+static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id)
+{
+	struct pic32_spi *pic32s = dev_id;
+
+	pic32s->rx_fifo(pic32s);
+
+	/* rx complete ? */
+	if (pic32s->rx_end == pic32s->rx) {
+		/* disable all interrupts */
+		disable_irq_nosync(pic32s->fault_irq);
+		disable_irq_nosync(pic32s->rx_irq);
+
+		/* complete current xfer */
+		complete(&pic32s->xfer_done);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id)
+{
+	struct pic32_spi *pic32s = dev_id;
+
+	pic32s->tx_fifo(pic32s);
+
+	/* tx complete? disable tx interrupt */
+	if (pic32s->tx_end == pic32s->tx)
+		disable_irq_nosync(pic32s->tx_irq);
+
+	return IRQ_HANDLED;
+}
+
+static void pic32_spi_dma_rx_notify(void *data)
+{
+	struct pic32_spi *pic32s = data;
+
+	complete(&pic32s->xfer_done);
+}
+
+static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
+				  struct spi_transfer *xfer)
+{
+	struct spi_master *master = pic32s->master;
+	struct dma_async_tx_descriptor *desc_rx;
+	struct dma_async_tx_descriptor *desc_tx;
+	dma_cookie_t cookie;
+	int ret;
+
+	if (!master->dma_rx || !master->dma_tx)
+		return -ENODEV;
+
+	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+					  xfer->rx_sg.sgl,
+					  xfer->rx_sg.nents,
+					  DMA_FROM_DEVICE,
+					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_rx) {
+		ret = -EINVAL;
+		goto err_dma;
+	}
+
+	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+					  xfer->tx_sg.sgl,
+					  xfer->tx_sg.nents,
+					  DMA_TO_DEVICE,
+					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_tx) {
+		ret = -EINVAL;
+		goto err_dma;
+	}
+
+	/* Put the callback on the RX transfer, which should finish last */
+	desc_rx->callback = pic32_spi_dma_rx_notify;
+	desc_rx->callback_param = pic32s;
+
+	cookie = dmaengine_submit(desc_rx);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		goto err_dma;
+
+	cookie = dmaengine_submit(desc_tx);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		goto err_dma_tx;
+
+	dma_async_issue_pending(master->dma_rx);
+	dma_async_issue_pending(master->dma_tx);
+
+	return 0;
+
+err_dma_tx:
+	dmaengine_terminate_all(master->dma_rx);
+err_dma:
+	return ret;
+}
+
+static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
+{
+	int buf_offset = offsetof(struct pic32_spi_regs, buf);
+	struct spi_master *master = pic32s->master;
+	struct dma_slave_config cfg;
+	int ret;
+
+	cfg.device_fc = true;
+	cfg.src_addr = pic32s->dma_base + buf_offset;
+	cfg.dst_addr = pic32s->dma_base + buf_offset;
+	cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
+	cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
+	cfg.src_addr_width = dma_width;
+	cfg.dst_addr_width = dma_width;
+	/* tx channel */
+	cfg.slave_id = pic32s->tx_irq;
+	cfg.direction = DMA_MEM_TO_DEV;
+	ret = dmaengine_slave_config(master->dma_tx, &cfg);
+	if (ret) {
+		dev_err(&master->dev, "tx channel setup failed\n");
+		return ret;
+	}
+	/* rx channel */
+	cfg.slave_id = pic32s->rx_irq;
+	cfg.direction = DMA_DEV_TO_MEM;
+	ret = dmaengine_slave_config(master->dma_rx, &cfg);
+	if (ret)
+		dev_err(&master->dev, "rx channel setup failed\n");
+
+	return ret;
+}
+
+static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
+{
+	enum dma_slave_buswidth dmawidth;
+	u32 buswidth, v;
+
+	switch (bits_per_word) {
+	case 8:
+		pic32s->rx_fifo = pic32_spi_rx_byte;
+		pic32s->tx_fifo = pic32_spi_tx_byte;
+		buswidth = PIC32_BPW_8;
+		dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		break;
+	case 16:
+		pic32s->rx_fifo = pic32_spi_rx_word;
+		pic32s->tx_fifo = pic32_spi_tx_word;
+		buswidth = PIC32_BPW_16;
+		dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		break;
+	case 32:
+		pic32s->rx_fifo = pic32_spi_rx_dword;
+		pic32s->tx_fifo = pic32_spi_tx_dword;
+		buswidth = PIC32_BPW_32;
+		dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		break;
+	default:
+		/* not supported */
+		return -EINVAL;
+	}
+
+	/* calculate maximum number of words fifos can hold */
+	pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte,
+					  bits_per_word / 8);
+	/* set word size */
+	v = readl(&pic32s->regs->ctrl);
+	v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT);
+	v |= buswidth << CTRL_BPW_SHIFT;
+	writel(v, &pic32s->regs->ctrl);
+
+	/* re-configure dma width, if required */
+	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+		pic32_spi_dma_config(pic32s, dmawidth);
+
+	return 0;
+}
+
+static int pic32_spi_prepare_hardware(struct spi_master *master)
+{
+	struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+	pic32_spi_enable(pic32s);
+
+	return 0;
+}
+
+static int pic32_spi_prepare_message(struct spi_master *master,
+				     struct spi_message *msg)
+{
+	struct pic32_spi *pic32s = spi_master_get_devdata(master);
+	struct spi_device *spi = msg->spi;
+	u32 val;
+
+	/* set device specific bits_per_word */
+	if (pic32s->bits_per_word != spi->bits_per_word) {
+		pic32_spi_set_word_size(pic32s, spi->bits_per_word);
+		pic32s->bits_per_word = spi->bits_per_word;
+	}
+
+	/* device specific speed change */
+	if (pic32s->speed_hz != spi->max_speed_hz) {
+		pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz);
+		pic32s->speed_hz = spi->max_speed_hz;
+	}
+
+	/* device specific mode change */
+	if (pic32s->mode != spi->mode) {
+		val = readl(&pic32s->regs->ctrl);
+		/* active low */
+		if (spi->mode & SPI_CPOL)
+			val |= CTRL_CKP;
+		else
+			val &= ~CTRL_CKP;
+		/* tx on rising edge */
+		if (spi->mode & SPI_CPHA)
+			val &= ~CTRL_CKE;
+		else
+			val |= CTRL_CKE;
+
+		/* rx at end of tx */
+		val |= CTRL_SMP;
+		writel(val, &pic32s->regs->ctrl);
+		pic32s->mode = spi->mode;
+	}
+
+	return 0;
+}
+
+static bool pic32_spi_can_dma(struct spi_master *master,
+			      struct spi_device *spi,
+			      struct spi_transfer *xfer)
+{
+	struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+	/* Skip DMA for small transfers to avoid the setup overhead. */
+	return (xfer->len >= PIC32_DMA_LEN_MIN) &&
+	       test_bit(PIC32F_DMA_PREP, &pic32s->flags);
+}
+
+static int pic32_spi_one_transfer(struct spi_master *master,
+				  struct spi_device *spi,
+				  struct spi_transfer *transfer)
+{
+	struct pic32_spi *pic32s;
+	bool dma_issued = false;
+	int ret;
+
+	pic32s = spi_master_get_devdata(master);
+
+	/* handle transfer specific word size change */
+	if (transfer->bits_per_word &&
+	    (transfer->bits_per_word != pic32s->bits_per_word)) {
+		ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word);
+		if (ret)
+			return ret;
+		pic32s->bits_per_word = transfer->bits_per_word;
+	}
+
+	/* handle transfer specific speed change */
+	if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) {
+		pic32_spi_set_clk_rate(pic32s, transfer->speed_hz);
+		pic32s->speed_hz = transfer->speed_hz;
+	}
+
+	reinit_completion(&pic32s->xfer_done);
+
+	/* transact by DMA mode */
+	if (transfer->rx_sg.nents && transfer->tx_sg.nents) {
+		ret = pic32_spi_dma_transfer(pic32s, transfer);
+		if (ret) {
+			dev_err(&spi->dev, "dma submit error\n");
+			return ret;
+		}
+
+		/* DMA issued */
+		dma_issued = true;
+	} else {
+		/* set current transfer information */
+		pic32s->tx = (const void *)transfer->tx_buf;
+		pic32s->rx = (const void *)transfer->rx_buf;
+		pic32s->tx_end = pic32s->tx + transfer->len;
+		pic32s->rx_end = pic32s->rx + transfer->len;
+		pic32s->len = transfer->len;
+
+		/* transact by interrupt driven PIO */
+		enable_irq(pic32s->fault_irq);
+		enable_irq(pic32s->rx_irq);
+		enable_irq(pic32s->tx_irq);
+	}
+
+	/* wait for completion */
+	ret = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
+	if (ret <= 0) {
+		dev_err(&spi->dev, "wait error/timed out\n");
+		if (dma_issued) {
+			dmaengine_terminate_all(master->dma_rx);
+			dmaengine_terminate_all(master->dma_tx);
+		}
+		ret = -ETIMEDOUT;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int pic32_spi_unprepare_message(struct spi_master *master,
+				       struct spi_message *msg)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static int pic32_spi_unprepare_hardware(struct spi_master *master)
+{
+	struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+	pic32_spi_disable(pic32s);
+
+	return 0;
+}
+
+/* This may be called multiple times by the same spi device */
+static int pic32_spi_setup(struct spi_device *spi)
+{
+	if (!spi->max_speed_hz) {
+		dev_err(&spi->dev, "No max speed HZ parameter\n");
+		return -EINVAL;
+	}
+
+	/* The PIC32 spi controller can drive /CS during a transfer based
+	 * on the tx fifo fill-level: /CS stays asserted as long as the TX
+	 * fifo is non-empty and is deasserted once it drains, indicating
+	 * completion of the ongoing transfer. This can result in
+	 * unreliable/erroneous SPI transactions.
+	 * To avoid that we always handle /CS by toggling a GPIO.
+	 */
+	if (!gpio_is_valid(spi->cs_gpio))
+		return -EINVAL;
+
+	gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+	return 0;
+}
+
+static void pic32_spi_cleanup(struct spi_device *spi)
+{
+	/* de-activate cs-gpio */
+	gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+}
+
+static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
+{
+	struct spi_master *master = pic32s->master;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL,
+							  dev, "spi-rx");
+	if (!master->dma_rx) {
+		dev_warn(dev, "RX channel not found.\n");
+		goto out_err;
+	}
+
+	master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL,
+							  dev, "spi-tx");
+	if (!master->dma_tx) {
+		dev_warn(dev, "TX channel not found.\n");
+		goto out_err;
+	}
+
+	if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
+		goto out_err;
+
+	/* DMA chnls allocated and prepared */
+	set_bit(PIC32F_DMA_PREP, &pic32s->flags);
+
+	return;
+
+out_err:
+	if (master->dma_rx)
+		dma_release_channel(master->dma_rx);
+
+	if (master->dma_tx)
+		dma_release_channel(master->dma_tx);
+}
+
+static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
+{
+	if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+		return;
+
+	clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
+	if (pic32s->master->dma_rx)
+		dma_release_channel(pic32s->master->dma_rx);
+
+	if (pic32s->master->dma_tx)
+		dma_release_channel(pic32s->master->dma_tx);
+}
+
+static void pic32_spi_hw_init(struct pic32_spi *pic32s)
+{
+	u32 ctrl;
+
+	/* disable hardware */
+	pic32_spi_disable(pic32s);
+
+	ctrl = readl(&pic32s->regs->ctrl);
+	/* enable the enhanced fifo (128 bits deep) */
+	ctrl |= CTRL_ENHBUF;
+	pic32s->fifo_n_byte = 16;
+
+	/* disable framing mode */
+	ctrl &= ~CTRL_FRMEN;
+
+	/* enable master mode while disabled */
+	ctrl |= CTRL_MSTEN;
+
+	/* set tx fifo threshold interrupt */
+	ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT);
+	ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT);
+
+	/* set rx fifo threshold interrupt */
+	ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT);
+	ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT);
+
+	/* select clk source */
+	ctrl &= ~CTRL_MCLKSEL;
+
+	/* set manual /CS mode */
+	ctrl &= ~CTRL_MSSEN;
+
+	writel(ctrl, &pic32s->regs->ctrl);
+
+	/* enable error reporting */
+	ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN;
+	writel(ctrl, &pic32s->regs->ctrl2_set);
+}
+
+static int pic32_spi_hw_probe(struct platform_device *pdev,
+			      struct pic32_spi *pic32s)
+{
+	struct resource *mem;
+	int ret;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pic32s->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(pic32s->regs))
+		return PTR_ERR(pic32s->regs);
+
+	pic32s->dma_base = mem->start;
+
+	/* get irq resources: err-irq, rx-irq, tx-irq */
+	pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
+	if (pic32s->fault_irq < 0) {
+		dev_err(&pdev->dev, "fault-irq not found\n");
+		return pic32s->fault_irq;
+	}
+
+	pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
+	if (pic32s->rx_irq < 0) {
+		dev_err(&pdev->dev, "rx-irq not found\n");
+		return pic32s->rx_irq;
+	}
+
+	pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
+	if (pic32s->tx_irq < 0) {
+		dev_err(&pdev->dev, "tx-irq not found\n");
+		return pic32s->tx_irq;
+	}
+
+	/* get clock */
+	pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
+	if (IS_ERR(pic32s->clk)) {
+		dev_err(&pdev->dev, "clk not found\n");
+		ret = PTR_ERR(pic32s->clk);
+		goto err_unmap_mem;
+	}
+
+	ret = clk_prepare_enable(pic32s->clk);
+	if (ret)
+		goto err_unmap_mem;
+
+	pic32_spi_hw_init(pic32s);
+
+	return 0;
+
+err_unmap_mem:
+	dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret);
+	return ret;
+}
+
+static int pic32_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct pic32_spi *pic32s;
+	int ret;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*pic32s));
+	if (!master)
+		return -ENOMEM;
+
+	pic32s = spi_master_get_devdata(master);
+	pic32s->master = master;
+
+	ret = pic32_spi_hw_probe(pdev, pic32s);
+	if (ret)
+		goto err_master;
+
+	master->dev.of_node	= of_node_get(pdev->dev.of_node);
+	master->mode_bits	= SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
+	master->num_chipselect	= 1; /* single chip-select */
+	master->max_speed_hz	= clk_get_rate(pic32s->clk);
+	master->setup		= pic32_spi_setup;
+	master->cleanup		= pic32_spi_cleanup;
+	master->flags		= SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;
+	master->bits_per_word_mask	= SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+					  SPI_BPW_MASK(32);
+	master->transfer_one		= pic32_spi_one_transfer;
+	master->prepare_message		= pic32_spi_prepare_message;
+	master->unprepare_message	= pic32_spi_unprepare_message;
+	master->prepare_transfer_hardware	= pic32_spi_prepare_hardware;
+	master->unprepare_transfer_hardware	= pic32_spi_unprepare_hardware;
+
+	/* optional DMA support */
+	pic32_spi_dma_prep(pic32s, &pdev->dev);
+	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+		master->can_dma	= pic32_spi_can_dma;
+
+	init_completion(&pic32s->xfer_done);
+	pic32s->mode = -1;
+
+	/* install irq handlers (with irq-disabled) */
+	irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN);
+	ret = devm_request_irq(&pdev->dev, pic32s->fault_irq,
+			       pic32_spi_fault_irq, IRQF_NO_THREAD,
+			       dev_name(&pdev->dev), pic32s);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->fault_irq);
+		goto err_bailout;
+	}
+
+	/* receive interrupt handler */
+	irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN);
+	ret = devm_request_irq(&pdev->dev, pic32s->rx_irq,
+			       pic32_spi_rx_irq, IRQF_NO_THREAD,
+			       dev_name(&pdev->dev), pic32s);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq);
+		goto err_bailout;
+	}
+
+	/* transmit interrupt handler */
+	irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN);
+	ret = devm_request_irq(&pdev->dev, pic32s->tx_irq,
+			       pic32_spi_tx_irq, IRQF_NO_THREAD,
+			       dev_name(&pdev->dev), pic32s);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq);
+		goto err_bailout;
+	}
+
+	/* register master */
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret) {
+		dev_err(&master->dev, "failed registering spi master\n");
+		goto err_bailout;
+	}
+
+	platform_set_drvdata(pdev, pic32s);
+
+	return 0;
+
+err_bailout:
+	clk_disable_unprepare(pic32s->clk);
+err_master:
+	spi_master_put(master);
+	return ret;
+}
+
+static int pic32_spi_remove(struct platform_device *pdev)
+{
+	struct pic32_spi *pic32s;
+
+	pic32s = platform_get_drvdata(pdev);
+	pic32_spi_disable(pic32s);
+	clk_disable_unprepare(pic32s->clk);
+	pic32_spi_dma_unprep(pic32s);
+
+	return 0;
+}
+
+static const struct of_device_id pic32_spi_of_match[] = {
+	{.compatible = "microchip,pic32mzda-spi",},
+	{},
+};
+MODULE_DEVICE_TABLE(of, pic32_spi_of_match);
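+
+/*
+ * Illustrative device tree node (addresses, interrupt numbers and phandles
+ * below are hypothetical; only the resource names match what this driver
+ * requests at probe time):
+ *
+ *	spi1: spi@1f821000 {
+ *		compatible = "microchip,pic32mzda-spi";
+ *		reg = <0x1f821000 0x200>;
+ *		interrupts = <109>, <110>, <111>;
+ *		interrupt-names = "fault", "rx", "tx";
+ *		clocks = <&clk_periph>;
+ *		clock-names = "mck0";
+ *		dmas = <&dma 134>, <&dma 135>;
+ *		dma-names = "spi-rx", "spi-tx";
+ *		cs-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
+ *	};
+ */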
+
+static struct platform_driver pic32_spi_driver = {
+	.driver = {
+		.name = "spi-pic32",
+		.of_match_table = of_match_ptr(pic32_spi_of_match),
+	},
+	.probe = pic32_spi_probe,
+	.remove = pic32_spi_remove,
+};
+
+module_platform_driver(pic32_spi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 365fc22..a18a03d 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -33,12 +33,10 @@
 		dmadev = drv_data->tx_chan->device->dev;
 		sgt = &drv_data->tx_sgt;
 		buf = drv_data->tx;
-		drv_data->tx_map_len = len;
 	} else {
 		dmadev = drv_data->rx_chan->device->dev;
 		sgt = &drv_data->rx_sgt;
 		buf = drv_data->rx;
-		drv_data->rx_map_len = len;
 	}
 
 	nents = DIV_ROUND_UP(len, SZ_2K);
@@ -55,11 +53,7 @@
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes = min_t(size_t, len, SZ_2K);
 
-		if (buf)
-			sg_set_buf(sg, pbuf, bytes);
-		else
-			sg_set_buf(sg, drv_data->dummy, bytes);
-
+		sg_set_buf(sg, pbuf, bytes);
 		pbuf += bytes;
 		len -= bytes;
 	}
@@ -133,9 +127,6 @@
 		if (!error) {
 			pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-			drv_data->tx += drv_data->tx_map_len;
-			drv_data->rx += drv_data->rx_map_len;
-
 			msg->actual_length += drv_data->len;
 			msg->state = pxa2xx_spi_next_transfer(drv_data);
 		} else {
@@ -267,19 +258,22 @@
 int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
 {
 	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+	int err = 0;
 
 	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
 	if (!tx_desc) {
 		dev_err(&drv_data->pdev->dev,
 			"failed to get DMA TX descriptor\n");
-		return -EBUSY;
+		err = -EBUSY;
+		goto err_tx;
 	}
 
 	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
 	if (!rx_desc) {
 		dev_err(&drv_data->pdev->dev,
 			"failed to get DMA RX descriptor\n");
-		return -EBUSY;
+		err = -EBUSY;
+		goto err_rx;
 	}
 
 	/* We are ready when RX completes */
@@ -289,6 +283,12 @@
 	dmaengine_submit(rx_desc);
 	dmaengine_submit(tx_desc);
 	return 0;
+
+err_rx:
+	dmaengine_terminate_async(drv_data->tx_chan);
+err_tx:
+	pxa2xx_spi_unmap_dma_buffers(drv_data);
+	return err;
 }
 
 void pxa2xx_spi_dma_start(struct driver_data *drv_data)
@@ -308,10 +308,6 @@
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
-	if (!drv_data->dummy)
-		return -ENOMEM;
-
 	drv_data->tx_chan = dma_request_slave_channel_compat(mask,
 				pdata->dma_filter, pdata->tx_param, dev, "tx");
 	if (!drv_data->tx_chan)
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 4fd7f98..5202de9 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -173,8 +173,8 @@
 	ssp->type = c->type;
 
 	snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
-	ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL,
-					CLK_IS_ROOT, c->max_clk_rate);
+	ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL, 0,
+					   c->max_clk_rate);
 	 if (IS_ERR(ssp->clk))
 		return PTR_ERR(ssp->clk);
 
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 86138e4..fe07c05 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -570,9 +570,8 @@
 		/* see if the next and current messages point
 		 * to the same chip
 		 */
-		if (next_msg && next_msg->spi != msg->spi)
-			next_msg = NULL;
-		if (!next_msg || msg->state == ERROR_STATE)
+		if ((next_msg && next_msg->spi != msg->spi) ||
+		    msg->state == ERROR_STATE)
 			cs_deassert(drv_data);
 	}
 
@@ -928,6 +927,7 @@
 	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
 	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
 	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
+	int err;
 
 	/* Get current state information */
 	message = drv_data->cur_msg;
@@ -1047,7 +1047,12 @@
 		/* Ensure we have the correct interrupt handler */
 		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
 
-		pxa2xx_spi_dma_prepare(drv_data, dma_burst);
+		err = pxa2xx_spi_dma_prepare(drv_data, dma_burst);
+		if (err) {
+			message->status = err;
+			giveback(drv_data);
+			return;
+		}
 
 		/* Clear status and start DMA engine */
 		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
@@ -1543,7 +1548,6 @@
 	drv_data->pdev = pdev;
 	drv_data->ssp = ssp;
 
-	master->dev.parent = &pdev->dev;
 	master->dev.of_node = pdev->dev.of_node;
 	/* the spi->mode bits understood by this driver: */
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
@@ -1556,6 +1560,7 @@
 	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
 	master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
 	master->auto_runtime_pm = true;
+	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
 
 	drv_data->ssp_type = ssp->type;
 
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index a1ef889..e6b0900 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -56,7 +56,6 @@
 	struct sg_table tx_sgt;
 	int rx_nents;
 	int tx_nents;
-	void *dummy;
 	atomic_t dma_running;
 
 	/* Current message transfer state info */
@@ -69,8 +68,6 @@
 	void *rx;
 	void *rx_end;
 	int dma_mapped;
-	size_t rx_map_len;
-	size_t tx_map_len;
 	u8 n_bytes;
 	int (*write)(struct driver_data *drv_data);
 	int (*read)(struct driver_data *drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 810a7fa..c338ef1 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -937,6 +937,10 @@
 	config = readl(controller->base + QUP_CONFIG);
 	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
 	writel_relaxed(config, controller->base + QUP_CONFIG);
+
+	clk_disable_unprepare(controller->cclk);
+	clk_disable_unprepare(controller->iclk);
+
 	return 0;
 }
 
@@ -945,6 +949,15 @@
 	struct spi_master *master = dev_get_drvdata(device);
 	struct spi_qup *controller = spi_master_get_devdata(master);
 	u32 config;
+	int ret;
+
+	ret = clk_prepare_enable(controller->iclk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(controller->cclk);
+	if (ret)
+		return ret;
 
 	/* Disable clocks auto gating */
 	config = readl_relaxed(controller->base + QUP_CONFIG);
@@ -1017,6 +1030,8 @@
 
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+	spi_master_put(master);
+
 	return 0;
 }
 
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 6c6c001..cd89682 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -744,10 +744,8 @@
 	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
 	if (IS_ERR(rs->dma_rx.ch)) {
 		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
-			dma_release_channel(rs->dma_tx.ch);
-			rs->dma_tx.ch = NULL;
 			ret = -EPROBE_DEFER;
-			goto err_get_fifo_len;
+			goto err_free_dma_tx;
 		}
 		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
 		rs->dma_rx.ch = NULL;
@@ -775,10 +773,11 @@
 
 err_register_master:
 	pm_runtime_disable(&pdev->dev);
-	if (rs->dma_tx.ch)
-		dma_release_channel(rs->dma_tx.ch);
 	if (rs->dma_rx.ch)
 		dma_release_channel(rs->dma_rx.ch);
+err_free_dma_tx:
+	if (rs->dma_tx.ch)
+		dma_release_channel(rs->dma_tx.ch);
 err_get_fifo_len:
 	clk_disable_unprepare(rs->spiclk);
 err_spiclk_enable:
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index f17c0ab..d5adf9f 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -345,12 +345,13 @@
 	spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
 	if (IS_ERR(spi_st->clk)) {
 		dev_err(&pdev->dev, "Unable to request clock\n");
-		return PTR_ERR(spi_st->clk);
+		ret = PTR_ERR(spi_st->clk);
+		goto put_master;
 	}
 
 	ret = spi_st_clk_enable(spi_st);
 	if (ret)
-		return ret;
+		goto put_master;
 
 	init_completion(&spi_st->done);
 
@@ -408,7 +409,8 @@
 
 clk_disable:
 	spi_st_clk_disable(spi_st);
-
+put_master:
+	spi_master_put(master);
 	return ret;
 }
 
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index aab9b49..18aeace 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -360,7 +360,7 @@
 
 	ret = clk_enable(xqspi->refclk);
 	if (ret)
-		goto clk_err;
+		return ret;
 
 	ret = clk_enable(xqspi->pclk);
 	if (ret)
@@ -369,6 +369,7 @@
 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
 	return 0;
 clk_err:
+	clk_disable(xqspi->refclk);
 	return ret;
 }
 
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0239b45..77e6e45 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -717,9 +717,11 @@
 	if (vmalloced_buf) {
 		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
-	} else {
+	} else if (virt_addr_valid(buf)) {
 		desc_len = min_t(int, max_seg_size, master->max_dma_len);
 		sgs = DIV_ROUND_UP(len, desc_len);
+	} else {
+		return -EINVAL;
 	}
 
 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
@@ -933,7 +935,7 @@
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
  * This is a standard implementation of transfer_one_message() for
- * drivers which impelment a transfer_one() operation.  It provides
+ * drivers which implement a transfer_one() operation.  It provides
  * standard handling of delays and chip select management.
  */
 static int spi_transfer_one_message(struct spi_master *master,
@@ -1764,6 +1766,7 @@
 	master->num_chipselect = 1;
 	master->dev.class = &spi_master_class;
 	master->dev.parent = dev;
+	pm_suspend_ignore_children(&master->dev, true);
 	spi_master_set_devdata(master, &master[1]);
 
 	return master;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5bac28a..7c197d1 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@
 
 source "drivers/staging/media/Kconfig"
 
-source "drivers/staging/rdma/Kconfig"
-
 source "drivers/staging/android/Kconfig"
 
 source "drivers/staging/board/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a954242..a470c72 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -23,7 +23,6 @@
 obj-$(CONFIG_USB_EMXX)		+= emxx_udc/
 obj-$(CONFIG_SPEAKUP)		+= speakup/
 obj-$(CONFIG_MFD_NVEC)		+= nvec/
-obj-$(CONFIG_STAGING_RDMA)	+= rdma/
 obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_STAGING_BOARD)	+= board/
 obj-$(CONFIG_LTE_GDM724X)	+= gdm724x/
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index ce1f949..3f2f30b 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -976,8 +976,8 @@
 }
 
 /* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags);
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+		const char *name, const void *value, size_t size, int flags);
 ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
 		    const char *name, void *buffer, size_t size);
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index ed4de04..608014b 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -211,11 +211,9 @@
 	return 0;
 }
 
-int ll_setxattr(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags)
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+		const char *name, const void *value, size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
-
 	LASSERT(inode);
 	LASSERT(name);
 
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 163f21a..e389009 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -42,23 +42,33 @@
 static int enable_hw_ecc;
 static int enable_read_hw_ecc;
 
-static struct nand_ecclayout spinand_oob_64 = {
-	.eccbytes = 24,
-	.eccpos = {
-		1, 2, 3, 4, 5, 6,
-		17, 18, 19, 20, 21, 22,
-		33, 34, 35, 36, 37, 38,
-		49, 50, 51, 52, 53, 54, },
-	.oobfree = {
-		{.offset = 8,
-			.length = 8},
-		{.offset = 24,
-			.length = 8},
-		{.offset = 40,
-			.length = 8},
-		{.offset = 56,
-			.length = 8},
-	}
+static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	if (section > 3)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 1;
+	oobregion->length = 6;
+
+	return 0;
+}
+
+static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	if (section > 3)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 8;
+	oobregion->length = 8;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_oob_64_ops = {
+	.ecc = spinand_ooblayout_64_ecc,
+	.free = spinand_ooblayout_64_free,
 };
 #endif
 
@@ -886,11 +896,11 @@
 
 	chip->ecc.strength = 1;
 	chip->ecc.total	= chip->ecc.steps * chip->ecc.bytes;
-	chip->ecc.layout = &spinand_oob_64;
 	chip->ecc.read_page = spinand_read_page_hwecc;
 	chip->ecc.write_page = spinand_write_page_hwecc;
 #else
 	chip->ecc.mode	= NAND_ECC_SOFT;
+	chip->ecc.algo	= NAND_ECC_HAMMING;
 	if (spinand_disable_ecc(spi_nand) < 0)
 		dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
 			 __func__);
@@ -912,6 +922,9 @@
 
 	mtd->dev.parent = &spi_nand->dev;
 	mtd->oobsize = 64;
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+	mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
+#endif
 
 	if (nand_scan(mtd, 1))
 		return -ENXIO;
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
deleted file mode 100644
index f1f3eca..0000000
--- a/drivers/staging/rdma/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-menuconfig STAGING_RDMA
-        tristate "RDMA staging drivers"
-	depends on INFINIBAND
-	depends on PCI || BROKEN
-	depends on HAS_IOMEM
-	depends on NET
-	depends on INET
-        default n
-        ---help---
-          This option allows you to select a number of RDMA drivers that
-	  fall into one of two categories: deprecated drivers being held
-	  here before finally being removed or new drivers that still need
-	  some work before being moved to the normal RDMA driver area.
-
-          If you wish to work on these drivers, to help improve them, or
-          to report problems you have with them, please use the
-	  linux-rdma@vger.kernel.org mailing list.
-
-          If in doubt, say N here.
-
-
-# Please keep entries in alphabetic order
-if STAGING_RDMA
-
-source "drivers/staging/rdma/hfi1/Kconfig"
-
-endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
deleted file mode 100644
index 8c7fc1d..0000000
--- a/drivers/staging/rdma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_HFI1)	+= hfi1/
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
deleted file mode 100644
index 4c6f1d7..0000000
--- a/drivers/staging/rdma/hfi1/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-July, 2015
-
-- Remove unneeded file entries in sysfs
-- Remove software processing of IB protocol and place in library for use
-  by qib, ipath (if still present), hfi1, and eventually soft-roce
-- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
deleted file mode 100644
index bb2409a..0000000
--- a/drivers/staging/rdma/hfi1/diag.c
+++ /dev/null
@@ -1,1925 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  - Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  - Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  - Neither the name of Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This file contains support for diagnostic functions.  It is accessed by
- * opening the hfi1_diag device, normally minor number 129.  Diagnostic use
- * of the chip may render the chip or board unusable until the driver
- * is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <rdma/ib_smi.h>
-#include "hfi.h"
-#include "device.h"
-#include "common.h"
-#include "verbs_txreq.h"
-#include "trace.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-#define snoop_dbg(fmt, ...) \
-	hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
-
-/* Snoop option mask */
-#define SNOOP_DROP_SEND		BIT(0)
-#define SNOOP_USE_METADATA	BIT(1)
-#define SNOOP_SET_VL0TOVL15     BIT(2)
-
-static u8 snoop_flags;
-
-/*
- * Extract packet length from LRH header.
- * This is in Dwords so multiply by 4 to get size in bytes
- */
-#define HFI1_GET_PKT_LEN(x)      (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
-
-enum hfi1_filter_status {
-	HFI1_FILTER_HIT,
-	HFI1_FILTER_ERR,
-	HFI1_FILTER_MISS
-};
-
-/* snoop processing functions */
-rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
-	[RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
-	[RHF_RCV_TYPE_EAGER]    = snoop_recv_handler,
-	[RHF_RCV_TYPE_IB]       = snoop_recv_handler,
-	[RHF_RCV_TYPE_ERROR]    = snoop_recv_handler,
-	[RHF_RCV_TYPE_BYPASS]   = snoop_recv_handler,
-	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
-	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
-	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid
-};
-
-/* Snoop packet structure */
-struct snoop_packet {
-	struct list_head list;
-	u32 total_len;
-	u8 data[];
-};
-
-/* Do not make these an enum or it will blow up the capture_md */
-#define PKT_DIR_EGRESS 0x0
-#define PKT_DIR_INGRESS 0x1
-
-/* Packet capture metadata returned to the user with the packet. */
-struct capture_md {
-	u8 port;
-	u8 dir;
-	u8 reserved[6];
-	union {
-		u64 pbc;
-		u64 rhf;
-	} u;
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
-			     size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
-	.owner = THIS_MODULE,
-	.write = diagpkt_write,
-	.llseek = noop_llseek,
-};
-
-/*
- * This is used for communication with user space for snoop extended IOCTLs
- */
-struct hfi1_link_info {
-	__be64 node_guid;
-	u8 port_mode;
-	u8 port_state;
-	u16 link_speed_active;
-	u16 link_width_active;
-	u16 vl15_init;
-	u8 port_number;
-	/*
-	 * Add padding to make this a full IB SMP payload. Note: changing the
-	 * size of this structure will make the IOCTLs created with _IOWR
-	 * change.
-	 * Be sure to run tests on all IOCTLs when making changes to this
-	 * structure.
-	 */
-	u8 res[47];
-};
-
-/*
- * This starts our ioctl sequence numbers *way* off from the ones
- * defined in ib_core.
- */
-#define SNOOP_CAPTURE_VERSION 0x1
-
-#define IB_IOCTL_MAGIC          0x1b /* See Documentation/ioctl-number.txt */
-#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
-#define HFI1_SNOOP_IOC_BASE_SEQ 0x80
-
-#define HFI1_SNOOP_IOCGETLINKSTATE \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
-#define HFI1_SNOOP_IOCSETLINKSTATE \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
-#define HFI1_SNOOP_IOCCLEARQUEUE \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
-#define HFI1_SNOOP_IOCCLEARFILTER \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
-#define HFI1_SNOOP_IOCSETFILTER \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
-#define HFI1_SNOOP_IOCGETVERSION \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
-#define HFI1_SNOOP_IOCSET_OPTS \
-	_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
-
-/*
- * These offsets +6/+7 could change, but these are already known and used
- * IOCTL numbers so don't change them without a good reason.
- */
-#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
-	_IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
-		struct hfi1_link_info)
-#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
-	_IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
-		struct hfi1_link_info)
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp);
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
-			       size_t pkt_len, loff_t *off);
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
-				size_t count, loff_t *off);
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-static unsigned int hfi1_snoop_poll(struct file *fp,
-				    struct poll_table_struct *wait);
-static int hfi1_snoop_release(struct inode *in, struct file *fp);
-
-struct hfi1_packet_filter_command {
-	int opcode;
-	int length;
-	void *value_ptr;
-};
-
-/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
-#define HFI1_SNOOP_INGRESS 0x1
-#define HFI1_SNOOP_EGRESS  0x2
-
-enum hfi1_packet_filter_opcodes {
-	FILTER_BY_LID,
-	FILTER_BY_DLID,
-	FILTER_BY_MAD_MGMT_CLASS,
-	FILTER_BY_QP_NUMBER,
-	FILTER_BY_PKT_TYPE,
-	FILTER_BY_SERVICE_LEVEL,
-	FILTER_BY_PKEY,
-	FILTER_BY_DIRECTION,
-};
-
-static const struct file_operations snoop_file_ops = {
-	.owner = THIS_MODULE,
-	.open = hfi1_snoop_open,
-	.read = hfi1_snoop_read,
-	.unlocked_ioctl = hfi1_ioctl,
-	.poll = hfi1_snoop_poll,
-	.write = hfi1_snoop_write,
-	.release = hfi1_snoop_release
-};
-
-struct hfi1_filter_array {
-	int (*filter)(void *, void *, void *);
-};
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
-				      void *value);
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
-				     void *value);
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
-					void *value);
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
-
-static const struct hfi1_filter_array hfi1_filters[] = {
-	{ hfi1_filter_lid },
-	{ hfi1_filter_dlid },
-	{ hfi1_filter_mad_mgmt_class },
-	{ hfi1_filter_qp_number },
-	{ hfi1_filter_ibpacket_type },
-	{ hfi1_filter_ib_service_level },
-	{ hfi1_filter_ib_pkey },
-	{ hfi1_filter_direction },
-};
-
-#define HFI1_MAX_FILTERS	ARRAY_SIZE(hfi1_filters)
-#define HFI1_DIAG_MINOR_BASE	129
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
-
-int hfi1_diag_add(struct hfi1_devdata *dd)
-{
-	char name[16];
-	int ret = 0;
-
-	snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
-		 dd->unit);
-	/*
-	 * Do this for each device as opposed to the normal diagpkt
-	 * interface which is one per host
-	 */
-	ret = hfi1_snoop_add(dd, name);
-	if (ret)
-		dd_dev_err(dd, "Unable to init snoop/capture device");
-
-	snprintf(name, sizeof(name), "%s_diagpkt", class_name());
-	if (atomic_inc_return(&diagpkt_count) == 1) {
-		ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
-				     &diagpkt_file_ops, &diagpkt_cdev,
-				     &diagpkt_device, false);
-	}
-
-	return ret;
-}
-
-/* this must be called w/ dd->snoop_in_lock held */
-static void drain_snoop_list(struct list_head *queue)
-{
-	struct list_head *pos, *q;
-	struct snoop_packet *packet;
-
-	list_for_each_safe(pos, q, queue) {
-		packet = list_entry(pos, struct snoop_packet, list);
-		list_del(pos);
-		kfree(packet);
-	}
-}
-
-static void hfi1_snoop_remove(struct hfi1_devdata *dd)
-{
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-	drain_snoop_list(&dd->hfi1_snoop.queue);
-	hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
-	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-}
-
-void hfi1_diag_remove(struct hfi1_devdata *dd)
-{
-	hfi1_snoop_remove(dd);
-	if (atomic_dec_and_test(&diagpkt_count))
-		hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
-	hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-}
-
-/*
- * Allocated structure shared between the credit return mechanism and
- * diagpkt_send().
- */
-struct diagpkt_wait {
-	struct completion credits_returned;
-	int code;
-	atomic_t count;
-};
-
-/*
- * When each side is finished with the structure, it calls this.
- * The last user frees the structure.
- */
-static void put_diagpkt_wait(struct diagpkt_wait *wait)
-{
-	if (atomic_dec_and_test(&wait->count))
-		kfree(wait);
-}
-
-/*
- * Callback from the credit return code.  Set the completion, which
- * will let diagpkt_send() continue.
- */
-static void diagpkt_complete(void *arg, int code)
-{
-	struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
-
-	wait->code = code;
-	complete(&wait->credits_returned);
-	put_diagpkt_wait(wait);	/* finished with the structure */
-}
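The diagpkt_wait handling above follows a small "last user frees" pattern: the structure starts with a count of two, the credit-return callback and diagpkt_send() each drop one reference, and whichever side finishes last frees it. A hedged user-space sketch of the same idea (C11 atomics, made-up names; not the driver's code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wait_obj {
	atomic_int count;	/* one reference per side */
	int code;		/* result filled in by the callback side */
};

static void put_wait(struct wait_obj *w)
{
	/* whoever drops the last reference frees the structure */
	if (atomic_fetch_sub(&w->count, 1) == 1)
		free(w);
}

int main(void)
{
	struct wait_obj *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	atomic_init(&w->count, 2);	/* sender + callback */
	w->code = 0;

	/* callback side: record the result, then drop its reference */
	w->code = 42;
	put_wait(w);

	/* sender side: read the result, then drop its reference (frees) */
	printf("code = %d\n", w->code);
	put_wait(w);
	return 0;
}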
-
-/**
- * diagpkt_send - send a packet
- * @dp: diag packet descriptor
- */
-static ssize_t diagpkt_send(struct diag_pkt *dp)
-{
-	struct hfi1_devdata *dd;
-	struct send_context *sc;
-	struct pio_buf *pbuf;
-	u32 *tmpbuf = NULL;
-	ssize_t ret = 0;
-	u32 pkt_len, total_len;
-	pio_release_cb credit_cb = NULL;
-	void *credit_arg = NULL;
-	struct diagpkt_wait *wait = NULL;
-	int trycount = 0;
-
-	dd = hfi1_lookup(dp->unit);
-	if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
-		ret = -ENODEV;
-		goto bail;
-	}
-	if (!(dd->flags & HFI1_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	if (dp->version != _DIAG_PKT_VERS) {
-		dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
-			   dp->version);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* send count must be an exact number of dwords */
-	if (dp->len & 3) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* there is only port 1 */
-	if (dp->port != 1) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* need a valid context */
-	if (dp->sw_index >= dd->num_send_contexts) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	/* can only use kernel contexts */
-	if (dd->send_contexts[dp->sw_index].type != SC_KERNEL &&
-	    dd->send_contexts[dp->sw_index].type != SC_VL15) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	/* must be allocated */
-	sc = dd->send_contexts[dp->sw_index].sc;
-	if (!sc) {
-		ret = -EINVAL;
-		goto bail;
-	}
-	/* must be enabled */
-	if (!(sc->flags & SCF_ENABLED)) {
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* allocate a buffer and copy the data in */
-	tmpbuf = vmalloc(dp->len);
-	if (!tmpbuf) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	if (copy_from_user(tmpbuf,
-			   (const void __user *)(unsigned long)dp->data,
-			   dp->len)) {
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	/*
-	 * pkt_len is how much data we have to write; it includes header and data.
-	 * total_len is the length of the packet in dwords plus the PBC, and
-	 * should not include the CRC.
-	 */
-	pkt_len = dp->len >> 2;
-	total_len = pkt_len + 2; /* PBC + packet */
-
-	/* if 0, fill in a default */
-	if (dp->pbc == 0) {
-		struct hfi1_pportdata *ppd = dd->pport;
-
-		hfi1_cdbg(PKT, "Generating PBC");
-		dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
-	} else {
-		hfi1_cdbg(PKT, "Using passed in PBC");
-	}
-
-	hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);
-
-	/*
-	 * The caller wants to wait until the packet is sent and to
-	 * check for errors.  The best we can do is wait until
-	 * the buffer credits are returned and check if any packet
-	 * error has occurred.  If there are any late errors, this
-	 * could miss it.  If there are other senders who generate
-	 * an error, this may find it.  However, in general, it
-	 * should catch most.
-	 */
-	if (dp->flags & F_DIAGPKT_WAIT) {
-		/* always force a credit return */
-		dp->pbc |= PBC_CREDIT_RETURN;
-		/* turn on credit return interrupts */
-		sc_add_credit_return_intr(sc);
-		wait = kmalloc(sizeof(*wait), GFP_KERNEL);
-		if (!wait) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-		init_completion(&wait->credits_returned);
-		atomic_set(&wait->count, 2);
-		wait->code = PRC_OK;
-
-		credit_cb = diagpkt_complete;
-		credit_arg = wait;
-	}
-
-retry:
-	pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
-	if (!pbuf) {
-		if (trycount == 0) {
-			/* force a credit return and try again */
-			sc_return_credits(sc);
-			trycount = 1;
-			goto retry;
-		}
-		/*
-		 * No send buffer means no credit callback.  Undo
-		 * the wait set-up that was done above.  We free wait
-		 * because the callback will never be called.
-		 */
-		if (dp->flags & F_DIAGPKT_WAIT) {
-			sc_del_credit_return_intr(sc);
-			kfree(wait);
-			wait = NULL;
-		}
-		ret = -ENOSPC;
-		goto bail;
-	}
-
-	pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
-	/* no flush needed as the HW knows the packet size */
-
-	ret = sizeof(*dp);
-
-	if (dp->flags & F_DIAGPKT_WAIT) {
-		/* wait for credit return */
-		ret = wait_for_completion_interruptible(
-						&wait->credits_returned);
-		/*
-		 * If the wait returns an error, the wait was interrupted,
-		 * e.g. with a ^C in the user program.  The callback is
-		 * still pending.  This is OK as the wait structure is
-		 * kmalloc'ed and the structure will free itself when
-		 * all users are done with it.
-		 *
-		 * A context disable occurs on a send context restart, so
-		 * include that in the list of errors below to check for.
-		 * NOTE: PRC_FILL_ERR is at best informational and cannot
-		 * be depended on.
-		 */
-		if (!ret && (((wait->code & PRC_STATUS_ERR) ||
-			      (wait->code & PRC_FILL_ERR) ||
-			      (wait->code & PRC_SC_DISABLE))))
-			ret = -EIO;
-
-		put_diagpkt_wait(wait);	/* finished with the structure */
-		sc_del_credit_return_intr(sc);
-	}
-
-bail:
-	vfree(tmpbuf);
-	return ret;
-}
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
-			     size_t count, loff_t *off)
-{
-	struct hfi1_devdata *dd;
-	struct send_context *sc;
-	u8 vl;
-
-	struct diag_pkt dp;
-
-	if (count != sizeof(dp))
-		return -EINVAL;
-
-	if (copy_from_user(&dp, data, sizeof(dp)))
-		return -EFAULT;
-
-	/*
-	 * The Send Context is derived from the PbcVL value
-	 * if the PBC is populated.
-	 */
-	if (dp.pbc) {
-		dd = hfi1_lookup(dp.unit);
-		if (!dd)
-			return -ENODEV;
-		vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
-		sc = dd->vld[vl].sc;
-		if (sc) {
-			dp.sw_index = sc->sw_index;
-			hfi1_cdbg(
-			       PKT,
-			       "Packet sent over VL %d via Send Context %u(%u)",
-			       vl, sc->sw_index, sc->hw_context);
-		}
-	}
-
-	return diagpkt_send(&dp);
-}
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
-{
-	int ret = 0;
-
-	dd->hfi1_snoop.mode_flag = 0;
-	spin_lock_init(&dd->hfi1_snoop.snoop_lock);
-	INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
-	init_waitqueue_head(&dd->hfi1_snoop.waitq);
-
-	ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
-			     &snoop_file_ops,
-			     &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
-			     false);
-
-	if (ret) {
-		dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
-		hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
-				  &dd->hfi1_snoop.class_dev);
-	}
-
-	return ret;
-}
-
-static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
-{
-	int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
-	struct hfi1_devdata *dd;
-
-	dd = hfi1_lookup(unit);
-	return dd;
-}
-
-/* clear or restore send context integrity checks */
-static void adjust_integrity_checks(struct hfi1_devdata *dd)
-{
-	struct send_context *sc;
-	unsigned long sc_flags;
-	int i;
-
-	spin_lock_irqsave(&dd->sc_lock, sc_flags);
-	for (i = 0; i < dd->num_send_contexts; i++) {
-		int enable;
-
-		sc = dd->send_contexts[i].sc;
-
-		if (!sc)
-			continue;	/* not allocated */
-
-		enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
-			 dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;
-
-		set_pio_integrity(sc);
-
-		if (enable) /* take HFI_CAP_* flags into account */
-			hfi1_init_ctxt(sc);
-	}
-	spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
-}
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp)
-{
-	int ret;
-	int mode_flag = 0;
-	unsigned long flags = 0;
-	struct hfi1_devdata *dd;
-	struct list_head *queue;
-
-	mutex_lock(&hfi1_mutex);
-
-	dd = hfi1_dd_from_sc_inode(in);
-	if (!dd) {
-		ret = -ENODEV;
-		goto bail;
-	}
-
-	/*
-	 * File mode determines snoop or capture. Some existing user
-	 * applications expect the capture device to be able to be opened RDWR
-	 * because they expect a dedicated capture device. For this reason we
-	 * support a module param to force capture mode even if the file open
-	 * mode matches snoop.
-	 */
-	if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
-		snoop_dbg("Capture Enabled");
-		mode_flag = HFI1_PORT_CAPTURE_MODE;
-	} else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
-		snoop_dbg("Snoop Enabled");
-		mode_flag = HFI1_PORT_SNOOP_MODE;
-	} else {
-		snoop_dbg("Invalid");
-		ret =  -EINVAL;
-		goto bail;
-	}
-	queue = &dd->hfi1_snoop.queue;
-
-	/*
-	 * We are not supporting snoop and capture at the same time.
-	 */
-	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-	if (dd->hfi1_snoop.mode_flag) {
-		ret = -EBUSY;
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-		goto bail;
-	}
-
-	dd->hfi1_snoop.mode_flag = mode_flag;
-	drain_snoop_list(queue);
-
-	dd->hfi1_snoop.filter_callback = NULL;
-	dd->hfi1_snoop.filter_value = NULL;
-
-	/*
-	 * Send side packet integrity checks are not helpful when snooping, so
-	 * disable them now and re-enable them when we stop snooping.
-	 */
-	if (mode_flag == HFI1_PORT_SNOOP_MODE) {
-		/* clear after snoop mode is on */
-		adjust_integrity_checks(dd); /* clear */
-
-		/*
-		 * We also do not want to be doing the DLID LMC check for
-		 * ingressed packets.
-		 */
-		dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
-		write_csr(dd, DCC_CFG_PORT_CONFIG1,
-			  (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
-	}
-
-	/*
-	 * As soon as we set these function pointers the recv and send handlers
-	 * are active. This is a race condition, so we must make sure to drain
-	 * the queue and init the filter values above. Technically we should add
-	 * locking here, but the worst that can happen is that on receive a
-	 * packet gets allocated and blocks on the snoop_lock before it is added
-	 * to the queue. The same goes for send.
-	 */
-	dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
-	dd->process_pio_send = snoop_send_pio_handler;
-	dd->process_dma_send = snoop_send_pio_handler;
-	dd->pio_inline_send = snoop_inline_pio_send;
-
-	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-	ret = 0;
-
-bail:
-	mutex_unlock(&hfi1_mutex);
-
-	return ret;
-}
-
-static int hfi1_snoop_release(struct inode *in, struct file *fp)
-{
-	unsigned long flags = 0;
-	struct hfi1_devdata *dd;
-	int mode_flag;
-
-	dd = hfi1_dd_from_sc_inode(in);
-	if (!dd)
-		return -ENODEV;
-
-	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
-	/* clear the snoop mode before re-adjusting send context CSRs */
-	mode_flag = dd->hfi1_snoop.mode_flag;
-	dd->hfi1_snoop.mode_flag = 0;
-
-	/*
-	 * Drain the queue and clear the filters; we are done with them. Don't
-	 * forget to restore the packet integrity checks.
-	 */
-	drain_snoop_list(&dd->hfi1_snoop.queue);
-	if (mode_flag == HFI1_PORT_SNOOP_MODE) {
-		/* restore after snoop mode is clear */
-		adjust_integrity_checks(dd); /* restore */
-
-		/*
-		 * Also restore the DCC_CFG_PORT_CONFIG1 register so DLID
-		 * checking on incoming packets is re-enabled, using the value
-		 * saved when the snoop device was opened.
-		 */
-		write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
-	}
-
-	dd->hfi1_snoop.filter_callback = NULL;
-	kfree(dd->hfi1_snoop.filter_value);
-	dd->hfi1_snoop.filter_value = NULL;
-
-	/*
-	 * User is done snooping and capturing, return control to the normal
-	 * handler. Re-enable SDMA handling.
-	 */
-	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
-	dd->process_pio_send = hfi1_verbs_send_pio;
-	dd->process_dma_send = hfi1_verbs_send_dma;
-	dd->pio_inline_send = pio_copy;
-
-	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
-	snoop_dbg("snoop/capture device released");
-
-	return 0;
-}
-
-static unsigned int hfi1_snoop_poll(struct file *fp,
-				    struct poll_table_struct *wait)
-{
-	int ret = 0;
-	unsigned long flags = 0;
-
-	struct hfi1_devdata *dd;
-
-	dd = hfi1_dd_from_sc_inode(fp->f_inode);
-	if (!dd)
-		return -ENODEV;
-
-	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
-	poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
-	if (!list_empty(&dd->hfi1_snoop.queue))
-		ret |= POLLIN | POLLRDNORM;
-
-	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-	return ret;
-}
-
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
-				size_t count, loff_t *off)
-{
-	struct diag_pkt dpkt;
-	struct hfi1_devdata *dd;
-	size_t ret;
-	u8 byte_two, sl, sc5, sc4, vl, byte_one;
-	struct send_context *sc;
-	u32 len;
-	u64 pbc;
-	struct hfi1_ibport *ibp;
-	struct hfi1_pportdata *ppd;
-
-	dd = hfi1_dd_from_sc_inode(fp->f_inode);
-	if (!dd)
-		return -ENODEV;
-
-	ppd = dd->pport;
-	snoop_dbg("received %lu bytes from user", count);
-
-	memset(&dpkt, 0, sizeof(struct diag_pkt));
-	dpkt.version = _DIAG_PKT_VERS;
-	dpkt.unit = dd->unit;
-	dpkt.port = 1;
-
-	if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
-		/*
-		 * We need to generate the PBC and not let diagpkt_send() do it;
-		 * to do this we need the VL and the length in dwords.
-		 * The VL can be determined by using the SL to look up the SC,
-		 * and the SC can then be converted into a VL. The exception is
-		 * packets which come from an SMI queue pair. Since we can't
-		 * detect anything about the QP here we have to rely on the SC:
-		 * if it's 0xF then we assume it's SMI and do not look at the SL.
-		 */
-		if (copy_from_user(&byte_one, data, 1))
-			return -EINVAL;
-
-		if (copy_from_user(&byte_two, data + 1, 1))
-			return -EINVAL;
-
-		sc4 = (byte_one >> 4) & 0xf;
-		if (sc4 == 0xF) {
-			snoop_dbg("Detected VL15 packet ignoring SL in packet");
-			vl = sc4;
-		} else {
-			sl = (byte_two >> 4) & 0xf;
-			ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
-			sc5 = ibp->sl_to_sc[sl];
-			vl = sc_to_vlt(dd, sc5);
-			if (vl != sc4) {
-				snoop_dbg("VL %d does not match SC %d of packet",
-					  vl, sc4);
-				return -EINVAL;
-			}
-		}
-
-		sc = dd->vld[vl].sc; /* Look up the context based on VL */
-		if (sc) {
-			dpkt.sw_index = sc->sw_index;
-			snoop_dbg("Sending on context %u(%u)", sc->sw_index,
-				  sc->hw_context);
-		} else {
-			snoop_dbg("Could not find context for vl %d", vl);
-			return -EINVAL;
-		}
-
-		len = (count >> 2) + 2; /* Add in PBC */
-		pbc = create_pbc(ppd, 0, 0, vl, len);
-	} else {
-		if (copy_from_user(&pbc, data, sizeof(pbc)))
-			return -EINVAL;
-		vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
-		sc = dd->vld[vl].sc; /* Look up the context based on VL */
-		if (sc) {
-			dpkt.sw_index = sc->sw_index;
-		} else {
-			snoop_dbg("Could not find context for vl %d", vl);
-			return -EINVAL;
-		}
-		data += sizeof(pbc);
-		count -= sizeof(pbc);
-	}
-	dpkt.len = count;
-	dpkt.data = (unsigned long)data;
-
-	snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
-		  (pbc >> 12) & 0xf,
-		  (pbc & 0xfff));
-
-	dpkt.pbc = pbc;
-	ret = diagpkt_send(&dpkt);
-	/*
-	 * diagpkt_send only returns number of bytes in the diagpkt so patch
-	 * that up here before returning.
-	 */
-	if (ret == sizeof(dpkt))
-		return count;
-
-	return ret;
-}
-
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
-			       size_t pkt_len, loff_t *off)
-{
-	ssize_t ret = 0;
-	unsigned long flags = 0;
-	struct snoop_packet *packet = NULL;
-	struct hfi1_devdata *dd;
-
-	dd = hfi1_dd_from_sc_inode(fp->f_inode);
-	if (!dd)
-		return -ENODEV;
-
-	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
-	while (list_empty(&dd->hfi1_snoop.queue)) {
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
-		if (fp->f_flags & O_NONBLOCK)
-			return -EAGAIN;
-
-		if (wait_event_interruptible(
-				dd->hfi1_snoop.waitq,
-				!list_empty(&dd->hfi1_snoop.queue)))
-			return -EINTR;
-
-		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-	}
-
-	if (!list_empty(&dd->hfi1_snoop.queue)) {
-		packet = list_entry(dd->hfi1_snoop.queue.next,
-				    struct snoop_packet, list);
-		list_del(&packet->list);
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-		if (pkt_len >= packet->total_len) {
-			if (copy_to_user(data, packet->data,
-					 packet->total_len))
-				ret = -EFAULT;
-			else
-				ret = packet->total_len;
-		} else {
-			ret = -EINVAL;
-		}
-
-		kfree(packet);
-	} else {
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-	}
-
-	return ret;
-}
-
-/**
- * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others
- * @ppd : ptr to hfi1 port data
- * @value : options from user space
- *
- * Assumes the rest of the CM credit registers are zero from a
- * previous global or credit reset.
- * Leave the shared count at zero for both global and all VLs;
- * in snoop mode we ideally don't use shared credits.
- * Reserve 8.5 KB for VL15.
- * If the total credits are less than 8.5 KB, return an error.
- * Divide the rest of the credits across VL0 to VL7 and if any of
- * these VLs ends up with fewer than 34 credits (at least 2048 + 128 bytes),
- * return an error.
- * The credit registers will be reset to zero on link negotiation or link up
- * so this function should be activated from user space only if the port has
- * gone past link negotiation and link up.
- *
- * Return -- 0 if successful else error condition
- *
- */
-static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
-					   int value)
-{
-#define  OPA_MIN_PER_VL_CREDITS  34  /* 2048 + 128 bytes */
-	struct buffer_control t;
-	int i;
-	struct hfi1_devdata *dd = ppd->dd;
-	u16  total_credits = (value >> 16) & 0xffff;
-	u16  vl15_credits = dd->vl15_init / 2;
-	u16  per_vl_credits;
-	__be16 be_per_vl_credits;
-
-	if (!(ppd->host_link_state & HLS_UP))
-		goto err_exit;
-	if (total_credits  <  vl15_credits)
-		goto err_exit;
-
-	per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
-
-	if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
-		goto err_exit;
-
-	memset(&t, 0, sizeof(t));
-	be_per_vl_credits = cpu_to_be16(per_vl_credits);
-
-	for (i = 0; i < TXE_NUM_DATA_VL; i++)
-		t.vl[i].dedicated = be_per_vl_credits;
-
-	t.vl[15].dedicated  = cpu_to_be16(vl15_credits);
-	return set_buffer_control(ppd, &t);
-
-err_exit:
-	snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
-		  ppd->host_link_state, total_credits, vl15_credits);
-
-	return -EINVAL;
-}
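To make the credit arithmetic documented above concrete, here is a hedged, stand-alone example with invented numbers (1000 total credits and a vl15_init of 128); it mirrors the checks in the function but is not driver code:

#include <stdio.h>

#define TXE_NUM_DATA_VL		8	/* assumption: data VLs are VL0..VL7 */
#define OPA_MIN_PER_VL_CREDITS	34	/* 2048 + 128 bytes */

int main(void)
{
	unsigned int total_credits = 1000;	/* hypothetical value from user space */
	unsigned int vl15_credits = 128 / 2;	/* dd->vl15_init / 2 in the driver */
	unsigned int per_vl;

	if (total_credits < vl15_credits)
		return 1;
	per_vl = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
	if (per_vl < OPA_MIN_PER_VL_CREDITS)
		return 1;

	/* 1000 total -> 64 for VL15, (1000 - 64) / 8 = 117 per data VL */
	printf("VL15: %u, per data VL: %u\n", vl15_credits, per_vl);
	return 0;
}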
-
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
-	struct hfi1_devdata *dd;
-	void *filter_value = NULL;
-	long ret = 0;
-	int value = 0;
-	u8 phys_state = 0;
-	u8 link_state = 0;
-	u16 dev_state = 0;
-	unsigned long flags = 0;
-	unsigned long *argp = NULL;
-	struct hfi1_packet_filter_command filter_cmd = {0};
-	int mode_flag = 0;
-	struct hfi1_pportdata *ppd = NULL;
-	unsigned int index;
-	struct hfi1_link_info link_info;
-	int read_cmd, write_cmd, read_ok, write_ok;
-
-	dd = hfi1_dd_from_sc_inode(fp->f_inode);
-	if (!dd)
-		return -ENODEV;
-
-	mode_flag = dd->hfi1_snoop.mode_flag;
-	read_cmd = _IOC_DIR(cmd) & _IOC_READ;
-	write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
-	write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
-	read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
-
-	if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
-		return -EFAULT;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
-	    (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
-	    (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
-	    (cmd != HFI1_SNOOP_IOCSETFILTER))
-		/* Capture devices are allowed only 3 operations:
-		 * 1. Clear capture queue
-		 * 2. Clear capture filter
-		 * 3. Set capture filter
-		 * Others are invalid.
-		 */
-		return -EINVAL;
-
-	switch (cmd) {
-	case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
-		memset(&link_info, 0, sizeof(link_info));
-
-		if (copy_from_user(&link_info,
-				   (struct hfi1_link_info __user *)arg,
-				   sizeof(link_info)))
-			return -EFAULT;
-
-		value = link_info.port_state;
-		index = link_info.port_number;
-		if (index > dd->num_pports - 1)
-			return -EINVAL;
-
-		ppd = &dd->pport[index];
-		if (!ppd)
-			return -EINVAL;
-
-		/* What we want to transition to */
-		phys_state = (value >> 4) & 0xF;
-		link_state = value & 0xF;
-		snoop_dbg("Setting link state 0x%x", value);
-
-		switch (link_state) {
-		case IB_PORT_NOP:
-			if (phys_state == 0)
-				break;
-				/* fall through */
-		case IB_PORT_DOWN:
-			switch (phys_state) {
-			case 0:
-				dev_state = HLS_DN_DOWNDEF;
-				break;
-			case 2:
-				dev_state = HLS_DN_POLL;
-				break;
-			case 3:
-				dev_state = HLS_DN_DISABLE;
-				break;
-			default:
-				return -EINVAL;
-			}
-			ret = set_link_state(ppd, dev_state);
-			break;
-		case IB_PORT_ARMED:
-			ret = set_link_state(ppd, HLS_UP_ARMED);
-			if (!ret)
-				send_idle_sma(dd, SMA_IDLE_ARM);
-			break;
-		case IB_PORT_ACTIVE:
-			ret = set_link_state(ppd, HLS_UP_ACTIVE);
-			if (!ret)
-				send_idle_sma(dd, SMA_IDLE_ACTIVE);
-			break;
-		default:
-			return -EINVAL;
-		}
-
-		if (ret)
-			break;
-		/* fall through */
-	case HFI1_SNOOP_IOCGETLINKSTATE:
-	case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
-		if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
-			memset(&link_info, 0, sizeof(link_info));
-			if (copy_from_user(&link_info,
-					   (struct hfi1_link_info __user *)arg,
-					   sizeof(link_info)))
-				return -EFAULT;
-			index = link_info.port_number;
-		} else {
-			ret = __get_user(index, (int __user *)arg);
-			if (ret !=  0)
-				break;
-		}
-
-		if (index > dd->num_pports - 1)
-			return -EINVAL;
-
-		ppd = &dd->pport[index];
-		if (!ppd)
-			return -EINVAL;
-
-		value = hfi1_ibphys_portstate(ppd);
-		value <<= 4;
-		value |= driver_lstate(ppd);
-
-		snoop_dbg("Link port | Link State: %d", value);
-
-		if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
-		    (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
-			link_info.port_state = value;
-			link_info.node_guid = cpu_to_be64(ppd->guid);
-			link_info.link_speed_active =
-						ppd->link_speed_active;
-			link_info.link_width_active =
-						ppd->link_width_active;
-			if (copy_to_user((struct hfi1_link_info __user *)arg,
-					 &link_info, sizeof(link_info)))
-				return -EFAULT;
-		} else {
-			ret = __put_user(value, (int __user *)arg);
-		}
-		break;
-
-	case HFI1_SNOOP_IOCCLEARQUEUE:
-		snoop_dbg("Clearing snoop queue");
-		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-		drain_snoop_list(&dd->hfi1_snoop.queue);
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-		break;
-
-	case HFI1_SNOOP_IOCCLEARFILTER:
-		snoop_dbg("Clearing filter");
-		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-		if (dd->hfi1_snoop.filter_callback) {
-			/* Drain packets first */
-			drain_snoop_list(&dd->hfi1_snoop.queue);
-			dd->hfi1_snoop.filter_callback = NULL;
-		}
-		kfree(dd->hfi1_snoop.filter_value);
-		dd->hfi1_snoop.filter_value = NULL;
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-		break;
-
-	case HFI1_SNOOP_IOCSETFILTER:
-		snoop_dbg("Setting filter");
-		/* just copy command structure */
-		argp = (unsigned long *)arg;
-		if (copy_from_user(&filter_cmd, (void __user *)argp,
-				   sizeof(filter_cmd)))
-			return -EFAULT;
-
-		if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
-			pr_alert("Invalid opcode in request\n");
-			return -EINVAL;
-		}
-
-		snoop_dbg("Opcode %d Len %d Ptr %p",
-			  filter_cmd.opcode, filter_cmd.length,
-			  filter_cmd.value_ptr);
-
-		filter_value = kcalloc(filter_cmd.length, sizeof(u8),
-				       GFP_KERNEL);
-		if (!filter_value)
-			return -ENOMEM;
-
-		/* copy remaining data from userspace */
-		if (copy_from_user((u8 *)filter_value,
-				   (void __user *)filter_cmd.value_ptr,
-				   filter_cmd.length)) {
-			kfree(filter_value);
-			return -EFAULT;
-		}
-		/* Drain packets first */
-		spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-		drain_snoop_list(&dd->hfi1_snoop.queue);
-		dd->hfi1_snoop.filter_callback =
-			hfi1_filters[filter_cmd.opcode].filter;
-		/* just in case we see back to back sets */
-		kfree(dd->hfi1_snoop.filter_value);
-		dd->hfi1_snoop.filter_value = filter_value;
-		spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-		break;
-	case HFI1_SNOOP_IOCGETVERSION:
-		value = SNOOP_CAPTURE_VERSION;
-		snoop_dbg("Getting version: %d", value);
-		ret = __put_user(value, (int __user *)arg);
-		break;
-	case HFI1_SNOOP_IOCSET_OPTS:
-		snoop_flags = 0;
-		ret = __get_user(value, (int __user *)arg);
-		if (ret != 0)
-			break;
-
-		snoop_dbg("Setting snoop option %d", value);
-		if (value & SNOOP_DROP_SEND)
-			snoop_flags |= SNOOP_DROP_SEND;
-		if (value & SNOOP_USE_METADATA)
-			snoop_flags |= SNOOP_USE_METADATA;
-		if (value & (SNOOP_SET_VL0TOVL15)) {
-			ppd = &dd->pport[0];  /* first port will do */
-			ret = hfi1_assign_snoop_link_credits(ppd, value);
-		}
-		break;
-	default:
-		return -ENOTTY;
-	}
-
-	return ret;
-}
-
-static void snoop_list_add_tail(struct snoop_packet *packet,
-				struct hfi1_devdata *dd)
-{
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-	if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
-		   (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
-		list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
-		snoop_dbg("Added packet to list");
-	}
-
-	/*
-	 * Technically the snoop device could have been closed while waiting
-	 * on the above lock and be gone now. The snoop mode_flag will
-	 * prevent us from adding the packet to the queue though.
-	 */
-
-	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-	wake_up_interruptible(&dd->hfi1_snoop.waitq);
-}
-
-static inline int hfi1_filter_check(void *val, const char *msg)
-{
-	if (!val) {
-		snoop_dbg("Error invalid %s value for filter", msg);
-		return HFI1_FILTER_ERR;
-	}
-	return 0;
-}
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
-{
-	struct hfi1_ib_header *hdr;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
-		return HFI1_FILTER_HIT; /* matched */
-
-	return HFI1_FILTER_MISS; /* Not matched */
-}
-
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
-{
-	struct hfi1_ib_header *hdr;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
-		return HFI1_FILTER_HIT;
-
-	return HFI1_FILTER_MISS;
-}
-
-/* Not valid for outgoing packets; the send handler passes NULL for data */
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
-				      void *value)
-{
-	struct hfi1_ib_header *hdr;
-	struct hfi1_other_headers *ohdr = NULL;
-	struct ib_smp *smp = NULL;
-	u32 qpn = 0;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(packet_data, "packet_data");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	/* Check for GRH */
-	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
-		ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
-	else
-		ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
-
-	qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
-	if (qpn <= 1) {
-		smp = (struct ib_smp *)packet_data;
-		if (*((u8 *)value) == smp->mgmt_class)
-			return HFI1_FILTER_HIT;
-		else
-			return HFI1_FILTER_MISS;
-	}
-	return HFI1_FILTER_ERR;
-}
-
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
-{
-	struct hfi1_ib_header *hdr;
-	struct hfi1_other_headers *ohdr = NULL;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	/* Check for GRH */
-	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
-		ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
-	else
-		ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
-	if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
-		return HFI1_FILTER_HIT;
-
-	return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
-				     void *value)
-{
-	u32 lnh = 0;
-	u8 opcode = 0;
-	struct hfi1_ib_header *hdr;
-	struct hfi1_other_headers *ohdr = NULL;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
-
-	if (lnh == HFI1_LRH_BTH)
-		ohdr = &hdr->u.oth;
-	else if (lnh == HFI1_LRH_GRH)
-		ohdr = &hdr->u.l.oth;
-	else
-		return HFI1_FILTER_ERR;
-
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
-	if (*((u8 *)value) == ((opcode >> 5) & 0x7))
-		return HFI1_FILTER_HIT;
-
-	return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
-					void *value)
-{
-	struct hfi1_ib_header *hdr;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
-		return HFI1_FILTER_HIT;
-
-	return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
-{
-	u32 lnh = 0;
-	struct hfi1_ib_header *hdr;
-	struct hfi1_other_headers *ohdr = NULL;
-	int ret;
-
-	ret = hfi1_filter_check(ibhdr, "header");
-	if (ret)
-		return ret;
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	hdr = (struct hfi1_ib_header *)ibhdr;
-
-	lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
-	if (lnh == HFI1_LRH_BTH)
-		ohdr = &hdr->u.oth;
-	else if (lnh == HFI1_LRH_GRH)
-		ohdr = &hdr->u.l.oth;
-	else
-		return HFI1_FILTER_ERR;
-
-	/* The P_Key is a 16-bit entity, however the top-most bit indicates
-	 * the type of membership: 0 for limited and 1 for full.
-	 * Limited members cannot accept information from other
-	 * limited members, but communication is allowed between
-	 * every other combination of membership.
-	 * Hence we omit the top-most bit while filtering.
-	 */
-
-	if ((*(u16 *)value & 0x7FFF) ==
-		((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
-		return HFI1_FILTER_HIT;
-
-	return HFI1_FILTER_MISS;
-}
-
-/*
- * If packet_data is NULL then this is coming from one of the send functions.
- * Thus we know whether it is an ingress or an egress packet.
- */
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
-{
-	u8 user_dir = *(u8 *)value;
-	int ret;
-
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	if (packet_data) {
-		/* Incoming packet */
-		if (user_dir & HFI1_SNOOP_INGRESS)
-			return HFI1_FILTER_HIT;
-	} else {
-		/* Outgoing packet */
-		if (user_dir & HFI1_SNOOP_EGRESS)
-			return HFI1_FILTER_HIT;
-	}
-
-	return HFI1_FILTER_MISS;
-}
-
-/*
- * Allocate a snoop packet. This is the structure that is stored in the ring
- * buffer, not to be confused with an hfi1 packet type.
- */
-static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
-						  u32 data_len,
-						  u32 md_len)
-{
-	struct snoop_packet *packet;
-
-	packet = kzalloc(sizeof(*packet) + hdr_len + data_len
-			 + md_len,
-			 GFP_ATOMIC | __GFP_NOWARN);
-	if (likely(packet))
-		INIT_LIST_HEAD(&packet->list);
-
-	return packet;
-}
-
-/*
- * Instead of having snoop and capture code intermixed with the recv functions
- * (both the interrupt handler and hfi1_ib_rcv()), we hijack the call and land
- * in here for snoop/capture; if snoop/capture is not enabled the call goes
- * through as before. This gives us a single point to constrain all of the
- * snoop recv logic. There is nothing special that needs to happen for bypass
- * packets. This routine should not try to look into the packet; it just
- * copies it. There is no guarantee for filters when it comes to bypass packets
- * as there is no specific support. Bottom line: this routine does not even
- * know what a bypass packet is.
- */
-int snoop_recv_handler(struct hfi1_packet *packet)
-{
-	struct hfi1_pportdata *ppd = packet->rcd->ppd;
-	struct hfi1_ib_header *hdr = packet->hdr;
-	int header_size = packet->hlen;
-	void *data = packet->ebuf;
-	u32 tlen = packet->tlen;
-	struct snoop_packet *s_packet = NULL;
-	int ret;
-	int snoop_mode = 0;
-	u32 md_len = 0;
-	struct capture_md md;
-
-	snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
-		  data);
-
-	trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
-			    data);
-
-	if (!ppd->dd->hfi1_snoop.filter_callback) {
-		snoop_dbg("filter not set");
-		ret = HFI1_FILTER_HIT;
-	} else {
-		ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
-					ppd->dd->hfi1_snoop.filter_value);
-	}
-
-	switch (ret) {
-	case HFI1_FILTER_ERR:
-		snoop_dbg("Error in filter call");
-		break;
-	case HFI1_FILTER_MISS:
-		snoop_dbg("Filter Miss");
-		break;
-	case HFI1_FILTER_HIT:
-
-		if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
-			snoop_mode = 1;
-		if ((snoop_mode == 0) ||
-		    unlikely(snoop_flags & SNOOP_USE_METADATA))
-			md_len = sizeof(struct capture_md);
-
-		s_packet = allocate_snoop_packet(header_size,
-						 tlen - header_size,
-						 md_len);
-
-		if (unlikely(!s_packet)) {
-			dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
-			break;
-		}
-
-		if (md_len > 0) {
-			memset(&md, 0, sizeof(struct capture_md));
-			md.port = 1;
-			md.dir = PKT_DIR_INGRESS;
-			md.u.rhf = packet->rhf;
-			memcpy(s_packet->data, &md, md_len);
-		}
-
-		/* We should always have a header */
-		if (hdr) {
-			memcpy(s_packet->data + md_len, hdr, header_size);
-		} else {
-			dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
-			kfree(s_packet);
-			break;
-		}
-
-		/*
-		 * Packets with no data are possible. If there is no data, we
-		 * still need to take care of the last 4 bytes which are
-		 * normally included with data buffers and are included in
-		 * tlen.  Since we kzalloc the buffer we do not need to set any
-		 * values, but if we decide not to use kzalloc we should zero
-		 * them.
-		 */
-		if (data)
-			memcpy(s_packet->data + header_size + md_len, data,
-			       tlen - header_size);
-
-		s_packet->total_len = tlen + md_len;
-		snoop_list_add_tail(s_packet, ppd->dd);
-
-		/*
-		 * If we are snooping the packet, not capturing, then throw it
-		 * away after adding it to the list.
-		 */
-		snoop_dbg("Capturing packet");
-		if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
-			snoop_dbg("Throwing packet away");
-			/*
-			 * If we are dropping the packet we still may need to
-			 * handle the case where error flags are set, this is
-			 * normally done by the type specific handler but that
-			 * won't be called in this case.
-			 */
-			if (unlikely(rhf_err_flags(packet->rhf)))
-				handle_eflags(packet);
-
-			/* throw the packet on the floor */
-			return RHF_RCV_CONTINUE;
-		}
-		break;
-	default:
-		break;
-	}
-
-	/*
-	 * We do not care what type of packet came in here - just pass it off
-	 * to the normal handler.
-	 */
-	return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
-			(packet);
-}
-
-/*
- * Handle snooping and capturing packets when sdma is being used.
- */
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
-			   u64 pbc)
-{
-	pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
-	snoop_dbg("Unsupported Operation");
-	return hfi1_verbs_send_dma(qp, ps, 0);
-}
-
-/*
- * Handle snooping and capturing packets when pio is being used. Does not handle
- * bypass packets. The only way to send a bypass packet currently is to use the
- * diagpkt interface. When that interface is enabled, snoop/capture is not.
- */
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
-			   u64 pbc)
-{
-	u32 hdrwords = qp->s_hdrwords;
-	struct rvt_sge_state *ss = qp->s_cur_sge;
-	u32 len = qp->s_cur_size;
-	u32 dwords = (len + 3) >> 2;
-	u32 plen = hdrwords + dwords + 2; /* includes pbc */
-	struct hfi1_pportdata *ppd = ps->ppd;
-	struct snoop_packet *s_packet = NULL;
-	u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
-	u32 length = 0;
-	struct rvt_sge_state temp_ss;
-	void *data = NULL;
-	void *data_start = NULL;
-	int ret;
-	int snoop_mode = 0;
-	int md_len = 0;
-	struct capture_md md;
-	u32 vl;
-	u32 hdr_len = hdrwords << 2;
-	u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
-
-	md.u.pbc = 0;
-
-	snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
-		  hdrwords, len, plen, dwords, tlen);
-	if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
-		snoop_mode = 1;
-	if ((snoop_mode == 0) ||
-	    unlikely(snoop_flags & SNOOP_USE_METADATA))
-		md_len = sizeof(struct capture_md);
-
-	/* not using ss->total_len as arg 2 b/c that does not count CRC */
-	s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
-
-	if (unlikely(!s_packet)) {
-		dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
-		goto out;
-	}
-
-	s_packet->total_len = tlen + md_len;
-
-	if (md_len > 0) {
-		memset(&md, 0, sizeof(struct capture_md));
-		md.port = 1;
-		md.dir = PKT_DIR_EGRESS;
-		if (likely(pbc == 0)) {
-			vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
-			md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
-		} else {
-			md.u.pbc = 0;
-		}
-		memcpy(s_packet->data, &md, md_len);
-	} else {
-		md.u.pbc = pbc;
-	}
-
-	/* Copy header */
-	if (likely(hdr)) {
-		memcpy(s_packet->data + md_len, hdr, hdr_len);
-	} else {
-		dd_dev_err(ppd->dd,
-			   "Unable to copy header to snoop/capture packet\n");
-		kfree(s_packet);
-		goto out;
-	}
-
-	if (ss) {
-		data = s_packet->data + hdr_len + md_len;
-		data_start = data;
-
-		/*
-		 * Copy SGE State
-		 * The update_sge() function below will not modify the
-		 * individual SGEs in the array. It will make a copy each time
-		 * and operate on that. So we only need to copy this instance
-		 * and it won't impact PIO.
-		 */
-		temp_ss = *ss;
-		length = len;
-
-		snoop_dbg("Need to copy %d bytes", length);
-		while (length) {
-			void *addr = temp_ss.sge.vaddr;
-			u32 slen = temp_ss.sge.length;
-
-			if (slen > length) {
-				slen = length;
-				snoop_dbg("slen %d > len %d", slen, length);
-			}
-			snoop_dbg("copy %d to %p", slen, addr);
-			memcpy(data, addr, slen);
-			update_sge(&temp_ss, slen);
-			length -= slen;
-			data += slen;
-			snoop_dbg("data is now %p bytes left %d", data, length);
-		}
-		snoop_dbg("Completed SGE copy");
-	}
-
-	/*
-	 * Why do the filter check down here? Because the event tracing has its
- * own filtering and we need to have walked the SGE list.
-	 */
-	if (!ppd->dd->hfi1_snoop.filter_callback) {
-		snoop_dbg("filter not set\n");
-		ret = HFI1_FILTER_HIT;
-	} else {
-		ret = ppd->dd->hfi1_snoop.filter_callback(
-					&ps->s_txreq->phdr.hdr,
-					NULL,
-					ppd->dd->hfi1_snoop.filter_value);
-	}
-
-	switch (ret) {
-	case HFI1_FILTER_ERR:
-		snoop_dbg("Error in filter call");
-		/* fall through */
-	case HFI1_FILTER_MISS:
-		snoop_dbg("Filter Miss");
-		kfree(s_packet);
-		break;
-	case HFI1_FILTER_HIT:
-		snoop_dbg("Capturing packet");
-		snoop_list_add_tail(s_packet, ppd->dd);
-
-		if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
-			     (ppd->dd->hfi1_snoop.mode_flag &
-			      HFI1_PORT_SNOOP_MODE))) {
-			unsigned long flags;
-
-			snoop_dbg("Dropping packet");
-			if (qp->s_wqe) {
-				spin_lock_irqsave(&qp->s_lock, flags);
-				hfi1_send_complete(
-					qp,
-					qp->s_wqe,
-					IB_WC_SUCCESS);
-				spin_unlock_irqrestore(&qp->s_lock, flags);
-			} else if (qp->ibqp.qp_type == IB_QPT_RC) {
-				spin_lock_irqsave(&qp->s_lock, flags);
-				hfi1_rc_send_complete(qp,
-						      &ps->s_txreq->phdr.hdr);
-				spin_unlock_irqrestore(&qp->s_lock, flags);
-			}
-
-			/*
-			 * If snoop is dropping the packet we need to put the
-			 * txreq back because no one else will.
-			 */
-			hfi1_put_txreq(ps->s_txreq);
-			return 0;
-		}
-		break;
-	default:
-		kfree(s_packet);
-		break;
-	}
-out:
-	return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
-}
-
-/*
- * Callers of this must pass a hfi1_ib_header type for the from ptr. Currently
- * this can be used anywhere, but the intention is for inline ACKs for RC and
- * CCA packets. We don't restrict this usage though.
- */
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
-			   u64 pbc, const void *from, size_t count)
-{
-	int snoop_mode = 0;
-	int md_len = 0;
-	struct capture_md md;
-	struct snoop_packet *s_packet = NULL;
-
-	/*
-	 * count is in dwords so we need to convert to bytes.
-	 * We also need to account for CRC which would be tacked on by hardware.
-	 */
-	int packet_len = (count << 2) + 4;
-	int ret;
-
-	snoop_dbg("ACK OUT: len %d", packet_len);
-
-	if (!dd->hfi1_snoop.filter_callback) {
-		snoop_dbg("filter not set");
-		ret = HFI1_FILTER_HIT;
-	} else {
-		ret = dd->hfi1_snoop.filter_callback(
-				(struct hfi1_ib_header *)from,
-				NULL,
-				dd->hfi1_snoop.filter_value);
-	}
-
-	switch (ret) {
-	case HFI1_FILTER_ERR:
-		snoop_dbg("Error in filter call");
-		/* fall through */
-	case HFI1_FILTER_MISS:
-		snoop_dbg("Filter Miss");
-		break;
-	case HFI1_FILTER_HIT:
-		snoop_dbg("Capturing packet");
-		if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
-			snoop_mode = 1;
-		if ((snoop_mode == 0) ||
-		    unlikely(snoop_flags & SNOOP_USE_METADATA))
-			md_len = sizeof(struct capture_md);
-
-		s_packet = allocate_snoop_packet(packet_len, 0, md_len);
-
-		if (unlikely(!s_packet)) {
-			dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
-			goto inline_pio_out;
-		}
-
-		s_packet->total_len = packet_len + md_len;
-
-		/* Fill in the metadata for the packet */
-		if (md_len > 0) {
-			memset(&md, 0, sizeof(struct capture_md));
-			md.port = 1;
-			md.dir = PKT_DIR_EGRESS;
-			md.u.pbc = pbc;
-			memcpy(s_packet->data, &md, md_len);
-		}
-
-		/* Add the packet data which is a single buffer */
-		memcpy(s_packet->data + md_len, from, packet_len);
-
-		snoop_list_add_tail(s_packet, dd);
-
-		if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
-			snoop_dbg("Dropping packet");
-			return;
-		}
-		break;
-	default:
-		break;
-	}
-
-inline_pio_out:
-	pio_copy(dd, pbuf, pbc, from, count);
-}
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
deleted file mode 100644
index bd87715..0000000
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  - Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  - Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  - Neither the name of Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/delay.h>
-#include "hfi.h"
-#include "common.h"
-#include "eprom.h"
-
-/*
- * The EPROM is logically divided into three partitions:
- *	partition 0: the first 128K, visible from PCI ROM BAR
- *	partition 1: 4K config file (sector size)
- *	partition 2: the rest
- */
-#define P0_SIZE (128 * 1024)
-#define P1_SIZE   (4 * 1024)
-#define P1_START P0_SIZE
-#define P2_START (P0_SIZE + P1_SIZE)
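A quick, hedged illustration of the partition map these constants define (plain user-space arithmetic with the same sizes; not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int p0_size = 128 * 1024;	/* partition 0: PCI ROM BAR view */
	unsigned int p1_size = 4 * 1024;	/* partition 1: one 4K config sector */
	unsigned int p1_start = p0_size;
	unsigned int p2_start = p0_size + p1_size;

	/* prints: P1 @ 0x20000, P2 @ 0x21000 */
	printf("P1 @ 0x%x, P2 @ 0x%x\n", p1_start, p2_start);
	return 0;
}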
-
-/* erase sizes supported by the controller */
-#define SIZE_4KB (4 * 1024)
-#define MASK_4KB (SIZE_4KB - 1)
-
-#define SIZE_32KB (32 * 1024)
-#define MASK_32KB (SIZE_32KB - 1)
-
-#define SIZE_64KB (64 * 1024)
-#define MASK_64KB (SIZE_64KB - 1)
-
-/* controller page size, in bytes */
-#define EP_PAGE_SIZE 256
-#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
-
-/* controller commands */
-#define CMD_SHIFT 24
-#define CMD_NOP			    (0)
-#define CMD_PAGE_PROGRAM(addr)	    ((0x02 << CMD_SHIFT) | addr)
-#define CMD_READ_DATA(addr)	    ((0x03 << CMD_SHIFT) | addr)
-#define CMD_READ_SR1		    ((0x05 << CMD_SHIFT))
-#define CMD_WRITE_ENABLE	    ((0x06 << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_4KB(addr)  ((0x20 << CMD_SHIFT) | addr)
-#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
-#define CMD_CHIP_ERASE		    ((0x60 << CMD_SHIFT))
-#define CMD_READ_MANUF_DEV_ID	    ((0x90 << CMD_SHIFT))
-#define CMD_RELEASE_POWERDOWN_NOID  ((0xab << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
-
-/* controller interface speeds */
-#define EP_SPEED_FULL 0x2	/* full speed */
-
-/* controller status register 1 bits */
-#define SR1_BUSY 0x1ull		/* the BUSY bit in SR1 */
-
-/* sleep length while waiting for controller */
-#define WAIT_SLEEP_US 100	/* must be larger than 5 (see usage) */
-#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
-
-/* GPIO pins */
-#define EPROM_WP_N BIT_ULL(14)	/* EPROM write line */
-
-/*
- * How long to wait for the EPROM to become available, in ms.
- * The spec 32 Mb EPROM takes around 40s to erase then write.
- * Double it for safety.
- */
-#define EPROM_TIMEOUT 80000 /* ms */
-
-/*
- * Turn on external enable line that allows writing on the flash.
- */
-static void write_enable(struct hfi1_devdata *dd)
-{
-	/* raise signal */
-	write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
-	/* raise enable */
-	write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
-}
-
-/*
- * Turn off external enable line that allows writing on the flash.
- */
-static void write_disable(struct hfi1_devdata *dd)
-{
-	/* lower signal */
-	write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
-	/* lower enable */
-	write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
-}
-
-/*
- * Wait for the device to become not busy.  Must be called after all
- * write or erase operations.
- */
-static int wait_for_not_busy(struct hfi1_devdata *dd)
-{
-	unsigned long count = 0;
-	u64 reg;
-	int ret = 0;
-
-	/* starts page mode */
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
-	while (1) {
-		udelay(WAIT_SLEEP_US);
-		usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
-		count++;
-		reg = read_csr(dd, ASIC_EEP_DATA);
-		if ((reg & SR1_BUSY) == 0)
-			break;
-		/* 200s is the largest time for a 128Mb device */
-		if (count > COUNT_DELAY_SEC(200)) {
-			dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
-			ret = -ETIMEDOUT;
-			break; /* break, not goto - must stop page mode */
-		}
-	}
-
-	/* stop page mode with a NOP */
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
-
-	return ret;
-}
-
-/*
- * Read the device ID from the SPI controller.
- */
-static u32 read_device_id(struct hfi1_devdata *dd)
-{
-	/* read the Manufacture Device ID */
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
-	return (u32)read_csr(dd, ASIC_EEP_DATA);
-}
-
-/*
- * Erase the whole flash.
- */
-static int erase_chip(struct hfi1_devdata *dd)
-{
-	int ret;
-
-	write_enable(dd);
-
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
-	ret = wait_for_not_busy(dd);
-
-	write_disable(dd);
-
-	return ret;
-}
-
-/*
- * Erase a range.
- */
-static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
-{
-	u32 end = start + len;
-	int ret = 0;
-
-	if (end < start)
-		return -EINVAL;
-
-	/* check the end points for the minimum erase */
-	if ((start & MASK_4KB) || (end & MASK_4KB)) {
-		dd_dev_err(dd,
-			   "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
-			   __func__, start, end);
-		return -EINVAL;
-	}
-
-	write_enable(dd);
-
-	while (start < end) {
-		write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
-		/* check in order of largest to smallest */
-		if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
-			write_csr(dd, ASIC_EEP_ADDR_CMD,
-				  CMD_SECTOR_ERASE_64KB(start));
-			start += SIZE_64KB;
-		} else if (((start & MASK_32KB) == 0) &&
-			   (start + SIZE_32KB <= end)) {
-			write_csr(dd, ASIC_EEP_ADDR_CMD,
-				  CMD_SECTOR_ERASE_32KB(start));
-			start += SIZE_32KB;
-		} else {	/* 4KB will work */
-			write_csr(dd, ASIC_EEP_ADDR_CMD,
-				  CMD_SECTOR_ERASE_4KB(start));
-			start += SIZE_4KB;
-		}
-		ret = wait_for_not_busy(dd);
-		if (ret)
-			goto done;
-	}
-
-done:
-	write_disable(dd);
-
-	return ret;
-}
-
-/*
- * Read a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
-{
-	int i;
-
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
-	for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
-		result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
-}
-
-/*
- * Read length bytes starting at offset.  Copy to user address addr.
- */
-static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
-	u32 offset;
-	u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
-	int ret = 0;
-
-	/* reject anything not on an EPROM page boundary */
-	if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
-		return -EINVAL;
-
-	for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
-		read_page(dd, start + offset, buffer);
-		if (copy_to_user((void __user *)(addr + offset),
-				 buffer, EP_PAGE_SIZE)) {
-			ret = -EFAULT;
-			goto done;
-		}
-	}
-
-done:
-	return ret;
-}
-
-/*
- * Write a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
-{
-	int i;
-
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
-	write_csr(dd, ASIC_EEP_DATA, data[0]);
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
-	for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
-		write_csr(dd, ASIC_EEP_DATA, data[i]);
-	/* will close the open page */
-	return wait_for_not_busy(dd);
-}
-
-/*
- * Write length bytes starting at offset.  Read from user address addr.
- */
-static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
-	u32 offset;
-	u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
-	int ret = 0;
-
-	/* reject anything not on an EPROM page boundary */
-	if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
-		return -EINVAL;
-
-	write_enable(dd);
-
-	for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
-		if (copy_from_user(buffer, (void __user *)(addr + offset),
-				   EP_PAGE_SIZE)) {
-			ret = -EFAULT;
-			goto done;
-		}
-		ret = write_page(dd, start + offset, buffer);
-		if (ret)
-			goto done;
-	}
-
-done:
-	write_disable(dd);
-	return ret;
-}
-
-/* convert a range composite to a length, in bytes */
-static inline u32 extract_rlen(u32 composite)
-{
-	return (composite & 0xffff) * EP_PAGE_SIZE;
-}
-
-/* convert a range composite to a start, in bytes */
-static inline u32 extract_rstart(u32 composite)
-{
-	return (composite >> 16) * EP_PAGE_SIZE;
-}
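The two helpers above unpack a "range composite" in which the upper 16 bits carry the start and the lower 16 bits the length, both in units of 256-byte EPROM pages. A hedged round-trip example (user-space C; not driver code):

#include <stdio.h>

#define EP_PAGE_SIZE 256

static unsigned int make_composite(unsigned int start_pages, unsigned int len_pages)
{
	return (start_pages << 16) | (len_pages & 0xffff);
}

int main(void)
{
	/* a hypothetical 64 KB range starting at 128 KB */
	unsigned int composite = make_composite((128 * 1024) / EP_PAGE_SIZE,
						(64 * 1024) / EP_PAGE_SIZE);
	unsigned int rstart = (composite >> 16) * EP_PAGE_SIZE;
	unsigned int rlen = (composite & 0xffff) * EP_PAGE_SIZE;

	/* prints: start=0x20000 len=0x10000 */
	printf("start=0x%x len=0x%x\n", rstart, rlen);
	return 0;
}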
-
-/*
- * Perform the given operation on the EPROM.  Called from user space.  The
- * user credentials have already been checked.
- *
- * Return 0 on success, -ERRNO on error
- */
-int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
-{
-	struct hfi1_devdata *dd;
-	u32 dev_id;
-	u32 rlen;	/* range length */
-	u32 rstart;	/* range start */
-	int i_minor;
-	int ret = 0;
-
-	/*
-	 * Map the device file to device data using the relative minor.
-	 * The device file minor number is the unit number + 1.  0 is
-	 * the generic device file - reject it.
-	 */
-	i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
-	if (i_minor <= 0)
-		return -EINVAL;
-	dd = hfi1_lookup(i_minor - 1);
-	if (!dd) {
-		pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
-		return -EINVAL;
-	}
-
-	/* some devices do not have an EPROM */
-	if (!dd->eprom_available)
-		return -EOPNOTSUPP;
-
-	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
-	if (ret) {
-		dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
-			   __func__);
-		goto done_asic;
-	}
-
-	dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
-		    __func__, cmd->type, cmd->len, cmd->addr);
-
-	switch (cmd->type) {
-	case HFI1_CMD_EP_INFO:
-		if (cmd->len != sizeof(u32)) {
-			ret = -ERANGE;
-			break;
-		}
-		dev_id = read_device_id(dd);
-		/* addr points to a u32 user buffer */
-		if (copy_to_user((void __user *)cmd->addr, &dev_id,
-				 sizeof(u32)))
-			ret = -EFAULT;
-		break;
-
-	case HFI1_CMD_EP_ERASE_CHIP:
-		ret = erase_chip(dd);
-		break;
-
-	case HFI1_CMD_EP_ERASE_RANGE:
-		rlen = extract_rlen(cmd->len);
-		rstart = extract_rstart(cmd->len);
-		ret = erase_range(dd, rstart, rlen);
-		break;
-
-	case HFI1_CMD_EP_READ_RANGE:
-		rlen = extract_rlen(cmd->len);
-		rstart = extract_rstart(cmd->len);
-		ret = read_length(dd, rstart, rlen, cmd->addr);
-		break;
-
-	case HFI1_CMD_EP_WRITE_RANGE:
-		rlen = extract_rlen(cmd->len);
-		rstart = extract_rstart(cmd->len);
-		ret = write_length(dd, rstart, rlen, cmd->addr);
-		break;
-
-	default:
-		dd_dev_err(dd, "%s: unexpected command %d\n",
-			   __func__, cmd->type);
-		ret = -EINVAL;
-		break;
-	}
-
-	release_chip_resource(dd, CR_EPROM);
-done_asic:
-	return ret;
-}
-
-/*
- * Initialize the EPROM handler.
- */
-int eprom_init(struct hfi1_devdata *dd)
-{
-	int ret = 0;
-
-	/* only the discrete chip has an EPROM */
-	if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
-		return 0;
-
-	/*
-	 * It is OK if both HFIs reset the EPROM as long as they don't
-	 * do it at the same time.
-	 */
-	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
-	if (ret) {
-		dd_dev_err(dd,
-			   "%s: unable to acquire EPROM resource, no EPROM support\n",
-			   __func__);
-		goto done_asic;
-	}
-
-	/* reset EPROM to be sure it is in a good state */
-
-	/* set reset */
-	write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
-	/* clear reset, set speed */
-	write_csr(dd, ASIC_EEP_CTL_STAT,
-		  EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
-
-	/* wake the device with command "release powerdown NoID" */
-	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
-
-	dd->eprom_available = true;
-	release_chip_resource(dd, CR_EPROM);
-done_asic:
-	return ret;
-}
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 8345fb4..bbdbf9c 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -7,3 +7,5 @@
 	help
 	Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
 	Target Mode Stack.
+
+source	"drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 0f43be9..0f18295 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -18,3 +18,4 @@
 				iscsi_target_transport.o
 
 obj-$(CONFIG_ISCSI_TARGET)	+= iscsi_target_mod.o
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
new file mode 100644
index 0000000..c9b6a3c
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -0,0 +1,7 @@
+config ISCSI_TARGET_CXGB4
+	tristate "Chelsio iSCSI target offload driver"
+	depends on ISCSI_TARGET && CHELSIO_T4 && INET
+	select CHELSIO_T4_UWIRE
+	---help---
+	To compile this driver as module, choose M here: the module
+	will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
new file mode 100644
index 0000000..bd56c07
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -0,0 +1,6 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/target/iscsi
+
+obj-$(CONFIG_ISCSI_TARGET_CXGB4)  += cxgbit.o
+
+cxgbit-y  := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
new file mode 100644
index 0000000..625c7f6
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CXGBIT_H__
+#define __CXGBIT_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <target/iscsi/iscsi_transport.h>
+#include <iscsi_target_parameters.h>
+#include <iscsi_target_login.h>
+
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "cxgb4_ppm.h"
+#include "cxgbit_lro.h"
+
+extern struct mutex cdev_list_lock;
+extern struct list_head cdev_list_head;
+struct cxgbit_np;
+
+struct cxgbit_sock;
+
+struct cxgbit_cmd {
+	struct scatterlist sg;
+	struct cxgbi_task_tag_info ttinfo;
+	bool setup_ddp;
+	bool release;
+};
+
+#define CXGBIT_MAX_ISO_PAYLOAD	\
+	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
+
+struct cxgbit_iso_info {
+	u8 flags;
+	u32 mpdu;
+	u32 len;
+	u32 burst_len;
+};
+
+enum cxgbit_skcb_flags {
+	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
+	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
+	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
+	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
+};
+
+struct cxgbit_skb_rx_cb {
+	u8 opcode;
+	void *pdu_cb;
+	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
+};
+
+struct cxgbit_skb_tx_cb {
+	u8 submode;
+	u32 extra_len;
+};
+
+union cxgbit_skb_cb {
+	struct {
+		u8 flags;
+		union {
+			struct cxgbit_skb_tx_cb tx;
+			struct cxgbit_skb_rx_cb rx;
+		};
+	};
+
+	struct {
+		/* This member must be first. */
+		struct l2t_skb_cb l2t;
+		struct sk_buff *wr_next;
+	};
+};
+
+#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
+#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
+#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
+#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
+#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
+#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
+#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
+#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+enum cxgbit_cdev_flags {
+	CDEV_STATE_UP = 0,
+	CDEV_ISO_ENABLE,
+	CDEV_DDP_ENABLE,
+};
+
+#define NP_INFO_HASH_SIZE 32
+
+struct np_info {
+	struct np_info *next;
+	struct cxgbit_np *cnp;
+	unsigned int stid;
+};
+
+struct cxgbit_list_head {
+	struct list_head list;
+	/* device lock */
+	spinlock_t lock;
+};
+
+struct cxgbit_device {
+	struct list_head list;
+	struct cxgb4_lld_info lldi;
+	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
+	/* np lock */
+	spinlock_t np_lock;
+	u8 selectq[MAX_NPORTS][2];
+	struct cxgbit_list_head cskq;
+	u32 mdsl;
+	struct kref kref;
+	unsigned long flags;
+};
+
+struct cxgbit_wr_wait {
+	struct completion completion;
+	int ret;
+};
+
+enum cxgbit_csk_state {
+	CSK_STATE_IDLE = 0,
+	CSK_STATE_LISTEN,
+	CSK_STATE_CONNECTING,
+	CSK_STATE_ESTABLISHED,
+	CSK_STATE_ABORTING,
+	CSK_STATE_CLOSING,
+	CSK_STATE_MORIBUND,
+	CSK_STATE_DEAD,
+};
+
+enum cxgbit_csk_flags {
+	CSK_TX_DATA_SENT = 0,
+	CSK_LOGIN_PDU_DONE,
+	CSK_LOGIN_DONE,
+	CSK_DDP_ENABLE,
+};
+
+struct cxgbit_sock_common {
+	struct cxgbit_device *cdev;
+	struct sockaddr_storage local_addr;
+	struct sockaddr_storage remote_addr;
+	struct cxgbit_wr_wait wr_wait;
+	enum cxgbit_csk_state state;
+	unsigned long flags;
+};
+
+struct cxgbit_np {
+	struct cxgbit_sock_common com;
+	wait_queue_head_t accept_wait;
+	struct iscsi_np *np;
+	struct completion accept_comp;
+	struct list_head np_accept_list;
+	/* np accept lock */
+	spinlock_t np_accept_lock;
+	struct kref kref;
+	unsigned int stid;
+};
+
+struct cxgbit_sock {
+	struct cxgbit_sock_common com;
+	struct cxgbit_np *cnp;
+	struct iscsi_conn *conn;
+	struct l2t_entry *l2t;
+	struct dst_entry *dst;
+	struct list_head list;
+	struct sk_buff_head rxq;
+	struct sk_buff_head txq;
+	struct sk_buff_head ppodq;
+	struct sk_buff_head backlogq;
+	struct sk_buff_head skbq;
+	struct sk_buff *wr_pending_head;
+	struct sk_buff *wr_pending_tail;
+	struct sk_buff *skb;
+	struct sk_buff *lro_skb;
+	struct sk_buff *lro_hskb;
+	struct list_head accept_node;
+	/* socket lock */
+	spinlock_t lock;
+	wait_queue_head_t waitq;
+	wait_queue_head_t ack_waitq;
+	bool lock_owner;
+	struct kref kref;
+	u32 max_iso_npdu;
+	u32 wr_cred;
+	u32 wr_una_cred;
+	u32 wr_max_cred;
+	u32 snd_una;
+	u32 tid;
+	u32 snd_nxt;
+	u32 rcv_nxt;
+	u32 smac_idx;
+	u32 tx_chan;
+	u32 mtu;
+	u32 write_seq;
+	u32 rx_credits;
+	u32 snd_win;
+	u32 rcv_win;
+	u16 mss;
+	u16 emss;
+	u16 plen;
+	u16 rss_qid;
+	u16 txq_idx;
+	u16 ctrlq_idx;
+	u8 tos;
+	u8 port_id;
+#define CXGBIT_SUBMODE_HCRC 0x1
+#define CXGBIT_SUBMODE_DCRC 0x2
+	u8 submode;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u8 dcb_priority;
+#endif
+	u8 snd_wscale;
+};
+
+void _cxgbit_free_cdev(struct kref *kref);
+void _cxgbit_free_csk(struct kref *kref);
+void _cxgbit_free_cnp(struct kref *kref);
+
+static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
+{
+	kref_get(&cdev->kref);
+}
+
+static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
+{
+	kref_put(&cdev->kref, _cxgbit_free_cdev);
+}
+
+static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
+{
+	kref_get(&csk->kref);
+}
+
+static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
+{
+	kref_put(&csk->kref, _cxgbit_free_csk);
+}
+
+static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
+{
+	kref_get(&cnp->kref);
+}
+
+static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
+{
+	kref_put(&cnp->kref, _cxgbit_free_cnp);
+}
+
+static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
+{
+	csk->wr_pending_tail = NULL;
+	csk->wr_pending_head = NULL;
+}
+
+static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
+{
+	return csk->wr_pending_head;
+}
+
+static inline void
+cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	cxgbit_skcb_tx_wr_next(skb) = NULL;
+
+	skb_get(skb);
+
+	if (!csk->wr_pending_head)
+		csk->wr_pending_head = skb;
+	else
+		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
+	csk->wr_pending_tail = skb;
+}
+
+static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb = csk->wr_pending_head;
+
+	if (likely(skb)) {
+		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
+		cxgbit_skcb_tx_wr_next(skb) = NULL;
+	}
+	return skb;
+}
+
+typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
+				       struct sk_buff *);
+
+int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
+int cxgbit_setup_conn_digest(struct cxgbit_sock *);
+int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_free_conn(struct iscsi_conn *);
+extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
+int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+int cxgbit_rx_data_ack(struct cxgbit_sock *);
+int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
+		    struct l2t_entry *);
+void cxgbit_push_tx_frames(struct cxgbit_sock *);
+int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
+		    struct iscsi_datain_req *, const void *, u32);
+void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
+			struct iscsi_r2t *);
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
+int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
+void cxgbit_get_rx_pdu(struct iscsi_conn *);
+int cxgbit_validate_params(struct iscsi_conn *);
+struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
+
+/* DDP */
+int cxgbit_ddp_init(struct cxgbit_device *);
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
+int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
+void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
+
+static inline
+struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
+{
+	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
+}
+#endif /* __CXGBIT_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
new file mode 100644
index 0000000..0ae0b13
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -0,0 +1,2086 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include "cxgbit.h"
+#include "clip_tbl.h"
+
+static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
+{
+	wr_waitp->ret = 0;
+	reinit_completion(&wr_waitp->completion);
+}
+
+static void
+cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
+{
+	if (ret == CPL_ERR_NONE)
+		wr_waitp->ret = 0;
+	else
+		wr_waitp->ret = -EIO;
+
+	if (wr_waitp->ret)
+		pr_err("%s: err:%u", func, ret);
+
+	complete(&wr_waitp->completion);
+}
+
+static int
+cxgbit_wait_for_reply(struct cxgbit_device *cdev,
+		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
+		      const char *func)
+{
+	int ret;
+
+	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+		wr_waitp->ret = -EIO;
+		goto out;
+	}
+
+	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
+	if (!ret) {
+		pr_info("%s - Device %s not responding tid %u\n",
+			func, pci_name(cdev->lldi.pdev), tid);
+		wr_waitp->ret = -ETIMEDOUT;
+	}
+out:
+	if (wr_waitp->ret)
+		pr_info("%s: FW reply %d tid %u\n",
+			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
+	return wr_waitp->ret;
+}
+
+/* Returns whether a CPL status conveys negative advice.
+ */
+static int cxgbit_is_neg_adv(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+		status == CPL_ERR_PERSIST_NEG_ADVICE ||
+		status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
+static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
+{
+	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
+}
+
+static struct np_info *
+cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
+		   unsigned int stid)
+{
+	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+	if (p) {
+		int bucket = cxgbit_np_hashfn(cnp);
+
+		p->cnp = cnp;
+		p->stid = stid;
+		spin_lock(&cdev->np_lock);
+		p->next = cdev->np_hash_tab[bucket];
+		cdev->np_hash_tab[bucket] = p;
+		spin_unlock(&cdev->np_lock);
+	}
+
+	return p;
+}
+
+static int
+cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+	struct np_info *p;
+
+	spin_lock(&cdev->np_lock);
+	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
+		if (p->cnp == cnp) {
+			stid = p->stid;
+			break;
+		}
+	}
+	spin_unlock(&cdev->np_lock);
+
+	return stid;
+}
+
+static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
+
+	spin_lock(&cdev->np_lock);
+	for (p = *prev; p; prev = &p->next, p = p->next) {
+		if (p->cnp == cnp) {
+			stid = p->stid;
+			*prev = p->next;
+			kfree(p);
+			break;
+		}
+	}
+	spin_unlock(&cdev->np_lock);
+
+	return stid;
+}
+
+void _cxgbit_free_cnp(struct kref *kref)
+{
+	struct cxgbit_np *cnp;
+
+	cnp = container_of(kref, struct cxgbit_np, kref);
+	kfree(cnp);
+}
+
+static int
+cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
+		      struct cxgbit_np *cnp)
+{
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+				     &cnp->com.local_addr;
+	int addr_type;
+	int ret;
+
+	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
+		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
+
+	addr_type = ipv6_addr_type((const struct in6_addr *)
+				   &sin6->sin6_addr);
+	if (addr_type != IPV6_ADDR_ANY) {
+		ret = cxgb4_clip_get(cdev->lldi.ports[0],
+				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+		if (ret) {
+			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
+			       sin6->sin6_addr.s6_addr, ret);
+			return -ENOMEM;
+		}
+	}
+
+	cxgbit_get_cnp(cnp);
+	cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+	ret = cxgb4_create_server6(cdev->lldi.ports[0],
+				   stid, &sin6->sin6_addr,
+				   sin6->sin6_port,
+				   cdev->lldi.rxq_ids[0]);
+	if (!ret)
+		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+					    0, 10, __func__);
+	else if (ret > 0)
+		ret = net_xmit_errno(ret);
+	else
+		cxgbit_put_cnp(cnp);
+
+	if (ret) {
+		if (ret != -ETIMEDOUT)
+			cxgb4_clip_release(cdev->lldi.ports[0],
+				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+
+		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
+		       ret, stid, sin6->sin6_addr.s6_addr,
+		       ntohs(sin6->sin6_port));
+	}
+
+	return ret;
+}
+
+static int
+cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
+		      struct cxgbit_np *cnp)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)
+				   &cnp->com.local_addr;
+	int ret;
+
+	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
+		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
+
+	cxgbit_get_cnp(cnp);
+	cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+	ret = cxgb4_create_server(cdev->lldi.ports[0],
+				  stid, sin->sin_addr.s_addr,
+				  sin->sin_port, 0,
+				  cdev->lldi.rxq_ids[0]);
+	if (!ret)
+		ret = cxgbit_wait_for_reply(cdev,
+					    &cnp->com.wr_wait,
+					    0, 10, __func__);
+	else if (ret > 0)
+		ret = net_xmit_errno(ret);
+	else
+		cxgbit_put_cnp(cnp);
+
+	if (ret)
+		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
+		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
+	return ret;
+}
+
+struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
+{
+	struct cxgbit_device *cdev;
+	u8 i;
+
+	list_for_each_entry(cdev, &cdev_list_head, list) {
+		struct cxgb4_lld_info *lldi = &cdev->lldi;
+
+		for (i = 0; i < lldi->nports; i++) {
+			if (lldi->ports[i] == ndev) {
+				if (port_id)
+					*port_id = i;
+				return cdev;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
+{
+	if (ndev->priv_flags & IFF_BONDING) {
+		pr_err("Bond devices are not supported. Interface:%s\n",
+		       ndev->name);
+		return NULL;
+	}
+
+	if (is_vlan_dev(ndev))
+		return vlan_dev_real_dev(ndev);
+
+	return ndev;
+}
+
+static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
+{
+	struct net_device *ndev;
+
+	ndev = __ip_dev_find(&init_net, saddr, false);
+	if (!ndev)
+		return NULL;
+
+	return cxgbit_get_real_dev(ndev);
+}
+
+static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
+{
+	struct net_device *ndev = NULL;
+	bool found = false;
+
+	if (IS_ENABLED(CONFIG_IPV6)) {
+		for_each_netdev_rcu(&init_net, ndev)
+			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
+				found = true;
+				break;
+			}
+	}
+	if (!found)
+		return NULL;
+	return cxgbit_get_real_dev(ndev);
+}
+
+static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
+{
+	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+	int ss_family = sockaddr->ss_family;
+	struct net_device *ndev = NULL;
+	struct cxgbit_device *cdev = NULL;
+
+	rcu_read_lock();
+	if (ss_family == AF_INET) {
+		struct sockaddr_in *sin;
+
+		sin = (struct sockaddr_in *)sockaddr;
+		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
+	} else if (ss_family == AF_INET6) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)sockaddr;
+		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
+	}
+	if (!ndev)
+		goto out;
+
+	cdev = cxgbit_find_device(ndev, NULL);
+out:
+	rcu_read_unlock();
+	return cdev;
+}
+
+static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
+{
+	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+	int ss_family = sockaddr->ss_family;
+	int addr_type;
+
+	if (ss_family == AF_INET) {
+		struct sockaddr_in *sin;
+
+		sin = (struct sockaddr_in *)sockaddr;
+		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
+			return true;
+	} else if (ss_family == AF_INET6) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)sockaddr;
+		addr_type = ipv6_addr_type((const struct in6_addr *)
+				&sin6->sin6_addr);
+		if (addr_type == IPV6_ADDR_ANY)
+			return true;
+	}
+	return false;
+}
+
+static int
+__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+	int stid, ret;
+	int ss_family = cnp->com.local_addr.ss_family;
+
+	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+		return -EINVAL;
+
+	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
+	if (stid < 0)
+		return -EINVAL;
+
+	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
+		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
+		return -EINVAL;
+	}
+
+	if (ss_family == AF_INET)
+		ret = cxgbit_create_server4(cdev, stid, cnp);
+	else
+		ret = cxgbit_create_server6(cdev, stid, cnp);
+
+	if (ret) {
+		if (ret != -ETIMEDOUT)
+			cxgb4_free_stid(cdev->lldi.tids, stid,
+					ss_family);
+		cxgbit_np_hash_del(cdev, cnp);
+		return ret;
+	}
+	return ret;
+}
+
+static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
+{
+	struct cxgbit_device *cdev;
+	int ret = -1;
+
+	mutex_lock(&cdev_list_lock);
+	cdev = cxgbit_find_np_cdev(cnp);
+	if (!cdev)
+		goto out;
+
+	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
+		goto out;
+
+	if (__cxgbit_setup_cdev_np(cdev, cnp))
+		goto out;
+
+	cnp->com.cdev = cdev;
+	ret = 0;
+out:
+	mutex_unlock(&cdev_list_lock);
+	return ret;
+}
+
+static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
+{
+	struct cxgbit_device *cdev;
+	int ret;
+	u32 count = 0;
+
+	mutex_lock(&cdev_list_lock);
+	list_for_each_entry(cdev, &cdev_list_head, list) {
+		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
+			mutex_unlock(&cdev_list_lock);
+			return -1;
+		}
+	}
+
+	list_for_each_entry(cdev, &cdev_list_head, list) {
+		ret = __cxgbit_setup_cdev_np(cdev, cnp);
+		if (ret == -ETIMEDOUT)
+			break;
+		if (ret != 0)
+			continue;
+		count++;
+	}
+	mutex_unlock(&cdev_list_lock);
+
+	return count ? 0 : -1;
+}
+
+int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
+{
+	struct cxgbit_np *cnp;
+	int ret;
+
+	if ((ksockaddr->ss_family != AF_INET) &&
+	    (ksockaddr->ss_family != AF_INET6))
+		return -EINVAL;
+
+	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
+	if (!cnp)
+		return -ENOMEM;
+
+	init_waitqueue_head(&cnp->accept_wait);
+	init_completion(&cnp->com.wr_wait.completion);
+	init_completion(&cnp->accept_comp);
+	INIT_LIST_HEAD(&cnp->np_accept_list);
+	spin_lock_init(&cnp->np_accept_lock);
+	kref_init(&cnp->kref);
+	memcpy(&np->np_sockaddr, ksockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
+	       sizeof(cnp->com.local_addr));
+
+	cnp->np = np;
+	cnp->com.cdev = NULL;
+
+	if (cxgbit_inaddr_any(cnp))
+		ret = cxgbit_setup_all_np(cnp);
+	else
+		ret = cxgbit_setup_cdev_np(cnp);
+
+	if (ret) {
+		cxgbit_put_cnp(cnp);
+		return -EINVAL;
+	}
+
+	np->np_context = cnp;
+	cnp->com.state = CSK_STATE_LISTEN;
+	return 0;
+}
+
+static void
+cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+		     struct cxgbit_sock *csk)
+{
+	conn->login_family = np->np_sockaddr.ss_family;
+	conn->login_sockaddr = csk->com.remote_addr;
+	conn->local_sockaddr = csk->com.local_addr;
+}
+
+int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+	struct cxgbit_np *cnp = np->np_context;
+	struct cxgbit_sock *csk;
+	int ret = 0;
+
+accept_wait:
+	ret = wait_for_completion_interruptible(&cnp->accept_comp);
+	if (ret)
+		return -ENODEV;
+
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+		spin_unlock_bh(&np->np_thread_lock);
+		/**
+		 * No point in stalling here when np_thread
+		 * is in state RESET/SHUTDOWN/EXIT - bail
+		 **/
+		return -ENODEV;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	spin_lock_bh(&cnp->np_accept_lock);
+	if (list_empty(&cnp->np_accept_list)) {
+		spin_unlock_bh(&cnp->np_accept_lock);
+		goto accept_wait;
+	}
+
+	csk = list_first_entry(&cnp->np_accept_list,
+			       struct cxgbit_sock,
+			       accept_node);
+
+	list_del_init(&csk->accept_node);
+	spin_unlock_bh(&cnp->np_accept_lock);
+	conn->context = csk;
+	csk->conn = conn;
+
+	cxgbit_set_conn_info(np, conn, csk);
+	return 0;
+}
+
+static int
+__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+	int stid, ret;
+	bool ipv6 = false;
+
+	stid = cxgbit_np_hash_del(cdev, cnp);
+	if (stid < 0)
+		return -EINVAL;
+	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+		return -EINVAL;
+
+	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
+		ipv6 = true;
+
+	cxgbit_get_cnp(cnp);
+	cxgbit_init_wr_wait(&cnp->com.wr_wait);
+	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
+				  cdev->lldi.rxq_ids[0], ipv6);
+
+	if (ret > 0)
+		ret = net_xmit_errno(ret);
+
+	if (ret) {
+		cxgbit_put_cnp(cnp);
+		return ret;
+	}
+
+	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+				    0, 10, __func__);
+	if (ret == -ETIMEDOUT)
+		return ret;
+
+	if (ipv6 && cnp->com.cdev) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
+		cxgb4_clip_release(cdev->lldi.ports[0],
+				   (const u32 *)&sin6->sin6_addr.s6_addr,
+				   1);
+	}
+
+	cxgb4_free_stid(cdev->lldi.tids, stid,
+			cnp->com.local_addr.ss_family);
+	return 0;
+}
+
+static void cxgbit_free_all_np(struct cxgbit_np *cnp)
+{
+	struct cxgbit_device *cdev;
+	int ret;
+
+	mutex_lock(&cdev_list_lock);
+	list_for_each_entry(cdev, &cdev_list_head, list) {
+		ret = __cxgbit_free_cdev_np(cdev, cnp);
+		if (ret == -ETIMEDOUT)
+			break;
+	}
+	mutex_unlock(&cdev_list_lock);
+}
+
+static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
+{
+	struct cxgbit_device *cdev;
+	bool found = false;
+
+	mutex_lock(&cdev_list_lock);
+	list_for_each_entry(cdev, &cdev_list_head, list) {
+		if (cdev == cnp->com.cdev) {
+			found = true;
+			break;
+		}
+	}
+	if (!found)
+		goto out;
+
+	__cxgbit_free_cdev_np(cdev, cnp);
+out:
+	mutex_unlock(&cdev_list_lock);
+}
+
+void cxgbit_free_np(struct iscsi_np *np)
+{
+	struct cxgbit_np *cnp = np->np_context;
+
+	cnp->com.state = CSK_STATE_DEAD;
+	if (cnp->com.cdev)
+		cxgbit_free_cdev_np(cnp);
+	else
+		cxgbit_free_all_np(cnp);
+
+	np->np_context = NULL;
+	cxgbit_put_cnp(cnp);
+}
+
+static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+	struct cpl_close_con_req *req;
+	unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	req = (struct cpl_close_con_req *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
+						    csk->tid));
+	req->rsvd = 0;
+
+	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
+	__skb_queue_tail(&csk->txq, skb);
+	cxgbit_push_tx_frames(csk);
+}
+
+static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+	pr_debug("%s cxgbit_device %p\n", __func__, handle);
+	kfree_skb(skb);
+}
+
+static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct cxgbit_device *cdev = handle;
+	struct cpl_abort_req *req = cplhdr(skb);
+
+	pr_debug("%s cdev %p\n", __func__, cdev);
+	req->cmd = CPL_ABORT_NO_RST;
+	cxgbit_ofld_send(cdev, skb);
+}
+
+static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
+{
+	struct cpl_abort_req *req;
+	unsigned int len = roundup(sizeof(*req), 16);
+	struct sk_buff *skb;
+
+	pr_debug("%s: csk %p tid %u; state %d\n",
+		 __func__, csk, csk->tid, csk->com.state);
+
+	__skb_queue_purge(&csk->txq);
+
+	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+		cxgbit_send_tx_flowc_wr(csk);
+
+	skb = __skb_dequeue(&csk->skbq);
+	req = (struct cpl_abort_req *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+	t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
+						    csk->tid));
+	req->cmd = CPL_ABORT_SEND_RST;
+	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+	struct cxgbit_sock *csk = conn->context;
+	bool release = false;
+
+	pr_debug("%s: state %d\n",
+		 __func__, csk->com.state);
+
+	spin_lock_bh(&csk->lock);
+	switch (csk->com.state) {
+	case CSK_STATE_ESTABLISHED:
+		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+			csk->com.state = CSK_STATE_CLOSING;
+			cxgbit_send_halfclose(csk);
+		} else {
+			csk->com.state = CSK_STATE_ABORTING;
+			cxgbit_send_abort_req(csk);
+		}
+		break;
+	case CSK_STATE_CLOSING:
+		csk->com.state = CSK_STATE_MORIBUND;
+		cxgbit_send_halfclose(csk);
+		break;
+	case CSK_STATE_DEAD:
+		release = true;
+		break;
+	default:
+		pr_err("%s: csk %p; state %d\n",
+		       __func__, csk, csk->com.state);
+	}
+	spin_unlock_bh(&csk->lock);
+
+	if (release)
+		cxgbit_put_csk(csk);
+}
+
+static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
+{
+	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
+			((csk->com.remote_addr.ss_family == AF_INET) ?
+			sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
+			sizeof(struct tcphdr);
+	csk->mss = csk->emss;
+	if (TCPOPT_TSTAMP_G(opt))
+		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
+	if (csk->emss < 128)
+		csk->emss = 128;
+	if (csk->emss & 7)
+		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
+	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
+		 csk->mss, csk->emss);
+}
+
+static void cxgbit_free_skb(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+
+	__skb_queue_purge(&csk->txq);
+	__skb_queue_purge(&csk->rxq);
+	__skb_queue_purge(&csk->backlogq);
+	__skb_queue_purge(&csk->ppodq);
+	__skb_queue_purge(&csk->skbq);
+
+	while ((skb = cxgbit_sock_dequeue_wr(csk)))
+		kfree_skb(skb);
+
+	__kfree_skb(csk->lro_hskb);
+}
+
+void _cxgbit_free_csk(struct kref *kref)
+{
+	struct cxgbit_sock *csk;
+	struct cxgbit_device *cdev;
+
+	csk = container_of(kref, struct cxgbit_sock, kref);
+
+	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
+
+	if (csk->com.local_addr.ss_family == AF_INET6) {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+					     &csk->com.local_addr;
+		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
+				   (const u32 *)
+				   &sin6->sin6_addr.s6_addr, 1);
+	}
+
+	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
+	dst_release(csk->dst);
+	cxgb4_l2t_release(csk->l2t);
+
+	cdev = csk->com.cdev;
+	spin_lock_bh(&cdev->cskq.lock);
+	list_del(&csk->list);
+	spin_unlock_bh(&cdev->cskq.lock);
+
+	cxgbit_free_skb(csk);
+	cxgbit_put_cdev(cdev);
+
+	kfree(csk);
+}
+
+static void
+cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
+		      __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
+		      __be16 *peer_port)
+{
+	u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+	u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+	struct tcphdr *tcp = (struct tcphdr *)
+			      ((u8 *)(req + 1) + eth_len + ip_len);
+
+	if (ip->version == 4) {
+		pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+			 __func__,
+			 ntohl(ip->saddr), ntohl(ip->daddr),
+			 ntohs(tcp->source),
+			 ntohs(tcp->dest));
+		*iptype = 4;
+		memcpy(peer_ip, &ip->saddr, 4);
+		memcpy(local_ip, &ip->daddr, 4);
+	} else {
+		pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+			 __func__,
+			 ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+			 ntohs(tcp->source),
+			 ntohs(tcp->dest));
+		*iptype = 6;
+		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+		memcpy(local_ip, ip6->daddr.s6_addr, 16);
+	}
+
+	*peer_port = tcp->source;
+	*local_port = tcp->dest;
+}
+
+static int
+cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
+{
+	u8 i;
+
+	egress_dev = cxgbit_get_real_dev(egress_dev);
+	for (i = 0; i < cdev->lldi.nports; i++)
+		if (cdev->lldi.ports[i] == egress_dev)
+			return 1;
+	return 0;
+}
+
+static struct dst_entry *
+cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
+		   __be16 local_port, __be16 peer_port, u8 tos,
+		   __u32 sin6_scope_id)
+{
+	struct dst_entry *dst = NULL;
+
+	if (IS_ENABLED(CONFIG_IPV6)) {
+		struct flowi6 fl6;
+
+		memset(&fl6, 0, sizeof(fl6));
+		memcpy(&fl6.daddr, peer_ip, 16);
+		memcpy(&fl6.saddr, local_ip, 16);
+		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+			fl6.flowi6_oif = sin6_scope_id;
+		dst = ip6_route_output(&init_net, NULL, &fl6);
+		if (!dst)
+			goto out;
+		if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
+		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+			dst_release(dst);
+			dst = NULL;
+		}
+	}
+out:
+	return dst;
+}
+
+static struct dst_entry *
+cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
+		  __be16 local_port, __be16 peer_port, u8 tos)
+{
+	struct rtable *rt;
+	struct flowi4 fl4;
+	struct neighbour *n;
+
+	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
+				   local_ip,
+				   peer_port, local_port, IPPROTO_TCP,
+				   tos, 0);
+	if (IS_ERR(rt))
+		return NULL;
+	n = dst_neigh_lookup(&rt->dst, &peer_ip);
+	if (!n)
+		return NULL;
+	if (!cxgbit_our_interface(cdev, n->dev) &&
+	    !(n->dev->flags & IFF_LOOPBACK)) {
+		neigh_release(n);
+		dst_release(&rt->dst);
+		return NULL;
+	}
+	neigh_release(n);
+	return &rt->dst;
+}
+
+static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
+{
+	unsigned int linkspeed;
+	u8 scale;
+
+	linkspeed = pi->link_cfg.speed;
+	scale = linkspeed / SPEED_10000;
+
+#define CXGBIT_10G_RCV_WIN (256 * 1024)
+	csk->rcv_win = CXGBIT_10G_RCV_WIN;
+	if (scale)
+		csk->rcv_win *= scale;
+
+#define CXGBIT_10G_SND_WIN (256 * 1024)
+	csk->snd_win = CXGBIT_10G_SND_WIN;
+	if (scale)
+		csk->snd_win *= scale;
+
+	pr_debug("%s snd_win %d rcv_win %d\n",
+		 __func__, csk->snd_win, csk->rcv_win);
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
+{
+	return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int cxgbit_select_priority(int pri_mask)
+{
+	if (!pri_mask)
+		return 0;
+
+	return (ffs(pri_mask) - 1);
+}
+
+static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
+{
+	int ret;
+	u8 caps;
+
+	struct dcb_app iscsi_dcb_app = {
+		.protocol = local_port
+	};
+
+	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+
+	if (ret)
+		return 0;
+
+	if (caps & DCB_CAP_DCBX_VER_IEEE) {
+		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+
+		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+
+	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
+		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+
+		ret = dcb_getapp(ndev, &iscsi_dcb_app);
+	}
+
+	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));
+
+	return cxgbit_select_priority(ret);
+}
+#endif
+
+static int
+cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
+		    u16 local_port, struct dst_entry *dst,
+		    struct cxgbit_device *cdev)
+{
+	struct neighbour *n;
+	int ret, step;
+	struct net_device *ndev;
+	u16 rxq_idx, port_id;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u8 priority = 0;
+#endif
+
+	n = dst_neigh_lookup(dst, peer_ip);
+	if (!n)
+		return -ENODEV;
+
+	rcu_read_lock();
+	ret = -ENOMEM;
+	if (n->dev->flags & IFF_LOOPBACK) {
+		if (iptype == 4)
+			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
+		else if (IS_ENABLED(CONFIG_IPV6))
+			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
+		else
+			ndev = NULL;
+
+		if (!ndev) {
+			ret = -ENODEV;
+			goto out;
+		}
+
+		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
+					 n, ndev, 0);
+		if (!csk->l2t)
+			goto out;
+		csk->mtu = ndev->mtu;
+		csk->tx_chan = cxgb4_port_chan(ndev);
+		csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+		step = cdev->lldi.ntxq /
+			cdev->lldi.nchan;
+		csk->txq_idx = cxgb4_port_idx(ndev) * step;
+		step = cdev->lldi.nrxq /
+			cdev->lldi.nchan;
+		csk->ctrlq_idx = cxgb4_port_idx(ndev);
+		csk->rss_qid = cdev->lldi.rxq_ids[
+				cxgb4_port_idx(ndev) * step];
+		csk->port_id = cxgb4_port_idx(ndev);
+		cxgbit_set_tcp_window(csk,
+				      (struct port_info *)netdev_priv(ndev));
+	} else {
+		ndev = cxgbit_get_real_dev(n->dev);
+		if (!ndev) {
+			ret = -ENODEV;
+			goto out;
+		}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+		if (cxgbit_get_iscsi_dcb_state(ndev))
+			priority = cxgbit_get_iscsi_dcb_priority(ndev,
+								 local_port);
+
+		csk->dcb_priority = priority;
+
+		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
+#else
+		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
+#endif
+		if (!csk->l2t)
+			goto out;
+		port_id = cxgb4_port_idx(ndev);
+		csk->mtu = dst_mtu(dst);
+		csk->tx_chan = cxgb4_port_chan(ndev);
+		csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+		step = cdev->lldi.ntxq /
+			cdev->lldi.nports;
+		csk->txq_idx = (port_id * step) +
+				(cdev->selectq[port_id][0]++ % step);
+		csk->ctrlq_idx = cxgb4_port_idx(ndev);
+		step = cdev->lldi.nrxq /
+			cdev->lldi.nports;
+		rxq_idx = (port_id * step) +
+				(cdev->selectq[port_id][1]++ % step);
+		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
+		csk->port_id = port_id;
+		cxgbit_set_tcp_window(csk,
+				      (struct port_info *)netdev_priv(ndev));
+	}
+	ret = 0;
+out:
+	rcu_read_unlock();
+	neigh_release(n);
+	return ret;
+}
+
+int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	int ret = 0;
+
+	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+		kfree_skb(skb);
+		pr_err("%s - device not up - dropping\n", __func__);
+		return -EIO;
+	}
+
+	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
+	if (ret < 0)
+		kfree_skb(skb);
+	return ret < 0 ? ret : 0;
+}
+
+static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
+{
+	struct cpl_tid_release *req;
+	unsigned int len = roundup(sizeof(*req), 16);
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	req = (struct cpl_tid_release *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
+		   CPL_TID_RELEASE, tid));
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+	cxgbit_ofld_send(cdev, skb);
+}
+
+int
+cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
+		struct l2t_entry *l2e)
+{
+	int ret = 0;
+
+	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+		kfree_skb(skb);
+		pr_err("%s - device not up - dropping\n", __func__);
+		return -EIO;
+	}
+
+	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
+	if (ret < 0)
+		kfree_skb(skb);
+	return ret < 0 ? ret : 0;
+}
+
+static void
+cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
+		unsigned int *idx, int use_ts, int ipv6)
+{
+	unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
+				   sizeof(struct iphdr)) +
+				   sizeof(struct tcphdr) +
+				   (use_ts ? round_up(TCPOLEN_TIMESTAMP,
+				    4) : 0);
+	unsigned short data_size = mtu - hdr_size;
+
+	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
+static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	if (csk->com.state != CSK_STATE_ESTABLISHED) {
+		__kfree_skb(skb);
+		return;
+	}
+
+	cxgbit_ofld_send(csk->com.cdev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host ->
+ * Send RX credits through an RX_DATA_ACK CPL message.
+ * Returns the number of credits sent.
+ */
+int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+	struct cpl_rx_data_ack *req;
+	unsigned int len = roundup(sizeof(*req), 16);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -1;
+
+	req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+						    csk->tid));
+	req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+				       RX_CREDITS_V(csk->rx_credits));
+
+	csk->rx_credits = 0;
+
+	spin_lock_bh(&csk->lock);
+	if (csk->lock_owner) {
+		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
+		__skb_queue_tail(&csk->backlogq, skb);
+		spin_unlock_bh(&csk->lock);
+		return 0;
+	}
+
+	cxgbit_send_rx_credits(csk, skb);
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+}
+
+#define FLOWC_WR_NPARAMS_MIN    9
+#define FLOWC_WR_NPARAMS_MAX	11
+static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+	u32 len, flowclen;
+	u8 i;
+
+	flowclen = offsetof(struct fw_flowc_wr,
+			    mnemval[FLOWC_WR_NPARAMS_MAX]);
+
+	len = max_t(u32, sizeof(struct cpl_abort_req),
+		    sizeof(struct cpl_abort_rpl));
+
+	len = max(len, flowclen);
+	len = roundup(len, 16);
+
+	for (i = 0; i < 3; i++) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (!skb)
+			goto out;
+		__skb_queue_tail(&csk->skbq, skb);
+	}
+
+	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
+	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+	csk->lro_hskb = skb;
+
+	return 0;
+out:
+	__skb_queue_purge(&csk->skbq);
+	return -ENOMEM;
+}
+
+static u32 cxgbit_compute_wscale(u32 win)
+{
+	u32 wscale = 0;
+
+	while (wscale < 14 && (65535 << wscale) < win)
+		wscale++;
+	return wscale;
+}
+
+static void
+cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
+{
+	struct sk_buff *skb;
+	const struct tcphdr *tcph;
+	struct cpl_t5_pass_accept_rpl *rpl5;
+	unsigned int len = roundup(sizeof(*rpl5), 16);
+	unsigned int mtu_idx;
+	u64 opt0;
+	u32 opt2, hlen;
+	u32 wscale;
+	u32 win;
+
+	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb) {
+		cxgbit_put_csk(csk);
+		return;
+	}
+
+	rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
+	memset(rpl5, 0, len);
+
+	INIT_TP_WR(rpl5, csk->tid);
+	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+						     csk->tid));
+	cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+			req->tcpopt.tstamp,
+			(csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+	wscale = cxgbit_compute_wscale(csk->rcv_win);
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = csk->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_M)
+		win = RCV_BUFSIZ_M;
+	opt0 =  TCAM_BYPASS_F |
+		WND_SCALE_V(wscale) |
+		MSS_IDX_V(mtu_idx) |
+		L2T_IDX_V(csk->l2t->idx) |
+		TX_CHAN_V(csk->tx_chan) |
+		SMAC_SEL_V(csk->smac_idx) |
+		DSCP_V(csk->tos >> 2) |
+		ULP_MODE_V(ULP_MODE_ISCSI) |
+		RCV_BUFSIZ_V(win);
+
+	opt2 = RX_CHANNEL_V(0) |
+		RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+
+	if (req->tcpopt.tstamp)
+		opt2 |= TSTAMPS_EN_F;
+	if (req->tcpopt.sack)
+		opt2 |= SACK_EN_F;
+	if (wscale)
+		opt2 |= WND_SCALE_EN_F;
+
+	hlen = ntohl(req->hdr_len);
+	tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+		IP_HDR_LEN_G(hlen);
+
+	if (tcph->ece && tcph->cwr)
+		opt2 |= CCTRL_ECN_V(1);
+
+	opt2 |= RX_COALESCE_V(3);
+	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
+
+	opt2 |= T5_ISS_F;
+	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+
+	opt2 |= T5_OPT_2_VALID_F;
+
+	rpl5->opt0 = cpu_to_be64(opt0);
+	rpl5->opt2 = cpu_to_be32(opt2);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
+	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+static void
+cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbit_sock *csk = NULL;
+	struct cxgbit_np *cnp;
+	struct cpl_pass_accept_req *req = cplhdr(skb);
+	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+	struct tid_info *t = cdev->lldi.tids;
+	unsigned int tid = GET_TID(req);
+	u16 peer_mss = ntohs(req->tcpopt.mss);
+	unsigned short hdrs;
+
+	struct dst_entry *dst;
+	__u8 local_ip[16], peer_ip[16];
+	__be16 local_port, peer_port;
+	int ret;
+	int iptype;
+
+	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
+		 __func__, cdev, stid, tid);
+
+	cnp = lookup_stid(t, stid);
+	if (!cnp) {
+		pr_err("%s connect request on invalid stid %d\n",
+		       __func__, stid);
+		goto rel_skb;
+	}
+
+	if (cnp->com.state != CSK_STATE_LISTEN) {
+		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
+		       __func__);
+		goto reject;
+	}
+
+	csk = lookup_tid(t, tid);
+	if (csk) {
+		pr_err("%s csk not null tid %u\n",
+		       __func__, tid);
+		goto rel_skb;
+	}
+
+	cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
+			      &local_port, &peer_port);
+
+	/* Find output route */
+	if (iptype == 4)  {
+		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
+			 "lport %d rport %d peer_mss %d\n"
+			 , __func__, cnp, tid,
+			 local_ip, peer_ip, ntohs(local_port),
+			 ntohs(peer_port), peer_mss);
+		dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
+					*(__be32 *)peer_ip,
+					local_port, peer_port,
+					PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+	} else {
+		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
+			 "lport %d rport %d peer_mss %d\n"
+			 , __func__, cnp, tid,
+			 local_ip, peer_ip, ntohs(local_port),
+			 ntohs(peer_port), peer_mss);
+		dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
+					 local_port, peer_port,
+					 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+					 ((struct sockaddr_in6 *)
+					 &cnp->com.local_addr)->sin6_scope_id);
+	}
+	if (!dst) {
+		pr_err("%s - failed to find dst entry!\n",
+		       __func__);
+		goto reject;
+	}
+
+	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
+	if (!csk) {
+		dst_release(dst);
+		goto rel_skb;
+	}
+
+	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
+				  dst, cdev);
+	if (ret) {
+		pr_err("%s - failed to allocate l2t entry!\n",
+		       __func__);
+		dst_release(dst);
+		kfree(csk);
+		goto reject;
+	}
+
+	kref_init(&csk->kref);
+	init_completion(&csk->com.wr_wait.completion);
+
+	INIT_LIST_HEAD(&csk->accept_node);
+
+	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+		sizeof(struct tcphdr) +	(req->tcpopt.tstamp ? 12 : 0);
+	if (peer_mss && csk->mtu > (peer_mss + hdrs))
+		csk->mtu = peer_mss + hdrs;
+
+	csk->com.state = CSK_STATE_CONNECTING;
+	csk->com.cdev = cdev;
+	csk->cnp = cnp;
+	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+	csk->dst = dst;
+	csk->tid = tid;
+	csk->wr_cred = cdev->lldi.wr_cred -
+			DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+	csk->wr_max_cred = csk->wr_cred;
+	csk->wr_una_cred = 0;
+
+	if (iptype == 4) {
+		struct sockaddr_in *sin = (struct sockaddr_in *)
+					  &csk->com.local_addr;
+		sin->sin_family = AF_INET;
+		sin->sin_port = local_port;
+		sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+		sin = (struct sockaddr_in *)&csk->com.remote_addr;
+		sin->sin_family = AF_INET;
+		sin->sin_port = peer_port;
+		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
+	} else {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+					    &csk->com.local_addr;
+
+		sin6->sin6_family = PF_INET6;
+		sin6->sin6_port = local_port;
+		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+		cxgb4_clip_get(cdev->lldi.ports[0],
+			       (const u32 *)&sin6->sin6_addr.s6_addr,
+			       1);
+
+		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
+		sin6->sin6_family = PF_INET6;
+		sin6->sin6_port = peer_port;
+		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
+	}
+
+	skb_queue_head_init(&csk->rxq);
+	skb_queue_head_init(&csk->txq);
+	skb_queue_head_init(&csk->ppodq);
+	skb_queue_head_init(&csk->backlogq);
+	skb_queue_head_init(&csk->skbq);
+	cxgbit_sock_reset_wr_list(csk);
+	spin_lock_init(&csk->lock);
+	init_waitqueue_head(&csk->waitq);
+	init_waitqueue_head(&csk->ack_waitq);
+	csk->lock_owner = false;
+
+	if (cxgbit_alloc_csk_skb(csk)) {
+		dst_release(dst);
+		kfree(csk);
+		goto rel_skb;
+	}
+
+	cxgbit_get_cdev(cdev);
+
+	spin_lock(&cdev->cskq.lock);
+	list_add_tail(&csk->list, &cdev->cskq.list);
+	spin_unlock(&cdev->cskq.lock);
+
+	cxgb4_insert_tid(t, csk, tid);
+	cxgbit_pass_accept_rpl(csk, req);
+	goto rel_skb;
+
+reject:
+	cxgbit_release_tid(cdev, tid);
+rel_skb:
+	__kfree_skb(skb);
+}
+
+static u32
+cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
+			   u32 *flowclenp)
+{
+	u32 nparams, flowclen16, flowclen;
+
+	nparams = FLOWC_WR_NPARAMS_MIN;
+
+	if (csk->snd_wscale)
+		nparams++;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	nparams++;
+#endif
+	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+	flowclen16 = DIV_ROUND_UP(flowclen, 16);
+	flowclen = flowclen16 * 16;
+	/*
+	 * Return the number of 16-byte credits used by the flowc request.
+	 * Pass back the nparams and actual flowc length if requested.
+	 */
+	if (nparamsp)
+		*nparamsp = nparams;
+	if (flowclenp)
+		*flowclenp = flowclen;
+	return flowclen16;
+}
+
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
+{
+	struct cxgbit_device *cdev = csk->com.cdev;
+	struct fw_flowc_wr *flowc;
+	u32 nparams, flowclen16, flowclen;
+	struct sk_buff *skb;
+	u8 index;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
+
+	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
+
+	skb = __skb_dequeue(&csk->skbq);
+	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+	memset(flowc, 0, flowclen);
+
+	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+					   FW_FLOWC_WR_NPARAMS_V(nparams));
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+					  FW_WR_FLOWID_V(csk->tid));
+	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
+					    (csk->com.cdev->lldi.pf));
+	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
+	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
+	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
+	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
+	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
+	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
+	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+	flowc->mnemval[7].val = cpu_to_be32(csk->emss);
+
+	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
+		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
+	else
+		flowc->mnemval[8].val = cpu_to_be32(16384);
+
+	index = 9;
+
+	if (csk->snd_wscale) {
+		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
+		index++;
+	}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+	if (vlan == VLAN_NONE) {
+		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
+		flowc->mnemval[index].val = cpu_to_be32(0);
+	} else
+		flowc->mnemval[index].val = cpu_to_be32(
+				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
+#endif
+
+	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
+		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
+		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
+		 csk->rcv_nxt, csk->snd_win, csk->emss);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+	cxgbit_ofld_send(csk->com.cdev, skb);
+	return flowclen16;
+}
+
+int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+	struct cpl_set_tcb_field *req;
+	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
+	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
+	unsigned int len = roundup(sizeof(*req), 16);
+	int ret;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	/*  set up ulp submode */
+	req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+	req->word_cookie = htons(0);
+	req->mask = cpu_to_be64(0x3 << 4);
+	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
+				(dcrc ? ULP_CRC_DATA : 0)) << 4);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+	cxgbit_get_csk(csk);
+	cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+	cxgbit_ofld_send(csk->com.cdev, skb);
+
+	ret = cxgbit_wait_for_reply(csk->com.cdev,
+				    &csk->com.wr_wait,
+				    csk->tid, 5, __func__);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
+{
+	struct sk_buff *skb;
+	struct cpl_set_tcb_field *req;
+	unsigned int len = roundup(sizeof(*req), 16);
+	int ret;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+	req->word_cookie = htons(0);
+	req->mask = cpu_to_be64(0x3 << 8);
+	req->val = cpu_to_be64(pg_idx << 8);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+	cxgbit_get_csk(csk);
+	cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+	cxgbit_ofld_send(csk->com.cdev, skb);
+
+	ret = cxgbit_wait_for_reply(csk->com.cdev,
+				    &csk->com.wr_wait,
+				    csk->tid, 5, __func__);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+static void
+cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+	struct tid_info *t = cdev->lldi.tids;
+	unsigned int stid = GET_TID(rpl);
+	struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+		 __func__, cnp, stid, rpl->status);
+
+	if (!cnp) {
+		pr_info("%s stid %d lookup failure\n", __func__, stid);
+		return;
+	}
+
+	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+	cxgbit_put_cnp(cnp);
+}
+
+static void
+cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+	struct tid_info *t = cdev->lldi.tids;
+	unsigned int stid = GET_TID(rpl);
+	struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+		 __func__, cnp, stid, rpl->status);
+
+	if (!cnp) {
+		pr_info("%s stid %d lookup failure\n", __func__, stid);
+		return;
+	}
+
+	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+	cxgbit_put_cnp(cnp);
+}
+
+static void
+cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cpl_pass_establish *req = cplhdr(skb);
+	struct tid_info *t = cdev->lldi.tids;
+	unsigned int tid = GET_TID(req);
+	struct cxgbit_sock *csk;
+	struct cxgbit_np *cnp;
+	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
+	u32 snd_isn = be32_to_cpu(req->snd_isn);
+	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+	csk = lookup_tid(t, tid);
+	if (unlikely(!csk)) {
+		pr_err("can't find connection for tid %u.\n", tid);
+		goto rel_skb;
+	}
+	cnp = csk->cnp;
+
+	pr_debug("%s: csk %p; tid %u; cnp %p\n",
+		 __func__, csk, tid, cnp);
+
+	csk->write_seq = snd_isn;
+	csk->snd_una = snd_isn;
+	csk->snd_nxt = snd_isn;
+
+	csk->rcv_nxt = rcv_isn;
+
+	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
+		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
+
+	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
+	cxgbit_set_emss(csk, tcp_opt);
+	dst_confirm(csk->dst);
+	csk->com.state = CSK_STATE_ESTABLISHED;
+	spin_lock_bh(&cnp->np_accept_lock);
+	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
+	spin_unlock_bh(&cnp->np_accept_lock);
+	complete(&cnp->accept_comp);
+rel_skb:
+	__kfree_skb(skb);
+}
+
+static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	cxgbit_skcb_flags(skb) = 0;
+	spin_lock_bh(&csk->rxq.lock);
+	__skb_queue_tail(&csk->rxq, skb);
+	spin_unlock_bh(&csk->rxq.lock);
+	wake_up(&csk->waitq);
+}
+
+static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	pr_debug("%s: csk %p; tid %u; state %d\n",
+		 __func__, csk, csk->tid, csk->com.state);
+
+	switch (csk->com.state) {
+	case CSK_STATE_ESTABLISHED:
+		csk->com.state = CSK_STATE_CLOSING;
+		cxgbit_queue_rx_skb(csk, skb);
+		return;
+	case CSK_STATE_CLOSING:
+		/* simultaneous close */
+		csk->com.state = CSK_STATE_MORIBUND;
+		break;
+	case CSK_STATE_MORIBUND:
+		csk->com.state = CSK_STATE_DEAD;
+		cxgbit_put_csk(csk);
+		break;
+	case CSK_STATE_ABORTING:
+		break;
+	default:
+		pr_info("%s: cpl_peer_close in bad state %d\n",
+			__func__, csk->com.state);
+	}
+
+	__kfree_skb(skb);
+}
+
+static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	pr_debug("%s: csk %p; tid %u; state %d\n",
+		 __func__, csk, csk->tid, csk->com.state);
+
+	switch (csk->com.state) {
+	case CSK_STATE_CLOSING:
+		csk->com.state = CSK_STATE_MORIBUND;
+		break;
+	case CSK_STATE_MORIBUND:
+		csk->com.state = CSK_STATE_DEAD;
+		cxgbit_put_csk(csk);
+		break;
+	case CSK_STATE_ABORTING:
+	case CSK_STATE_DEAD:
+		break;
+	default:
+		pr_info("%s: cpl_close_con_rpl in bad state %d\n",
+			__func__, csk->com.state);
+	}
+
+	__kfree_skb(skb);
+}
+
+static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	struct cpl_abort_req_rss *hdr = cplhdr(skb);
+	unsigned int tid = GET_TID(hdr);
+	struct cpl_abort_rpl *rpl;
+	struct sk_buff *rpl_skb;
+	bool release = false;
+	bool wakeup_thread = false;
+	unsigned int len = roundup(sizeof(*rpl), 16);
+
+	pr_debug("%s: csk %p; tid %u; state %d\n",
+		 __func__, csk, tid, csk->com.state);
+
+	if (cxgbit_is_neg_adv(hdr->status)) {
+		pr_err("%s: got neg advise %d on tid %u\n",
+		       __func__, hdr->status, tid);
+		goto rel_skb;
+	}
+
+	switch (csk->com.state) {
+	case CSK_STATE_CONNECTING:
+	case CSK_STATE_MORIBUND:
+		csk->com.state = CSK_STATE_DEAD;
+		release = true;
+		break;
+	case CSK_STATE_ESTABLISHED:
+		csk->com.state = CSK_STATE_DEAD;
+		wakeup_thread = true;
+		break;
+	case CSK_STATE_CLOSING:
+		csk->com.state = CSK_STATE_DEAD;
+		if (!csk->conn)
+			release = true;
+		break;
+	case CSK_STATE_ABORTING:
+		break;
+	default:
+		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
+			__func__, csk->com.state);
+		csk->com.state = CSK_STATE_DEAD;
+	}
+
+	__skb_queue_purge(&csk->txq);
+
+	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+		cxgbit_send_tx_flowc_wr(csk);
+
+	rpl_skb = __skb_dequeue(&csk->skbq);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+
+	rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
+	memset(rpl, 0, len);
+
+	INIT_TP_WR(rpl, csk->tid);
+	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+	rpl->cmd = CPL_ABORT_NO_RST;
+	cxgbit_ofld_send(csk->com.cdev, rpl_skb);
+
+	if (wakeup_thread) {
+		cxgbit_queue_rx_skb(csk, skb);
+		return;
+	}
+
+	if (release)
+		cxgbit_put_csk(csk);
+rel_skb:
+	__kfree_skb(skb);
+}
+
+static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	pr_debug("%s: csk %p; tid %u; state %d\n",
+		 __func__, csk, csk->tid, csk->com.state);
+
+	switch (csk->com.state) {
+	case CSK_STATE_ABORTING:
+		csk->com.state = CSK_STATE_DEAD;
+		cxgbit_put_csk(csk);
+		break;
+	default:
+		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
+			__func__, csk->com.state);
+	}
+
+	__kfree_skb(skb);
+}
+
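+/*
+ * Sanity check on work-request credit accounting: the credits still
+ * pending on the wr queue (stashed per skb in skb->csum) plus the
+ * currently available credits must add up to wr_max_cred.
+ */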
+static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
+{
+	const struct sk_buff *skb = csk->wr_pending_head;
+	u32 credit = 0;
+
+	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
+		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
+		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
+		return true;
+	}
+
+	while (skb) {
+		credit += skb->csum;
+		skb = cxgbit_skcb_tx_wr_next(skb);
+	}
+
+	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
+		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
+		       csk, csk->tid, csk->wr_cred,
+		       credit, csk->wr_max_cred);
+
+		return true;
+	}
+
+	return false;
+}
+
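+/*
+ * CPL_FW4_ACK: firmware has acked transmitted data. Return the freed
+ * work-request credits, drop the acked skbs from the pending queue,
+ * advance snd_una and resume transmission of any queued tx skbs.
+ */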
+static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
+	u32 credits = rpl->credits;
+	u32 snd_una = ntohl(rpl->snd_una);
+
+	csk->wr_cred += credits;
+	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
+		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
+	while (credits) {
+		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
+
+		if (unlikely(!p)) {
+			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
+			       csk, csk->tid, credits,
+			       csk->wr_cred, csk->wr_una_cred);
+			break;
+		}
+
+		if (unlikely(credits < p->csum)) {
+			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
+				csk,  csk->tid,
+				credits, csk->wr_cred, csk->wr_una_cred,
+				p->csum);
+			p->csum -= credits;
+			break;
+		}
+
+		cxgbit_sock_dequeue_wr(csk);
+		credits -= p->csum;
+		kfree_skb(p);
+	}
+
+	if (unlikely(cxgbit_credit_err(csk))) {
+		cxgbit_queue_rx_skb(csk, skb);
+		return;
+	}
+
+	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
+		if (unlikely(before(snd_una, csk->snd_una))) {
+			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
+				csk, csk->tid, snd_una,
+				csk->snd_una);
+			goto rel_skb;
+		}
+
+		if (csk->snd_una != snd_una) {
+			csk->snd_una = snd_una;
+			dst_confirm(csk->dst);
+			wake_up(&csk->ack_waitq);
+		}
+	}
+
+	if (skb_queue_len(&csk->txq))
+		cxgbit_push_tx_frames(csk);
+
+rel_skb:
+	__kfree_skb(skb);
+}
+
+static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbit_sock *csk;
+	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
+	unsigned int tid = GET_TID(rpl);
+	struct cxgb4_lld_info *lldi = &cdev->lldi;
+	struct tid_info *t = lldi->tids;
+
+	csk = lookup_tid(t, tid);
+	if (unlikely(!csk))
+		pr_err("can't find connection for tid %u.\n", tid);
+	else
+		cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+
+	cxgbit_put_csk(csk);
+}
+
+static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbit_sock *csk;
+	struct cpl_rx_data *cpl = cplhdr(skb);
+	unsigned int tid = GET_TID(cpl);
+	struct cxgb4_lld_info *lldi = &cdev->lldi;
+	struct tid_info *t = lldi->tids;
+
+	csk = lookup_tid(t, tid);
+	if (unlikely(!csk)) {
+		pr_err("can't find conn. for tid %u.\n", tid);
+		goto rel_skb;
+	}
+
+	cxgbit_queue_rx_skb(csk, skb);
+	return;
+rel_skb:
+	__kfree_skb(skb);
+}
+
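+/*
+ * Run the CPL handler immediately unless another context currently
+ * owns the sock lock; in that case queue the skb on backlogq and let
+ * the lock owner replay it from cxgbit_unlock_sock().
+ */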
+static void
+__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	spin_lock(&csk->lock);
+	if (csk->lock_owner) {
+		__skb_queue_tail(&csk->backlogq, skb);
+		spin_unlock(&csk->lock);
+		return;
+	}
+
+	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
+	spin_unlock(&csk->lock);
+}
+
+static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	cxgbit_get_csk(csk);
+	__cxgbit_process_rx_cpl(csk, skb);
+	cxgbit_put_csk(csk);
+}
+
+static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbit_sock *csk;
+	struct cpl_tx_data *cpl = cplhdr(skb);
+	struct cxgb4_lld_info *lldi = &cdev->lldi;
+	struct tid_info *t = lldi->tids;
+	unsigned int tid = GET_TID(cpl);
+	u8 opcode = cxgbit_skcb_rx_opcode(skb);
+	bool ref = true;
+
+	switch (opcode) {
+	case CPL_FW4_ACK:
+		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
+		ref = false;
+		break;
+	case CPL_PEER_CLOSE:
+		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
+		break;
+	case CPL_CLOSE_CON_RPL:
+		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
+		break;
+	case CPL_ABORT_REQ_RSS:
+		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
+		break;
+	case CPL_ABORT_RPL_RSS:
+		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
+		break;
+	default:
+		goto rel_skb;
+	}
+
+	csk = lookup_tid(t, tid);
+	if (unlikely(!csk)) {
+		pr_err("can't find conn. for tid %u.\n", tid);
+		goto rel_skb;
+	}
+
+	if (ref)
+		cxgbit_process_rx_cpl(csk, skb);
+	else
+		__cxgbit_process_rx_cpl(csk, skb);
+
+	return;
+rel_skb:
+	__kfree_skb(skb);
+}
+
+cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
+	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
+	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
+	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
+	[CPL_RX_DATA]		= cxgbit_rx_data,
+	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
+	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
+	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
+	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
+	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
+};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
new file mode 100644
index 0000000..5d78bdb
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cxgbit.h"
+
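+/*
+ * Fill one page pod (ppod) with the task tag header and up to
+ * PPOD_PAGES_MAX page addresses taken from the DMA-mapped scatterlist;
+ * the extra trailing address is the overlap entry that gets repeated
+ * at the start of the next ppod.
+ */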
+static void
+cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
+		    struct cxgbi_task_tag_info *ttinfo,
+		    struct scatterlist **sg_pp, unsigned int *sg_off)
+{
+	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
+	unsigned int offset = sg_off ? *sg_off : 0;
+	dma_addr_t addr = 0UL;
+	unsigned int len = 0;
+	int i;
+
+	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
+
+	if (sg) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+	}
+
+	for (i = 0; i < PPOD_PAGES_MAX; i++) {
+		if (sg) {
+			ppod->addr[i] = cpu_to_be64(addr + offset);
+			offset += PAGE_SIZE;
+			if (offset == (len + sg->offset)) {
+				offset = 0;
+				sg = sg_next(sg);
+				if (sg) {
+					addr = sg_dma_address(sg);
+					len = sg_dma_len(sg);
+				}
+			}
+		} else {
+			ppod->addr[i] = 0ULL;
+		}
+	}
+
+	/*
+	 * the fifth address needs to be repeated in the next ppod, so do
+	 * not move sg
+	 */
+	if (sg_pp) {
+		*sg_pp = sg;
+		*sg_off = offset;
+	}
+
+	if (offset == len) {
+		offset = 0;
+		if (sg) {
+			sg = sg_next(sg);
+			if (sg)
+				addr = sg_dma_address(sg);
+		}
+	}
+	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
+}
+
+static struct sk_buff *
+cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
+		       unsigned int idx, unsigned int npods, unsigned int tid)
+{
+	struct ulp_mem_io *req;
+	struct ulptx_idata *idata;
+	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
+	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+				sizeof(struct ulptx_idata) + dlen, 16);
+	struct sk_buff *skb;
+
+	skb  = alloc_skb(wr_len, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+	INIT_ULPTX_WR(req, wr_len, 0, tid);
+	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
+		FW_WR_ATOMIC_V(0));
+	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+		ULP_MEMIO_ORDER_V(0) |
+		T5_ULP_MEMIO_IMM_V(1));
+	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
+	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+
+	idata = (struct ulptx_idata *)(req + 1);
+	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+	idata->len = htonl(dlen);
+
+	return skb;
+}
+
+static int
+cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+			struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
+			unsigned int npods, struct scatterlist **sg_pp,
+			unsigned int *sg_off)
+{
+	struct cxgbit_device *cdev = csk->com.cdev;
+	struct sk_buff *skb;
+	struct ulp_mem_io *req;
+	struct ulptx_idata *idata;
+	struct cxgbi_pagepod *ppod;
+	unsigned int i;
+
+	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct ulp_mem_io *)skb->data;
+	idata = (struct ulptx_idata *)(req + 1);
+	ppod = (struct cxgbi_pagepod *)(idata + 1);
+
+	for (i = 0; i < npods; i++, ppod++)
+		cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
+
+	__skb_queue_tail(&csk->ppodq, skb);
+
+	return 0;
+}
+
+static int
+cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+		   struct cxgbi_task_tag_info *ttinfo)
+{
+	unsigned int pidx = ttinfo->idx;
+	unsigned int npods = ttinfo->npods;
+	unsigned int i, cnt;
+	struct scatterlist *sg = ttinfo->sgl;
+	unsigned int offset = 0;
+	int ret = 0;
+
+	for (i = 0; i < npods; i += cnt, pidx += cnt) {
+		cnt = npods - i;
+
+		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+			cnt = ULPMEM_IDATA_MAX_NPPODS;
+
+		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
+					      &sg, &offset);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
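+/*
+ * DDP requires a page-aligned scatterlist: only the first entry may
+ * start at a (4-byte aligned) non-zero offset and only the last entry
+ * may end short of a full page.
+ */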
+static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
+				unsigned int nents)
+{
+	unsigned int last_sgidx = nents - 1;
+	unsigned int i;
+
+	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
+		unsigned int len = sg->length + sg->offset;
+
+		if ((sg->offset & 0x3) || (i && sg->offset) ||
+		    ((i != last_sgidx) && (len != PAGE_SIZE))) {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
+		   unsigned int xferlen)
+{
+	struct cxgbit_device *cdev = csk->com.cdev;
+	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+	struct scatterlist *sgl = ttinfo->sgl;
+	unsigned int sgcnt = ttinfo->nents;
+	unsigned int sg_offset = sgl->offset;
+	int ret;
+
+	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
+		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
+			 ppm, ppm->tformat.pgsz_idx_dflt,
+			 xferlen, ttinfo->nents);
+		return -EINVAL;
+	}
+
+	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
+		return -EINVAL;
+
+	ttinfo->nr_pages = (xferlen + sgl->offset +
+			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
+
+	/*
+	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
+	 */
+	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
+				      &ttinfo->tag, 0);
+	if (ret < 0)
+		return ret;
+	ttinfo->npods = ret;
+
+	sgl->offset = 0;
+	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+	sgl->offset = sg_offset;
+	if (!ret) {
+		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+			__func__, 0, xferlen, sgcnt);
+		goto rel_ppods;
+	}
+
+	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
+				xferlen, &ttinfo->hdr);
+
+	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
+	if (ret < 0) {
+		__skb_queue_purge(&csk->ppodq);
+		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+		goto rel_ppods;
+	}
+
+	return 0;
+
+rel_ppods:
+	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+	return -EINVAL;
+}
+
+void
+cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		   struct iscsi_r2t *r2t)
+{
+	struct cxgbit_sock *csk = conn->context;
+	struct cxgbit_device *cdev = csk->com.cdev;
+	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+	int ret = -EINVAL;
+
+	if ((!ccmd->setup_ddp) ||
+	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
+		goto out;
+
+	ccmd->setup_ddp = false;
+
+	ttinfo->sgl = cmd->se_cmd.t_data_sg;
+	ttinfo->nents = cmd->se_cmd.t_data_nents;
+
+	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
+	if (ret < 0) {
+		pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+			csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+
+		ttinfo->sgl = NULL;
+		ttinfo->nents = 0;
+	} else {
+		ccmd->release = true;
+	}
+out:
+	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
+	r2t->targ_xfer_tag = ttinfo->tag;
+}
+
+void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+
+	if (ccmd->release) {
+		struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+
+		if (ttinfo->sgl) {
+			struct cxgbit_sock *csk = conn->context;
+			struct cxgbit_device *cdev = csk->com.cdev;
+			struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+
+			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+				     ttinfo->nents, DMA_FROM_DEVICE);
+		} else {
+			put_page(sg_page(&ccmd->sg));
+		}
+
+		ccmd->release = false;
+	}
+}
+
+int cxgbit_ddp_init(struct cxgbit_device *cdev)
+{
+	struct cxgb4_lld_info *lldi = &cdev->lldi;
+	struct net_device *ndev = cdev->lldi.ports[0];
+	struct cxgbi_tag_format tformat;
+	unsigned int ppmax;
+	int ret, i;
+
+	if (!lldi->vr->iscsi.size) {
+		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
+		return -EACCES;
+	}
+
+	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
+
+	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
+	for (i = 0; i < 4; i++)
+		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
+					 & 0xF;
+	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
+
+	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
+			     cdev->lldi.pdev, &cdev->lldi, &tformat,
+			     ppmax, lldi->iscsi_llimit,
+			     lldi->vr->iscsi.start, 2);
+	if (ret >= 0) {
+		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);
+
+		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
+		    (ppm->ppmax >= 1024))
+			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
+		ret = 0;
+	}
+
+	return ret;
+}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
new file mode 100644
index 0000000..28c11bd
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef	__CXGBIT_LRO_H__
+#define	__CXGBIT_LRO_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#define LRO_FLUSH_LEN_MAX	65535
+
+struct cxgbit_lro_cb {
+	struct cxgbit_sock *csk;
+	u32 pdu_totallen;
+	u32 offset;
+	u8 pdu_idx;
+	bool complete;
+};
+
+enum cxgbit_pducb_flags {
+	PDUCBF_RX_HDR		= (1 << 0), /* received pdu header */
+	PDUCBF_RX_DATA		= (1 << 1), /* received pdu payload */
+	PDUCBF_RX_STATUS	= (1 << 2), /* received ddp status */
+	PDUCBF_RX_DATA_DDPD	= (1 << 3), /* pdu payload ddp'd */
+	PDUCBF_RX_HCRC_ERR	= (1 << 4), /* header digest error */
+	PDUCBF_RX_DCRC_ERR	= (1 << 5), /* data digest error */
+};
+
+struct cxgbit_lro_pdu_cb {
+	u8 flags;
+	u8 frags;
+	u8 hfrag_idx;
+	u8 nr_dfrags;
+	u8 dfrag_idx;
+	bool complete;
+	u32 seq;
+	u32 pdulen;
+	u32 hlen;
+	u32 dlen;
+	u32 doffset;
+	u32 ddigest;
+	void *hdr;
+};
+
+#define LRO_SKB_MAX_HEADROOM  \
+		(sizeof(struct cxgbit_lro_cb) + \
+		 (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb)))
+
+#define LRO_SKB_MIN_HEADROOM  \
+		(sizeof(struct cxgbit_lro_cb) + \
+		 sizeof(struct cxgbit_lro_pdu_cb))
+
+#define cxgbit_skb_lro_cb(skb)	((struct cxgbit_lro_cb *)skb->data)
+#define cxgbit_skb_lro_pdu_cb(skb, i)	\
+	((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \
+		+ (i * sizeof(struct cxgbit_lro_pdu_cb))))
+
+#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT	16 /* ddp'able */
+#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT	19 /* pad error */
+#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT	20 /* hcrc error */
+#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT	21 /* dcrc error */
+
+#endif	/* __CXGBIT_LRO_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
new file mode 100644
index 0000000..60dccd0
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DRV_NAME "cxgbit"
+#define DRV_VERSION "1.0.0-ko"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include "cxgbit.h"
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
+LIST_HEAD(cdev_list_head);
+/* cdev list lock */
+DEFINE_MUTEX(cdev_list_lock);
+
+void _cxgbit_free_cdev(struct kref *kref)
+{
+	struct cxgbit_device *cdev;
+
+	cdev = container_of(kref, struct cxgbit_device, kref);
+	kfree(cdev);
+}
+
+static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
+{
+	struct cxgb4_lld_info *lldi = &cdev->lldi;
+	u32 mdsl;
+
+#define ULP2_MAX_PKT_LEN 16224
+#define ISCSI_PDU_NONPAYLOAD_LEN 312
+	mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
+		     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
+	mdsl = min_t(u32, mdsl, 8192);
+	mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
+
+	cdev->mdsl = mdsl;
+}
+
+static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
+{
+	struct cxgbit_device *cdev;
+
+	if (is_t4(lldi->adapter_type))
+		return ERR_PTR(-ENODEV);
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (!cdev)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&cdev->kref);
+
+	cdev->lldi = *lldi;
+
+	cxgbit_set_mdsl(cdev);
+
+	if (cxgbit_ddp_init(cdev) < 0) {
+		kfree(cdev);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
+		pr_info("cdev %s ddp init failed\n",
+			pci_name(lldi->pdev));
+
+	if (lldi->fw_vers >= 0x10d2b00)
+		set_bit(CDEV_ISO_ENABLE, &cdev->flags);
+
+	spin_lock_init(&cdev->cskq.lock);
+	INIT_LIST_HEAD(&cdev->cskq.list);
+
+	mutex_lock(&cdev_list_lock);
+	list_add_tail(&cdev->list, &cdev_list_head);
+	mutex_unlock(&cdev_list_lock);
+
+	pr_info("cdev %s added for iSCSI target transport\n",
+		pci_name(lldi->pdev));
+
+	return cdev;
+}
+
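+/*
+ * Post an empty skb on each connection's rx queue and wake its rx
+ * waitqueue so the connection notices the device going away and can
+ * be torn down.
+ */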
+static void cxgbit_close_conn(struct cxgbit_device *cdev)
+{
+	struct cxgbit_sock *csk;
+	struct sk_buff *skb;
+	bool wakeup_thread = false;
+
+	spin_lock_bh(&cdev->cskq.lock);
+	list_for_each_entry(csk, &cdev->cskq.list, list) {
+		skb = alloc_skb(0, GFP_ATOMIC);
+		if (!skb)
+			continue;
+
+		spin_lock_bh(&csk->rxq.lock);
+		__skb_queue_tail(&csk->rxq, skb);
+		if (skb_queue_len(&csk->rxq) == 1)
+			wakeup_thread = true;
+		spin_unlock_bh(&csk->rxq.lock);
+
+		if (wakeup_thread) {
+			wake_up(&csk->waitq);
+			wakeup_thread = false;
+		}
+	}
+	spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
+{
+	bool free_cdev = false;
+
+	spin_lock_bh(&cdev->cskq.lock);
+	if (list_empty(&cdev->cskq.list))
+		free_cdev = true;
+	spin_unlock_bh(&cdev->cskq.lock);
+
+	if (free_cdev) {
+		mutex_lock(&cdev_list_lock);
+		list_del(&cdev->list);
+		mutex_unlock(&cdev_list_lock);
+
+		cxgbit_put_cdev(cdev);
+	} else {
+		cxgbit_close_conn(cdev);
+	}
+}
+
+static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
+{
+	struct cxgbit_device *cdev = handle;
+
+	switch (state) {
+	case CXGB4_STATE_UP:
+		set_bit(CDEV_STATE_UP, &cdev->flags);
+		pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
+		break;
+	case CXGB4_STATE_START_RECOVERY:
+		clear_bit(CDEV_STATE_UP, &cdev->flags);
+		cxgbit_close_conn(cdev);
+		pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
+		break;
+	case CXGB4_STATE_DOWN:
+		pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
+		break;
+	case CXGB4_STATE_DETACH:
+		clear_bit(CDEV_STATE_UP, &cdev->flags);
+		pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
+		cxgbit_detach_cdev(cdev);
+		break;
+	default:
+		pr_info("cdev %s unknown state %d.\n",
+			pci_name(cdev->lldi.pdev), state);
+		break;
+	}
+	return 0;
+}
+
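+/*
+ * Decode the CPL_RX_ISCSI_DDP status word: record header/data digest
+ * and pad errors, and note whether the payload was placed directly
+ * (DDP'd) instead of being delivered in the skb.
+ */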
+static void
+cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
+		       struct cxgbit_lro_pdu_cb *pdu_cb)
+{
+	unsigned int status = ntohl(cpl->ddpvld);
+
+	pdu_cb->flags |= PDUCBF_RX_STATUS;
+	pdu_cb->ddigest = ntohl(cpl->ulp_crc);
+	pdu_cb->pdulen = ntohs(cpl->len);
+
+	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
+		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
+		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
+	}
+
+	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
+		pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
+		pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
+	}
+
+	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
+		pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);
+
+	if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
+	    (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
+		pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
+	}
+}
+
+static void
+cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
+{
+	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+						lro_cb->pdu_idx);
+	struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);
+
+	cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);
+
+	if (pdu_cb->flags & PDUCBF_RX_HDR)
+		pdu_cb->complete = true;
+
+	lro_cb->complete = true;
+	lro_cb->pdu_totallen += pdu_cb->pdulen;
+	lro_cb->pdu_idx++;
+}
+
+static void
+cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
+		  unsigned int offset)
+{
+	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
+	u8 i;
+
+	/* usually there's just one frag */
+	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
+			     gl->frags[0].offset + offset,
+			     gl->frags[0].size - offset);
+	for (i = 1; i < gl->nfrags; i++)
+		__skb_fill_page_desc(skb, skb_frag_idx + i,
+				     gl->frags[i].page,
+				     gl->frags[i].offset,
+				     gl->frags[i].size);
+
+	skb_shinfo(skb)->nr_frags += gl->nfrags;
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+static void
+cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
+{
+	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+						lro_cb->pdu_idx);
+	u32 len, offset;
+
+	if (op == CPL_ISCSI_HDR) {
+		struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
+
+		offset = sizeof(struct cpl_iscsi_hdr);
+		pdu_cb->flags |= PDUCBF_RX_HDR;
+		pdu_cb->seq = ntohl(cpl->seq);
+		len = ntohs(cpl->len);
+		pdu_cb->hdr = gl->va + offset;
+		pdu_cb->hlen = len;
+		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
+
+		if (unlikely(gl->nfrags > 1))
+			cxgbit_skcb_flags(skb) = 0;
+
+		lro_cb->complete = false;
+	} else {
+		struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
+
+		offset = sizeof(struct cpl_iscsi_data);
+		pdu_cb->flags |= PDUCBF_RX_DATA;
+		len = ntohs(cpl->len);
+		pdu_cb->dlen = len;
+		pdu_cb->doffset = lro_cb->offset;
+		pdu_cb->nr_dfrags = gl->nfrags;
+		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
+	}
+
+	cxgbit_copy_frags(skb, gl, offset);
+
+	pdu_cb->frags += gl->nfrags;
+	lro_cb->offset += len;
+	skb->len += len;
+	skb->data_len += len;
+	skb->truesize += len;
+}
+
+static struct sk_buff *
+cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
+		    const __be64 *rsp, struct napi_struct *napi)
+{
+	struct sk_buff *skb;
+	struct cxgbit_lro_cb *lro_cb;
+
+	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
+
+	if (unlikely(!skb))
+		return NULL;
+
+	memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
+
+	cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
+
+	lro_cb = cxgbit_skb_lro_cb(skb);
+
+	cxgbit_get_csk(csk);
+
+	lro_cb->csk = csk;
+
+	return skb;
+}
+
+static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	bool wakeup_thread = false;
+
+	spin_lock(&csk->rxq.lock);
+	__skb_queue_tail(&csk->rxq, skb);
+	if (skb_queue_len(&csk->rxq) == 1)
+		wakeup_thread = true;
+	spin_unlock(&csk->rxq.lock);
+
+	if (wakeup_thread)
+		wake_up(&csk->waitq);
+}
+
+static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
+{
+	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+	struct cxgbit_sock *csk = lro_cb->csk;
+
+	csk->lro_skb = NULL;
+
+	__skb_unlink(skb, &lro_mgr->lroq);
+	cxgbit_queue_lro_skb(csk, skb);
+
+	cxgbit_put_csk(csk);
+
+	lro_mgr->lro_pkts++;
+	lro_mgr->lro_session_cnt--;
+}
+
+static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_peek(&lro_mgr->lroq)))
+		cxgbit_lro_flush(lro_mgr, skb);
+}
+
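+/*
+ * Coalesce iSCSI CPLs for a connection into csk->lro_skb; the lro skb
+ * is flushed to the connection's rx queue once the frag or per-skb PDU
+ * limits would be exceeded (or on a non-coalescable CPL, handled in
+ * the rx handler below).
+ */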
+static int
+cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
+		   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+		   struct napi_struct *napi)
+{
+	struct sk_buff *skb;
+	struct cxgbit_lro_cb *lro_cb;
+
+	if (!csk) {
+		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
+		goto out;
+	}
+
+	if (csk->lro_skb)
+		goto add_packet;
+
+start_lro:
+	if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
+		cxgbit_uld_lro_flush(lro_mgr);
+		goto start_lro;
+	}
+
+	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
+	if (unlikely(!skb))
+		goto out;
+
+	csk->lro_skb = skb;
+
+	__skb_queue_tail(&lro_mgr->lroq, skb);
+	lro_mgr->lro_session_cnt++;
+
+add_packet:
+	skb = csk->lro_skb;
+	lro_cb = cxgbit_skb_lro_cb(skb);
+
+	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
+	    MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
+	    (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
+		cxgbit_lro_flush(lro_mgr, skb);
+		goto start_lro;
+	}
+
+	if (gl)
+		cxgbit_lro_add_packet_gl(skb, op, gl);
+	else
+		cxgbit_lro_add_packet_rsp(skb, op, rsp);
+
+	lro_mgr->lro_merged++;
+
+	return 0;
+
+out:
+	return -1;
+}
+
+static int
+cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
+			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+			  struct napi_struct *napi)
+{
+	struct cxgbit_device *cdev = hndl;
+	struct cxgb4_lld_info *lldi = &cdev->lldi;
+	struct cpl_tx_data *rpl = NULL;
+	struct cxgbit_sock *csk = NULL;
+	unsigned int tid = 0;
+	struct sk_buff *skb;
+	unsigned int op = *(u8 *)rsp;
+	bool lro_flush = true;
+
+	switch (op) {
+	case CPL_ISCSI_HDR:
+	case CPL_ISCSI_DATA:
+	case CPL_RX_ISCSI_DDP:
+	case CPL_FW4_ACK:
+		lro_flush = false;
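+		/* fall through */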
+	case CPL_ABORT_RPL_RSS:
+	case CPL_PASS_ESTABLISH:
+	case CPL_PEER_CLOSE:
+	case CPL_CLOSE_CON_RPL:
+	case CPL_ABORT_REQ_RSS:
+	case CPL_SET_TCB_RPL:
+	case CPL_RX_DATA:
+		rpl = gl ? (struct cpl_tx_data *)gl->va :
+			   (struct cpl_tx_data *)(rsp + 1);
+		tid = GET_TID(rpl);
+		csk = lookup_tid(lldi->tids, tid);
+		break;
+	default:
+		break;
+	}
+
+	if (csk && csk->lro_skb && lro_flush)
+		cxgbit_lro_flush(lro_mgr, csk->lro_skb);
+
+	if (!gl) {
+		unsigned int len;
+
+		if (op == CPL_RX_ISCSI_DDP) {
+			if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
+						napi))
+				return 0;
+		}
+
+		len = 64 - sizeof(struct rsp_ctrl) - 8;
+		skb = napi_alloc_skb(napi, len);
+		if (!skb)
+			goto nomem;
+		__skb_put(skb, len);
+		skb_copy_to_linear_data(skb, &rsp[1], len);
+	} else {
+		if (unlikely(op != *(u8 *)gl->va)) {
+			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
+				gl->va, be64_to_cpu(*rsp),
+				be64_to_cpu(*(u64 *)gl->va),
+				gl->tot_len);
+			return 0;
+		}
+
+		if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
+			if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
+						napi))
+				return 0;
+		}
+
+#define RX_PULL_LEN 128
+		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+		if (unlikely(!skb))
+			goto nomem;
+	}
+
+	rpl = (struct cpl_tx_data *)skb->data;
+	op = rpl->ot.opcode;
+	cxgbit_skcb_rx_opcode(skb) = op;
+
+	pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
+		 cdev, op, rpl->ot.opcode_tid,
+		 ntohl(rpl->ot.opcode_tid), skb);
+
+	if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
+		cxgbit_cplhandlers[op](cdev, skb);
+	} else {
+		pr_err("No handler for opcode 0x%x.\n", op);
+		__kfree_skb(skb);
+	}
+	return 0;
+nomem:
+	pr_err("%s OOM bailing out.\n", __func__);
+	return 1;
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+struct cxgbit_dcb_work {
+	struct dcb_app_type dcb_app;
+	struct work_struct work;
+};
+
+static void
+cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
+			   u8 dcb_priority, u16 port_num)
+{
+	struct cxgbit_sock *csk;
+	struct sk_buff *skb;
+	u16 local_port;
+	bool wakeup_thread = false;
+
+	spin_lock_bh(&cdev->cskq.lock);
+	list_for_each_entry(csk, &cdev->cskq.list, list) {
+		if (csk->port_id != port_id)
+			continue;
+
+		if (csk->com.local_addr.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sock_in6;
+
+			sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
+			local_port = ntohs(sock_in6->sin6_port);
+		} else {
+			struct sockaddr_in *sock_in;
+
+			sock_in = (struct sockaddr_in *)&csk->com.local_addr;
+			local_port = ntohs(sock_in->sin_port);
+		}
+
+		if (local_port != port_num)
+			continue;
+
+		if (csk->dcb_priority == dcb_priority)
+			continue;
+
+		skb = alloc_skb(0, GFP_ATOMIC);
+		if (!skb)
+			continue;
+
+		spin_lock(&csk->rxq.lock);
+		__skb_queue_tail(&csk->rxq, skb);
+		if (skb_queue_len(&csk->rxq) == 1)
+			wakeup_thread = true;
+		spin_unlock(&csk->rxq.lock);
+
+		if (wakeup_thread) {
+			wake_up(&csk->waitq);
+			wakeup_thread = false;
+		}
+	}
+	spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_dcb_workfn(struct work_struct *work)
+{
+	struct cxgbit_dcb_work *dcb_work;
+	struct net_device *ndev;
+	struct cxgbit_device *cdev = NULL;
+	struct dcb_app_type *iscsi_app;
+	u8 priority, port_id = 0xff;
+
+	dcb_work = container_of(work, struct cxgbit_dcb_work, work);
+	iscsi_app = &dcb_work->dcb_app;
+
+	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+			goto out;
+
+		priority = iscsi_app->app.priority;
+
+	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+			goto out;
+
+		if (!iscsi_app->app.priority)
+			goto out;
+
+		priority = ffs(iscsi_app->app.priority) - 1;
+	} else {
+		goto out;
+	}
+
+	pr_debug("priority for ifid %d is %u\n",
+		 iscsi_app->ifindex, priority);
+
+	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+
+	if (!ndev)
+		goto out;
+
+	mutex_lock(&cdev_list_lock);
+	cdev = cxgbit_find_device(ndev, &port_id);
+
+	dev_put(ndev);
+
+	if (!cdev) {
+		mutex_unlock(&cdev_list_lock);
+		goto out;
+	}
+
+	cxgbit_update_dcb_priority(cdev, port_id, priority,
+				   iscsi_app->app.protocol);
+	mutex_unlock(&cdev_list_lock);
+out:
+	kfree(dcb_work);
+}
+
+static int
+cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
+		       void *data)
+{
+	struct cxgbit_dcb_work *dcb_work;
+	struct dcb_app_type *dcb_app = data;
+
+	dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+	if (!dcb_work)
+		return NOTIFY_DONE;
+
+	dcb_work->dcb_app = *dcb_app;
+	INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
+	schedule_work(&dcb_work->work);
+	return NOTIFY_OK;
+}
+#endif
+
+static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+	return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport cxgbit_transport = {
+	.name			= DRV_NAME,
+	.transport_type		= ISCSI_CXGBIT,
+	.rdma_shutdown		= false,
+	.priv_size		= sizeof(struct cxgbit_cmd),
+	.owner			= THIS_MODULE,
+	.iscsit_setup_np	= cxgbit_setup_np,
+	.iscsit_accept_np	= cxgbit_accept_np,
+	.iscsit_free_np		= cxgbit_free_np,
+	.iscsit_free_conn	= cxgbit_free_conn,
+	.iscsit_get_login_rx	= cxgbit_get_login_rx,
+	.iscsit_put_login_tx	= cxgbit_put_login_tx,
+	.iscsit_immediate_queue	= iscsit_immediate_queue,
+	.iscsit_response_queue	= iscsit_response_queue,
+	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
+	.iscsit_queue_data_in	= iscsit_queue_rsp,
+	.iscsit_queue_status	= iscsit_queue_rsp,
+	.iscsit_xmit_pdu	= cxgbit_xmit_pdu,
+	.iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
+	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
+	.iscsit_validate_params	= cxgbit_validate_params,
+	.iscsit_release_cmd	= cxgbit_release_cmd,
+	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
+};
+
+static struct cxgb4_uld_info cxgbit_uld_info = {
+	.name		= DRV_NAME,
+	.add		= cxgbit_uld_add,
+	.state_change	= cxgbit_uld_state_change,
+	.lro_rx_handler = cxgbit_uld_lro_rx_handler,
+	.lro_flush	= cxgbit_uld_lro_flush,
+};
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static struct notifier_block cxgbit_dcbevent_nb = {
+	.notifier_call = cxgbit_dcbevent_notify,
+};
+#endif
+
+static int __init cxgbit_init(void)
+{
+	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
+	iscsit_register_transport(&cxgbit_transport);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	pr_info("%s dcb enabled.\n", DRV_NAME);
+	register_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+		     sizeof(union cxgbit_skb_cb));
+	return 0;
+}
+
+static void __exit cxgbit_exit(void)
+{
+	struct cxgbit_device *cdev, *tmp;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+	mutex_lock(&cdev_list_lock);
+	list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
+		list_del(&cdev->list);
+		cxgbit_put_cdev(cdev);
+	}
+	mutex_unlock(&cdev_list_lock);
+	iscsit_unregister_transport(&cxgbit_transport);
+	cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
+}
+
+module_init(cxgbit_init);
+module_exit(cxgbit_exit);
+
+MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
new file mode 100644
index 0000000..d02bf58
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -0,0 +1,1561 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <asm/unaligned.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "cxgbit.h"
+
+struct sge_opaque_hdr {
+	void *dev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
+
+#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
+		    sizeof(struct fw_ofld_tx_data_wr))
+
+static struct sk_buff *
+__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
+{
+	struct sk_buff *skb = NULL;
+	u8 submode = 0;
+	int errcode;
+	static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
+
+	if (len) {
+		skb = alloc_skb_with_frags(hdr_len, len,
+					   0, &errcode,
+					   GFP_KERNEL);
+		if (!skb)
+			return NULL;
+
+		skb_reserve(skb, TX_HDR_LEN);
+		skb_reset_transport_header(skb);
+		__skb_put(skb, ISCSI_HDR_LEN);
+		skb->data_len = len;
+		skb->len += len;
+		submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
+
+	} else {
+		u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
+
+		skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
+		if (!skb)
+			return NULL;
+
+		skb_reserve(skb, TX_HDR_LEN + iso_len);
+		skb_reset_transport_header(skb);
+		__skb_put(skb, ISCSI_HDR_LEN);
+	}
+
+	submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
+	cxgbit_skcb_submode(skb) = submode;
+	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
+	cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
+	return skb;
+}
+
+static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
+{
+	return __cxgbit_alloc_skb(csk, len, false);
+}
+
+/*
+ * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data.  We currently use the same limit as for Ethernet packets.
+ */
+static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+{
+	int length = skb->len;
+
+	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+		length += sizeof(struct fw_ofld_tx_data_wr);
+
+	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+		length += sizeof(struct cpl_tx_data_iso);
+
+#define MAX_IMM_TX_PKT_LEN	256
+	return length <= MAX_IMM_TX_PKT_LEN;
+}
+
+/*
+ * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int cxgbit_sgl_len(unsigned int n)
+{
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/*
+ * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt;
+
+	if (cxgbit_is_ofld_imm(skb))
+		return DIV_ROUND_UP(skb->len, 8);
+	flits = skb_transport_offset(skb) / 8;
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb_tail_pointer(skb) != skb_transport_header(skb))
+		cnt++;
+	return flits + cxgbit_sgl_len(cnt);
+}
+
+#define CXGBIT_ISO_FSLICE 0x1
+#define CXGBIT_ISO_LSLICE 0x2
+static void
+cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
+{
+	struct cpl_tx_data_iso *cpl;
+	unsigned int submode = cxgbit_skcb_submode(skb);
+	unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
+	unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
+
+	cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl));
+
+	cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
+			CPL_TX_DATA_ISO_FIRST_V(fslice) |
+			CPL_TX_DATA_ISO_LAST_V(lslice) |
+			CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
+			CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
+			CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
+			CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
+			CPL_TX_DATA_ISO_SCSI_V(2));
+
+	cpl->ahs_len = 0;
+	cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
+	cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
+	cpl->len = htonl(iso_info->len);
+	cpl->reserved2_seglen_offset = htonl(0);
+	cpl->datasn_offset = htonl(0);
+	cpl->buffer_offset = htonl(0);
+	cpl->reserved3 = 0;
+
+	__skb_pull(skb, sizeof(*cpl));
+}
+
+static void
+cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
+		  u32 len, u32 credits, u32 compl)
+{
+	struct fw_ofld_tx_data_wr *req;
+	u32 submode = cxgbit_skcb_submode(skb);
+	u32 wr_ulp_mode = 0;
+	u32 hdr_size = sizeof(*req);
+	u32 opcode = FW_OFLD_TX_DATA_WR;
+	u32 immlen = 0;
+	u32 force = TX_FORCE_V(!submode);
+
+	if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
+		opcode = FW_ISCSI_TX_DATA_WR;
+		immlen += sizeof(struct cpl_tx_data_iso);
+		hdr_size += sizeof(struct cpl_tx_data_iso);
+		submode |= 8;
+	}
+
+	if (cxgbit_is_ofld_imm(skb))
+		immlen += dlen;
+
+	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb,
+							hdr_size);
+	req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
+					FW_WR_COMPL_V(compl) |
+					FW_WR_IMMDLEN_V(immlen));
+	req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+					FW_WR_LEN16_V(credits));
+	req->plen = htonl(len);
+	wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
+				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+
+	req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
+		 FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
+}
+
+static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+
+	while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
+		u32 dlen = skb->len;
+		u32 len = skb->len;
+		u32 credits_needed;
+		u32 compl = 0;
+		u32 flowclen16 = 0;
+		u32 iso_cpl_len = 0;
+
+		if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
+			iso_cpl_len = sizeof(struct cpl_tx_data_iso);
+
+		if (cxgbit_is_ofld_imm(skb))
+			credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
+		else
+			credits_needed = DIV_ROUND_UP((8 *
+					cxgbit_calc_tx_flits_ofld(skb)) +
+					iso_cpl_len, 16);
+
+		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+			credits_needed += DIV_ROUND_UP(
+				sizeof(struct fw_ofld_tx_data_wr), 16);
+		/*
+		 * Assumes the initial credits are large enough to support
+		 * fw_flowc_wr plus the largest possible first payload.
+		 */
+
+		if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
+			flowclen16 = cxgbit_send_tx_flowc_wr(csk);
+			csk->wr_cred -= flowclen16;
+			csk->wr_una_cred += flowclen16;
+		}
+
+		if (csk->wr_cred < credits_needed) {
+			pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
+				 csk, skb->len, skb->data_len,
+				 credits_needed, csk->wr_cred);
+			break;
+		}
+		__skb_unlink(skb, &csk->txq);
+		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+		skb->csum = credits_needed + flowclen16;
+		csk->wr_cred -= credits_needed;
+		csk->wr_una_cred += credits_needed;
+
+		pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
+			 csk, skb->len, skb->data_len, credits_needed,
+			 csk->wr_cred, csk->wr_una_cred);
+
+		if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
+			len += cxgbit_skcb_tx_extralen(skb);
+
+			if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
+			    (!before(csk->write_seq,
+				     csk->snd_una + csk->snd_win))) {
+				compl = 1;
+				csk->wr_una_cred = 0;
+			}
+
+			cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
+					  compl);
+			csk->snd_nxt += len;
+
+		} else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
+			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+			struct cpl_close_con_req *req =
+				(struct cpl_close_con_req *)skb->data;
+			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+			csk->wr_una_cred = 0;
+		}
+
+		cxgbit_sock_enqueue_wr(csk, skb);
+		t4_set_arp_err_handler(skb, csk,
+				       cxgbit_arp_failure_skb_discard);
+
+		pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
+			 csk, csk->tid, skb, len);
+
+		cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+	}
+}
+
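+/*
+ * Claim tx ownership of the connection. Ownership is only granted
+ * while the send window has room, so cxgbit_queue_skb() sleeps on
+ * ack_waitq until acks open the window; while owned, rx CPLs are
+ * deferred to backlogq (see __cxgbit_process_rx_cpl()).
+ */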
+static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
+{
+	spin_lock_bh(&csk->lock);
+
+	if (before(csk->write_seq, csk->snd_una + csk->snd_win))
+		csk->lock_owner = true;
+
+	spin_unlock_bh(&csk->lock);
+
+	return csk->lock_owner;
+}
+
+static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
+{
+	struct sk_buff_head backlogq;
+	struct sk_buff *skb;
+	void (*fn)(struct cxgbit_sock *, struct sk_buff *);
+
+	skb_queue_head_init(&backlogq);
+
+	spin_lock_bh(&csk->lock);
+	while (skb_queue_len(&csk->backlogq)) {
+		skb_queue_splice_init(&csk->backlogq, &backlogq);
+		spin_unlock_bh(&csk->lock);
+
+		while ((skb = __skb_dequeue(&backlogq))) {
+			fn = cxgbit_skcb_rx_backlog_fn(skb);
+			fn(csk, skb);
+		}
+
+		spin_lock_bh(&csk->lock);
+	}
+
+	csk->lock_owner = false;
+	spin_unlock_bh(&csk->lock);
+}
+
+static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	int ret = 0;
+
+	wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));
+
+	if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
+		     signal_pending(current))) {
+		__kfree_skb(skb);
+		__skb_queue_purge(&csk->ppodq);
+		ret = -1;
+		spin_lock_bh(&csk->lock);
+		if (csk->lock_owner) {
+			spin_unlock_bh(&csk->lock);
+			goto unlock;
+		}
+		spin_unlock_bh(&csk->lock);
+		return ret;
+	}
+
+	csk->write_seq += skb->len +
+			  cxgbit_skcb_tx_extralen(skb);
+
+	skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
+	__skb_queue_tail(&csk->txq, skb);
+	cxgbit_push_tx_frames(csk);
+
+unlock:
+	cxgbit_unlock_sock(csk);
+	return ret;
+}
+
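+/*
+ * Attach the command's t_data_sg pages to the skb as page frags
+ * (zero-copy tx), keeping one frag slot free for a zeroed pad page
+ * when the payload is not 4-byte aligned.
+ */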
+static int
+cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
+	       u32 data_length)
+{
+	u32 i = 0, nr_frags = MAX_SKB_FRAGS;
+	u32 padding = ((-data_length) & 3);
+	struct scatterlist *sg;
+	struct page *page;
+	unsigned int page_off;
+
+	if (padding)
+		nr_frags--;
+
+	/*
+	 * We know each entry in t_data_sg contains a page.
+	 */
+	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+	page_off = (data_offset % PAGE_SIZE);
+
+	while (data_length && (i < nr_frags)) {
+		u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+		page = sg_page(sg);
+
+		get_page(page);
+		skb_fill_page_desc(skb, i, page, sg->offset + page_off,
+				   cur_len);
+		skb->data_len += cur_len;
+		skb->len += cur_len;
+		skb->truesize += cur_len;
+
+		data_length -= cur_len;
+		page_off = 0;
+		sg = sg_next(sg);
+		i++;
+	}
+
+	if (data_length)
+		return -1;
+
+	if (padding) {
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			return -1;
+		skb_fill_page_desc(skb, i, page, 0, padding);
+		skb->data_len += padding;
+		skb->len += padding;
+		skb->truesize += padding;
+	}
+
+	return 0;
+}
+
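+/*
+ * DATA-IN using iSCSI segmentation offload: build one header plus a
+ * CPL_TX_DATA_ISO per burst and let the hardware slice the burst into
+ * up to max_iso_npdu PDUs of MaxRecvDataSegmentLength each.
+ */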
+static int
+cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+		     struct iscsi_datain_req *dr)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct sk_buff *skb;
+	struct iscsi_datain datain;
+	struct cxgbit_iso_info iso_info;
+	u32 data_length = cmd->se_cmd.data_length;
+	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+	u32 num_pdu, plen, tx_data = 0;
+	bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
+		SCF_TRANSPORT_TASK_SENSE);
+	bool set_statsn = false;
+	int ret = -1;
+
+	while (data_length) {
+		num_pdu = (data_length + mrdsl - 1) / mrdsl;
+		if (num_pdu > csk->max_iso_npdu)
+			num_pdu = csk->max_iso_npdu;
+
+		plen = num_pdu * mrdsl;
+		if (plen > data_length)
+			plen = data_length;
+
+		skb = __cxgbit_alloc_skb(csk, 0, true);
+		if (unlikely(!skb))
+			return -ENOMEM;
+
+		memset(skb->data, 0, ISCSI_HDR_LEN);
+		cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
+		cxgbit_skcb_submode(skb) |= (csk->submode &
+				CXGBIT_SUBMODE_DCRC);
+		cxgbit_skcb_tx_extralen(skb) = (num_pdu *
+				cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
+						((num_pdu - 1) * ISCSI_HDR_LEN);
+
+		memset(&datain, 0, sizeof(struct iscsi_datain));
+		memset(&iso_info, 0, sizeof(iso_info));
+
+		if (!tx_data)
+			iso_info.flags |= CXGBIT_ISO_FSLICE;
+
+		if (!(data_length - plen)) {
+			iso_info.flags |= CXGBIT_ISO_LSLICE;
+			if (!task_sense) {
+				datain.flags = ISCSI_FLAG_DATA_STATUS;
+				iscsit_increment_maxcmdsn(cmd, conn->sess);
+				cmd->stat_sn = conn->stat_sn++;
+				set_statsn = true;
+			}
+		}
+
+		iso_info.burst_len = num_pdu * mrdsl;
+		iso_info.mpdu = mrdsl;
+		iso_info.len = ISCSI_HDR_LEN + plen;
+
+		cxgbit_cpl_tx_data_iso(skb, &iso_info);
+
+		datain.offset = tx_data;
+		datain.data_sn = cmd->data_sn - 1;
+
+		iscsit_build_datain_pdu(cmd, conn, &datain,
+					(struct iscsi_data_rsp *)skb->data,
+					set_statsn);
+
+		ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
+		if (unlikely(ret)) {
+			__kfree_skb(skb);
+			goto out;
+		}
+
+		ret = cxgbit_queue_skb(csk, skb);
+		if (unlikely(ret))
+			goto out;
+
+		tx_data += plen;
+		data_length -= plen;
+
+		cmd->read_data_done += plen;
+		cmd->data_sn += num_pdu;
+	}
+
+	dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static int
+cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+		 const struct iscsi_datain *datain)
+{
+	struct sk_buff *skb;
+	int ret = 0;
+
+	skb = cxgbit_alloc_skb(csk, 0);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+	if (datain->length) {
+		cxgbit_skcb_submode(skb) |= (csk->submode &
+				CXGBIT_SUBMODE_DCRC);
+		cxgbit_skcb_tx_extralen(skb) =
+				cxgbit_digest_len[cxgbit_skcb_submode(skb)];
+	}
+
+	ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
+	if (ret < 0) {
+		__kfree_skb(skb);
+		return ret;
+	}
+
+	return cxgbit_queue_skb(csk, skb);
+}
+
+static int
+cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		       struct iscsi_datain_req *dr,
+		       const struct iscsi_datain *datain)
+{
+	struct cxgbit_sock *csk = conn->context;
+	u32 data_length = cmd->se_cmd.data_length;
+	u32 padding = ((-data_length) & 3);
+	u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+
+	if ((data_length > mrdsl) && (!dr->recovery) &&
+	    (!padding) && (!datain->offset) && csk->max_iso_npdu) {
+		atomic_long_add(data_length - datain->length,
+				&conn->sess->tx_data_octets);
+		return cxgbit_tx_datain_iso(csk, cmd, dr);
+	}
+
+	return cxgbit_tx_datain(csk, cmd, datain);
+}
+
+static int
+cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			  const void *data_buf, u32 data_buf_len)
+{
+	struct cxgbit_sock *csk = conn->context;
+	struct sk_buff *skb;
+	u32 padding = ((-data_buf_len) & 3);
+
+	skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+	if (data_buf_len) {
+		u32 pad_bytes = 0;
+
+		skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
+
+		if (padding)
+			skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
+				       &pad_bytes, padding);
+	}
+
+	cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
+				       cxgbit_skcb_submode(skb)];
+
+	return cxgbit_queue_skb(csk, skb);
+}
+
+int
+cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
+{
+	if (dr)
+		return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
+	else
+		return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
+
+int cxgbit_validate_params(struct iscsi_conn *conn)
+{
+	struct cxgbit_sock *csk = conn->context;
+	struct cxgbit_device *cdev = csk->com.cdev;
+	struct iscsi_param *param;
+	u32 max_xmitdsl;
+
+	param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
+					  conn->param_list);
+	if (!param)
+		return -1;
+
+	if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
+		return -1;
+
+	if (max_xmitdsl > cdev->mdsl) {
+		if (iscsi_change_param_sprintf(
+			conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
+			return -1;
+	}
+
+	return 0;
+}
+
+static int cxgbit_set_digest(struct cxgbit_sock *csk)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct iscsi_param *param;
+
+	param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
+	if (!param) {
+		pr_err("param not found key %s\n", HEADERDIGEST);
+		return -1;
+	}
+
+	if (!strcmp(param->value, CRC32C))
+		csk->submode |= CXGBIT_SUBMODE_HCRC;
+
+	param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
+	if (!param) {
+		csk->submode = 0;
+		pr_err("param not found key %s\n", DATADIGEST);
+		return -1;
+	}
+
+	if (!strcmp(param->value, CRC32C))
+		csk->submode |= CXGBIT_SUBMODE_DCRC;
+
+	if (cxgbit_setup_conn_digest(csk)) {
+		csk->submode = 0;
+		return -1;
+	}
+
+	return 0;
+}
+
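+/*
+ * ISO is only used when DataSequenceInOrder and DataPDUInOrder are
+ * both Yes; max_iso_npdu is capped by both MaxBurstLength/MRDSL and
+ * the maximum ISO payload the adapter supports.
+ */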
+static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+	struct iscsi_param *param;
+	u32 mrdsl, mbl;
+	u32 max_npdu, max_iso_npdu;
+
+	if (conn->login->leading_connection) {
+		param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
+						  conn->param_list);
+		if (!param) {
+			pr_err("param not found key %s\n", DATASEQUENCEINORDER);
+			return -1;
+		}
+
+		if (strcmp(param->value, YES))
+			return 0;
+
+		param = iscsi_find_param_from_key(DATAPDUINORDER,
+						  conn->param_list);
+		if (!param) {
+			pr_err("param not found key %s\n", DATAPDUINORDER);
+			return -1;
+		}
+
+		if (strcmp(param->value, YES))
+			return 0;
+
+		param = iscsi_find_param_from_key(MAXBURSTLENGTH,
+						  conn->param_list);
+		if (!param) {
+			pr_err("param not found key %s\n", MAXBURSTLENGTH);
+			return -1;
+		}
+
+		if (kstrtou32(param->value, 0, &mbl) < 0)
+			return -1;
+	} else {
+		if (!conn->sess->sess_ops->DataSequenceInOrder)
+			return 0;
+		if (!conn->sess->sess_ops->DataPDUInOrder)
+			return 0;
+
+		mbl = conn->sess->sess_ops->MaxBurstLength;
+	}
+
+	mrdsl = conn_ops->MaxRecvDataSegmentLength;
+	max_npdu = mbl / mrdsl;
+
+	max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
+			(ISCSI_HDR_LEN + mrdsl +
+			cxgbit_digest_len[csk->submode]);
+
+	csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
+
+	if (csk->max_iso_npdu <= 1)
+		csk->max_iso_npdu = 0;
+
+	return 0;
+}
+
+static int cxgbit_set_params(struct iscsi_conn *conn)
+{
+	struct cxgbit_sock *csk = conn->context;
+	struct cxgbit_device *cdev = csk->com.cdev;
+	struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
+	struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+	struct iscsi_param *param;
+	u8 erl;
+
+	if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
+		conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
+
+	if (conn->login->leading_connection) {
+		param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
+						  conn->param_list);
+		if (!param) {
+			pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
+			return -1;
+		}
+		if (kstrtou8(param->value, 0, &erl) < 0)
+			return -1;
+	} else {
+		erl = conn->sess->sess_ops->ErrorRecoveryLevel;
+	}
+
+	if (!erl) {
+		if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
+			if (cxgbit_set_iso_npdu(csk))
+				return -1;
+		}
+
+		if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
+			if (cxgbit_setup_conn_pgidx(csk,
+						    ppm->tformat.pgsz_idx_dflt))
+				return -1;
+			set_bit(CSK_DDP_ENABLE, &csk->com.flags);
+		}
+	}
+
+	if (cxgbit_set_digest(csk))
+		return -1;
+
+	return 0;
+}
+
+int
+cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+		    u32 length)
+{
+	struct cxgbit_sock *csk = conn->context;
+	struct sk_buff *skb;
+	u32 padding_buf = 0;
+	u8 padding = ((-length) & 3);
+
+	skb = cxgbit_alloc_skb(csk, length + padding);
+	if (!skb)
+		return -ENOMEM;
+	skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
+	skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
+
+	if (padding)
+		skb_store_bits(skb, ISCSI_HDR_LEN + length,
+			       &padding_buf, padding);
+
+	if (login->login_complete) {
+		if (cxgbit_set_params(conn)) {
+			kfree_skb(skb);
+			return -1;
+		}
+
+		set_bit(CSK_LOGIN_DONE, &csk->com.flags);
+	}
+
+	if (cxgbit_queue_skb(csk, skb))
+		return -1;
+
+	if ((!login->login_complete) && (!login->login_failed))
+		schedule_delayed_work(&conn->login_work, 0);
+
+	return 0;
+}
+
+static void
+cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
+		      unsigned int nents)
+{
+	struct skb_seq_state st;
+	const u8 *buf;
+	unsigned int consumed = 0, buf_len;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
+
+	skb_prepare_seq_read(skb, pdu_cb->doffset,
+			     pdu_cb->doffset + pdu_cb->dlen,
+			     &st);
+
+	while (true) {
+		buf_len = skb_seq_read(consumed, &buf, &st);
+		if (!buf_len) {
+			skb_abort_seq_read(&st);
+			break;
+		}
+
+		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
+						 buf_len, consumed);
+	}
+}
+
+static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
+	struct cxgbit_cmd *ccmd;
+	struct iscsi_cmd *cmd;
+
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+	if (!cmd) {
+		pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
+		return NULL;
+	}
+
+	ccmd = iscsit_priv_cmd(cmd);
+	ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
+	ccmd->setup_ddp = true;
+
+	return cmd;
+}
+
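+/*
+ * Immediate data arrives in the LRO skb. For commands that allow
+ * SG-to-memory passthrough the data frag is referenced directly as the
+ * command's data SG (no copy); otherwise the data is copied out of the
+ * skb into the command's scatterlist.
+ */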
+static int
+cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+			     u32 length)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct cxgbit_sock *csk = conn->context;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+
+	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+		pr_err("ImmediateData CRC32C DataDigest error\n");
+		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+			pr_err("Unable to recover from"
+			       " Immediate Data digest failure while"
+			       " in ERL=0.\n");
+			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+					  (unsigned char *)hdr);
+			return IMMEDIATE_DATA_CANNOT_RECOVER;
+		}
+
+		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+				  (unsigned char *)hdr);
+		return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+	}
+
+	if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+		struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+		struct skb_shared_info *ssi = skb_shinfo(csk->skb);
+		skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
+
+		sg_init_table(&ccmd->sg, 1);
+		sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
+			    dfrag->page_offset);
+		get_page(dfrag->page.p);
+
+		cmd->se_cmd.t_data_sg = &ccmd->sg;
+		cmd->se_cmd.t_data_nents = 1;
+
+		ccmd->release = true;
+	} else {
+		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
+		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
+
+		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+	}
+
+	cmd->write_data_done += pdu_cb->dlen;
+
+	if (cmd->write_data_done == cmd->se_cmd.data_length) {
+		spin_lock_bh(&cmd->istate_lock);
+		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+		spin_unlock_bh(&cmd->istate_lock);
+	}
+
+	return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
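
When the whole data segment arrived in a single page fragment of the
expected length, the code above references that fragment directly from
the command's scatterlist (the SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC path);
otherwise it copies into the pre-allocated scatterlist, sizing the copy
with max(1UL, DIV_ROUND_UP(dlen, PAGE_SIZE)). A small stand-alone sketch
of that entry-count calculation, assuming 4 KiB pages and sample lengths
chosen only for illustration:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long lens[] = { 1, 4096, 4097, 12288 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned long nents = DIV_ROUND_UP(lens[i], PAGE_SIZE);

		if (nents < 1)	/* the max(1UL, ...) guard above */
			nents = 1;
		printf("dlen %5lu -> %lu scatterlist entries\n",
		       lens[i], nents);
	}
	return 0;
}
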
+
+static int
+cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+			  bool dump_payload)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+	/*
+	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+	 */
+	if (dump_payload)
+		goto after_immediate_data;
+
+	immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
+						 cmd->first_burst_len);
+after_immediate_data:
+	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+		/*
+		 * A PDU/CmdSN carrying Immediate Data passed
+		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
+		 * Immediate Bit is not set.
+		 */
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+						(unsigned char *)hdr,
+						hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+
+		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			target_put_sess_cmd(&cmd->se_cmd);
+			return 0;
+		} else if (cmd->unsolicited_data) {
+			iscsit_set_unsoliticed_dataout(cmd);
+		}
+
+	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+		/*
+		 * Immediate Data failed DataCRC and ERL>=1,
+		 * silently drop this PDU and let the initiator
+		 * plug the CmdSN gap.
+		 *
+		 * FIXME: Send Unsolicited NOPIN with reserved
+		 * TTT here to help the initiator figure out
+		 * the missing CmdSN, although they should be
+		 * intelligent enough to determine the missing
+		 * CmdSN and issue a retry to plug the sequence.
+		 */
+		cmd->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+		return -1;
+
+	return 0;
+}
+
+static int
+cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
+	int rc;
+	bool dump_payload = false;
+
+	rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
+	if (rc < 0)
+		return rc;
+
+	if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
+	    (pdu_cb->nr_dfrags == 1))
+		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+	if (rc < 0)
+		return 0;
+	else if (rc > 0)
+		dump_payload = true;
+
+	if (!pdu_cb->dlen)
+		return 0;
+
+	return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
+}
+
+static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+{
+	struct scatterlist *sg_start;
+	struct iscsi_conn *conn = csk->conn;
+	struct iscsi_cmd *cmd = NULL;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
+	u32 data_offset = be32_to_cpu(hdr->offset);
+	u32 data_len = pdu_cb->dlen;
+	int rc, sg_nents, sg_off;
+	bool dcrc_err = false;
+
+	rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
+	if (rc < 0)
+		return rc;
+	else if (!cmd)
+		return 0;
+
+	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+		pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+		       " DataSN: 0x%08x\n",
+		       hdr->itt, hdr->offset, data_len,
+		       hdr->datasn);
+
+		dcrc_err = true;
+		goto check_payload;
+	}
+
+	pr_debug("DataOut data_len: %u, "
+		"write_data_done: %u, data_length: %u\n",
+		  data_len,  cmd->write_data_done,
+		  cmd->se_cmd.data_length);
+
+	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+		sg_off = data_offset / PAGE_SIZE;
+		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+		sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+
+		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+	}
+
+check_payload:
+
+	rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
+	unsigned char *ping_data = NULL;
+	u32 payload_length = pdu_cb->dlen;
+	int ret;
+
+	ret = iscsit_setup_nop_out(conn, cmd, hdr);
+	if (ret < 0)
+		return 0;
+
+	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+			pr_err("Unable to recover from"
+			       " NOPOUT Ping DataCRC failure while in"
+			       " ERL=0.\n");
+			ret = -1;
+			goto out;
+		} else {
+			/*
+			 * drop this PDU and let the
+			 * initiator plug the CmdSN gap.
+			 */
+			pr_info("Dropping NOPOUT"
+				" Command CmdSN: 0x%08x due to"
+				" DataCRC error.\n", hdr->cmdsn);
+			ret = 0;
+			goto out;
+		}
+	}
+
+	/*
+	 * Handle NOP-OUT payload for traditional iSCSI sockets
+	 */
+	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+		if (!ping_data) {
+			pr_err("Unable to allocate memory for"
+				" NOPOUT ping data.\n");
+			ret = -1;
+			goto out;
+		}
+
+		skb_copy_bits(csk->skb, pdu_cb->doffset,
+			      ping_data, payload_length);
+
+		ping_data[payload_length] = '\0';
+		/*
+		 * Attach ping data to struct iscsi_cmd->buf_ptr.
+		 */
+		cmd->buf_ptr = ping_data;
+		cmd->buf_ptr_size = payload_length;
+
+		pr_debug("Got %u bytes of NOPOUT ping"
+			" data.\n", payload_length);
+		pr_debug("Ping Data: \"%s\"\n", ping_data);
+	}
+
+	return iscsit_process_nop_out(conn, cmd, hdr);
+out:
+	if (cmd)
+		iscsit_free_cmd(cmd, false);
+	return ret;
+}
+
+static int
+cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
+	u32 payload_length = pdu_cb->dlen;
+	int rc;
+	unsigned char *text_in = NULL;
+
+	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+	if (rc < 0)
+		return rc;
+
+	if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+			pr_err("Unable to recover from"
+			       " Text Data digest failure while in"
+			       " ERL=0.\n");
+			goto reject;
+		} else {
+			/*
+			 * drop this PDU and let the
+			 * initiator plug the CmdSN gap.
+			 */
+			pr_info("Dropping Text"
+				" Command CmdSN: 0x%08x due to"
+				" DataCRC error.\n", hdr->cmdsn);
+			return 0;
+		}
+	}
+
+	if (payload_length) {
+		text_in = kzalloc(payload_length, GFP_KERNEL);
+		if (!text_in) {
+			pr_err("Unable to allocate text_in of payload_length: %u\n",
+			       payload_length);
+			return -ENOMEM;
+		}
+		skb_copy_bits(csk->skb, pdu_cb->doffset,
+			      text_in, payload_length);
+
+		text_in[payload_length - 1] = '\0';
+
+		cmd->text_in_ptr = text_in;
+	}
+
+	return iscsit_process_text_cmd(conn, cmd, hdr);
+
+reject:
+	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+				 pdu_cb->hdr);
+}
+
+static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
+{
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
+	struct iscsi_conn *conn = csk->conn;
+	struct iscsi_cmd *cmd = NULL;
+	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+	int ret = -EINVAL;
+
+	switch (opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		cmd = cxgbit_allocate_cmd(csk);
+		if (!cmd)
+			goto reject;
+
+		ret = cxgbit_handle_scsi_cmd(csk, cmd);
+		break;
+	case ISCSI_OP_SCSI_DATA_OUT:
+		ret = cxgbit_handle_iscsi_dataout(csk);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+			cmd = cxgbit_allocate_cmd(csk);
+			if (!cmd)
+				goto reject;
+		}
+
+		ret = cxgbit_handle_nop_out(csk, cmd);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		cmd = cxgbit_allocate_cmd(csk);
+		if (!cmd)
+			goto reject;
+
+		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+						 (unsigned char *)hdr);
+		break;
+	case ISCSI_OP_TEXT:
+		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+			if (!cmd)
+				goto reject;
+		} else {
+			cmd = cxgbit_allocate_cmd(csk);
+			if (!cmd)
+				goto reject;
+		}
+
+		ret = cxgbit_handle_text_cmd(csk, cmd);
+		break;
+	case ISCSI_OP_LOGOUT:
+		cmd = cxgbit_allocate_cmd(csk);
+		if (!cmd)
+			goto reject;
+
+		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+		if (ret > 0)
+			wait_for_completion_timeout(&conn->conn_logout_comp,
+						    SECONDS_FOR_LOGOUT_COMP
+						    * HZ);
+		break;
+	case ISCSI_OP_SNACK:
+		ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
+		break;
+	default:
+		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+		dump_stack();
+		break;
+	}
+
+	return ret;
+
+reject:
+	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				 (unsigned char *)hdr);
+}
+
+static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
+{
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_conn *conn = csk->conn;
+	struct iscsi_hdr *hdr = pdu_cb->hdr;
+	u8 opcode;
+
+	if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
+		atomic_long_inc(&conn->sess->conn_digest_errors);
+		goto transport_err;
+	}
+
+	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+		goto transport_err;
+
+	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+	if (conn->sess->sess_ops->SessionType &&
+	    ((!(opcode & ISCSI_OP_TEXT)) ||
+	     (!(opcode & ISCSI_OP_LOGOUT)))) {
+		pr_err("Received illegal iSCSI Opcode: 0x%02x"
+			" while in Discovery Session, rejecting.\n", opcode);
+		iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+				  (unsigned char *)hdr);
+		goto transport_err;
+	}
+
+	if (cxgbit_target_rx_opcode(csk) < 0)
+		goto transport_err;
+
+	return 0;
+
+transport_err:
+	return -1;
+}
+
+static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
+{
+	struct iscsi_conn *conn = csk->conn;
+	struct iscsi_login *login = conn->login;
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+	struct iscsi_login_req *login_req;
+
+	login_req = (struct iscsi_login_req *)login->req;
+	memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
+
+	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+		login_req->flags, login_req->itt, login_req->cmdsn,
+		login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
+	/*
+	 * Setup the initial iscsi_login values from the leading
+	 * login request PDU.
+	 */
+	if (login->first_request) {
+		login_req = (struct iscsi_login_req *)login->req;
+		login->leading_connection = (!login_req->tsih) ? 1 : 0;
+		login->current_stage	= ISCSI_LOGIN_CURRENT_STAGE(
+				login_req->flags);
+		login->version_min	= login_req->min_version;
+		login->version_max	= login_req->max_version;
+		memcpy(login->isid, login_req->isid, 6);
+		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
+		login->init_task_tag	= login_req->itt;
+		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+		login->cid		= be16_to_cpu(login_req->cid);
+		login->tsih		= be16_to_cpu(login_req->tsih);
+	}
+
+	if (iscsi_target_check_login_request(conn, login) < 0)
+		return -1;
+
+	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+	skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
+
+	return 0;
+}
+
+static int
+cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
+{
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
+	int ret;
+
+	cxgbit_rx_pdu_cb(skb) = pdu_cb;
+
+	csk->skb = skb;
+
+	if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
+		ret = cxgbit_rx_login_pdu(csk);
+		set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+	} else {
+		ret = cxgbit_rx_opcode(csk);
+	}
+
+	return ret;
+}
+
+static void cxgbit_lro_skb_dump(struct sk_buff *skb)
+{
+	struct skb_shared_info *ssi = skb_shinfo(skb);
+	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+	u8 i;
+
+	pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
+		skb, skb->head, skb->data, skb->len, skb->data_len,
+		ssi->nr_frags);
+	pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
+		skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
+
+	for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
+		pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
+			"frags %u.\n",
+			skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
+			pdu_cb->ddigest, pdu_cb->frags);
+	for (i = 0; i < ssi->nr_frags; i++)
+		pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
+			skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
+}
+
+static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb = csk->lro_hskb;
+	struct skb_shared_info *ssi = skb_shinfo(skb);
+	u8 i;
+
+	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+	for (i = 0; i < ssi->nr_frags; i++)
+		put_page(skb_frag_page(&ssi->frags[i]));
+	ssi->nr_frags = 0;
+}
+
+static void
+cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
+{
+	struct sk_buff *hskb = csk->lro_hskb;
+	struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
+	struct skb_shared_info *hssi = skb_shinfo(hskb);
+	struct skb_shared_info *ssi = skb_shinfo(skb);
+	unsigned int len = 0;
+
+	if (pdu_cb->flags & PDUCBF_RX_HDR) {
+		hpdu_cb->flags = pdu_cb->flags;
+		hpdu_cb->seq = pdu_cb->seq;
+		hpdu_cb->hdr = pdu_cb->hdr;
+		hpdu_cb->hlen = pdu_cb->hlen;
+
+		memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx],
+		       sizeof(skb_frag_t));
+
+		get_page(skb_frag_page(&hssi->frags[0]));
+		hssi->nr_frags = 1;
+		hpdu_cb->frags = 1;
+		hpdu_cb->hfrag_idx = 0;
+
+		len = hssi->frags[0].size;
+		hskb->len = len;
+		hskb->data_len = len;
+		hskb->truesize = len;
+	}
+
+	if (pdu_cb->flags & PDUCBF_RX_DATA) {
+		u8 hfrag_idx = 1, i;
+
+		hpdu_cb->flags |= pdu_cb->flags;
+
+		len = 0;
+		for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) {
+			memcpy(&hssi->frags[hfrag_idx],
+			       &ssi->frags[pdu_cb->dfrag_idx + i],
+			       sizeof(skb_frag_t));
+
+			get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
+
+			len += hssi->frags[hfrag_idx].size;
+
+			hssi->nr_frags++;
+			hpdu_cb->frags++;
+		}
+
+		hpdu_cb->dlen = pdu_cb->dlen;
+		hpdu_cb->doffset = hpdu_cb->hlen;
+		hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
+		hpdu_cb->dfrag_idx = 1;
+		hskb->len += len;
+		hskb->data_len += len;
+		hskb->truesize += len;
+	}
+
+	if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+		hpdu_cb->flags |= pdu_cb->flags;
+
+		if (hpdu_cb->flags & PDUCBF_RX_DATA)
+			hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
+
+		hpdu_cb->ddigest = pdu_cb->ddigest;
+		hpdu_cb->pdulen = pdu_cb->pdulen;
+	}
+}
+
+static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+	u8 pdu_idx = 0, last_idx = 0;
+	int ret = 0;
+
+	if (!pdu_cb->complete) {
+		cxgbit_lro_skb_merge(csk, skb, 0);
+
+		if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+			struct sk_buff *hskb = csk->lro_hskb;
+
+			ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
+
+			cxgbit_lro_hskb_reset(csk);
+
+			if (ret < 0)
+				goto out;
+		}
+
+		pdu_idx = 1;
+	}
+
+	if (lro_cb->pdu_idx)
+		last_idx = lro_cb->pdu_idx - 1;
+
+	for (; pdu_idx <= last_idx; pdu_idx++) {
+		ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
+		if (ret < 0)
+			goto out;
+	}
+
+	if ((!lro_cb->complete) && lro_cb->pdu_idx)
+		cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
+
+out:
+	return ret;
+}
+
+static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+	int ret = -1;
+
+	if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
+	    (pdu_cb->seq != csk->rcv_nxt)) {
+		pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
+			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
+		cxgbit_lro_skb_dump(skb);
+		return ret;
+	}
+
+	csk->rcv_nxt += lro_cb->pdu_totallen;
+
+	ret = cxgbit_process_lro_skb(csk, skb);
+
+	csk->rx_credits += lro_cb->pdu_totallen;
+
+	if (csk->rx_credits >= (csk->rcv_win / 4))
+		cxgbit_rx_data_ack(csk);
+
+	return ret;
+}
+
+static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+	int ret = -1;
+
+	if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO))
+		ret = cxgbit_rx_lro_skb(csk, skb);
+
+	__kfree_skb(skb);
+	return ret;
+}
+
+static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
+{
+	spin_lock_bh(&csk->rxq.lock);
+	if (skb_queue_len(&csk->rxq)) {
+		skb_queue_splice_init(&csk->rxq, rxq);
+		spin_unlock_bh(&csk->rxq.lock);
+		return true;
+	}
+	spin_unlock_bh(&csk->rxq.lock);
+	return false;
+}
+
+static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
+{
+	struct sk_buff *skb;
+	struct sk_buff_head rxq;
+
+	skb_queue_head_init(&rxq);
+
+	wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
+
+	if (signal_pending(current))
+		goto out;
+
+	while ((skb = __skb_dequeue(&rxq))) {
+		if (cxgbit_rx_skb(csk, skb))
+			goto out;
+	}
+
+	return 0;
+out:
+	__skb_queue_purge(&rxq);
+	return -1;
+}
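
cxgbit_rxq_len() and cxgbit_wait_rxq() use the usual splice-under-lock
pattern: the per-connection receive queue is drained onto a local list in
a single locked operation, and the per-skb processing then runs without
the queue lock held. A minimal sketch of the pattern (helper name
hypothetical, not part of the patch):

#include <linux/skbuff.h>

/* Move every queued skb from @src onto the caller-owned @local list.
 * Only the splice itself runs under the queue lock; the caller walks
 * @local afterwards without taking the lock again. */
static void drain_rx_queue(struct sk_buff_head *src,
			   struct sk_buff_head *local)
{
	spin_lock_bh(&src->lock);
	skb_queue_splice_init(src, local);
	spin_unlock_bh(&src->lock);
}
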
+
+int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	struct cxgbit_sock *csk = conn->context;
+	int ret = -1;
+
+	while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
+		ret = cxgbit_wait_rxq(csk);
+		if (ret) {
+			clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
+{
+	struct cxgbit_sock *csk = conn->context;
+
+	while (!kthread_should_stop()) {
+		iscsit_thread_check_cpumask(conn, current, 0);
+		if (cxgbit_wait_rxq(csk))
+			return;
+	}
+}
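
cxgbit_get_login_rx() and cxgbit_get_rx_pdu() above are the driver's
implementations of the login-receive and full-feature-phase receive hooks
of struct iscsit_transport, replacing the socket-based receive loop for
offloaded connections. A sketch of how such a driver wires its transport
ops; the structure name and the exact set of callbacks shown are
illustrative rather than a copy of the driver's real registration:

static struct iscsit_transport cxgbit_transport_sketch = {
	.name			= "HW_OFFLOAD_SKETCH",
	.transport_type		= ISCSI_CXGBIT,
	.rdma_shutdown		= false,
	.owner			= THIS_MODULE,
	/* login and full-feature receive paths shown above */
	.iscsit_get_login_rx	= cxgbit_get_login_rx,
	.iscsit_put_login_tx	= cxgbit_put_login_tx,
	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
	/* transmit side goes through the driver's iscsit_xmit_pdu hook */
};
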
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 961202f..50f3d3a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -478,16 +478,16 @@
 	return 0;
 }
 
-static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
-static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
 	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 	return 0;
 }
+EXPORT_SYMBOL(iscsit_queue_rsp);
 
-static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
 	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
 
@@ -498,6 +498,169 @@
 
 	__iscsit_free_cmd(cmd, scsi_cmd, true);
 }
+EXPORT_SYMBOL(iscsit_aborted_task);
+
+static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
+				      u32, u32, u8 *, u8 *);
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
+
+static int
+iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			  const void *data_buf, u32 data_buf_len)
+{
+	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
+	struct kvec *iov;
+	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
+	int ret;
+
+	iov = &cmd->iov_misc[0];
+	iov[niov].iov_base	= cmd->pdu;
+	iov[niov++].iov_len	= ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
+					  ISCSI_HDR_LEN, 0, NULL,
+					  (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest"
+			 " to opcode 0x%x 0x%08x\n",
+			 hdr->opcode, *header_digest);
+	}
+
+	if (data_buf_len) {
+		u32 padding = ((-data_buf_len) & 3);
+
+		iov[niov].iov_base	= (void *)data_buf;
+		iov[niov++].iov_len	= data_buf_len;
+		tx_size += data_buf_len;
+
+		if (padding != 0) {
+			iov[niov].iov_base = &cmd->pad_bytes;
+			iov[niov++].iov_len = padding;
+			tx_size += padding;
+			pr_debug("Attaching %u additional"
+				 " padding bytes.\n", padding);
+		}
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
+						  data_buf, data_buf_len,
+						  padding,
+						  (u8 *)&cmd->pad_bytes,
+						  (u8 *)&cmd->data_crc);
+
+			iov[niov].iov_base = &cmd->data_crc;
+			iov[niov++].iov_len = ISCSI_CRC_LEN;
+			tx_size += ISCSI_CRC_LEN;
+			pr_debug("Attached DataDigest for %u"
+				 " bytes opcode 0x%x, CRC 0x%08x\n",
+				 data_buf_len, hdr->opcode, cmd->data_crc);
+		}
+	}
+
+	cmd->iov_misc_count = niov;
+	cmd->tx_size = tx_size;
+
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
+static void iscsit_unmap_iovec(struct iscsi_cmd *);
+static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
+				    u32, u32, u32, u8 *);
+static int
+iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		       const struct iscsi_datain *datain)
+{
+	struct kvec *iov;
+	u32 iov_count = 0, tx_size = 0;
+	int ret, iov_ret;
+
+	iov = &cmd->iov_data[0];
+	iov[iov_count].iov_base	= cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
+					  ISCSI_HDR_LEN, 0, NULL,
+					  (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+
+		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
+			 *header_digest);
+	}
+
+	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+				   datain->offset, datain->length);
+	if (iov_ret < 0)
+		return -1;
+
+	iov_count += iov_ret;
+	tx_size += datain->length;
+
+	cmd->padding = ((-datain->length) & 3);
+	if (cmd->padding) {
+		iov[iov_count].iov_base		= cmd->pad_bytes;
+		iov[iov_count++].iov_len	= cmd->padding;
+		tx_size += cmd->padding;
+
+		pr_debug("Attaching %u padding bytes\n", cmd->padding);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
+							 cmd, datain->offset,
+							 datain->length,
+							 cmd->padding,
+							 cmd->pad_bytes);
+
+		iov[iov_count].iov_base	= &cmd->data_crc;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+
+		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
+			 datain->length + cmd->padding, cmd->data_crc);
+	}
+
+	cmd->iov_data_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	ret = iscsit_fe_sendpage_sg(cmd, conn);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			   struct iscsi_datain_req *dr, const void *buf,
+			   u32 buf_len)
+{
+	if (dr)
+		return iscsit_xmit_datain_pdu(conn, cmd, buf);
+	else
+		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
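
iscsit_xmit_pdu() above is the single transmit hook the rest of this file
now calls for every response PDU: a non-NULL iscsi_datain_req selects the
DataIN path (iovec mapping of the scatterlist plus sendpage), while all
other PDUs take the kvec-based path with an optional payload buffer. A
short summary of the calling convention, matching the converted call
sites in the hunks that follow (sketch only):

/*
 *   header-only PDU (async event, logout rsp, TMR rsp, R2T, NopIn):
 *	conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
 *
 *   header plus payload buffer (sense data, text rsp, reject, ping echo):
 *	conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, buf, buf_len);
 *
 *   DataIN (payload described by the datain request and scatterlist):
 *	conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
 */
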
 
 static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
 {
@@ -507,6 +670,7 @@
 static struct iscsit_transport iscsi_target_transport = {
 	.name			= "iSCSI/TCP",
 	.transport_type		= ISCSI_TCP,
+	.rdma_shutdown		= false,
 	.owner			= NULL,
 	.iscsit_setup_np	= iscsit_setup_np,
 	.iscsit_accept_np	= iscsit_accept_np,
@@ -519,6 +683,8 @@
 	.iscsit_queue_data_in	= iscsit_queue_rsp,
 	.iscsit_queue_status	= iscsit_queue_rsp,
 	.iscsit_aborted_task	= iscsit_aborted_task,
+	.iscsit_xmit_pdu	= iscsit_xmit_pdu,
+	.iscsit_get_rx_pdu	= iscsit_get_rx_pdu,
 	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
 };
 
@@ -634,7 +800,7 @@
 	kfree(iscsit_global);
 }
 
-static int iscsit_add_reject(
+int iscsit_add_reject(
 	struct iscsi_conn *conn,
 	u8 reason,
 	unsigned char *buf)
@@ -664,6 +830,7 @@
 
 	return -1;
 }
+EXPORT_SYMBOL(iscsit_add_reject);
 
 static int iscsit_add_reject_from_cmd(
 	struct iscsi_cmd *cmd,
@@ -719,6 +886,7 @@
 {
 	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
 }
+EXPORT_SYMBOL(iscsit_reject_cmd);
 
 /*
  * Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -737,7 +905,14 @@
 	/*
 	 * We know each entry in t_data_sg contains a page.
 	 */
-	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+	u32 ent = data_offset / PAGE_SIZE;
+
+	if (ent >= cmd->se_cmd.t_data_nents) {
+		pr_err("Initial page entry out-of-bounds\n");
+		return -1;
+	}
+
+	sg = &cmd->se_cmd.t_data_sg[ent];
 	page_off = (data_offset % PAGE_SIZE);
 
 	cmd->first_data_sg = sg;
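
The new bounds check in iscsit_map_iovec() rejects a DataOut offset that
would index past the end of the command's scatterlist instead of
dereferencing out-of-bounds memory. A stand-alone sketch of the check,
assuming 4 KiB pages and a hypothetical two-entry scatterlist:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int t_data_nents = 2;	/* assumed scatterlist entries */
	unsigned int offsets[] = { 0, 4096, 8192 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int ent = offsets[i] / PAGE_SIZE;

		printf("offset %5u -> entry %u: %s\n", offsets[i], ent,
		       ent >= t_data_nents ? "reject" : "map");
	}
	return 0;
}
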
@@ -2335,7 +2510,7 @@
 }
 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
 
-static int iscsit_handle_snack(
+int iscsit_handle_snack(
 	struct iscsi_conn *conn,
 	unsigned char *buf)
 {
@@ -2388,6 +2563,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(iscsit_handle_snack);
 
 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
 {
@@ -2534,7 +2710,6 @@
 {
 	struct iscsi_async *hdr;
 
-	cmd->tx_size = ISCSI_HDR_LEN;
 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
 
 	hdr			= (struct iscsi_async *) cmd->pdu;
@@ -2552,25 +2727,11 @@
 	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
 	hdr->param3		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
 
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		cmd->tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32C HeaderDigest to"
-			" Async Message 0x%08x\n", *header_digest);
-	}
-
-	cmd->iov_misc[0].iov_base	= cmd->pdu;
-	cmd->iov_misc[0].iov_len	= cmd->tx_size;
-	cmd->iov_misc_count		= 1;
-
 	pr_debug("Sending Connection Dropped Async Message StatSN:"
 		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
 			cmd->logout_cid, conn->cid);
-	return 0;
+
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
 }
 
 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
@@ -2583,7 +2744,7 @@
 	}
 }
 
-static void
+void
 iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 			struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
 			bool set_statsn)
@@ -2627,15 +2788,14 @@
 		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
 		ntohl(hdr->offset), datain->length, conn->cid);
 }
+EXPORT_SYMBOL(iscsit_build_datain_pdu);
 
 static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
 	struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
 	struct iscsi_datain datain;
 	struct iscsi_datain_req *dr;
-	struct kvec *iov;
-	u32 iov_count = 0, tx_size = 0;
-	int eodr = 0, ret, iov_ret;
+	int eodr = 0, ret;
 	bool set_statsn = false;
 
 	memset(&datain, 0, sizeof(struct iscsi_datain));
@@ -2677,64 +2837,9 @@
 
 	iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
 
-	iov = &cmd->iov_data[0];
-	iov[iov_count].iov_base	= cmd->pdu;
-	iov[iov_count++].iov_len	= ISCSI_HDR_LEN;
-	tx_size += ISCSI_HDR_LEN;
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		iov[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-
-		pr_debug("Attaching CRC32 HeaderDigest"
-			" for DataIN PDU 0x%08x\n", *header_digest);
-	}
-
-	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
-				datain.offset, datain.length);
-	if (iov_ret < 0)
-		return -1;
-
-	iov_count += iov_ret;
-	tx_size += datain.length;
-
-	cmd->padding = ((-datain.length) & 3);
-	if (cmd->padding) {
-		iov[iov_count].iov_base		= cmd->pad_bytes;
-		iov[iov_count++].iov_len	= cmd->padding;
-		tx_size += cmd->padding;
-
-		pr_debug("Attaching %u padding bytes\n",
-				cmd->padding);
-	}
-	if (conn->conn_ops->DataDigest) {
-		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
-			 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
-
-		iov[iov_count].iov_base	= &cmd->data_crc;
-		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-
-		pr_debug("Attached CRC32C DataDigest %d bytes, crc"
-			" 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
-	}
-
-	cmd->iov_data_count = iov_count;
-	cmd->tx_size = tx_size;
-
-	ret = iscsit_fe_sendpage_sg(cmd, conn);
-
-	iscsit_unmap_iovec(cmd);
-
-	if (ret < 0) {
-		iscsit_tx_thread_wait_for_tcp(conn);
+	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
+	if (ret < 0)
 		return ret;
-	}
 
 	if (dr->dr_complete) {
 		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
@@ -2843,34 +2948,14 @@
 static int
 iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
-	struct kvec *iov;
-	int niov = 0, tx_size, rc;
+	int rc;
 
 	rc = iscsit_build_logout_rsp(cmd, conn,
 			(struct iscsi_logout_rsp *)&cmd->pdu[0]);
 	if (rc < 0)
 		return rc;
 
-	tx_size = ISCSI_HDR_LEN;
-	iov = &cmd->iov_misc[0];
-	iov[niov].iov_base	= cmd->pdu;
-	iov[niov++].iov_len	= ISCSI_HDR_LEN;
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		iov[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32C HeaderDigest to"
-			" Logout Response 0x%08x\n", *header_digest);
-	}
-	cmd->iov_misc_count = niov;
-	cmd->tx_size = tx_size;
-
-	return 0;
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
 }
 
 void
@@ -2910,34 +2995,16 @@
 	int want_response)
 {
 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
-	int tx_size = ISCSI_HDR_LEN, ret;
+	int ret;
 
 	iscsit_build_nopin_rsp(cmd, conn, hdr, false);
 
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32C HeaderDigest to"
-			" NopIN 0x%08x\n", *header_digest);
-	}
-
-	cmd->iov_misc[0].iov_base	= cmd->pdu;
-	cmd->iov_misc[0].iov_len	= tx_size;
-	cmd->iov_misc_count	= 1;
-	cmd->tx_size		= tx_size;
-
 	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
 		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
 
-	ret = iscsit_send_tx_data(cmd, conn, 1);
-	if (ret < 0) {
-		iscsit_tx_thread_wait_for_tcp(conn);
+	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
+	if (ret < 0)
 		return ret;
-	}
 
 	spin_lock_bh(&cmd->istate_lock);
 	cmd->i_state = want_response ?
@@ -2951,75 +3018,24 @@
 iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
-	struct kvec *iov;
-	u32 padding = 0;
-	int niov = 0, tx_size;
 
 	iscsit_build_nopin_rsp(cmd, conn, hdr, true);
 
-	tx_size = ISCSI_HDR_LEN;
-	iov = &cmd->iov_misc[0];
-	iov[niov].iov_base	= cmd->pdu;
-	iov[niov++].iov_len	= ISCSI_HDR_LEN;
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		iov[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32C HeaderDigest"
-			" to NopIn 0x%08x\n", *header_digest);
-	}
-
 	/*
 	 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
 	 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
 	 */
-	if (cmd->buf_ptr_size) {
-		iov[niov].iov_base	= cmd->buf_ptr;
-		iov[niov++].iov_len	= cmd->buf_ptr_size;
-		tx_size += cmd->buf_ptr_size;
+	pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
 
-		pr_debug("Echoing back %u bytes of ping"
-			" data.\n", cmd->buf_ptr_size);
-
-		padding = ((-cmd->buf_ptr_size) & 3);
-		if (padding != 0) {
-			iov[niov].iov_base = &cmd->pad_bytes;
-			iov[niov++].iov_len = padding;
-			tx_size += padding;
-			pr_debug("Attaching %u additional"
-				" padding bytes.\n", padding);
-		}
-		if (conn->conn_ops->DataDigest) {
-			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
-				cmd->buf_ptr, cmd->buf_ptr_size,
-				padding, (u8 *)&cmd->pad_bytes,
-				(u8 *)&cmd->data_crc);
-
-			iov[niov].iov_base = &cmd->data_crc;
-			iov[niov++].iov_len = ISCSI_CRC_LEN;
-			tx_size += ISCSI_CRC_LEN;
-			pr_debug("Attached DataDigest for %u"
-				" bytes of ping data, CRC 0x%08x\n",
-				cmd->buf_ptr_size, cmd->data_crc);
-		}
-	}
-
-	cmd->iov_misc_count = niov;
-	cmd->tx_size = tx_size;
-
-	return 0;
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+						     cmd->buf_ptr,
+						     cmd->buf_ptr_size);
 }
 
 static int iscsit_send_r2t(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
 {
-	int tx_size = 0;
 	struct iscsi_r2t *r2t;
 	struct iscsi_r2t_rsp *hdr;
 	int ret;
@@ -3035,7 +3051,10 @@
 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
 			(struct scsi_lun *)&hdr->lun);
 	hdr->itt		= cmd->init_task_tag;
-	r2t->targ_xfer_tag	= session_get_next_ttt(conn->sess);
+	if (conn->conn_transport->iscsit_get_r2t_ttt)
+		conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
+	else
+		r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
 	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
 	hdr->statsn		= cpu_to_be32(conn->stat_sn);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3044,38 +3063,18 @@
 	hdr->data_offset	= cpu_to_be32(r2t->offset);
 	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
 
-	cmd->iov_misc[0].iov_base	= cmd->pdu;
-	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
-	tx_size += ISCSI_HDR_LEN;
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32 HeaderDigest for R2T"
-			" PDU 0x%08x\n", *header_digest);
-	}
-
 	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
 		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
 		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
 		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
 			r2t->offset, r2t->xfer_len, conn->cid);
 
-	cmd->iov_misc_count = 1;
-	cmd->tx_size = tx_size;
-
 	spin_lock_bh(&cmd->r2t_lock);
 	r2t->sent_r2t = 1;
 	spin_unlock_bh(&cmd->r2t_lock);
 
-	ret = iscsit_send_tx_data(cmd, conn, 1);
+	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
 	if (ret < 0) {
-		iscsit_tx_thread_wait_for_tcp(conn);
 		return ret;
 	}
 
@@ -3166,6 +3165,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
 
 void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 			bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
@@ -3204,18 +3204,12 @@
 static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
 	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
-	struct kvec *iov;
-	u32 padding = 0, tx_size = 0;
-	int iov_count = 0;
 	bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
+	void *data_buf = NULL;
+	u32 padding = 0, data_buf_len = 0;
 
 	iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
 
-	iov = &cmd->iov_misc[0];
-	iov[iov_count].iov_base	= cmd->pdu;
-	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-	tx_size += ISCSI_HDR_LEN;
-
 	/*
 	 * Attach SENSE DATA payload to iSCSI Response PDU
 	 */
@@ -3227,56 +3221,23 @@
 
 		padding		= -(cmd->se_cmd.scsi_sense_length) & 3;
 		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
-		iov[iov_count].iov_base	= cmd->sense_buffer;
-		iov[iov_count++].iov_len =
-				(cmd->se_cmd.scsi_sense_length + padding);
-		tx_size += cmd->se_cmd.scsi_sense_length;
+		data_buf = cmd->sense_buffer;
+		data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
 
 		if (padding) {
 			memset(cmd->sense_buffer +
 				cmd->se_cmd.scsi_sense_length, 0, padding);
-			tx_size += padding;
 			pr_debug("Adding %u bytes of padding to"
 				" SENSE.\n", padding);
 		}
 
-		if (conn->conn_ops->DataDigest) {
-			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
-				cmd->sense_buffer,
-				(cmd->se_cmd.scsi_sense_length + padding),
-				0, NULL, (u8 *)&cmd->data_crc);
-
-			iov[iov_count].iov_base    = &cmd->data_crc;
-			iov[iov_count++].iov_len     = ISCSI_CRC_LEN;
-			tx_size += ISCSI_CRC_LEN;
-
-			pr_debug("Attaching CRC32 DataDigest for"
-				" SENSE, %u bytes CRC 0x%08x\n",
-				(cmd->se_cmd.scsi_sense_length + padding),
-				cmd->data_crc);
-		}
-
 		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
 				" Response PDU\n",
 				cmd->se_cmd.scsi_sense_length);
 	}
 
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		iov[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32 HeaderDigest for Response"
-				" PDU 0x%08x\n", *header_digest);
-	}
-
-	cmd->iov_misc_count = iov_count;
-	cmd->tx_size = tx_size;
-
-	return 0;
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
+						     data_buf_len);
 }
 
 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
@@ -3323,30 +3284,10 @@
 iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
 	struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
-	u32 tx_size = 0;
 
 	iscsit_build_task_mgt_rsp(cmd, conn, hdr);
 
-	cmd->iov_misc[0].iov_base	= cmd->pdu;
-	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
-	tx_size += ISCSI_HDR_LEN;
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32 HeaderDigest for Task"
-			" Mgmt Response PDU 0x%08x\n", *header_digest);
-	}
-
-	cmd->iov_misc_count = 1;
-	cmd->tx_size = tx_size;
-
-	return 0;
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
 }
 
 static bool iscsit_check_inaddr_any(struct iscsi_np *np)
@@ -3583,53 +3524,16 @@
 	struct iscsi_conn *conn)
 {
 	struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
-	struct kvec *iov;
-	u32 tx_size = 0;
-	int text_length, iov_count = 0, rc;
+	int text_length;
 
-	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
-	if (rc < 0)
-		return rc;
+	text_length = iscsit_build_text_rsp(cmd, conn, hdr,
+				conn->conn_transport->transport_type);
+	if (text_length < 0)
+		return text_length;
 
-	text_length = rc;
-	iov = &cmd->iov_misc[0];
-	iov[iov_count].iov_base = cmd->pdu;
-	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-	iov[iov_count].iov_base	= cmd->buf_ptr;
-	iov[iov_count++].iov_len = text_length;
-
-	tx_size += (ISCSI_HDR_LEN + text_length);
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		iov[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32 HeaderDigest for"
-			" Text Response PDU 0x%08x\n", *header_digest);
-	}
-
-	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
-				cmd->buf_ptr, text_length,
-				0, NULL, (u8 *)&cmd->data_crc);
-
-		iov[iov_count].iov_base	= &cmd->data_crc;
-		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
-		tx_size	+= ISCSI_CRC_LEN;
-
-		pr_debug("Attaching DataDigest for %u bytes of text"
-			" data, CRC 0x%08x\n", text_length,
-			cmd->data_crc);
-	}
-
-	cmd->iov_misc_count = iov_count;
-	cmd->tx_size = tx_size;
-
-	return 0;
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+						     cmd->buf_ptr,
+						     text_length);
 }
 
 void
@@ -3654,49 +3558,15 @@
 	struct iscsi_conn *conn)
 {
 	struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
-	struct kvec *iov;
-	u32 iov_count = 0, tx_size;
 
 	iscsit_build_reject(cmd, conn, hdr);
 
-	iov = &cmd->iov_misc[0];
-	iov[iov_count].iov_base = cmd->pdu;
-	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-	iov[iov_count].iov_base = cmd->buf_ptr;
-	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-
-	tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
-
-	if (conn->conn_ops->HeaderDigest) {
-		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
-		iov[0].iov_len += ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32 HeaderDigest for"
-			" REJECT PDU 0x%08x\n", *header_digest);
-	}
-
-	if (conn->conn_ops->DataDigest) {
-		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
-				ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
-
-		iov[iov_count].iov_base = &cmd->data_crc;
-		iov[iov_count++].iov_len  = ISCSI_CRC_LEN;
-		tx_size += ISCSI_CRC_LEN;
-		pr_debug("Attaching CRC32 DataDigest for REJECT"
-				" PDU 0x%08x\n", cmd->data_crc);
-	}
-
-	cmd->iov_misc_count = iov_count;
-	cmd->tx_size = tx_size;
-
 	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
 		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
 
-	return 0;
+	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+						     cmd->buf_ptr,
+						     ISCSI_HDR_LEN);
 }
 
 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
@@ -3724,33 +3594,7 @@
 	cpumask_setall(conn->conn_cpumask);
 }
 
-static inline void iscsit_thread_check_cpumask(
-	struct iscsi_conn *conn,
-	struct task_struct *p,
-	int mode)
-{
-	/*
-	 * mode == 1 signals iscsi_target_tx_thread() usage.
-	 * mode == 0 signals iscsi_target_rx_thread() usage.
-	 */
-	if (mode == 1) {
-		if (!conn->conn_tx_reset_cpumask)
-			return;
-		conn->conn_tx_reset_cpumask = 0;
-	} else {
-		if (!conn->conn_rx_reset_cpumask)
-			return;
-		conn->conn_rx_reset_cpumask = 0;
-	}
-	/*
-	 * Update the CPU mask for this single kthread so that
-	 * both TX and RX kthreads are scheduled to run on the
-	 * same CPU.
-	 */
-	set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
-
-static int
+int
 iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
 	int ret;
@@ -3792,6 +3636,7 @@
 err:
 	return -1;
 }
+EXPORT_SYMBOL(iscsit_immediate_queue);
 
 static int
 iscsit_handle_immediate_queue(struct iscsi_conn *conn)
@@ -3816,7 +3661,7 @@
 	return 0;
 }
 
-static int
+int
 iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
 	int ret;
@@ -3889,13 +3734,6 @@
 	if (ret < 0)
 		goto err;
 
-	if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
-		iscsit_tx_thread_wait_for_tcp(conn);
-		iscsit_unmap_iovec(cmd);
-		goto err;
-	}
-	iscsit_unmap_iovec(cmd);
-
 	switch (state) {
 	case ISTATE_SEND_LOGOUTRSP:
 		if (!iscsit_logout_post_handler(cmd, conn))
@@ -3928,6 +3766,7 @@
 err:
 	return -1;
 }
+EXPORT_SYMBOL(iscsit_response_queue);
 
 static int iscsit_handle_response_queue(struct iscsi_conn *conn)
 {
@@ -4087,36 +3926,12 @@
 	return ret;
 }
 
-int iscsi_target_rx_thread(void *arg)
+static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 {
-	int ret, rc;
+	int ret;
 	u8 buffer[ISCSI_HDR_LEN], opcode;
 	u32 checksum = 0, digest = 0;
-	struct iscsi_conn *conn = arg;
 	struct kvec iov;
-	/*
-	 * Allow ourselves to be interrupted by SIGINT so that a
-	 * connection recovery / failure event can be triggered externally.
-	 */
-	allow_signal(SIGINT);
-	/*
-	 * Wait for iscsi_post_login_handler() to complete before allowing
-	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
-	 */
-	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-	if (rc < 0 || iscsi_target_check_conn_state(conn))
-		return 0;
-
-	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
-		struct completion comp;
-
-		init_completion(&comp);
-		rc = wait_for_completion_interruptible(&comp);
-		if (rc < 0)
-			goto transport_err;
-
-		goto transport_err;
-	}
 
 	while (!kthread_should_stop()) {
 		/*
@@ -4134,7 +3949,7 @@
 		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
 		if (ret != ISCSI_HDR_LEN) {
 			iscsit_rx_thread_wait_for_tcp(conn);
-			goto transport_err;
+			return;
 		}
 
 		if (conn->conn_ops->HeaderDigest) {
@@ -4144,7 +3959,7 @@
 			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
 			if (ret != ISCSI_CRC_LEN) {
 				iscsit_rx_thread_wait_for_tcp(conn);
-				goto transport_err;
+				return;
 			}
 
 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@@ -4168,7 +3983,7 @@
 		}
 
 		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
-			goto transport_err;
+			return;
 
 		opcode = buffer[0] & ISCSI_OPCODE_MASK;
 
@@ -4179,15 +3994,38 @@
 			" while in Discovery Session, rejecting.\n", opcode);
 			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
 					  buffer);
-			goto transport_err;
+			return;
 		}
 
 		ret = iscsi_target_rx_opcode(conn, buffer);
 		if (ret < 0)
-			goto transport_err;
+			return;
 	}
+}
 
-transport_err:
+int iscsi_target_rx_thread(void *arg)
+{
+	int rc;
+	struct iscsi_conn *conn = arg;
+
+	/*
+	 * Allow ourselves to be interrupted by SIGINT so that a
+	 * connection recovery / failure event can be triggered externally.
+	 */
+	allow_signal(SIGINT);
+	/*
+	 * Wait for iscsi_post_login_handler() to complete before allowing
+	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
+	 */
+	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
+		return 0;
+
+	if (!conn->conn_transport->iscsit_get_rx_pdu)
+		return 0;
+
+	conn->conn_transport->iscsit_get_rx_pdu(conn);
+
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
 	iscsit_take_action_for_connection_exit(conn);
@@ -4240,16 +4078,17 @@
 	pr_debug("Closing iSCSI connection CID %hu on SID:"
 		" %u\n", conn->cid, sess->sid);
 	/*
-	 * Always up conn_logout_comp for the traditional TCP case just in case
-	 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
-	 * response never got sent because the connection failed.
+	 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
+	 * case just in case the RX Thread in iscsi_target_rx_opcode() is
+	 * sleeping and the logout response never got sent because the
+	 * connection failed.
 	 *
 	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
 	 * to signal logout response TX interrupt completion.  Go ahead and skip
 	 * this for iser since isert_rx_opcode() does not wait on logout failure,
 	 * and to avoid iscsi_conn pointer dereference in iser-target code.
 	 */
-	if (conn->conn_transport->transport_type == ISCSI_TCP)
+	if (!conn->conn_transport->rdma_shutdown)
 		complete(&conn->conn_logout_comp);
 
 	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
@@ -4438,7 +4277,7 @@
 	if (!atomic_read(&sess->session_reinstatement) &&
 	     atomic_read(&sess->session_fall_back_to_erl0)) {
 		spin_unlock_bh(&sess->conn_lock);
-		target_put_session(sess->se_sess);
+		iscsit_close_session(sess);
 
 		return 0;
 	} else if (atomic_read(&sess->session_logout)) {
@@ -4467,6 +4306,10 @@
 	}
 }
 
+/*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shutdown the iSCSI NEXUS.
+ */
 int iscsit_close_session(struct iscsi_session *sess)
 {
 	struct iscsi_portal_group *tpg = sess->tpg;
@@ -4556,7 +4399,7 @@
 	 * always sleep waiting for RX/TX thread shutdown to complete
 	 * within iscsit_close_connection().
 	 */
-	if (conn->conn_transport->transport_type == ISCSI_TCP)
+	if (!conn->conn_transport->rdma_shutdown)
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
 	atomic_set(&conn->conn_logout_remove, 0);
@@ -4565,7 +4408,7 @@
 	iscsit_dec_conn_usage_count(conn);
 	iscsit_stop_session(sess, sleep, sleep);
 	iscsit_dec_session_usage_count(sess);
-	target_put_session(sess->se_sess);
+	iscsit_close_session(sess);
 }
 
 static void iscsit_logout_post_handler_samecid(
@@ -4573,7 +4416,7 @@
 {
 	int sleep = 1;
 
-	if (conn->conn_transport->transport_type == ISCSI_TCP)
+	if (!conn->conn_transport->rdma_shutdown)
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
 	atomic_set(&conn->conn_logout_remove, 0);
@@ -4736,7 +4579,7 @@
 	} else
 		spin_unlock_bh(&sess->conn_lock);
 
-	target_put_session(sess->se_sess);
+	iscsit_close_session(sess);
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 97e5b69..923c032 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -43,14 +43,15 @@
 	return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
 }
 
-static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
+static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
+					 enum iscsit_transport_type type)
 {
 	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
-	struct iscsi_tpg_np *tpg_np_sctp;
+	struct iscsi_tpg_np *tpg_np_new;
 	ssize_t rb;
 
-	tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
-	if (tpg_np_sctp)
+	tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+	if (tpg_np_new)
 		rb = sprintf(page, "1\n");
 	else
 		rb = sprintf(page, "0\n");
@@ -58,19 +59,20 @@
 	return rb;
 }
 
-static ssize_t lio_target_np_sctp_store(struct config_item *item,
-		const char *page, size_t count)
+static ssize_t lio_target_np_driver_store(struct config_item *item,
+		const char *page, size_t count, enum iscsit_transport_type type,
+		const char *mod_name)
 {
 	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
 	struct iscsi_np *np;
 	struct iscsi_portal_group *tpg;
-	struct iscsi_tpg_np *tpg_np_sctp = NULL;
+	struct iscsi_tpg_np *tpg_np_new = NULL;
 	u32 op;
-	int ret;
+	int rc;
 
-	ret = kstrtou32(page, 0, &op);
-	if (ret)
-		return ret;
+	rc = kstrtou32(page, 0, &op);
+	if (rc)
+		return rc;
 	if ((op != 1) && (op != 0)) {
 		pr_err("Illegal value for tpg_enable: %u\n", op);
 		return -EINVAL;
@@ -87,89 +89,23 @@
 		return -EINVAL;
 
 	if (op) {
-		/*
-		 * Use existing np->np_sockaddr for SCTP network portal reference
-		 */
-		tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
-					tpg_np, ISCSI_SCTP_TCP);
-		if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
-			goto out;
-	} else {
-		tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
-		if (!tpg_np_sctp)
-			goto out;
-
-		ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
-		if (ret < 0)
-			goto out;
-	}
-
-	iscsit_put_tpg(tpg);
-	return count;
-out:
-	iscsit_put_tpg(tpg);
-	return -EINVAL;
-}
-
-static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
-{
-	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
-	struct iscsi_tpg_np *tpg_np_iser;
-	ssize_t rb;
-
-	tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
-	if (tpg_np_iser)
-		rb = sprintf(page, "1\n");
-	else
-		rb = sprintf(page, "0\n");
-
-	return rb;
-}
-
-static ssize_t lio_target_np_iser_store(struct config_item *item,
-		const char *page, size_t count)
-{
-	struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
-	struct iscsi_np *np;
-	struct iscsi_portal_group *tpg;
-	struct iscsi_tpg_np *tpg_np_iser = NULL;
-	char *endptr;
-	u32 op;
-	int rc = 0;
-
-	op = simple_strtoul(page, &endptr, 0);
-	if ((op != 1) && (op != 0)) {
-		pr_err("Illegal value for tpg_enable: %u\n", op);
-		return -EINVAL;
-	}
-	np = tpg_np->tpg_np;
-	if (!np) {
-		pr_err("Unable to locate struct iscsi_np from"
-				" struct iscsi_tpg_np\n");
-		return -EINVAL;
-	}
-
-	tpg = tpg_np->tpg;
-	if (iscsit_get_tpg(tpg) < 0)
-		return -EINVAL;
-
-	if (op) {
-		rc = request_module("ib_isert");
-		if (rc != 0) {
-			pr_warn("Unable to request_module for ib_isert\n");
-			rc = 0;
+		if (strlen(mod_name)) {
+			rc = request_module(mod_name);
+			if (rc != 0) {
+				pr_warn("Unable to request_module for %s\n",
+					mod_name);
+				rc = 0;
+			}
 		}
 
-		tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
-				tpg_np, ISCSI_INFINIBAND);
-		if (IS_ERR(tpg_np_iser)) {
-			rc = PTR_ERR(tpg_np_iser);
+		tpg_np_new = iscsit_tpg_add_network_portal(tpg,
+					&np->np_sockaddr, tpg_np, type);
+		if (IS_ERR(tpg_np_new))
 			goto out;
-		}
 	} else {
-		tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
-		if (tpg_np_iser) {
-			rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+		tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+		if (tpg_np_new) {
+			rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new);
 			if (rc < 0)
 				goto out;
 		}
@@ -182,12 +118,35 @@
 	return rc;
 }
 
-CONFIGFS_ATTR(lio_target_np_, sctp);
+static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
+{
+	return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
+}
+
+static ssize_t lio_target_np_iser_store(struct config_item *item,
+					const char *page, size_t count)
+{
+	return lio_target_np_driver_store(item, page, count,
+					  ISCSI_INFINIBAND, "ib_isert");
+}
 CONFIGFS_ATTR(lio_target_np_, iser);
 
+static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
+{
+	return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
+}
+
+static ssize_t lio_target_np_cxgbit_store(struct config_item *item,
+					  const char *page, size_t count)
+{
+	return lio_target_np_driver_store(item, page, count,
+					  ISCSI_CXGBIT, "cxgbit");
+}
+CONFIGFS_ATTR(lio_target_np_, cxgbit);
+
 static struct configfs_attribute *lio_target_portal_attrs[] = {
-	&lio_target_np_attr_sctp,
 	&lio_target_np_attr_iser,
+	&lio_target_np_attr_cxgbit,
 	NULL,
 };
 
@@ -1554,7 +1513,7 @@
  * This function calls iscsit_inc_session_usage_count() on the
  * struct iscsi_session in question.
  */
-static int lio_tpg_shutdown_session(struct se_session *se_sess)
+static void lio_tpg_close_session(struct se_session *se_sess)
 {
 	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
 	struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
@@ -1566,7 +1525,7 @@
 	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
 		spin_unlock(&sess->conn_lock);
 		spin_unlock_bh(&se_tpg->session_lock);
-		return 0;
+		return;
 	}
 	atomic_set(&sess->session_reinstatement, 1);
 	spin_unlock(&sess->conn_lock);
@@ -1575,20 +1534,6 @@
 	spin_unlock_bh(&se_tpg->session_lock);
 
 	iscsit_stop_session(sess, 1, 1);
-	return 1;
-}
-
-/*
- * Calls iscsit_dec_session_usage_count() as inverse of
- * lio_tpg_shutdown_session()
- */
-static void lio_tpg_close_session(struct se_session *se_sess)
-{
-	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
-	/*
-	 * If the iSCSI Session for the iSCSI Initiator Node exists,
-	 * forcefully shutdown the iSCSI NEXUS.
-	 */
 	iscsit_close_session(sess);
 }
 
@@ -1640,7 +1585,6 @@
 	.tpg_get_inst_index		= lio_tpg_get_inst_index,
 	.check_stop_free		= lio_check_stop_free,
 	.release_cmd			= lio_release_cmd,
-	.shutdown_session		= lio_tpg_shutdown_session,
 	.close_session			= lio_tpg_close_session,
 	.sess_get_index			= lio_sess_get_index,
 	.sess_get_initiator_sid		= lio_sess_get_initiator_sid,
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index fb3b52b..647d4a5 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -524,3 +524,4 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(iscsit_get_datain_values);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 210f6e4..b54e72c 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -786,7 +786,7 @@
 	}
 
 	spin_unlock_bh(&se_tpg->session_lock);
-	target_put_session(sess->se_sess);
+	iscsit_close_session(sess);
 }
 
 void iscsit_start_time2retain_handler(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8436d56..b5212f0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -228,7 +228,7 @@
 	if (sess->session_state == TARG_SESS_STATE_FAILED) {
 		spin_unlock_bh(&sess->conn_lock);
 		iscsit_dec_session_usage_count(sess);
-		target_put_session(sess->se_sess);
+		iscsit_close_session(sess);
 		return 0;
 	}
 	spin_unlock_bh(&sess->conn_lock);
@@ -236,7 +236,7 @@
 	iscsit_stop_session(sess, 1, 1);
 	iscsit_dec_session_usage_count(sess);
 
-	target_put_session(sess->se_sess);
+	iscsit_close_session(sess);
 	return 0;
 }
 
@@ -258,7 +258,7 @@
 	mutex_unlock(&auth_id_lock);
 }
 
-static __printf(2, 3) int iscsi_change_param_sprintf(
+__printf(2, 3) int iscsi_change_param_sprintf(
 	struct iscsi_conn *conn,
 	const char *fmt, ...)
 {
@@ -279,6 +279,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(iscsi_change_param_sprintf);
 
 /*
  *	This is the leading connection of a new session,
@@ -1387,6 +1388,16 @@
 			goto old_sess_out;
 	}
 
+	if (conn->conn_transport->iscsit_validate_params) {
+		ret = conn->conn_transport->iscsit_validate_params(conn);
+		if (ret < 0) {
+			if (zero_tsih)
+				goto new_sess_out;
+			else
+				goto old_sess_out;
+		}
+	}
+
 	ret = iscsi_target_start_negotiation(login, conn);
 	if (ret < 0)
 		goto new_sess_out;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9fc9117..89d34bd 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -269,6 +269,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(iscsi_target_check_login_request);
 
 static int iscsi_target_check_first_request(
 	struct iscsi_conn *conn,
@@ -1246,16 +1247,16 @@
 {
 	int ret;
 
-	ret = iscsi_target_do_login(conn, login);
-	if (!ret) {
-		if (conn->sock) {
-			struct sock *sk = conn->sock->sk;
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
 
-			write_lock_bh(&sk->sk_callback_lock);
-			set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
-	} else if (ret < 0) {
+		write_lock_bh(&sk->sk_callback_lock);
+		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+
+	ret = iscsi_target_do_login(conn, login);
+	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
 		cancel_delayed_work_sync(&conn->login_cleanup_work);
 		iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 3a1f9a7..0efa80b 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -680,6 +680,7 @@
 	pr_err("Unable to locate key \"%s\".\n", key);
 	return NULL;
 }
+EXPORT_SYMBOL(iscsi_find_param_from_key);
 
 int iscsi_extract_key_value(char *textbuf, char **key, char **value)
 {
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 5772038..1f38177 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -514,6 +514,7 @@
 
 	wake_up(&conn->queues_wq);
 }
+EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
 
 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
 {
@@ -725,6 +726,9 @@
 		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
 		iscsit_remove_cmd_from_response_queue(cmd, conn);
 	}
+
+	if (conn && conn->conn_transport->iscsit_release_cmd)
+		conn->conn_transport->iscsit_release_cmd(conn, cmd);
 }
 
 void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
@@ -773,6 +777,7 @@
 		break;
 	}
 }
+EXPORT_SYMBOL(iscsit_free_cmd);
 
 int iscsit_check_session_usage_count(struct iscsi_session *sess)
 {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0ad5ac5..5091b31 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -601,16 +601,6 @@
 	return tl_cmd->sc_cmd_state;
 }
 
-static int tcm_loop_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void tcm_loop_close_session(struct se_session *se_sess)
-{
-	return;
-};
-
 static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 {
 	/*
@@ -1243,8 +1233,6 @@
 	.tpg_get_inst_index		= tcm_loop_get_inst_index,
 	.check_stop_free		= tcm_loop_check_stop_free,
 	.release_cmd			= tcm_loop_release_cmd,
-	.shutdown_session		= tcm_loop_shutdown_session,
-	.close_session			= tcm_loop_close_session,
 	.sess_get_index			= tcm_loop_sess_get_index,
 	.write_pending			= tcm_loop_write_pending,
 	.write_pending_status		= tcm_loop_write_pending_status,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index c57e788..58bb6ed 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1726,16 +1726,6 @@
 	sbp_free_request(req);
 }
 
-static int sbp_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void sbp_close_session(struct se_session *se_sess)
-{
-	return;
-}
-
 static u32 sbp_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
@@ -2349,8 +2339,6 @@
 	.tpg_check_prod_mode_write_protect = sbp_check_false,
 	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
 	.release_cmd			= sbp_release_cmd,
-	.shutdown_session		= sbp_shutdown_session,
-	.close_session			= sbp_close_session,
 	.sess_get_index			= sbp_sess_get_index,
 	.write_pending			= sbp_write_pending,
 	.write_pending_status		= sbp_write_pending_status,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 49aba4a..4c82bbe 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -932,7 +932,7 @@
 			tg_pt_gp->tg_pt_gp_alua_access_status);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN,
-		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+		"%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
 		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 
 	rc = core_alua_write_tpg_metadata(path, md_buf, len);
@@ -1275,8 +1275,8 @@
 			atomic_read(&lun->lun_tg_pt_secondary_offline),
 			lun->lun_tg_pt_secondary_stat);
 
-	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
-			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
+	snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
+			db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
 			lun->unpacked_lun);
 
 	rc = core_alua_write_tpg_metadata(path, md_buf, len);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index d498533..2001005 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -99,6 +99,67 @@
 
 CONFIGFS_ATTR_RO(target_core_item_, version);
 
+char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
+static char db_root_stage[DB_ROOT_LEN];
+
+static ssize_t target_core_item_dbroot_show(struct config_item *item,
+					    char *page)
+{
+	return sprintf(page, "%s\n", db_root);
+}
+
+static ssize_t target_core_item_dbroot_store(struct config_item *item,
+					const char *page, size_t count)
+{
+	ssize_t read_bytes;
+	struct file *fp;
+
+	mutex_lock(&g_tf_lock);
+	if (!list_empty(&g_tf_list)) {
+		mutex_unlock(&g_tf_lock);
+		pr_err("db_root: cannot be changed: target drivers registered\n");
+		return -EINVAL;
+	}
+
+	if (count > (DB_ROOT_LEN - 1)) {
+		mutex_unlock(&g_tf_lock);
+		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
+		       (int)count, DB_ROOT_LEN - 1);
+		return -EINVAL;
+	}
+
+	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
+	if (!read_bytes) {
+		mutex_unlock(&g_tf_lock);
+		return -EINVAL;
+	}
+	if (db_root_stage[read_bytes - 1] == '\n')
+		db_root_stage[read_bytes - 1] = '\0';
+
+	/* validate new db root before accepting it */
+	fp = filp_open(db_root_stage, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		mutex_unlock(&g_tf_lock);
+		pr_err("db_root: cannot open: %s\n", db_root_stage);
+		return -EINVAL;
+	}
+	if (!S_ISDIR(fp->f_inode->i_mode)) {
+		filp_close(fp, 0);
+		mutex_unlock(&g_tf_lock);
+		pr_err("db_root: not a directory: %s\n", db_root_stage);
+		return -EINVAL;
+	}
+	filp_close(fp, 0);
+
+	strncpy(db_root, db_root_stage, read_bytes);
+
+	mutex_unlock(&g_tf_lock);
+
+	return read_bytes;
+}
+
+CONFIGFS_ATTR(target_core_item_, dbroot);
+
 static struct target_fabric_configfs *target_core_get_fabric(
 	const char *name)
 {
@@ -239,6 +300,7 @@
  */
 static struct configfs_attribute *target_core_fabric_item_attrs[] = {
 	&target_core_item_attr_version,
+	&target_core_item_attr_dbroot,
 	NULL,
 };
 
@@ -323,14 +385,6 @@
 		pr_err("Missing tfo->release_cmd()\n");
 		return -EINVAL;
 	}
-	if (!tfo->shutdown_session) {
-		pr_err("Missing tfo->shutdown_session()\n");
-		return -EINVAL;
-	}
-	if (!tfo->close_session) {
-		pr_err("Missing tfo->close_session()\n");
-		return -EINVAL;
-	}
 	if (!tfo->sess_get_index) {
 		pr_err("Missing tfo->sess_get_index()\n");
 		return -EINVAL;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 86b4a83..fc91e85 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -155,4 +155,10 @@
 /* target_core_xcopy.c */
 extern struct se_portal_group xcopy_pt_tpg;
 
+/* target_core_configfs.c */
+#define DB_ROOT_LEN		4096
+#define	DB_ROOT_DEFAULT		"/var/target"
+
+extern char db_root[];
+
 #endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b179573..47463c9 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1985,7 +1985,7 @@
 		return -EMSGSIZE;
 	}
 
-	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+	snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file)) {
 		pr_err("filp_open(%s) for APTPL metadata"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 47a833f..24b36fd 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -403,7 +403,6 @@
 	struct se_device *se_dev = cmd->se_dev;
 	struct rd_dev *dev = RD_DEV(se_dev);
 	struct rd_dev_sg_table *prot_table;
-	bool need_to_release = false;
 	struct scatterlist *prot_sg;
 	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
 	u32 prot_offset, prot_page;
@@ -432,9 +431,6 @@
 	if (!rc)
 		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
 
-	if (need_to_release)
-		kfree(prot_sg);
-
 	return rc;
 }
 
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index ddf0460..d99752c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -336,44 +336,39 @@
 	return acl;
 }
 
+static void target_shutdown_sessions(struct se_node_acl *acl)
+{
+	struct se_session *sess;
+	unsigned long flags;
+
+restart:
+	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
+		if (sess->sess_tearing_down)
+			continue;
+
+		list_del_init(&sess->sess_acl_list);
+		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+		if (acl->se_tpg->se_tpg_tfo->close_session)
+			acl->se_tpg->se_tpg_tfo->close_session(sess);
+		goto restart;
+	}
+	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+}
+
 void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 {
 	struct se_portal_group *tpg = acl->se_tpg;
-	LIST_HEAD(sess_list);
-	struct se_session *sess, *sess_tmp;
-	unsigned long flags;
-	int rc;
 
 	mutex_lock(&tpg->acl_node_mutex);
-	if (acl->dynamic_node_acl) {
+	if (acl->dynamic_node_acl)
 		acl->dynamic_node_acl = 0;
-	}
 	list_del(&acl->acl_list);
 	mutex_unlock(&tpg->acl_node_mutex);
 
-	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
-	acl->acl_stop = 1;
+	target_shutdown_sessions(acl);
 
-	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
-				sess_acl_list) {
-		if (sess->sess_tearing_down != 0)
-			continue;
-
-		if (!target_get_session(sess))
-			continue;
-		list_move(&sess->sess_acl_list, &sess_list);
-	}
-	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
-
-	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
-		list_del(&sess->sess_acl_list);
-
-		rc = tpg->se_tpg_tfo->shutdown_session(sess);
-		target_put_session(sess);
-		if (!rc)
-			continue;
-		target_put_session(sess);
-	}
 	target_put_nacl(acl);
 	/*
 	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
@@ -400,11 +395,7 @@
 	struct se_node_acl *acl,
 	u32 queue_depth)
 {
-	LIST_HEAD(sess_list);
 	struct se_portal_group *tpg = acl->se_tpg;
-	struct se_session *sess, *sess_tmp;
-	unsigned long flags;
-	int rc;
 
 	/*
 	 * User has requested to change the queue depth for a Initiator Node.
@@ -413,30 +404,10 @@
 	 */
 	target_set_nacl_queue_depth(tpg, acl, queue_depth);
 
-	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
-	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
-				 sess_acl_list) {
-		if (sess->sess_tearing_down != 0)
-			continue;
-		if (!target_get_session(sess))
-			continue;
-		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
-
-		/*
-		 * Finally call tpg->se_tpg_tfo->close_session() to force session
-		 * reinstatement to occur if there is an active session for the
-		 * $FABRIC_MOD Initiator Node in question.
-		 */
-		rc = tpg->se_tpg_tfo->shutdown_session(sess);
-		target_put_session(sess);
-		if (!rc) {
-			spin_lock_irqsave(&acl->nacl_sess_lock, flags);
-			continue;
-		}
-		target_put_session(sess);
-		spin_lock_irqsave(&acl->nacl_sess_lock, flags);
-	}
-	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+	/*
+	 * Shutdown all pending sessions to force session reinstatement.
+	 */
+	target_shutdown_sessions(acl);
 
 	pr_debug("Successfully changed queue depth to: %d for Initiator"
 		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 590384a..5ab3967 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -239,7 +239,6 @@
 	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
 	INIT_LIST_HEAD(&se_sess->sess_wait_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
-	kref_init(&se_sess->sess_kref);
 	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
@@ -430,27 +429,6 @@
 }
 EXPORT_SYMBOL(target_alloc_session);
 
-static void target_release_session(struct kref *kref)
-{
-	struct se_session *se_sess = container_of(kref,
-			struct se_session, sess_kref);
-	struct se_portal_group *se_tpg = se_sess->se_tpg;
-
-	se_tpg->se_tpg_tfo->close_session(se_sess);
-}
-
-int target_get_session(struct se_session *se_sess)
-{
-	return kref_get_unless_zero(&se_sess->sess_kref);
-}
-EXPORT_SYMBOL(target_get_session);
-
-void target_put_session(struct se_session *se_sess)
-{
-	kref_put(&se_sess->sess_kref, target_release_session);
-}
-EXPORT_SYMBOL(target_put_session);
-
 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
 {
 	struct se_session *se_sess;
@@ -499,8 +477,8 @@
 	se_nacl = se_sess->se_node_acl;
 	if (se_nacl) {
 		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
-		if (se_nacl->acl_stop == 0)
-			list_del(&se_sess->sess_acl_list);
+		if (!list_empty(&se_sess->sess_acl_list))
+			list_del_init(&se_sess->sess_acl_list);
 		/*
 		 * If the session list is empty, then clear the pointer.
 		 * Otherwise, set the struct se_session pointer from the tail
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c30003b..e28209b 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -139,7 +139,6 @@
  * Session ops.
  */
 void ft_sess_put(struct ft_sess *);
-int ft_sess_shutdown(struct se_session *);
 void ft_sess_close(struct se_session *);
 u32 ft_sess_get_index(struct se_session *);
 u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4d375e9..42ee911 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -442,7 +442,6 @@
 	.tpg_get_inst_index =		ft_tpg_get_inst_index,
 	.check_stop_free =		ft_check_stop_free,
 	.release_cmd =			ft_release_cmd,
-	.shutdown_session =		ft_sess_shutdown,
 	.close_session =		ft_sess_close,
 	.sess_get_index =		ft_sess_get_index,
 	.sess_get_initiator_sid =	NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index d0c3e18..f5186a7 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -303,18 +303,6 @@
  */
 
 /*
- * Determine whether session is allowed to be shutdown in the current context.
- * Returns non-zero if the session should be shutdown.
- */
-int ft_sess_shutdown(struct se_session *se_sess)
-{
-	struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
-	pr_debug("port_id %x\n", sess->port_id);
-	return 1;
-}
-
-/*
  * Remove session and send PRLO.
  * This is called when the ACL is being deleted or queue depth is changing.
  */
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index d89d60c..2d702ca 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -260,16 +260,6 @@
 	  Enable this option if you want to have support for thermal management
 	  controller present in Armada 370 and Armada XP SoC.
 
-config TEGRA_SOCTHERM
-	tristate "Tegra SOCTHERM thermal management"
-	depends on ARCH_TEGRA
-	help
-	  Enable this option for integrated thermal management support on NVIDIA
-	  Tegra124 systems-on-chip. The driver supports four thermal zones
-	  (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
-	  zones to manage temperatures. This option is also required for the
-	  emergency thermal reset (thermtrip) feature to function.
-
 config DB8500_CPUFREQ_COOLING
 	tristate "DB8500 cpufreq cooling"
 	depends on ARCH_U8500 || COMPILE_TEST
@@ -377,6 +367,17 @@
 source "drivers/thermal/st/Kconfig"
 endmenu
 
+config TANGO_THERMAL
+	tristate "Tango thermal management"
+	depends on ARCH_TANGO || COMPILE_TEST
+	help
+	  Enable the Tango thermal driver, which supports the primitive
+	  temperature sensor embedded in Tango chips since the SMP8758.
+	  This sensor only generates a 1-bit signal to indicate whether
+	  the die temperature exceeds a programmable threshold.
+
+source "drivers/thermal/tegra/Kconfig"
+
 config QCOM_SPMI_TEMP_ALARM
 	tristate "Qualcomm SPMI PMIC Temperature Alarm"
 	depends on OF && SPMI && IIO
@@ -388,4 +389,14 @@
 	  real time die temperature if an ADC is present or an estimate of the
 	  temperature based upon the over temperature stage value.
 
+config GENERIC_ADC_THERMAL
+	tristate "Generic ADC based thermal sensor"
+	depends on IIO
+	help
+	  This enables a thermal sysfs driver for a temperature sensor
+	  connected to a General Purpose ADC. The ADC channel is read via
+	  the IIO framework and the channel information is provided to this
+	  driver. The driver reports the temperature by reading the ADC
+	  channel and converting the value to a temperature via a lookup table.
+
 endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 8e9cbc3..10b07c1 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -35,6 +35,7 @@
 obj-$(CONFIG_DOVE_THERMAL)  	+= dove_thermal.o
 obj-$(CONFIG_DB8500_THERMAL)	+= db8500_thermal.o
 obj-$(CONFIG_ARMADA_THERMAL)	+= armada_thermal.o
+obj-$(CONFIG_TANGO_THERMAL)	+= tango_thermal.o
 obj-$(CONFIG_IMX_THERMAL)	+= imx_thermal.o
 obj-$(CONFIG_DB8500_CPUFREQ_COOLING)	+= db8500_cpufreq_cooling.o
 obj-$(CONFIG_INTEL_POWERCLAMP)	+= intel_powerclamp.o
@@ -46,6 +47,7 @@
 obj-$(CONFIG_INT340X_THERMAL)  += int340x_thermal/
 obj-$(CONFIG_INTEL_PCH_THERMAL)	+= intel_pch_thermal.o
 obj-$(CONFIG_ST_THERMAL)	+= st/
-obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra_soctherm.o
+obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra/
 obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
+obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 70836c5..fc52016 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -29,7 +29,13 @@
 	struct thermal_instance *instance;
 
 	tz->ops->get_trip_temp(tz, trip, &trip_temp);
-	tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
+
+	if (!tz->ops->get_trip_hyst) {
+		pr_warn_once("Undefined get_trip_hyst for thermal zone %s - "
+				"running with default hysteresis zero\n", tz->type);
+		trip_hyst = 0;
+	} else
+		tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
 
 	dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
 				trip, trip_temp, tz->temperature,
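
For illustration only (not part of the patch): a minimal userspace sketch of the bang-bang decision the governor makes once trip_temp and trip_hyst are known, showing why a missing get_trip_hyst callback can safely fall back to zero hysteresis. All names are invented for this sketch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Keep the cooling device on above trip_temp, off at or below
 * trip_temp - hyst, and unchanged inside the hysteresis window.
 * With hyst == 0 the device toggles exactly at trip_temp.
 */
static bool bang_bang_target(bool cooling_on, int temp, int trip_temp, int hyst)
{
	if (temp >= trip_temp)
		return true;
	if (temp <= trip_temp - hyst)
		return false;
	return cooling_on;
}

int main(void)
{
	printf("%d\n", bang_bang_target(false, 76000, 75000, 2000)); /* 1: turn on    */
	printf("%d\n", bang_bang_target(true,  74000, 75000, 2000)); /* 1: hold state */
	printf("%d\n", bang_bang_target(true,  72000, 75000, 2000)); /* 0: turn off   */
	return 0;
}
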
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 5e820b5..97fad8f 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -160,7 +160,7 @@
 	struct hisi_thermal_sensor *sensor = _sensor;
 	struct hisi_thermal_data *data = sensor->thermal;
 
-	int sensor_id = 0, i;
+	int sensor_id = -1, i;
 	long max_temp = 0;
 
 	*temp = hisi_thermal_get_sensor_temp(data, sensor);
@@ -168,12 +168,19 @@
 	sensor->sensor_temp = *temp;
 
 	for (i = 0; i < HISI_MAX_SENSORS; i++) {
+		if (!data->sensors[i].tzd)
+			continue;
+
 		if (data->sensors[i].sensor_temp >= max_temp) {
 			max_temp = data->sensors[i].sensor_temp;
 			sensor_id = i;
 		}
 	}
 
+	/* If no sensor has been enabled, skip binding and enabling the irq */
+	if (sensor_id == -1)
+		return 0;
+
 	mutex_lock(&data->thermal_lock);
 	data->irq_bind_sensor = sensor_id;
 	mutex_unlock(&data->thermal_lock);
@@ -226,8 +233,12 @@
 		 sensor->thres_temp / 1000);
 	mutex_unlock(&data->thermal_lock);
 
-	for (i = 0; i < HISI_MAX_SENSORS; i++)
+	for (i = 0; i < HISI_MAX_SENSORS; i++) {
+		if (!data->sensors[i].tzd)
+			continue;
+
 		thermal_zone_device_update(data->sensors[i].tzd);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -243,10 +254,11 @@
 	sensor->id = index;
 	sensor->thermal = data;
 
-	sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id,
-				sensor, &hisi_of_thermal_ops);
+	sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+				sensor->id, sensor, &hisi_of_thermal_ops);
 	if (IS_ERR(sensor->tzd)) {
 		ret = PTR_ERR(sensor->tzd);
+		sensor->tzd = NULL;
 		dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
 			sensor->id, ret);
 		return ret;
@@ -331,28 +343,21 @@
 		return ret;
 	}
 
+	hisi_thermal_enable_bind_irq_sensor(data);
+	irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
+			      &data->irq_enabled);
+
 	for (i = 0; i < HISI_MAX_SENSORS; ++i) {
 		ret = hisi_thermal_register_sensor(pdev, data,
 						   &data->sensors[i], i);
-		if (ret) {
+		if (ret)
 			dev_err(&pdev->dev,
 				"failed to register thermal sensor: %d\n", ret);
-			goto err_get_sensor_data;
-		}
+		else
+			hisi_thermal_toggle_sensor(&data->sensors[i], true);
 	}
 
-	hisi_thermal_enable_bind_irq_sensor(data);
-	data->irq_enabled = true;
-
-	for (i = 0; i < HISI_MAX_SENSORS; i++)
-		hisi_thermal_toggle_sensor(&data->sensors[i], true);
-
 	return 0;
-
-err_get_sensor_data:
-	clk_disable_unprepare(data->clk);
-
-	return ret;
 }
 
 static int hisi_thermal_remove(struct platform_device *pdev)
@@ -363,8 +368,10 @@
 	for (i = 0; i < HISI_MAX_SENSORS; i++) {
 		struct hisi_thermal_sensor *sensor = &data->sensors[i];
 
+		if (!sensor->tzd)
+			continue;
+
 		hisi_thermal_toggle_sensor(sensor, false);
-		thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
 	}
 
 	hisi_thermal_disable_sensor(data);
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index 13d431c..a578cd2 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -177,7 +177,7 @@
 		return -ENODEV;
 	d->raw_bd = bd;
 
-	ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+	ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 36fa724..42c1ac0 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -198,49 +198,33 @@
 	.get_temp       = proc_thermal_get_zone_temp,
 };
 
-static int proc_thermal_add(struct device *dev,
-			    struct proc_thermal_device **priv)
+static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv)
 {
-	struct proc_thermal_device *proc_priv;
-	struct acpi_device *adev;
+	int i;
 	acpi_status status;
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *elements, *ppcc;
 	union acpi_object *p;
-	unsigned long long tmp;
-	struct thermal_zone_device_ops *ops = NULL;
-	int i;
-	int ret;
+	int ret = 0;
 
-	adev = ACPI_COMPANION(dev);
-	if (!adev)
-		return -ENODEV;
-
-	status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+	status = acpi_evaluate_object(proc_priv->adev->handle, "PPCC",
+				      NULL, &buf);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
 	p = buf.pointer;
 	if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
-		dev_err(dev, "Invalid PPCC data\n");
+		dev_err(proc_priv->dev, "Invalid PPCC data\n");
 		ret = -EFAULT;
 		goto free_buffer;
 	}
+
 	if (!p->package.count) {
-		dev_err(dev, "Invalid PPCC package size\n");
+		dev_err(proc_priv->dev, "Invalid PPCC package size\n");
 		ret = -EFAULT;
 		goto free_buffer;
 	}
 
-	proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
-	if (!proc_priv) {
-		ret = -ENOMEM;
-		goto free_buffer;
-	}
-
-	proc_priv->dev = dev;
-	proc_priv->adev = adev;
-
 	for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
 		elements = &(p->package.elements[i+1]);
 		if (elements->type != ACPI_TYPE_PACKAGE ||
@@ -257,12 +241,62 @@
 		proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
 	}
 
+free_buffer:
+	kfree(buf.pointer);
+
+	return ret;
+}
+
+#define PROC_POWER_CAPABILITY_CHANGED	0x83
+static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct proc_thermal_device *proc_priv = data;
+
+	if (!proc_priv)
+		return;
+
+	switch (event) {
+	case PROC_POWER_CAPABILITY_CHANGED:
+		proc_thermal_read_ppcc(proc_priv);
+		int340x_thermal_zone_device_update(proc_priv->int340x_zone);
+		break;
+	default:
+		dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+		break;
+	}
+}
+
+
+static int proc_thermal_add(struct device *dev,
+			    struct proc_thermal_device **priv)
+{
+	struct proc_thermal_device *proc_priv;
+	struct acpi_device *adev;
+	acpi_status status;
+	unsigned long long tmp;
+	struct thermal_zone_device_ops *ops = NULL;
+	int ret;
+
+	adev = ACPI_COMPANION(dev);
+	if (!adev)
+		return -ENODEV;
+
+	proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+	if (!proc_priv)
+		return -ENOMEM;
+
+	proc_priv->dev = dev;
+	proc_priv->adev = adev;
 	*priv = proc_priv;
 
-	ret = sysfs_create_group(&dev->kobj,
-				 &power_limit_attribute_group);
+	ret = proc_thermal_read_ppcc(proc_priv);
+	if (!ret) {
+		ret = sysfs_create_group(&dev->kobj,
+					 &power_limit_attribute_group);
+
+	}
 	if (ret)
-		goto free_buffer;
+		return ret;
 
 	status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
 	if (ACPI_FAILURE(status)) {
@@ -274,20 +308,32 @@
 
 	proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
 	if (IS_ERR(proc_priv->int340x_zone)) {
-		sysfs_remove_group(&proc_priv->dev->kobj,
-			   &power_limit_attribute_group);
 		ret = PTR_ERR(proc_priv->int340x_zone);
+		goto remove_group;
 	} else
 		ret = 0;
 
-free_buffer:
-	kfree(buf.pointer);
+	ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+					  proc_thermal_notify,
+					  (void *)proc_priv);
+	if (ret)
+		goto remove_zone;
+
+	return 0;
+
+remove_zone:
+	int340x_thermal_zone_remove(proc_priv->int340x_zone);
+remove_group:
+	sysfs_remove_group(&proc_priv->dev->kobj,
+			   &power_limit_attribute_group);
 
 	return ret;
 }
 
 static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
 {
+	acpi_remove_notify_handler(proc_priv->adev->handle,
+				   ACPI_DEVICE_NOTIFY, proc_thermal_notify);
 	int340x_thermal_zone_remove(proc_priv->int340x_zone);
 	sysfs_remove_group(&proc_priv->dev->kobj,
 			   &power_limit_attribute_group);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6c79588..015ce2e 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -510,12 +510,6 @@
 	unsigned long cpu;
 	struct task_struct *thread;
 
-	/* check if pkg cstate counter is completely 0, abort in this case */
-	if (!has_pkg_state_counter()) {
-		pr_err("pkg cstate counter not functional, abort\n");
-		return -EINVAL;
-	}
-
 	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
 	/* prevent cpu hotplug */
 	get_online_cpus();
@@ -672,35 +666,11 @@
 	.set_cur_state = powerclamp_set_cur_state,
 };
 
-/* runs on Nehalem and later */
 static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
-	{ X86_VENDOR_INTEL, 6, 0x1a},
-	{ X86_VENDOR_INTEL, 6, 0x1c},
-	{ X86_VENDOR_INTEL, 6, 0x1e},
-	{ X86_VENDOR_INTEL, 6, 0x1f},
-	{ X86_VENDOR_INTEL, 6, 0x25},
-	{ X86_VENDOR_INTEL, 6, 0x26},
-	{ X86_VENDOR_INTEL, 6, 0x2a},
-	{ X86_VENDOR_INTEL, 6, 0x2c},
-	{ X86_VENDOR_INTEL, 6, 0x2d},
-	{ X86_VENDOR_INTEL, 6, 0x2e},
-	{ X86_VENDOR_INTEL, 6, 0x2f},
-	{ X86_VENDOR_INTEL, 6, 0x37},
-	{ X86_VENDOR_INTEL, 6, 0x3a},
-	{ X86_VENDOR_INTEL, 6, 0x3c},
-	{ X86_VENDOR_INTEL, 6, 0x3d},
-	{ X86_VENDOR_INTEL, 6, 0x3e},
-	{ X86_VENDOR_INTEL, 6, 0x3f},
-	{ X86_VENDOR_INTEL, 6, 0x45},
-	{ X86_VENDOR_INTEL, 6, 0x46},
-	{ X86_VENDOR_INTEL, 6, 0x47},
-	{ X86_VENDOR_INTEL, 6, 0x4c},
-	{ X86_VENDOR_INTEL, 6, 0x4d},
-	{ X86_VENDOR_INTEL, 6, 0x4e},
-	{ X86_VENDOR_INTEL, 6, 0x4f},
-	{ X86_VENDOR_INTEL, 6, 0x56},
-	{ X86_VENDOR_INTEL, 6, 0x57},
-	{ X86_VENDOR_INTEL, 6, 0x5e},
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -712,11 +682,12 @@
 				boot_cpu_data.x86, boot_cpu_data.x86_model);
 		return -ENODEV;
 	}
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
-		!boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
-		!boot_cpu_has(X86_FEATURE_MWAIT) ||
-		!boot_cpu_has(X86_FEATURE_ARAT))
+
+	/* The goal for idle time alignment is to achieve package cstate. */
+	if (!has_pkg_state_counter()) {
+		pr_info("No package C-state available\n");
 		return -ENODEV;
+	}
 
 	/* find the deepest mwait value */
 	find_target_mwait();
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 507632b..262ab0a 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -144,7 +144,6 @@
 	s32 o_slope;
 	s32 vts[MT8173_NUM_SENSORS];
 
-	struct thermal_zone_device *tzd;
 };
 
 struct mtk_thermal_bank_cfg {
@@ -572,16 +571,11 @@
 
 	platform_set_drvdata(pdev, mt);
 
-	mt->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
-				&mtk_thermal_ops);
-	if (IS_ERR(mt->tzd))
-		goto err_register;
+	devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
+					     &mtk_thermal_ops);
 
 	return 0;
 
-err_register:
-	clk_disable_unprepare(mt->clk_peri_therm);
-
 err_disable_clk_auxadc:
 	clk_disable_unprepare(mt->clk_auxadc);
 
@@ -592,8 +586,6 @@
 {
 	struct mtk_thermal *mt = platform_get_drvdata(pdev);
 
-	thermal_zone_of_sensor_unregister(&pdev->dev, mt->tzd);
-
 	clk_disable_unprepare(mt->clk_peri_therm);
 	clk_disable_unprepare(mt->clk_auxadc);
 
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d8ec44b..b8e509c 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -331,6 +331,14 @@
 	if (trip >= data->ntrips || trip < 0)
 		return -EDOM;
 
+	if (data->ops->set_trip_temp) {
+		int ret;
+
+		ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
+		if (ret)
+			return ret;
+	}
+
 	/* thermal framework should take care of data->mask & (1 << trip) */
 	data->trips[trip].temperature = temp;
 
@@ -906,7 +914,7 @@
 	return tz;
 
 free_tbps:
-	for (i = 0; i < tz->num_tbps; i++)
+	for (i = i - 1; i >= 0; i--)
 		of_node_put(tz->tbps[i].cooling_device);
 	kfree(tz->tbps);
 free_trips:
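
For illustration only (not part of the patch): a hypothetical sensor-driver sketch showing how a platform driver would plug into the two interfaces touched here: the devm_ variant of OF sensor registration that several drivers in this series are converted to, and the optional set_trip_temp callback that of_thermal_set_trip_temp() now forwards to. All foo_* names are invented; hardware access, error paths and the platform_driver boilerplate are omitted.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct foo_sensor {
	int last_trip_temp;	/* stand-in for real hardware state */
};

static int foo_get_temp(void *data, int *temp)
{
	/* a real driver would read its hardware here */
	*temp = 42000;		/* millidegrees Celsius */
	return 0;
}

static int foo_set_trip_temp(void *data, int trip, int temp)
{
	struct foo_sensor *foo = data;

	/*
	 * Called by of_thermal_set_trip_temp() before the framework caches
	 * the new value, so the hardware threshold can be reprogrammed.
	 */
	foo->last_trip_temp = temp;
	return 0;
}

static const struct thermal_zone_of_device_ops foo_thermal_ops = {
	.get_temp	= foo_get_temp,
	.set_trip_temp	= foo_set_trip_temp,
};

static int foo_thermal_probe(struct platform_device *pdev)
{
	struct foo_sensor *foo;
	struct thermal_zone_device *tzd;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* the devm_ variant removes the explicit unregister in .remove */
	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, foo,
						   &foo_thermal_ops);
	return PTR_ERR_OR_ZERO(tzd);
}
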
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index b677aad..f8a3c60 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -260,7 +260,7 @@
 	if (ret < 0)
 		goto fail;
 
-	chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+	chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
 							&qpnp_tm_sensor_ops);
 	if (IS_ERR(chip->tz_dev)) {
 		dev_err(&pdev->dev, "failed to register sensor\n");
@@ -281,7 +281,6 @@
 {
 	struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
 
-	thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
 	if (!IS_ERR(chip->adc))
 		iio_channel_release(chip->adc);
 
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 82daba0..71a3392 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -492,7 +492,7 @@
 			goto error_unregister;
 
 		if (of_data == USE_OF_THERMAL)
-			priv->zone = thermal_zone_of_sensor_register(
+			priv->zone = devm_thermal_zone_of_sensor_register(
 						dev, i, priv,
 						&rcar_thermal_zone_of_ops);
 		else
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 233a564..5d491f1 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1,7 +1,5 @@
 /*
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2014-2016, Fuzhou Rockchip Electronics Co., Ltd
  * Caesar Wang <wxt@rock-chips.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -23,8 +21,10 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/reset.h>
 #include <linux/thermal.h>
+#include <linux/mfd/syscon.h>
 #include <linux/pinctrl/consumer.h>
 
 /**
@@ -73,7 +73,7 @@
 #define SOC_MAX_SENSORS	2
 
 /**
- * struct chip_tsadc_table: hold information about chip-specific differences
+ * struct chip_tsadc_table - hold information about chip-specific differences
  * @id: conversion table
  * @length: size of conversion table
  * @data_mask: mask to apply on data inputs
@@ -86,6 +86,20 @@
 	enum adc_sort_mode mode;
 };
 
+/**
+ * struct rockchip_tsadc_chip - hold the private data of tsadc chip
+ * @chn_id[SOC_MAX_SENSORS]: the sensor id of the chip corresponding to each channel
+ * @chn_num: the channel number of the tsadc chip
+ * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ * @initialize: SoC-specific method to initialize the tsadc controller
+ * @irq_ack: clear the interrupt
+ * @get_temp: get the temperature
+ * @set_tshut_temp: set the hardware-controlled shutdown temperature
+ * @set_tshut_mode: set the hardware-controlled shutdown mode
+ * @table: the chip-specific conversion table
+ */
 struct rockchip_tsadc_chip {
 	/* The sensor id of chip correspond to the ADC channel */
 	int chn_id[SOC_MAX_SENSORS];
@@ -97,7 +111,8 @@
 	enum tshut_polarity tshut_polarity;
 
 	/* Chip-wide methods */
-	void (*initialize)(void __iomem *reg, enum tshut_polarity p);
+	void (*initialize)(struct regmap *grf,
+			   void __iomem *reg, enum tshut_polarity p);
 	void (*irq_ack)(void __iomem *reg);
 	void (*control)(void __iomem *reg, bool on);
 
@@ -112,12 +127,32 @@
 	struct chip_tsadc_table table;
 };
 
+/**
+ * struct rockchip_thermal_sensor - hold the information of thermal sensor
+ * @thermal:  pointer to the platform/configuration data
+ * @tzd: pointer to a thermal zone
+ * @id: identifier of the thermal sensor
+ */
 struct rockchip_thermal_sensor {
 	struct rockchip_thermal_data *thermal;
 	struct thermal_zone_device *tzd;
 	int id;
 };
 
+/**
+ * struct rockchip_thermal_data - hold the private data of thermal driver
+ * @chip: pointer to the platform/configuration data
+ * @pdev: platform device of thermal
+ * @reset: the reset controller of tsadc
+ * @sensors[SOC_MAX_SENSORS]: the thermal sensor
+ * @clk: the controller clock, divided down from the external 24MHz clock
+ * @pclk: the advanced peripherals bus clock
+ * @grf: the general register file, used for static configuration by software
+ * @regs: the base address of tsadc controller
+ * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ */
 struct rockchip_thermal_data {
 	const struct rockchip_tsadc_chip *chip;
 	struct platform_device *pdev;
@@ -128,6 +163,7 @@
 	struct clk *clk;
 	struct clk *pclk;
 
+	struct regmap *grf;
 	void __iomem *regs;
 
 	int tshut_temp;
@@ -142,6 +178,7 @@
  * TSADCV3_* are used for newer SoCs than RK3288. (e.g: RK3228, RK3399)
  *
  */
+#define TSADCV2_USER_CON			0x00
 #define TSADCV2_AUTO_CON			0x04
 #define TSADCV2_INT_EN				0x08
 #define TSADCV2_INT_PD				0x0c
@@ -155,12 +192,7 @@
 #define TSADCV2_AUTO_EN				BIT(0)
 #define TSADCV2_AUTO_SRC_EN(chn)		BIT(4 + (chn))
 #define TSADCV2_AUTO_TSHUT_POLARITY_HIGH	BIT(8)
-/**
- * TSADCV1_AUTO_Q_SEL_EN:
- * whether select (1024 - tsadc_q) as output
- * 1'b0:use tsadc_q as output(temperature-code is rising sequence)
- * 1'b1:use(1024 - tsadc_q) as output (temperature-code is falling sequence)
- */
+
 #define TSADCV3_AUTO_Q_SEL_EN			BIT(1)
 
 #define TSADCV2_INT_SRC_EN(chn)			BIT(chn)
@@ -177,19 +209,32 @@
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT	4
 #define TSADCV2_AUTO_PERIOD_TIME		250 /* msec */
 #define TSADCV2_AUTO_PERIOD_HT_TIME		50  /* msec */
+#define TSADCV2_USER_INTER_PD_SOC		0x340 /* 13 clocks */
 
+#define GRF_SARADC_TESTBIT			0x0e644
+#define GRF_TSADC_TESTBIT_L			0x0e648
+#define GRF_TSADC_TESTBIT_H			0x0e64c
+
+#define GRF_TSADC_TSEN_PD_ON			(0x30003 << 0)
+#define GRF_TSADC_TSEN_PD_OFF			(0x30000 << 0)
+#define GRF_SARADC_TESTBIT_ON			(0x10001 << 2)
+#define GRF_TSADC_TESTBIT_H_ON			(0x10001 << 2)
+
+/**
+ * struct tsadc_table - code to temperature conversion table
+ * @code: the value of adc channel
+ * @temp: the temperature
+ * Note:
+ * The code-to-temperature mapping of the temperature sensor is a piecewise
+ * linear curve. Any temperature or code falling between two given entries can
+ * be linearly interpolated.
+ * Code to Temperature mapping should be updated based on manufacturer results.
+ */
 struct tsadc_table {
 	u32 code;
 	int temp;
 };
 
-/**
- * Note:
- * Code to Temperature mapping of the Temperature sensor is a piece wise linear
- * curve.Any temperature, code faling between to 2 give temperatures can be
- * linearly interpolated.
- * Code to Temperature mapping should be updated based on sillcon results.
- */
 static const struct tsadc_table rk3228_code_table[] = {
 	{0, -40000},
 	{588, -40000},
@@ -308,40 +353,40 @@
 
 static const struct tsadc_table rk3399_code_table[] = {
 	{0, -40000},
-	{593, -40000},
-	{598, -35000},
-	{603, -30000},
-	{609, -25000},
-	{614, -20000},
-	{619, -15000},
-	{625, -10000},
-	{630, -5000},
-	{635, 0},
-	{641, 5000},
-	{646, 10000},
-	{651, 15000},
-	{657, 20000},
-	{662, 25000},
-	{667, 30000},
-	{673, 35000},
-	{678, 40000},
-	{684, 45000},
-	{689, 50000},
-	{694, 55000},
-	{700, 60000},
-	{705, 65000},
-	{711, 70000},
-	{716, 75000},
-	{722, 80000},
-	{727, 85000},
-	{733, 90000},
-	{738, 95000},
-	{743, 100000},
-	{749, 105000},
-	{754, 110000},
-	{760, 115000},
-	{765, 120000},
-	{771, 125000},
+	{402, -40000},
+	{410, -35000},
+	{419, -30000},
+	{427, -25000},
+	{436, -20000},
+	{444, -15000},
+	{453, -10000},
+	{461, -5000},
+	{470, 0},
+	{478, 5000},
+	{487, 10000},
+	{496, 15000},
+	{504, 20000},
+	{513, 25000},
+	{521, 30000},
+	{530, 35000},
+	{538, 40000},
+	{547, 45000},
+	{555, 50000},
+	{564, 55000},
+	{573, 60000},
+	{581, 65000},
+	{590, 70000},
+	{599, 75000},
+	{607, 80000},
+	{616, 85000},
+	{624, 90000},
+	{633, 95000},
+	{642, 100000},
+	{650, 105000},
+	{659, 110000},
+	{668, 115000},
+	{677, 120000},
+	{685, 125000},
 	{TSADCV3_DATA_MASK, 125000},
 };
 
@@ -405,8 +450,8 @@
 			return -EAGAIN;		/* Incorrect reading */
 
 		while (low <= high) {
-			if (code >= table.id[mid - 1].code &&
-			    code < table.id[mid].code)
+			if (code <= table.id[mid].code &&
+			    code > table.id[mid - 1].code)
 				break;
 			else if (code > table.id[mid].code)
 				low = mid + 1;
@@ -449,7 +494,7 @@
  *     If the temperature is higher than COMP_INT or COMP_SHUT for
  *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
  */
-static void rk_tsadcv2_initialize(void __iomem *regs,
+static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
 				  enum tshut_polarity tshut_polarity)
 {
 	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
@@ -466,6 +511,62 @@
 		       regs + TSADCV2_AUTO_PERIOD_HT);
 	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
 		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
+	if (IS_ERR(grf)) {
+		pr_warn("%s: Missing rockchip,grf property\n", __func__);
+		return;
+	}
+}
+
+/**
+ * rk_tsadcv3_initialize - initialize TSADC Controller.
+ *
+ * (1) The tsadc control power sequence.
+ *
+ * (2) Set TSADCV2_AUTO_PERIOD:
+ *     Configure the interleave between every two accesses of
+ *     TSADC in normal operation.
+ *
+ * (3) Set TSADCV2_AUTO_PERIOD_HT:
+ *     Configure the interleave between every two accesses of
+ *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (4) Set TSADCV2_HIGHT_INT_DEBOUNCE and TSADCV2_HIGHT_TSHUT_DEBOUNCE:
+ *     If the temperature is higher than COMP_INT or COMP_SHUT for
+ *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
+ */
+static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
+				  enum tshut_polarity tshut_polarity)
+{
+	/* The tsadc control power sequence */
+	if (IS_ERR(grf)) {
+		/* Set interleave value to work around the IC time-sync issue */
+		writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs +
+			       TSADCV2_USER_CON);
+	} else {
+		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON);
+		mdelay(10);
+		regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF);
+		usleep_range(15, 100); /* The spec note says at least 15 us */
+		regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
+		regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
+		usleep_range(90, 200); /* The spec note says at least 90 us */
+	}
+
+	if (tshut_polarity == TSHUT_HIGH_ACTIVE)
+		writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+			       regs + TSADCV2_AUTO_CON);
+	else
+		writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+			       regs + TSADCV2_AUTO_CON);
+
+	writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
+	writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+		       regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+	writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+		       regs + TSADCV2_AUTO_PERIOD_HT);
+	writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+		       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
 }
 
 static void rk_tsadcv2_irq_ack(void __iomem *regs)
@@ -498,10 +599,11 @@
 }
 
 /**
- * @rk_tsadcv3_control:
- * TSADC controller works at auto mode, and some SoCs need set the tsadc_q_sel
- * bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output adc value if
- * setting this bit to enable.
+ * rk_tsadcv3_control - enable or disable the tsadc controller.
+ *
+ * NOTE: The TSADC controller works in auto mode, and some SoCs need the
+ * tsadc_q_sel bit set in TSADCV2_AUTO_CON[1]. When that bit is enabled,
+ * (1024 - tsadc_q) is used as the output ADC value.
  */
 static void rk_tsadcv3_control(void __iomem *regs, bool enable)
 {
@@ -603,6 +705,30 @@
 	},
 };
 
+static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
+	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+	.chn_num = 2, /* two channels for tsadc */
+
+	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to the PMIC */
+	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+	.tshut_temp = 95000,
+
+	.initialize = rk_tsadcv3_initialize,
+	.irq_ack = rk_tsadcv3_irq_ack,
+	.control = rk_tsadcv3_control,
+	.get_temp = rk_tsadcv2_get_temp,
+	.set_tshut_temp = rk_tsadcv2_tshut_temp,
+	.set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+	.table = {
+		.id = rk3228_code_table,
+		.length = ARRAY_SIZE(rk3228_code_table),
+		.data_mask = TSADCV3_DATA_MASK,
+		.mode = ADC_INCREMENT,
+	},
+};
+
 static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
 	.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
 	.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
@@ -636,7 +762,7 @@
 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
 	.tshut_temp = 95000,
 
-	.initialize = rk_tsadcv2_initialize,
+	.initialize = rk_tsadcv3_initialize,
 	.irq_ack = rk_tsadcv3_irq_ack,
 	.control = rk_tsadcv3_control,
 	.get_temp = rk_tsadcv2_get_temp,
@@ -661,6 +787,10 @@
 		.data = (void *)&rk3288_tsadc_data,
 	},
 	{
+		.compatible = "rockchip,rk3366-tsadc",
+		.data = (void *)&rk3366_tsadc_data,
+	},
+	{
 		.compatible = "rockchip,rk3368-tsadc",
 		.data = (void *)&rk3368_tsadc_data,
 	},
@@ -768,6 +898,11 @@
 		return -EINVAL;
 	}
 
+	/* The tsadc won't handle the error here since some SoCs do not
+	 * need this property.
+	 */
+	thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+
 	return 0;
 }
 
@@ -786,8 +921,8 @@
 
 	sensor->thermal = thermal;
 	sensor->id = id;
-	sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, id, sensor,
-						      &rockchip_of_thermal_ops);
+	sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id,
+					sensor, &rockchip_of_thermal_ops);
 	if (IS_ERR(sensor->tzd)) {
 		error = PTR_ERR(sensor->tzd);
 		dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
@@ -815,7 +950,7 @@
 	const struct of_device_id *match;
 	struct resource *res;
 	int irq;
-	int i, j;
+	int i;
 	int error;
 
 	match = of_match_node(of_rockchip_thermal_match, np);
@@ -888,7 +1023,8 @@
 		goto err_disable_pclk;
 	}
 
-	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+	thermal->chip->initialize(thermal->grf, thermal->regs,
+				  thermal->tshut_polarity);
 
 	for (i = 0; i < thermal->chip->chn_num; i++) {
 		error = rockchip_thermal_register_sensor(pdev, thermal,
@@ -898,9 +1034,6 @@
 			dev_err(&pdev->dev,
 				"failed to register sensor[%d] : error = %d\n",
 				i, error);
-			for (j = 0; j < i; j++)
-				thermal_zone_of_sensor_unregister(&pdev->dev,
-						thermal->sensors[j].tzd);
 			goto err_disable_pclk;
 		}
 	}
@@ -912,7 +1045,7 @@
 	if (error) {
 		dev_err(&pdev->dev,
 			"failed to request tsadc irq: %d\n", error);
-		goto err_unregister_sensor;
+		goto err_disable_pclk;
 	}
 
 	thermal->chip->control(thermal->regs, true);
@@ -924,11 +1057,6 @@
 
 	return 0;
 
-err_unregister_sensor:
-	while (i--)
-		thermal_zone_of_sensor_unregister(&pdev->dev,
-						  thermal->sensors[i].tzd);
-
 err_disable_pclk:
 	clk_disable_unprepare(thermal->pclk);
 err_disable_clk:
@@ -946,7 +1074,6 @@
 		struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
 
 		rockchip_thermal_toggle_sensor(sensor, false);
-		thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
 	}
 
 	thermal->chip->control(thermal->regs, false);
@@ -988,12 +1115,15 @@
 		return error;
 
 	error = clk_enable(thermal->pclk);
-	if (error)
+	if (error) {
+		clk_disable(thermal->clk);
 		return error;
+	}
 
 	rockchip_thermal_reset_controller(thermal->reset);
 
-	thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+	thermal->chip->initialize(thermal->grf, thermal->regs,
+				  thermal->tshut_polarity);
 
 	for (i = 0; i < thermal->chip->chn_num; i++) {
 		int id = thermal->sensors[i].id;
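
For illustration only (not part of the patch): a minimal userspace sketch of the piecewise-linear code-to-temperature lookup described in the tsadc_table comment, for an ADC_INCREMENT table and using the corrected bound table[i - 1].code < code <= table[i].code from the hunk above. The entries are a thinned-out subset of the rk3399 table and serve only as an example.

#include <stdio.h>

struct tsadc_entry {
	unsigned int code;
	int temp;			/* millidegrees Celsius */
};

static const struct tsadc_entry table[] = {
	{402, -40000}, {470, 0}, {538, 40000}, {607, 80000}, {685, 125000},
};

static int code_to_temp(unsigned int code)
{
	unsigned int i;

	for (i = 1; i < sizeof(table) / sizeof(table[0]); i++) {
		if (code > table[i - 1].code && code <= table[i].code) {
			/* linear interpolation between the two entries */
			int num = (table[i].temp - table[i - 1].temp) *
				  (int)(code - table[i - 1].code);
			int den = (int)(table[i].code - table[i - 1].code);

			return table[i - 1].temp + num / den;
		}
	}
	return table[0].temp;		/* out of range: clamp to lowest */
}

int main(void)
{
	printf("%d\n", code_to_temp(504));	/* prints 20000 (20 degrees C) */
	return 0;
}
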
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
new file mode 100644
index 0000000..70e0d9f
--- /dev/null
+++ b/drivers/thermal/tango_thermal.c
@@ -0,0 +1,109 @@
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * According to a data sheet draft, "this temperature sensor uses a bandgap
+ * type of circuit to compare a voltage which has a negative temperature
+ * coefficient with a voltage that is proportional to absolute temperature.
+ * A resistor bank allows 41 different temperature thresholds to be selected
+ * and the logic output will then indicate whether the actual die temperature
+ * lies above or below the selected threshold."
+ */
+
+#define TEMPSI_CMD	0
+#define TEMPSI_RES	4
+#define TEMPSI_CFG	8
+
+#define CMD_OFF		0
+#define CMD_ON		1
+#define CMD_READ	2
+
+#define IDX_MIN		15
+#define IDX_MAX		40
+
+struct tango_thermal_priv {
+	void __iomem *base;
+	int thresh_idx;
+};
+
+static bool temp_above_thresh(void __iomem *base, int thresh_idx)
+{
+	writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
+	usleep_range(10, 20);
+	writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
+
+	return readl(base + TEMPSI_RES);
+}
+
+static int tango_get_temp(void *arg, int *res)
+{
+	struct tango_thermal_priv *priv = arg;
+	int idx = priv->thresh_idx;
+
+	if (temp_above_thresh(priv->base, idx)) {
+		/* Search upward by incrementing thresh_idx */
+		while (idx < IDX_MAX && temp_above_thresh(priv->base, ++idx))
+			cpu_relax();
+		idx = idx - 1; /* always return lower bound */
+	} else {
+		/* Search downward by decrementing thresh_idx */
+		while (idx > IDX_MIN && !temp_above_thresh(priv->base, --idx))
+			cpu_relax();
+	}
+
+	*res = (idx * 9 / 2 - 38) * 1000; /* millidegrees Celsius */
+	priv->thresh_idx = idx;
+
+	return 0;
+}
+
+static const struct thermal_zone_of_device_ops ops = {
+	.get_temp	= tango_get_temp,
+};
+
+static int tango_thermal_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct tango_thermal_priv *priv;
+	struct thermal_zone_device *tzdev;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->thresh_idx = IDX_MIN;
+	writel(0, priv->base + TEMPSI_CFG);
+	writel(CMD_ON, priv->base + TEMPSI_CMD);
+
+	tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops);
+	return PTR_ERR_OR_ZERO(tzdev);
+}
+
+static const struct of_device_id tango_sensor_ids[] = {
+	{
+		.compatible = "sigma,smp8758-thermal",
+	},
+	{ /* sentinel */ }
+};
+
+static struct platform_driver tango_thermal_driver = {
+	.probe	= tango_thermal_probe,
+	.driver	= {
+		.name		= "tango-thermal",
+		.of_match_table	= tango_sensor_ids,
+	},
+};
+
+module_platform_driver(tango_thermal_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango temperature sensor");
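
For illustration only (not part of the patch): a userspace sketch of how tango_get_temp() turns the sensor's 1-bit above/below-threshold output into a temperature estimate by stepping the threshold index and applying the same (idx * 9 / 2 - 38) * 1000 conversion. The simulated die temperature stands in for the TEMPSI_RES readback and is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

#define IDX_MIN	15
#define IDX_MAX	40

static int simulated_temp_mc = 63000;	/* pretend die temperature */

static bool temp_above_thresh(int idx)
{
	/* the hardware compares the die temperature with threshold 'idx' */
	return simulated_temp_mc > (idx * 9 / 2 - 38) * 1000;
}

static int estimate_temp_mc(int idx)
{
	if (temp_above_thresh(idx)) {
		while (idx < IDX_MAX && temp_above_thresh(++idx))
			;
		idx--;				/* report the lower bound */
	} else {
		while (idx > IDX_MIN && !temp_above_thresh(--idx))
			;
	}
	return (idx * 9 / 2 - 38) * 1000;	/* millidegrees Celsius */
}

int main(void)
{
	printf("%d\n", estimate_temp_mc(IDX_MIN));	/* prints 61000 */
	return 0;
}
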
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
new file mode 100644
index 0000000..cec586e
--- /dev/null
+++ b/drivers/thermal/tegra/Kconfig
@@ -0,0 +1,13 @@
+menu "NVIDIA Tegra thermal drivers"
+depends on ARCH_TEGRA
+
+config TEGRA_SOCTHERM
+	tristate "Tegra SOCTHERM thermal management"
+	help
+	  Enable this option for integrated thermal management support on NVIDIA
+	  Tegra systems-on-chip. The driver supports four thermal zones
+	  (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
+	  zones to manage temperatures. This option is also required for the
+	  emergency thermal reset (thermtrip) feature to function.
+
+endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
new file mode 100644
index 0000000..1ce1af2c
--- /dev/null
+++ b/drivers/thermal/tegra/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra-soctherm.o
+
+tegra-soctherm-y				:= soctherm.o soctherm-fuse.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC)	+= tegra124-soctherm.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_132_SOC)	+= tegra132-soctherm.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_210_SOC)	+= tegra210-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm-fuse.c b/drivers/thermal/tegra/soctherm-fuse.c
new file mode 100644
index 0000000..2996318
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm-fuse.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include "soctherm.h"
+
+#define NOMINAL_CALIB_FT			105
+#define NOMINAL_CALIB_CP			25
+
+#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK	0x1fff
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK	(0x1fff << 13)
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT	13
+
+#define FUSE_TSENSOR_COMMON			0x180
+
+/*
+ * Tegra210: Layout of bits in FUSE_TSENSOR_COMMON:
+ *    3                   2                   1                   0
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |       BASE_FT       |      BASE_CP      | SHFT_FT | SHIFT_CP  |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Tegra12x, etc:
+ * In chips prior to Tegra210, this fuse was incorrectly sized as 26 bits,
+ * and didn't hold SHIFT_CP in [31:26]. Therefore these missing six bits
+ * were obtained via the FUSE_SPARE_REALIGNMENT_REG register [5:0].
+ *
+ * FUSE_TSENSOR_COMMON:
+ *    3                   2                   1                   0
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |-----------| SHFT_FT |       BASE_FT       |      BASE_CP      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * FUSE_SPARE_REALIGNMENT_REG:
+ *    3                   2                   1                   0
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |---------------------------------------------------| SHIFT_CP  |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+#define CALIB_COEFFICIENT 1000000LL
+
+/**
+ * div64_s64_precise() - wrapper for div64_s64()
+ * @a:  the dividend
+ * @b:  the divisor
+ *
+ * Implements division with fairly accurate rounding instead of truncation by
+ * shifting the dividend to the left by 16 so that the quotient has a
+ * much higher precision.
+ *
+ * Return: the quotient of a / b.
+ */
+static s64 div64_s64_precise(s64 a, s32 b)
+{
+	s64 r, al;
+
+	/* Scale up for increased precision division */
+	al = a << 16;
+
+	r = div64_s64(al * 2 + 1, 2 * b);
+	return r >> 16;
+}
+
+int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
+			    struct tsensor_shared_calib *shared)
+{
+	u32 val;
+	s32 shifted_cp, shifted_ft;
+	int err;
+
+	err = tegra_fuse_readl(FUSE_TSENSOR_COMMON, &val);
+	if (err)
+		return err;
+
+	shared->base_cp = (val & tfuse->fuse_base_cp_mask) >>
+			  tfuse->fuse_base_cp_shift;
+	shared->base_ft = (val & tfuse->fuse_base_ft_mask) >>
+			  tfuse->fuse_base_ft_shift;
+
+	shifted_ft = (val & tfuse->fuse_shift_ft_mask) >>
+		     tfuse->fuse_shift_ft_shift;
+	shifted_ft = sign_extend32(shifted_ft, 4);
+
+	if (tfuse->fuse_spare_realignment) {
+		err = tegra_fuse_readl(tfuse->fuse_spare_realignment, &val);
+		if (err)
+			return err;
+	}
+
+	shifted_cp = sign_extend32(val, 5);
+
+	shared->actual_temp_cp = 2 * NOMINAL_CALIB_CP + shifted_cp;
+	shared->actual_temp_ft = 2 * NOMINAL_CALIB_FT + shifted_ft;
+
+	return 0;
+}
+
+int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
+			     const struct tsensor_shared_calib *shared,
+			     u32 *calibration)
+{
+	const struct tegra_tsensor_group *sensor_group;
+	u32 val, calib;
+	s32 actual_tsensor_ft, actual_tsensor_cp;
+	s32 delta_sens, delta_temp;
+	s32 mult, div;
+	s16 therma, thermb;
+	s64 temp;
+	int err;
+
+	sensor_group = sensor->group;
+
+	err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
+	if (err)
+		return err;
+
+	actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
+	val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK) >>
+	      FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
+	actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
+
+	delta_sens = actual_tsensor_ft - actual_tsensor_cp;
+	delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
+
+	mult = sensor_group->pdiv * sensor->config->tsample_ate;
+	div = sensor->config->tsample * sensor_group->pdiv_ate;
+
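+	/*
+	 * therma/thermb describe the linear fit temp = therma * count +
+	 * thermb through the CP and FT calibration points: therma is the
+	 * slope (scaled by 2^13 and by the runtime vs. ATE pdiv/tsample
+	 * ratio), thermb the intercept at a zero sensor count.
+	 */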
+	temp = (s64)delta_temp * (1LL << 13) * mult;
+	therma = div64_s64_precise(temp, (s64)delta_sens * div);
+
+	temp = ((s64)actual_tsensor_ft * shared->actual_temp_cp) -
+		((s64)actual_tsensor_cp * shared->actual_temp_ft);
+	thermb = div64_s64_precise(temp, delta_sens);
+
+	temp = (s64)therma * sensor->fuse_corr_alpha;
+	therma = div64_s64_precise(temp, CALIB_COEFFICIENT);
+
+	temp = (s64)thermb * sensor->fuse_corr_alpha + sensor->fuse_corr_beta;
+	thermb = div64_s64_precise(temp, CALIB_COEFFICIENT);
+
+	calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
+		((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
+
+	*calibration = calib;
+
+	return 0;
+}
+
+MODULE_AUTHOR("Wei Ni <wni@nvidia.com>");
+MODULE_DESCRIPTION("Tegra SOCTHERM fuse management");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
new file mode 100644
index 0000000..b865172
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author:
+ *	Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define SENSOR_CONFIG0				0
+#define SENSOR_CONFIG0_STOP			BIT(0)
+#define SENSOR_CONFIG0_CPTR_OVER		BIT(2)
+#define SENSOR_CONFIG0_OVER			BIT(3)
+#define SENSOR_CONFIG0_TCALC_OVER		BIT(4)
+#define SENSOR_CONFIG0_TALL_MASK		(0xfffff << 8)
+#define SENSOR_CONFIG0_TALL_SHIFT		8
+
+#define SENSOR_CONFIG1				4
+#define SENSOR_CONFIG1_TSAMPLE_MASK		0x3ff
+#define SENSOR_CONFIG1_TSAMPLE_SHIFT		0
+#define SENSOR_CONFIG1_TIDDQ_EN_MASK		(0x3f << 15)
+#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT		15
+#define SENSOR_CONFIG1_TEN_COUNT_MASK		(0x3f << 24)
+#define SENSOR_CONFIG1_TEN_COUNT_SHIFT		24
+#define SENSOR_CONFIG1_TEMP_ENABLE		BIT(31)
+
+/*
+ * SENSOR_CONFIG2 is defined in soctherm.h
+ * because it is also used by soctherm-fuse.c
+ */
+
+#define SENSOR_STATUS0				0xc
+#define SENSOR_STATUS0_VALID_MASK		BIT(31)
+#define SENSOR_STATUS0_CAPTURE_MASK		0xffff
+
+#define SENSOR_STATUS1				0x10
+#define SENSOR_STATUS1_TEMP_VALID_MASK		BIT(31)
+#define SENSOR_STATUS1_TEMP_MASK		0xffff
+
+#define READBACK_VALUE_MASK			0xff00
+#define READBACK_VALUE_SHIFT			8
+#define READBACK_ADD_HALF			BIT(7)
+#define READBACK_NEGATE				BIT(0)
+
+/* get val from register(r) mask bits(m) */
+#define REG_GET_MASK(r, m)	(((r) & (m)) >> (ffs(m) - 1))
+/* set val(v) to mask bits(m) of register(r) */
+#define REG_SET_MASK(r, m, v)	(((r) & ~(m)) | \
+				 (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
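+/* e.g. REG_SET_MASK(r, 0xff00, 0x12) puts 0x12 into bits [15:8] of r */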
+
+static const int min_low_temp = -127000;
+static const int max_high_temp = 127000;
+
+struct tegra_thermctl_zone {
+	void __iomem *reg;
+	struct device *dev;
+	struct thermal_zone_device *tz;
+	const struct tegra_tsensor_group *sg;
+};
+
+struct tegra_soctherm {
+	struct reset_control *reset;
+	struct clk *clock_tsensor;
+	struct clk *clock_soctherm;
+	void __iomem *regs;
+	struct thermal_zone_device **thermctl_tzs;
+
+	u32 *calib;
+	struct tegra_soctherm_soc *soc;
+
+	struct dentry *debugfs_dir;
+};
+
+static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
+{
+	const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
+	void __iomem *base = tegra->regs + sensor->base;
+	unsigned int val;
+
+	val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
+	writel(val, base + SENSOR_CONFIG0);
+
+	val  = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
+	val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
+	val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
+	val |= SENSOR_CONFIG1_TEMP_ENABLE;
+	writel(val, base + SENSOR_CONFIG1);
+
+	writel(tegra->calib[i], base + SENSOR_CONFIG2);
+}
+
+/*
+ * Translate from soctherm readback format to millicelsius.
+ * The soctherm readback format in bits is as follows:
+ *   TTTTTTTT H______N
+ * where T's contain the temperature in Celsius,
+ * H denotes an addition of 0.5 Celsius and N denotes negation
+ * of the final value.
+ */
+static int translate_temp(u16 val)
+{
+	int t;
+
+	t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
+	if (val & READBACK_ADD_HALF)
+		t += 500;
+	if (val & READBACK_NEGATE)
+		t *= -1;
+
+	return t;
+}
+
+static int tegra_thermctl_get_temp(void *data, int *out_temp)
+{
+	struct tegra_thermctl_zone *zone = data;
+	u32 val;
+
+	val = readl(zone->reg);
+	val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
+	*out_temp = translate_temp(val);
+
+	return 0;
+}
+
+static int
+thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
+		  int trip_temp);
+
+static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
+{
+	struct tegra_thermctl_zone *zone = data;
+	struct thermal_zone_device *tz = zone->tz;
+	const struct tegra_tsensor_group *sg = zone->sg;
+	struct device *dev = zone->dev;
+	enum thermal_trip_type type;
+	int ret;
+
+	if (!tz)
+		return -EINVAL;
+
+	ret = tz->ops->get_trip_type(tz, trip, &type);
+	if (ret)
+		return ret;
+
+	if (type != THERMAL_TRIP_CRITICAL)
+		return 0;
+
+	return thermtrip_program(dev, sg, temp);
+}
+
+static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+	.get_temp = tegra_thermctl_get_temp,
+	.set_trip_temp = tegra_thermctl_set_trip_temp,
+};
+
+/**
+ * enforce_temp_range() - check and enforce temperature range [min, max]
+ * @dev: ptr to the struct device for the SOC_THERM IP block
+ * @trip_temp: the trip temperature to check
+ *
+ * Checks and enforces the permitted temperature range that the SOC_THERM
+ * HW can support. This is done while taking care of precision.
+ *
+ * Return: The precision adjusted capped temperature in millicelsius.
+ */
+static int enforce_temp_range(struct device *dev, int trip_temp)
+{
+	int temp;
+
+	temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
+	if (temp != trip_temp)
+		dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
+			 trip_temp, temp);
+	return temp;
+}
+
+/**
+ * thermtrip_program() - Configures the hardware to shut down the
+ * system if a given sensor group reaches a given temperature
+ * @dev: ptr to the struct device for the SOC_THERM IP block
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
+ *
+ * Sets the thermal trip threshold of the given sensor group to be the
+ * @trip_temp.  If this threshold is crossed, the hardware will shut
+ * down.
+ *
+ * Note that, although @trip_temp is specified in millicelsius, the
+ * hardware is programmed in degrees Celsius.
+ *
+ * Return: 0 upon success, or %-EINVAL upon failure.
+ */
+static int thermtrip_program(struct device *dev,
+			     const struct tegra_tsensor_group *sg,
+			     int trip_temp)
+{
+	struct tegra_soctherm *ts = dev_get_drvdata(dev);
+	int temp;
+	u32 r;
+
+	if (!sg || !sg->thermtrip_threshold_mask)
+		return -EINVAL;
+
+	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
+
+	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
+	r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
+	r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
+	r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
+	writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);
+
+	return 0;
+}
+
+/**
+ * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
+ * @dev: struct device * of the SOC_THERM instance
+ * @sg: pointer to the sensor group to program the THERMTRIP threshold for
+ * @tz: pointer to the thermal zone whose critical trip point is used
+ *
+ * Configure the SOC_THERM HW trip points, setting "THERMTRIP"
+ * trip points, using the "critical" type trip_temp from the thermal
+ * zone.
+ * After they have been configured, THERMTRIP will take action
+ * when the configured SoC thermal sensor group reaches a
+ * certain temperature.
+ *
+ * Return: 0 upon success, or a negative error code on failure.
+ * "Success" does not mean that trips was enabled; it could also
+ * mean that no node was found in DT.
+ * THERMTRIP has been enabled successfully when a message similar to
+ * this one appears on the serial console:
+ * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
+ */
+static int tegra_soctherm_set_hwtrips(struct device *dev,
+				      const struct tegra_tsensor_group *sg,
+				      struct thermal_zone_device *tz)
+{
+	int temperature;
+	int ret;
+
+	ret = tz->ops->get_crit_temp(tz, &temperature);
+	if (ret) {
+		dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
+			 sg->name);
+		return ret;
+	}
+
+	ret = thermtrip_program(dev, sg, temperature);
+	if (ret) {
+		dev_err(dev, "thermtrip: %s: error during enable\n",
+			sg->name);
+		return ret;
+	}
+
+	dev_info(dev,
+		 "thermtrip: will shut down when %s reaches %d mC\n",
+		 sg->name, temperature);
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int regs_show(struct seq_file *s, void *data)
+{
+	struct platform_device *pdev = s->private;
+	struct tegra_soctherm *ts = platform_get_drvdata(pdev);
+	const struct tegra_tsensor *tsensors = ts->soc->tsensors;
+	const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
+	u32 r, state;
+	int i;
+
+	seq_puts(s, "-----TSENSE (convert HW)-----\n");
+
+	for (i = 0; i < ts->soc->num_tsensors; i++) {
+		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
+		state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);
+
+		seq_printf(s, "%s: ", tsensors[i].name);
+		seq_printf(s, "En(%d) ", state);
+
+		if (!state) {
+			seq_puts(s, "\n");
+			continue;
+		}
+
+		state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
+		seq_printf(s, "tiddq(%d) ", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
+		seq_printf(s, "ten_count(%d) ", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
+		seq_printf(s, "tsample(%d) ", state + 1);
+
+		r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
+		state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
+		seq_printf(s, "Temp(%d/", state);
+		state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
+		seq_printf(s, "%d) ", translate_temp(state));
+
+		r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
+		state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
+		seq_printf(s, "Capture(%d/", state);
+		state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
+		seq_printf(s, "%d) ", state);
+
+		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
+		state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
+		seq_printf(s, "Stop(%d) ", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
+		seq_printf(s, "Tall(%d) ", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
+		seq_printf(s, "Over(%d/", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
+		seq_printf(s, "%d/", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
+		seq_printf(s, "%d) ", state);
+
+		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
+		state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
+		seq_printf(s, "Therm_A/B(%d/", state);
+		state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
+		seq_printf(s, "%d)\n", (s16)state);
+	}
+
+	r = readl(ts->regs + SENSOR_PDIV);
+	seq_printf(s, "PDIV: 0x%x\n", r);
+
+	r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
+	seq_printf(s, "HOTSPOT: 0x%x\n", r);
+
+	seq_puts(s, "\n");
+	seq_puts(s, "-----SOC_THERM-----\n");
+
+	r = readl(ts->regs + SENSOR_TEMP1);
+	state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
+	seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
+	state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
+	seq_printf(s, " GPU(%d) ", translate_temp(state));
+	r = readl(ts->regs + SENSOR_TEMP2);
+	state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
+	seq_printf(s, " PLLX(%d) ", translate_temp(state));
+	state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
+	seq_printf(s, " MEM(%d)\n", translate_temp(state));
+
+	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
+	state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
+	seq_printf(s, "Thermtrip Any En(%d)\n", state);
+	for (i = 0; i < ts->soc->num_ttgs; i++) {
+		state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
+		seq_printf(s, "     %s En(%d) ", ttgs[i]->name, state);
+		state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
+		state *= ts->soc->thresh_grain;
+		seq_printf(s, "Thresh(%d)\n", state);
+	}
+
+	return 0;
+}
+
+static int regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, regs_show, inode->i_private);
+}
+
+static const struct file_operations regs_fops = {
+	.open		= regs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void soctherm_debug_init(struct platform_device *pdev)
+{
+	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+	struct dentry *root, *file;
+
+	root = debugfs_create_dir("soctherm", NULL);
+	if (!root) {
+		dev_err(&pdev->dev, "failed to create debugfs directory\n");
+		return;
+	}
+
+	tegra->debugfs_dir = root;
+
+	file = debugfs_create_file("reg_contents", 0644, root,
+				   pdev, &regs_fops);
+	if (!file) {
+		dev_err(&pdev->dev, "failed to create debugfs file\n");
+		debugfs_remove_recursive(tegra->debugfs_dir);
+		tegra->debugfs_dir = NULL;
+	}
+}
+#else
+static inline void soctherm_debug_init(struct platform_device *pdev) {}
+#endif
+
+static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
+{
+	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+	int err;
+
+	if (!tegra->clock_soctherm || !tegra->clock_tsensor)
+		return -EINVAL;
+
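+	/* hold the block in reset while its clocks are reconfigured */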
+	reset_control_assert(tegra->reset);
+
+	if (enable) {
+		err = clk_prepare_enable(tegra->clock_soctherm);
+		if (err) {
+			reset_control_deassert(tegra->reset);
+			return err;
+		}
+
+		err = clk_prepare_enable(tegra->clock_tsensor);
+		if (err) {
+			clk_disable_unprepare(tegra->clock_soctherm);
+			reset_control_deassert(tegra->reset);
+			return err;
+		}
+	} else {
+		clk_disable_unprepare(tegra->clock_tsensor);
+		clk_disable_unprepare(tegra->clock_soctherm);
+	}
+
+	reset_control_deassert(tegra->reset);
+
+	return 0;
+}
+
+static void soctherm_init(struct platform_device *pdev)
+{
+	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+	const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
+	int i;
+	u32 pdiv, hotspot;
+
+	/* Initialize raw sensors */
+	for (i = 0; i < tegra->soc->num_tsensors; ++i)
+		enable_tsensor(tegra, i);
+
+	/* program pdiv and hotspot offsets per THERM */
+	pdiv = readl(tegra->regs + SENSOR_PDIV);
+	hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
+	for (i = 0; i < tegra->soc->num_ttgs; ++i) {
+		pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
+				    ttgs[i]->pdiv);
+		/* hotspot offsets are relative to PLLX, so PLLX itself needs none */
+		if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
+			continue;
+		hotspot =  REG_SET_MASK(hotspot,
+					ttgs[i]->pllx_hotspot_mask,
+					ttgs[i]->pllx_hotspot_diff);
+	}
+	writel(pdiv, tegra->regs + SENSOR_PDIV);
+	writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
+}
+
+static const struct of_device_id tegra_soctherm_of_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+	{
+		.compatible = "nvidia,tegra124-soctherm",
+		.data = &tegra124_soctherm,
+	},
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+	{
+		.compatible = "nvidia,tegra132-soctherm",
+		.data = &tegra132_soctherm,
+	},
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+	{
+		.compatible = "nvidia,tegra210-soctherm",
+		.data = &tegra210_soctherm,
+	},
+#endif
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
+
+static int tegra_soctherm_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct tegra_soctherm *tegra;
+	struct thermal_zone_device *z;
+	struct tsensor_shared_calib shared_calib;
+	struct resource *res;
+	struct tegra_soctherm_soc *soc;
+	unsigned int i;
+	int err;
+
+	match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
+	if (!match)
+		return -ENODEV;
+
+	soc = (struct tegra_soctherm_soc *)match->data;
+	if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
+		return -EINVAL;
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, tegra);
+
+	tegra->soc = soc;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->regs))
+		return PTR_ERR(tegra->regs);
+
+	tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
+	if (IS_ERR(tegra->reset)) {
+		dev_err(&pdev->dev, "can't get soctherm reset\n");
+		return PTR_ERR(tegra->reset);
+	}
+
+	tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
+	if (IS_ERR(tegra->clock_tsensor)) {
+		dev_err(&pdev->dev, "can't get tsensor clock\n");
+		return PTR_ERR(tegra->clock_tsensor);
+	}
+
+	tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
+	if (IS_ERR(tegra->clock_soctherm)) {
+		dev_err(&pdev->dev, "can't get soctherm clock\n");
+		return PTR_ERR(tegra->clock_soctherm);
+	}
+
+	tegra->calib = devm_kzalloc(&pdev->dev,
+				    sizeof(u32) * soc->num_tsensors,
+				    GFP_KERNEL);
+	if (!tegra->calib)
+		return -ENOMEM;
+
+	/* calculate shared calibration data */
+	err = tegra_calc_shared_calib(soc->tfuse, &shared_calib);
+	if (err)
+		return err;
+
+	/* calculate tsensor calibration data */
+	for (i = 0; i < soc->num_tsensors; ++i) {
+		err = tegra_calc_tsensor_calib(&soc->tsensors[i],
+					       &shared_calib,
+					       &tegra->calib[i]);
+		if (err)
+			return err;
+	}
+
+	tegra->thermctl_tzs = devm_kzalloc(&pdev->dev,
+					   sizeof(*z) * soc->num_ttgs,
+					   GFP_KERNEL);
+	if (!tegra->thermctl_tzs)
+		return -ENOMEM;
+
+	err = soctherm_clk_enable(pdev, true);
+	if (err)
+		return err;
+
+	soctherm_init(pdev);
+
+	for (i = 0; i < soc->num_ttgs; ++i) {
+		struct tegra_thermctl_zone *zone =
+			devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+		if (!zone) {
+			err = -ENOMEM;
+			goto disable_clocks;
+		}
+
+		zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
+		zone->dev = &pdev->dev;
+		zone->sg = soc->ttgs[i];
+
+		z = devm_thermal_zone_of_sensor_register(&pdev->dev,
+							 soc->ttgs[i]->id, zone,
+							 &tegra_of_thermal_ops);
+		if (IS_ERR(z)) {
+			err = PTR_ERR(z);
+			dev_err(&pdev->dev, "failed to register sensor: %d\n",
+				err);
+			goto disable_clocks;
+		}
+
+		zone->tz = z;
+		tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
+
+		/* Configure hw trip points */
+		tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+	}
+
+	soctherm_debug_init(pdev);
+
+	return 0;
+
+disable_clocks:
+	soctherm_clk_enable(pdev, false);
+
+	return err;
+}
+
+static int tegra_soctherm_remove(struct platform_device *pdev)
+{
+	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(tegra->debugfs_dir);
+
+	soctherm_clk_enable(pdev, false);
+
+	return 0;
+}
+
+static int __maybe_unused soctherm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	soctherm_clk_enable(pdev, false);
+
+	return 0;
+}
+
+static int __maybe_unused soctherm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+	struct tegra_soctherm_soc *soc = tegra->soc;
+	int err, i;
+
+	err = soctherm_clk_enable(pdev, true);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Resume failed: enable clocks failed\n");
+		return err;
+	}
+
+	soctherm_init(pdev);
+
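+	/*
+	 * The THERMTRIP thresholds live in SOC_THERM registers, so they
+	 * have to be reprogrammed from the thermal zones after resume.
+	 */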
+	for (i = 0; i < soc->num_ttgs; ++i) {
+		struct thermal_zone_device *tz;
+
+		tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
+		tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+	}
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume);
+
+static struct platform_driver tegra_soctherm_driver = {
+	.probe = tegra_soctherm_probe,
+	.remove = tegra_soctherm_remove,
+	.driver = {
+		.name = "tegra_soctherm",
+		.pm = &tegra_soctherm_pm,
+		.of_match_table = tegra_soctherm_of_match,
+	},
+};
+module_platform_driver(tegra_soctherm_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
new file mode 100644
index 0000000..28e18ec
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+#define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+
+#define SENSOR_CONFIG2                          8
+#define SENSOR_CONFIG2_THERMA_MASK		(0xffff << 16)
+#define SENSOR_CONFIG2_THERMA_SHIFT		16
+#define SENSOR_CONFIG2_THERMB_MASK		0xffff
+#define SENSOR_CONFIG2_THERMB_SHIFT		0
+
+#define THERMCTL_THERMTRIP_CTL			0x80
+/* The bit fields are defined in the per-chip tegra*-soctherm.c files */
+
+#define SENSOR_PDIV				0x1c0
+#define SENSOR_PDIV_CPU_MASK			(0xf << 12)
+#define SENSOR_PDIV_GPU_MASK			(0xf << 8)
+#define SENSOR_PDIV_MEM_MASK			(0xf << 4)
+#define SENSOR_PDIV_PLLX_MASK			(0xf << 0)
+
+#define SENSOR_HOTSPOT_OFF			0x1c4
+#define SENSOR_HOTSPOT_CPU_MASK			(0xff << 16)
+#define SENSOR_HOTSPOT_GPU_MASK			(0xff << 8)
+#define SENSOR_HOTSPOT_MEM_MASK			(0xff << 0)
+
+#define SENSOR_TEMP1				0x1c8
+#define SENSOR_TEMP1_CPU_TEMP_MASK		(0xffff << 16)
+#define SENSOR_TEMP1_GPU_TEMP_MASK		0xffff
+#define SENSOR_TEMP2				0x1cc
+#define SENSOR_TEMP2_MEM_TEMP_MASK		(0xffff << 16)
+#define SENSOR_TEMP2_PLLX_TEMP_MASK		0xffff
+
+/**
+ * struct tegra_tsensor_group - SOC_THERM sensor group data
+ * @name: short name of the temperature sensor group
+ * @id: numeric ID of the temperature sensor group
+ * @sensor_temp_offset: offset of the SENSOR_TEMP* register
+ * @sensor_temp_mask: bit mask for this sensor group in SENSOR_TEMP* register
+ * @pdiv: the sensor count post-divider to use during runtime
+ * @pdiv_ate: the sensor count post-divider used during automated test
+ * @pdiv_mask: register bitfield mask for the PDIV field for this sensor
+ * @pllx_hotspot_diff: hotspot offset from the PLLX sensor, must be 0 for
+ *    PLLX sensor group
+ * @pllx_hotspot_mask: register bitfield mask for the HOTSPOT field
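+ * @thermtrip_enable_mask: bitfield that enables HW shutdown for this group
+ * @thermtrip_any_en_mask: bitfield of the "any sensor" shutdown enable bit
+ * @thermtrip_threshold_mask: bitfield holding the HW shutdown threshold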
+ */
+struct tegra_tsensor_group {
+	const char *name;
+	u8 id;
+	u16 sensor_temp_offset;
+	u32 sensor_temp_mask;
+	u32 pdiv, pdiv_ate, pdiv_mask;
+	u32 pllx_hotspot_diff, pllx_hotspot_mask;
+	u32 thermtrip_enable_mask;
+	u32 thermtrip_any_en_mask;
+	u32 thermtrip_threshold_mask;
+};
+
+struct tegra_tsensor_configuration {
+	u32 tall, tiddq_en, ten_count, pdiv, pdiv_ate, tsample, tsample_ate;
+};
+
+struct tegra_tsensor {
+	const char *name;
+	const u32 base;
+	const struct tegra_tsensor_configuration *config;
+	const u32 calib_fuse_offset;
+	/*
+	 * Correction values used to modify values read from
+	 * calibration fuses
+	 */
+	const s32 fuse_corr_alpha, fuse_corr_beta;
+	const struct tegra_tsensor_group *group;
+};
+
+struct tegra_soctherm_fuse {
+	u32 fuse_base_cp_mask, fuse_base_cp_shift;
+	u32 fuse_base_ft_mask, fuse_base_ft_shift;
+	u32 fuse_shift_ft_mask, fuse_shift_ft_shift;
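+	/* 0 means SHIFT_CP is read from FUSE_TSENSOR_COMMON[5:0] instead */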
+	u32 fuse_spare_realignment;
+};
+
+struct tsensor_shared_calib {
+	u32 base_cp, base_ft;
+	u32 actual_temp_cp, actual_temp_ft;
+};
+
+struct tegra_soctherm_soc {
+	const struct tegra_tsensor *tsensors;
+	const unsigned int num_tsensors;
+	const struct tegra_tsensor_group **ttgs;
+	const unsigned int num_ttgs;
+	const struct tegra_soctherm_fuse *tfuse;
+	const int thresh_grain;
+};
+
+int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
+			    struct tsensor_shared_calib *shared);
+int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
+			     const struct tsensor_shared_calib *shared,
+			     u32 *calib);
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+extern const struct tegra_soctherm_soc tegra124_soctherm;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+extern const struct tegra_soctherm_soc tegra132_soctherm;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_soctherm_soc tegra210_soctherm;
+#endif
+
+#endif
+
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
new file mode 100644
index 0000000..beb9d36
--- /dev/null
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA124_THERMTRIP_ANY_EN_MASK		(0x1 << 28)
+#define TEGRA124_THERMTRIP_MEM_EN_MASK		(0x1 << 27)
+#define TEGRA124_THERMTRIP_GPU_EN_MASK		(0x1 << 26)
+#define TEGRA124_THERMTRIP_CPU_EN_MASK		(0x1 << 25)
+#define TEGRA124_THERMTRIP_TSENSE_EN_MASK	(0x1 << 24)
+#define TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK	(0xff << 16)
+#define TEGRA124_THERMTRIP_CPU_THRESH_MASK	(0xff << 8)
+#define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK	0xff
+
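+/* millicelsius per LSB of the THERMTRIP threshold fields */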
+#define TEGRA124_THRESH_GRAIN			1000
+
+static const struct tegra_tsensor_configuration tegra124_tsensor_config = {
+	.tall = 16300,
+	.tiddq_en = 1,
+	.ten_count = 1,
+	.tsample = 120,
+	.tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
+	.id = TEGRA124_SOCTHERM_SENSOR_CPU,
+	.name	= "cpu",
+	.sensor_temp_offset	= SENSOR_TEMP1,
+	.sensor_temp_mask	= SENSOR_TEMP1_CPU_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_CPU_MASK,
+	.pllx_hotspot_diff = 10,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
+	.id = TEGRA124_SOCTHERM_SENSOR_GPU,
+	.name = "gpu",
+	.sensor_temp_offset = SENSOR_TEMP1,
+	.sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_GPU_MASK,
+	.pllx_hotspot_diff = 5,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
+	.id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+	.name = "pll",
+	.sensor_temp_offset = SENSOR_TEMP2,
+	.sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
+	.id = TEGRA124_SOCTHERM_SENSOR_MEM,
+	.name = "mem",
+	.sensor_temp_offset = SENSOR_TEMP2,
+	.sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_MEM_MASK,
+	.pllx_hotspot_diff = 0,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+	.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
+	&tegra124_tsensor_group_cpu,
+	&tegra124_tsensor_group_gpu,
+	&tegra124_tsensor_group_pll,
+	&tegra124_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra124_tsensors[] = {
+	{
+		.name = "cpu0",
+		.base = 0xc0,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x098,
+		.fuse_corr_alpha = 1135400,
+		.fuse_corr_beta = -6266900,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "cpu1",
+		.base = 0xe0,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x084,
+		.fuse_corr_alpha = 1122220,
+		.fuse_corr_beta = -5700700,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "cpu2",
+		.base = 0x100,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x088,
+		.fuse_corr_alpha = 1127000,
+		.fuse_corr_beta = -6768200,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "cpu3",
+		.base = 0x120,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x12c,
+		.fuse_corr_alpha = 1110900,
+		.fuse_corr_beta = -6232000,
+		.group = &tegra124_tsensor_group_cpu,
+	}, {
+		.name = "mem0",
+		.base = 0x140,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x158,
+		.fuse_corr_alpha = 1122300,
+		.fuse_corr_beta = -5936400,
+		.group = &tegra124_tsensor_group_mem,
+	}, {
+		.name = "mem1",
+		.base = 0x160,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x15c,
+		.fuse_corr_alpha = 1145700,
+		.fuse_corr_beta = -7124600,
+		.group = &tegra124_tsensor_group_mem,
+	}, {
+		.name = "gpu",
+		.base = 0x180,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x154,
+		.fuse_corr_alpha = 1120100,
+		.fuse_corr_beta = -6000500,
+		.group = &tegra124_tsensor_group_gpu,
+	}, {
+		.name = "pllx",
+		.base = 0x1a0,
+		.config = &tegra124_tsensor_config,
+		.calib_fuse_offset = 0x160,
+		.fuse_corr_alpha = 1106500,
+		.fuse_corr_beta = -6729300,
+		.group = &tegra124_tsensor_group_pll,
+	},
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * soctherm-fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra124_soctherm_fuse = {
+	.fuse_base_cp_mask = 0x3ff,
+	.fuse_base_cp_shift = 0,
+	.fuse_base_ft_mask = 0x7ff << 10,
+	.fuse_base_ft_shift = 10,
+	.fuse_shift_ft_mask = 0x1f << 21,
+	.fuse_shift_ft_shift = 21,
+	.fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra124_soctherm = {
+	.tsensors = tegra124_tsensors,
+	.num_tsensors = ARRAY_SIZE(tegra124_tsensors),
+	.ttgs = tegra124_tsensor_groups,
+	.num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
+	.tfuse = &tegra124_soctherm_fuse,
+	.thresh_grain = TEGRA124_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
new file mode 100644
index 0000000..e2aa84e
--- /dev/null
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA132_THERMTRIP_ANY_EN_MASK		(0x1 << 28)
+#define TEGRA132_THERMTRIP_MEM_EN_MASK		(0x1 << 27)
+#define TEGRA132_THERMTRIP_GPU_EN_MASK		(0x1 << 26)
+#define TEGRA132_THERMTRIP_CPU_EN_MASK		(0x1 << 25)
+#define TEGRA132_THERMTRIP_TSENSE_EN_MASK	(0x1 << 24)
+#define TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK	(0xff << 16)
+#define TEGRA132_THERMTRIP_CPU_THRESH_MASK	(0xff << 8)
+#define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK	0xff
+
+#define TEGRA132_THRESH_GRAIN			1000
+
+static const struct tegra_tsensor_configuration tegra132_tsensor_config = {
+	.tall = 16300,
+	.tiddq_en = 1,
+	.ten_count = 1,
+	.tsample = 120,
+	.tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
+	.id = TEGRA124_SOCTHERM_SENSOR_CPU,
+	.name = "cpu",
+	.sensor_temp_offset = SENSOR_TEMP1,
+	.sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_CPU_MASK,
+	.pllx_hotspot_diff = 10,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
+	.id = TEGRA124_SOCTHERM_SENSOR_GPU,
+	.name = "gpu",
+	.sensor_temp_offset = SENSOR_TEMP1,
+	.sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_GPU_MASK,
+	.pllx_hotspot_diff = 5,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
+	.id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+	.name = "pll",
+	.sensor_temp_offset = SENSOR_TEMP2,
+	.sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
+	.id = TEGRA124_SOCTHERM_SENSOR_MEM,
+	.name = "mem",
+	.sensor_temp_offset = SENSOR_TEMP2,
+	.sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_MEM_MASK,
+	.pllx_hotspot_diff = 0,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+	.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = {
+	&tegra132_tsensor_group_cpu,
+	&tegra132_tsensor_group_gpu,
+	&tegra132_tsensor_group_pll,
+	&tegra132_tsensor_group_mem,
+};
+
+static struct tegra_tsensor tegra132_tsensors[] = {
+	{
+		.name = "cpu0",
+		.base = 0xc0,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x098,
+		.fuse_corr_alpha = 1126600,
+		.fuse_corr_beta = -9433500,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "cpu1",
+		.base = 0xe0,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x084,
+		.fuse_corr_alpha = 1110800,
+		.fuse_corr_beta = -7383000,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "cpu2",
+		.base = 0x100,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x088,
+		.fuse_corr_alpha = 1113800,
+		.fuse_corr_beta = -6215200,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "cpu3",
+		.base = 0x120,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x12c,
+		.fuse_corr_alpha = 1129600,
+		.fuse_corr_beta = -8196100,
+		.group = &tegra132_tsensor_group_cpu,
+	}, {
+		.name = "mem0",
+		.base = 0x140,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x158,
+		.fuse_corr_alpha = 1132900,
+		.fuse_corr_beta = -6755300,
+		.group = &tegra132_tsensor_group_mem,
+	}, {
+		.name = "mem1",
+		.base = 0x160,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x15c,
+		.fuse_corr_alpha = 1142300,
+		.fuse_corr_beta = -7374200,
+		.group = &tegra132_tsensor_group_mem,
+	}, {
+		.name = "gpu",
+		.base = 0x180,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x154,
+		.fuse_corr_alpha = 1125100,
+		.fuse_corr_beta = -6350400,
+		.group = &tegra132_tsensor_group_gpu,
+	}, {
+		.name = "pllx",
+		.base = 0x1a0,
+		.config = &tegra132_tsensor_config,
+		.calib_fuse_offset = 0x160,
+		.fuse_corr_alpha = 1118100,
+		.fuse_corr_beta = -8208800,
+		.group = &tegra132_tsensor_group_pll,
+	},
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * soctherm-fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra132_soctherm_fuse = {
+	.fuse_base_cp_mask = 0x3ff,
+	.fuse_base_cp_shift = 0,
+	.fuse_base_ft_mask = 0x7ff << 10,
+	.fuse_base_ft_shift = 10,
+	.fuse_shift_ft_mask = 0x1f << 21,
+	.fuse_shift_ft_shift = 21,
+	.fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra132_soctherm = {
+	.tsensors = tegra132_tsensors,
+	.num_tsensors = ARRAY_SIZE(tegra132_tsensors),
+	.ttgs = tegra132_tsensor_groups,
+	.num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
+	.tfuse = &tegra132_soctherm_fuse,
+	.thresh_grain = TEGRA132_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
new file mode 100644
index 0000000..19cc0ab
--- /dev/null
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA210_THERMTRIP_ANY_EN_MASK		(0x1 << 31)
+#define TEGRA210_THERMTRIP_MEM_EN_MASK		(0x1 << 30)
+#define TEGRA210_THERMTRIP_GPU_EN_MASK		(0x1 << 29)
+#define TEGRA210_THERMTRIP_CPU_EN_MASK		(0x1 << 28)
+#define TEGRA210_THERMTRIP_TSENSE_EN_MASK	(0x1 << 27)
+#define TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK	(0x1ff << 18)
+#define TEGRA210_THERMTRIP_CPU_THRESH_MASK	(0x1ff << 9)
+#define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK	0x1ff
+
+#define TEGRA210_THRESH_GRAIN			500
+
+static const struct tegra_tsensor_configuration tegra210_tsensor_config = {
+	.tall = 16300,
+	.tiddq_en = 1,
+	.ten_count = 1,
+	.tsample = 120,
+	.tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
+	.id = TEGRA124_SOCTHERM_SENSOR_CPU,
+	.name = "cpu",
+	.sensor_temp_offset = SENSOR_TEMP1,
+	.sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_CPU_MASK,
+	.pllx_hotspot_diff = 10,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
+	.id = TEGRA124_SOCTHERM_SENSOR_GPU,
+	.name = "gpu",
+	.sensor_temp_offset = SENSOR_TEMP1,
+	.sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_GPU_MASK,
+	.pllx_hotspot_diff = 5,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
+	.id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+	.name = "pll",
+	.sensor_temp_offset = SENSOR_TEMP2,
+	.sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
+	.id = TEGRA124_SOCTHERM_SENSOR_MEM,
+	.name = "mem",
+	.sensor_temp_offset = SENSOR_TEMP2,
+	.sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+	.pdiv = 8,
+	.pdiv_ate = 8,
+	.pdiv_mask = SENSOR_PDIV_MEM_MASK,
+	.pllx_hotspot_diff = 0,
+	.pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+	.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+	.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
+	.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = {
+	&tegra210_tsensor_group_cpu,
+	&tegra210_tsensor_group_gpu,
+	&tegra210_tsensor_group_pll,
+	&tegra210_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra210_tsensors[] = {
+	{
+		.name = "cpu0",
+		.base = 0xc0,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x098,
+		.fuse_corr_alpha = 1085000,
+		.fuse_corr_beta = 3244200,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "cpu1",
+		.base = 0xe0,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x084,
+		.fuse_corr_alpha = 1126200,
+		.fuse_corr_beta = -67500,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "cpu2",
+		.base = 0x100,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x088,
+		.fuse_corr_alpha = 1098400,
+		.fuse_corr_beta = 2251100,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "cpu3",
+		.base = 0x120,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x12c,
+		.fuse_corr_alpha = 1108000,
+		.fuse_corr_beta = 602700,
+		.group = &tegra210_tsensor_group_cpu,
+	}, {
+		.name = "mem0",
+		.base = 0x140,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x158,
+		.fuse_corr_alpha = 1069200,
+		.fuse_corr_beta = 3549900,
+		.group = &tegra210_tsensor_group_mem,
+	}, {
+		.name = "mem1",
+		.base = 0x160,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x15c,
+		.fuse_corr_alpha = 1173700,
+		.fuse_corr_beta = -6263600,
+		.group = &tegra210_tsensor_group_mem,
+	}, {
+		.name = "gpu",
+		.base = 0x180,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x154,
+		.fuse_corr_alpha = 1074300,
+		.fuse_corr_beta = 2734900,
+		.group = &tegra210_tsensor_group_gpu,
+	}, {
+		.name = "pllx",
+		.base = 0x1a0,
+		.config = &tegra210_tsensor_config,
+		.calib_fuse_offset = 0x160,
+		.fuse_corr_alpha = 1039700,
+		.fuse_corr_beta = 6829100,
+		.group = &tegra210_tsensor_group_pll,
+	},
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON, which is described in
+ * soctherm-fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
+	.fuse_base_cp_mask = 0x3ff << 11,
+	.fuse_base_cp_shift = 11,
+	.fuse_base_ft_mask = 0x7ff << 21,
+	.fuse_base_ft_shift = 21,
+	.fuse_shift_ft_mask = 0x1f << 6,
+	.fuse_shift_ft_shift = 6,
+	.fuse_spare_realignment = 0,
+};
+
+const struct tegra_soctherm_soc tegra210_soctherm = {
+	.tsensors = tegra210_tsensors,
+	.num_tsensors = ARRAY_SIZE(tegra210_tsensors),
+	.ttgs = tegra210_tsensor_groups,
+	.num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
+	.tfuse = &tegra210_soctherm_fuse,
+	.thresh_grain = TEGRA210_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
deleted file mode 100644
index 1369752..0000000
--- a/drivers/thermal/tegra_soctherm.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
- *
- * Author:
- *	Mikko Perttunen <mperttunen@nvidia.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/thermal.h>
-
-#include <soc/tegra/fuse.h>
-
-#define SENSOR_CONFIG0				0
-#define SENSOR_CONFIG0_STOP			BIT(0)
-#define SENSOR_CONFIG0_TALL_SHIFT		8
-#define SENSOR_CONFIG0_TCALC_OVER		BIT(4)
-#define SENSOR_CONFIG0_OVER			BIT(3)
-#define SENSOR_CONFIG0_CPTR_OVER		BIT(2)
-
-#define SENSOR_CONFIG1				4
-#define SENSOR_CONFIG1_TSAMPLE_SHIFT		0
-#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT		15
-#define SENSOR_CONFIG1_TEN_COUNT_SHIFT		24
-#define SENSOR_CONFIG1_TEMP_ENABLE		BIT(31)
-
-#define SENSOR_CONFIG2				8
-#define SENSOR_CONFIG2_THERMA_SHIFT		16
-#define SENSOR_CONFIG2_THERMB_SHIFT		0
-
-#define SENSOR_PDIV				0x1c0
-#define SENSOR_PDIV_T124			0x8888
-#define SENSOR_HOTSPOT_OFF			0x1c4
-#define SENSOR_HOTSPOT_OFF_T124			0x00060600
-#define SENSOR_TEMP1				0x1c8
-#define SENSOR_TEMP2				0x1cc
-
-#define SENSOR_TEMP_MASK			0xffff
-#define READBACK_VALUE_MASK			0xff00
-#define READBACK_VALUE_SHIFT			8
-#define READBACK_ADD_HALF			BIT(7)
-#define READBACK_NEGATE				BIT(0)
-
-#define FUSE_TSENSOR8_CALIB			0x180
-#define FUSE_SPARE_REALIGNMENT_REG_0		0x1fc
-
-#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK	0x1fff
-#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK	(0x1fff << 13)
-#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT	13
-
-#define FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK	0x3ff
-#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK	(0x7ff << 10)
-#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT	10
-
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_MASK 0x3f
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK (0x1f << 21)
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT 21
-
-#define NOMINAL_CALIB_FT_T124			105
-#define NOMINAL_CALIB_CP_T124			25
-
-struct tegra_tsensor_configuration {
-	u32 tall, tsample, tiddq_en, ten_count, pdiv, tsample_ate, pdiv_ate;
-};
-
-struct tegra_tsensor {
-	const struct tegra_tsensor_configuration *config;
-	u32 base, calib_fuse_offset;
-	/* Correction values used to modify values read from calibration fuses */
-	s32 fuse_corr_alpha, fuse_corr_beta;
-};
-
-struct tegra_thermctl_zone {
-	void __iomem *reg;
-	unsigned int shift;
-};
-
-static const struct tegra_tsensor_configuration t124_tsensor_config = {
-	.tall = 16300,
-	.tsample = 120,
-	.tiddq_en = 1,
-	.ten_count = 1,
-	.pdiv = 8,
-	.tsample_ate = 480,
-	.pdiv_ate = 8
-};
-
-static const struct tegra_tsensor t124_tsensors[] = {
-	{
-		.config = &t124_tsensor_config,
-		.base = 0xc0,
-		.calib_fuse_offset = 0x098,
-		.fuse_corr_alpha = 1135400,
-		.fuse_corr_beta = -6266900,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0xe0,
-		.calib_fuse_offset = 0x084,
-		.fuse_corr_alpha = 1122220,
-		.fuse_corr_beta = -5700700,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0x100,
-		.calib_fuse_offset = 0x088,
-		.fuse_corr_alpha = 1127000,
-		.fuse_corr_beta = -6768200,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0x120,
-		.calib_fuse_offset = 0x12c,
-		.fuse_corr_alpha = 1110900,
-		.fuse_corr_beta = -6232000,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0x140,
-		.calib_fuse_offset = 0x158,
-		.fuse_corr_alpha = 1122300,
-		.fuse_corr_beta = -5936400,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0x160,
-		.calib_fuse_offset = 0x15c,
-		.fuse_corr_alpha = 1145700,
-		.fuse_corr_beta = -7124600,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0x180,
-		.calib_fuse_offset = 0x154,
-		.fuse_corr_alpha = 1120100,
-		.fuse_corr_beta = -6000500,
-	},
-	{
-		.config = &t124_tsensor_config,
-		.base = 0x1a0,
-		.calib_fuse_offset = 0x160,
-		.fuse_corr_alpha = 1106500,
-		.fuse_corr_beta = -6729300,
-	},
-};
-
-struct tegra_soctherm {
-	struct reset_control *reset;
-	struct clk *clock_tsensor;
-	struct clk *clock_soctherm;
-	void __iomem *regs;
-
-	struct thermal_zone_device *thermctl_tzs[4];
-};
-
-struct tsensor_shared_calibration {
-	u32 base_cp, base_ft;
-	u32 actual_temp_cp, actual_temp_ft;
-};
-
-static int calculate_shared_calibration(struct tsensor_shared_calibration *r)
-{
-	u32 val, shifted_cp, shifted_ft;
-	int err;
-
-	err = tegra_fuse_readl(FUSE_TSENSOR8_CALIB, &val);
-	if (err)
-		return err;
-	r->base_cp = val & FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK;
-	r->base_ft = (val & FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK)
-		>> FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT;
-	val = ((val & FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK)
-		>> FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT);
-	shifted_ft = sign_extend32(val, 4);
-
-	err = tegra_fuse_readl(FUSE_SPARE_REALIGNMENT_REG_0, &val);
-	if (err)
-		return err;
-	shifted_cp = sign_extend32(val, 5);
-
-	r->actual_temp_cp = 2 * NOMINAL_CALIB_CP_T124 + shifted_cp;
-	r->actual_temp_ft = 2 * NOMINAL_CALIB_FT_T124 + shifted_ft;
-
-	return 0;
-}
-
-static s64 div64_s64_precise(s64 a, s64 b)
-{
-	s64 r, al;
-
-	/* Scale up for increased precision division */
-	al = a << 16;
-
-	r = div64_s64(al * 2 + 1, 2 * b);
-	return r >> 16;
-}
-
-static int
-calculate_tsensor_calibration(const struct tegra_tsensor *sensor,
-			      const struct tsensor_shared_calibration *shared,
-			      u32 *calib)
-{
-	u32 val;
-	s32 actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp,
-	    mult, div;
-	s16 therma, thermb;
-	s64 tmp;
-	int err;
-
-	err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
-	if (err)
-		return err;
-
-	actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
-	val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK)
-		>> FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
-	actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
-
-	delta_sens = actual_tsensor_ft - actual_tsensor_cp;
-	delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
-
-	mult = sensor->config->pdiv * sensor->config->tsample_ate;
-	div = sensor->config->tsample * sensor->config->pdiv_ate;
-
-	therma = div64_s64_precise((s64) delta_temp * (1LL << 13) * mult,
-				   (s64) delta_sens * div);
-
-	tmp = (s64)actual_tsensor_ft * shared->actual_temp_cp -
-	      (s64)actual_tsensor_cp * shared->actual_temp_ft;
-	thermb = div64_s64_precise(tmp, (s64)delta_sens);
-
-	therma = div64_s64_precise((s64)therma * sensor->fuse_corr_alpha,
-				   (s64)1000000LL);
-	thermb = div64_s64_precise((s64)thermb * sensor->fuse_corr_alpha +
-				   sensor->fuse_corr_beta, (s64)1000000LL);
-
-	*calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
-		 ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
-
-	return 0;
-}
-
-static int enable_tsensor(struct tegra_soctherm *tegra,
-			  const struct tegra_tsensor *sensor,
-			  const struct tsensor_shared_calibration *shared)
-{
-	void __iomem *base = tegra->regs + sensor->base;
-	unsigned int val;
-	u32 calib;
-	int err;
-
-	err = calculate_tsensor_calibration(sensor, shared, &calib);
-	if (err)
-		return err;
-
-	val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
-	writel(val, base + SENSOR_CONFIG0);
-
-	val  = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
-	val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
-	val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
-	val |= SENSOR_CONFIG1_TEMP_ENABLE;
-	writel(val, base + SENSOR_CONFIG1);
-
-	writel(calib, base + SENSOR_CONFIG2);
-
-	return 0;
-}
-
-/*
- * Translate from soctherm readback format to millicelsius.
- * The soctherm readback format in bits is as follows:
- *   TTTTTTTT H______N
- * where T's contain the temperature in Celsius,
- * H denotes an addition of 0.5 Celsius and N denotes negation
- * of the final value.
- */
-static int translate_temp(u16 val)
-{
-	long t;
-
-	t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
-	if (val & READBACK_ADD_HALF)
-		t += 500;
-	if (val & READBACK_NEGATE)
-		t *= -1;
-
-	return t;
-}
-
-static int tegra_thermctl_get_temp(void *data, int *out_temp)
-{
-	struct tegra_thermctl_zone *zone = data;
-	u32 val;
-
-	val = (readl(zone->reg) >> zone->shift) & SENSOR_TEMP_MASK;
-	*out_temp = translate_temp(val);
-
-	return 0;
-}
-
-static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
-	.get_temp = tegra_thermctl_get_temp,
-};
-
-static const struct of_device_id tegra_soctherm_of_match[] = {
-	{ .compatible = "nvidia,tegra124-soctherm" },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
-
-struct thermctl_zone_desc {
-	unsigned int offset;
-	unsigned int shift;
-};
-
-static const struct thermctl_zone_desc t124_thermctl_temp_zones[] = {
-	{ SENSOR_TEMP1, 16 },
-	{ SENSOR_TEMP2, 16 },
-	{ SENSOR_TEMP1, 0 },
-	{ SENSOR_TEMP2, 0 }
-};
-
-static int tegra_soctherm_probe(struct platform_device *pdev)
-{
-	struct tegra_soctherm *tegra;
-	struct thermal_zone_device *tz;
-	struct tsensor_shared_calibration shared_calib;
-	struct resource *res;
-	unsigned int i;
-	int err;
-
-	const struct tegra_tsensor *tsensors = t124_tsensors;
-
-	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
-	if (!tegra)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	tegra->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(tegra->regs))
-		return PTR_ERR(tegra->regs);
-
-	tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
-	if (IS_ERR(tegra->reset)) {
-		dev_err(&pdev->dev, "can't get soctherm reset\n");
-		return PTR_ERR(tegra->reset);
-	}
-
-	tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
-	if (IS_ERR(tegra->clock_tsensor)) {
-		dev_err(&pdev->dev, "can't get tsensor clock\n");
-		return PTR_ERR(tegra->clock_tsensor);
-	}
-
-	tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
-	if (IS_ERR(tegra->clock_soctherm)) {
-		dev_err(&pdev->dev, "can't get soctherm clock\n");
-		return PTR_ERR(tegra->clock_soctherm);
-	}
-
-	reset_control_assert(tegra->reset);
-
-	err = clk_prepare_enable(tegra->clock_soctherm);
-	if (err)
-		return err;
-
-	err = clk_prepare_enable(tegra->clock_tsensor);
-	if (err) {
-		clk_disable_unprepare(tegra->clock_soctherm);
-		return err;
-	}
-
-	reset_control_deassert(tegra->reset);
-
-	/* Initialize raw sensors */
-
-	err = calculate_shared_calibration(&shared_calib);
-	if (err)
-		goto disable_clocks;
-
-	for (i = 0; i < ARRAY_SIZE(t124_tsensors); ++i) {
-		err = enable_tsensor(tegra, tsensors + i, &shared_calib);
-		if (err)
-			goto disable_clocks;
-	}
-
-	writel(SENSOR_PDIV_T124, tegra->regs + SENSOR_PDIV);
-	writel(SENSOR_HOTSPOT_OFF_T124, tegra->regs + SENSOR_HOTSPOT_OFF);
-
-	/* Initialize thermctl sensors */
-
-	for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
-		struct tegra_thermctl_zone *zone =
-			devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
-		if (!zone) {
-			err = -ENOMEM;
-			goto unregister_tzs;
-		}
-
-		zone->reg = tegra->regs + t124_thermctl_temp_zones[i].offset;
-		zone->shift = t124_thermctl_temp_zones[i].shift;
-
-		tz = thermal_zone_of_sensor_register(&pdev->dev, i, zone,
-						     &tegra_of_thermal_ops);
-		if (IS_ERR(tz)) {
-			err = PTR_ERR(tz);
-			dev_err(&pdev->dev, "failed to register sensor: %d\n",
-				err);
-			goto unregister_tzs;
-		}
-
-		tegra->thermctl_tzs[i] = tz;
-	}
-
-	return 0;
-
-unregister_tzs:
-	while (i--)
-		thermal_zone_of_sensor_unregister(&pdev->dev,
-						  tegra->thermctl_tzs[i]);
-
-disable_clocks:
-	clk_disable_unprepare(tegra->clock_tsensor);
-	clk_disable_unprepare(tegra->clock_soctherm);
-
-	return err;
-}
-
-static int tegra_soctherm_remove(struct platform_device *pdev)
-{
-	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
-		thermal_zone_of_sensor_unregister(&pdev->dev,
-						  tegra->thermctl_tzs[i]);
-	}
-
-	clk_disable_unprepare(tegra->clock_tsensor);
-	clk_disable_unprepare(tegra->clock_soctherm);
-
-	return 0;
-}
-
-static struct platform_driver tegra_soctherm_driver = {
-	.probe = tegra_soctherm_probe,
-	.remove = tegra_soctherm_remove,
-	.driver = {
-		.name = "tegra-soctherm",
-		.of_match_table = tegra_soctherm_of_match,
-	},
-};
-module_platform_driver(tegra_soctherm_driver);
-
-MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
new file mode 100644
index 0000000..73f55d6
--- /dev/null
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -0,0 +1,182 @@
+/*
+ * Generic ADC thermal driver
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/iio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+struct gadc_thermal_info {
+	struct device *dev;
+	struct thermal_zone_device *tz_dev;
+	struct iio_channel *channel;
+	s32 *lookup_table;
+	int nlookup_table;
+};
+
+static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
+{
+	int temp, adc_hi, adc_lo;
+	int i;
+
+	for (i = 0; i < gti->nlookup_table; i++) {
+		if (val >= gti->lookup_table[2 * i + 1])
+			break;
+	}
+
+	if (i == 0) {
+		temp = gti->lookup_table[0];
+	} else if (i >= (gti->nlookup_table - 1)) {
+		temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
+	} else {
+		adc_hi = gti->lookup_table[2 * i - 1];
+		adc_lo = gti->lookup_table[2 * i + 1];
+		temp = gti->lookup_table[2 * i];
+		temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
+	}
+
+	return temp;
+}
+
+static int gadc_thermal_get_temp(void *data, int *temp)
+{
+	struct gadc_thermal_info *gti = data;
+	int val;
+	int ret;
+
+	ret = iio_read_channel_processed(gti->channel, &val);
+	if (ret < 0) {
+		dev_err(gti->dev, "IIO channel read failed %d\n", ret);
+		return ret;
+	}
+	*temp = gadc_thermal_adc_to_temp(gti, val);
+
+	return 0;
+}
+
+static const struct thermal_zone_of_device_ops gadc_thermal_ops = {
+	.get_temp = gadc_thermal_get_temp,
+};
+
+static int gadc_thermal_read_linear_lookup_table(struct device *dev,
+						 struct gadc_thermal_info *gti)
+{
+	struct device_node *np = dev->of_node;
+	int ntable;
+	int ret;
+
+	ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
+						 sizeof(u32));
+	if (ntable < 0) {
+		dev_err(dev, "Lookup table is not provided\n");
+		return ntable;
+	}
+
+	if (ntable % 2) {
+		dev_err(dev, "Pair of temperature vs ADC read value missing\n");
+		return -EINVAL;
+	}
+
+	gti->lookup_table = devm_kzalloc(dev, sizeof(*gti->lookup_table) *
+					 ntable, GFP_KERNEL);
+	if (!gti->lookup_table)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(np, "temperature-lookup-table",
+					 (u32 *)gti->lookup_table, ntable);
+	if (ret < 0) {
+		dev_err(dev, "Failed to read temperature lookup table: %d\n",
+			ret);
+		return ret;
+	}
+
+	gti->nlookup_table = ntable / 2;
+
+	return 0;
+}
+
+static int gadc_thermal_probe(struct platform_device *pdev)
+{
+	struct gadc_thermal_info *gti;
+	int ret;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "Only DT based supported\n");
+		return -ENODEV;
+	}
+
+	gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL);
+	if (!gti)
+		return -ENOMEM;
+
+	ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+	if (ret < 0)
+		return ret;
+
+	gti->dev = &pdev->dev;
+	platform_set_drvdata(pdev, gti);
+
+	gti->channel = iio_channel_get(&pdev->dev, "sensor-channel");
+	if (IS_ERR(gti->channel)) {
+		ret = PTR_ERR(gti->channel);
+		dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
+		return ret;
+	}
+
+	gti->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0,
+						      gti, &gadc_thermal_ops);
+	if (IS_ERR(gti->tz_dev)) {
+		ret = PTR_ERR(gti->tz_dev);
+		dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n",
+			ret);
+		goto sensor_fail;
+	}
+
+	return 0;
+
+sensor_fail:
+	iio_channel_release(gti->channel);
+
+	return ret;
+}
+
+static int gadc_thermal_remove(struct platform_device *pdev)
+{
+	struct gadc_thermal_info *gti = platform_get_drvdata(pdev);
+
+	thermal_zone_of_sensor_unregister(&pdev->dev, gti->tz_dev);
+	iio_channel_release(gti->channel);
+
+	return 0;
+}
+
+static const struct of_device_id of_adc_thermal_match[] = {
+	{ .compatible = "generic-adc-thermal", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_adc_thermal_match);
+
+static struct platform_driver gadc_thermal_driver = {
+	.driver = {
+		.name = "generic-adc-thermal",
+		.of_match_table = of_adc_thermal_match,
+	},
+	.probe = gadc_thermal_probe,
+	.remove = gadc_thermal_remove,
+};
+
+module_platform_driver(gadc_thermal_driver);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("Generic ADC thermal driver using IIO framework with DT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index b213a12..15c0a9a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -337,7 +337,7 @@
 		return -EINVAL;
 
 	/* in case this is specified by DT */
-	data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
+	data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id,
 					data, &ti_of_thermal_ops);
 	if (IS_ERR(data->ti_thermal)) {
 		/* Create thermal zone */
@@ -368,9 +368,6 @@
 	if (data && data->ti_thermal) {
 		if (data->our_zone)
 			thermal_zone_device_unregister(data->ti_thermal);
-		else
-			thermal_zone_of_sensor_unregister(bgp->dev,
-							  data->ti_thermal);
 	}
 
 	return 0;
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 7fc919f..97f0a2b 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -555,7 +555,7 @@
 {
 	unsigned int cpu = (unsigned long) hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		get_core_online(cpu);
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 82c4d2e..9510305 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -120,17 +120,6 @@
 	  All modern Linux systems use the Unix98 ptys.  Say Y unless
 	  you're on an embedded system and want to conserve memory.
 
-config DEVPTS_MULTIPLE_INSTANCES
-	bool "Support multiple instances of devpts"
-	depends on UNIX98_PTYS
-	default n
-	---help---
-	  Enable support for multiple instances of devpts filesystem.
-	  If you want to have isolated PTY namespaces (eg: in containers),
-	  say Y here.  Otherwise, say N. If enabled, each mount of devpts
-	  filesystem with the '-o newinstance' option will create an
-	  independent PTY namespace.
-
 config LEGACY_PTYS
 	bool "Legacy (BSD) PTY support"
 	default y
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index dd4b841..f856c45 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -668,7 +668,7 @@
 	else
 		fsi = tty->link->driver_data;
 	devpts_kill_index(fsi, tty->index);
-	devpts_put_ref(fsi);
+	devpts_release(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +733,11 @@
 	if (retval)
 		return retval;
 
-	fsi = devpts_get_ref(inode, filp);
-	retval = -ENODEV;
-	if (!fsi)
+	fsi = devpts_acquire(filp);
+	if (IS_ERR(fsi)) {
+		retval = PTR_ERR(fsi);
 		goto out_free_file;
+	}
 
 	/* find a device that is not in use. */
 	mutex_lock(&devpts_mutex);
@@ -745,7 +746,7 @@
 
 	retval = index;
 	if (index < 0)
-		goto out_put_ref;
+		goto out_put_fsi;
 
 
 	mutex_lock(&tty_mutex);
@@ -789,8 +790,8 @@
 	return retval;
 out:
 	devpts_kill_index(fsi, index);
-out_put_ref:
-	devpts_put_ref(fsi);
+out_put_fsi:
+	devpts_release(fsi);
 out_free_file:
 	tty_free_file(filp);
 	return retval;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index a2aa655..1b7331e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2360,7 +2360,7 @@
 		return ret;
 
 	ret = of_alias_get_id(np, "serial");
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		seen_dev_without_alias = true;
 		ret = index;
 	} else {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 1897106..699447a 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -654,7 +654,7 @@
 		return ret;
 
 	ret = of_alias_get_id(np, "serial");
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		ret = index;
 	else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
 		dev_warn(dev, "requested serial port %d not available.\n", ret);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 2ace029..35fe3c8 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1290,15 +1290,6 @@
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int usbg_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void usbg_close_session(struct se_session *se_sess)
-{
-}
-
 static u32 usbg_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
@@ -1735,8 +1726,6 @@
 	.tpg_check_prod_mode_write_protect = usbg_check_false,
 	.tpg_get_inst_index		= usbg_tpg_get_inst_index,
 	.release_cmd			= usbg_release_cmd,
-	.shutdown_session		= usbg_shutdown_session,
-	.close_session			= usbg_close_session,
 	.sess_get_index			= usbg_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= usbg_send_write_request,
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 712a849..188b1ff 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -113,6 +113,35 @@
 static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
 static void vfio_pci_disable(struct vfio_pci_device *vdev);
 
+/*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+ * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
+ * If a device implements the former but not the latter, we would typically
+ * expect broken_intx_masking to be set and require an exclusive interrupt.
+ * However, since we do have control of the device's ability to assert INTx,
+ * we can instead pretend that the device does not implement INTx, virtualizing
+ * the pin register to report zero and maintaining DisINTx set on the host.
+ */
+static bool vfio_pci_nointx(struct pci_dev *pdev)
+{
+	switch (pdev->vendor) {
+	case PCI_VENDOR_ID_INTEL:
+		switch (pdev->device) {
+		/* All i40e (XL710/X710) 10/20/40GbE NICs */
+		case 0x1572:
+		case 0x1574:
+		case 0x1580 ... 0x1581:
+		case 0x1583 ... 0x1589:
+		case 0x37d0 ... 0x37d2:
+			return true;
+		default:
+			return false;
+		}
+	}
+
+	return false;
+}
+
 static int vfio_pci_enable(struct vfio_pci_device *vdev)
 {
 	struct pci_dev *pdev = vdev->pdev;
@@ -136,6 +165,21 @@
 		pr_debug("%s: Couldn't store %s saved state\n",
 			 __func__, dev_name(&pdev->dev));
 
+	if (likely(!nointxmask)) {
+		if (vfio_pci_nointx(pdev)) {
+			dev_info(&pdev->dev, "Masking broken INTx support\n");
+			vdev->nointx = true;
+			pci_intx(pdev, 0);
+		} else
+			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
+	}
+
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
+		cmd &= ~PCI_COMMAND_INTX_DISABLE;
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
+
 	ret = vfio_config_init(vdev);
 	if (ret) {
 		kfree(vdev->pci_saved_state);
@@ -144,15 +188,6 @@
 		return ret;
 	}
 
-	if (likely(!nointxmask))
-		vdev->pci_2_3 = pci_intx_mask_supported(pdev);
-
-	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
-		cmd &= ~PCI_COMMAND_INTX_DISABLE;
-		pci_write_config_word(pdev, PCI_COMMAND, cmd);
-	}
-
 	msix_pos = pdev->msix_cap;
 	if (msix_pos) {
 		u16 flags;
@@ -304,7 +339,7 @@
 	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
 		u8 pin;
 		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
-		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
+		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
 			return 1;
 
 	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 142c533..688691d 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -408,6 +408,7 @@
 {
 	struct pci_dev *pdev = vdev->pdev;
 	u32 *rbar = vdev->rbar;
+	u16 cmd;
 	int i;
 
 	if (pdev->is_virtfn)
@@ -420,6 +421,12 @@
 		pci_user_write_config_dword(pdev, i, *rbar);
 
 	pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
+
+	if (vdev->nointx) {
+		pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
+		cmd |= PCI_COMMAND_INTX_DISABLE;
+		pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
 }
 
 static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
@@ -515,6 +522,23 @@
 	return count;
 }
 
+/* Test whether BARs match the value we think they should contain */
+static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
+{
+	int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
+	u32 bar;
+
+	for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
+		if (vdev->rbar[i]) {
+			ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
+			if (ret || vdev->rbar[i] != bar)
+				return true;
+		}
+	}
+
+	return false;
+}
+
 static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 				   int count, struct perm_bits *perm,
 				   int offset, __le32 val)
@@ -553,7 +577,8 @@
 		 * SR-IOV devices will trigger this, but we catch them later
 		 */
 		if ((new_mem && virt_mem && !phys_mem) ||
-		    (new_io && virt_io && !phys_io))
+		    (new_io && virt_io && !phys_io) ||
+		    vfio_need_bar_restore(vdev))
 			vfio_bar_restore(vdev);
 	}
 
@@ -724,7 +749,8 @@
 		if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
 			return count;
 	} else {
-		if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+		data = 0;
+		if (pci_read_vpd(pdev, addr, 4, &data) < 0)
 			return count;
 		*pdata = cpu_to_le32(data);
 	}
@@ -1124,9 +1150,12 @@
 			return pcibios_err_to_errno(ret);
 
 		if (PCI_X_CMD_VERSION(word)) {
-			/* Test for extended capabilities */
-			pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
-			vdev->extended_caps = (dword != 0);
+			if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
+				/* Test for extended capabilities */
+				pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
+						      &dword);
+				vdev->extended_caps = (dword != 0);
+			}
 			return PCI_CAP_PCIX_SIZEOF_V2;
 		} else
 			return PCI_CAP_PCIX_SIZEOF_V0;
@@ -1138,9 +1167,11 @@
 
 		return byte;
 	case PCI_CAP_ID_EXP:
-		/* Test for extended capabilities */
-		pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
-		vdev->extended_caps = (dword != 0);
+		if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
+			/* Test for extended capabilities */
+			pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+			vdev->extended_caps = (dword != 0);
+		}
 
 		/* length based on version */
 		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
@@ -1545,7 +1576,7 @@
 		*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
 	}
 
-	if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX))
+	if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
 		vconfig[PCI_INTERRUPT_PIN] = 0;
 
 	ret = vfio_cap_init(vdev);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e9ea3fe..15ecfc9 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -228,9 +228,9 @@
 
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
-	vfio_intx_set_signal(vdev, -1);
 	vfio_virqfd_disable(&vdev->ctx[0].unmask);
 	vfio_virqfd_disable(&vdev->ctx[0].mask);
+	vfio_intx_set_signal(vdev, -1);
 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
 	vdev->num_ctx = 0;
 	kfree(vdev->ctx);
@@ -401,13 +401,13 @@
 	struct pci_dev *pdev = vdev->pdev;
 	int i;
 
-	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
-
 	for (i = 0; i < vdev->num_ctx; i++) {
 		vfio_virqfd_disable(&vdev->ctx[i].unmask);
 		vfio_virqfd_disable(&vdev->ctx[i].mask);
 	}
 
+	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
 	if (msix) {
 		pci_disable_msix(vdev->pdev);
 		kfree(vdev->msix);
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8a7d546..016c14a 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -83,6 +83,7 @@
 	bool			bardirty;
 	bool			has_vga;
 	bool			needs_reset;
+	bool			nointx;
 	struct pci_saved_state	*pci_saved_state;
 	int			refcnt;
 	struct eventfd_ctx	*err_trigger;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 3054e3f..80378dd 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -331,14 +331,12 @@
 static void tce_iommu_release(void *iommu_data)
 {
 	struct tce_container *container = iommu_data;
-	struct iommu_table_group *table_group;
 	struct tce_iommu_group *tcegrp;
 	long i;
 
 	while (tce_groups_attached(container)) {
 		tcegrp = list_first_entry(&container->group_list,
 				struct tce_iommu_group, next);
-		table_group = iommu_group_get_iommudata(tcegrp->grp);
 		tce_iommu_detach_group(iommu_data, tcegrp->grp);
 	}
 
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 15a6582..2ba1942 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -515,7 +515,7 @@
 			  unsigned long pfn, long npage, int prot)
 {
 	long i;
-	int ret;
+	int ret = 0;
 
 	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
 		ret = iommu_map(domain->domain, iova,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0e6fd55..9d6320e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -333,16 +333,6 @@
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int vhost_scsi_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void vhost_scsi_close_session(struct se_session *se_sess)
-{
-	return;
-}
-
 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
@@ -2114,8 +2104,6 @@
 	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
 	.release_cmd			= vhost_scsi_release_cmd,
 	.check_stop_free		= vhost_scsi_check_stop_free,
-	.shutdown_session		= vhost_scsi_shutdown_session,
-	.close_session			= vhost_scsi_close_session,
 	.sess_get_index			= vhost_scsi_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= vhost_scsi_write_pending,
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 35fe482..60d6c2a 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -162,7 +162,7 @@
 
 static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
 {
-	unsigned int period = pwm_get_period(pchip->pwmd);
+	unsigned int period = pchip->pdata->pwm_period;
 	unsigned int duty = br * period / br_max;
 
 	pwm_config(pchip->pwmd, duty, period);
@@ -424,8 +424,13 @@
 			dev_err(&client->dev, "fail : get pwm device\n");
 			return PTR_ERR(pchip->pwmd);
 		}
+
+		/*
+		 * FIXME: pwm_apply_args() should be removed when switching to
+		 * the atomic PWM API.
+		 */
+		pwm_apply_args(pchip->pwmd);
 	}
-	pchip->pwmd->period = pdata->pwm_period;
 
 	/* interrupt enable  : irq 0 is not allowed */
 	pchip->irq = client->irq;
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index daca9e6..e5b14f5 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -246,6 +246,12 @@
 			return;
 
 		lp->pwm = pwm;
+
+		/*
+		 * FIXME: pwm_apply_args() should be removed when switching to
+		 * the atomic PWM API.
+		 */
+		pwm_apply_args(pwm);
 	}
 
 	pwm_config(lp->pwm, duty, period);
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index 5d583d7..cf869ec 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -145,6 +145,12 @@
 		}
 
 		bl->pwm = pwm;
+
+		/*
+		 * FIXME: pwm_apply_args() should be removed when switching to
+		 * the atomic PWM API.
+		 */
+		pwm_apply_args(pwm);
 	}
 
 	pwm_config(bl->pwm, duty, period);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 64f9e1b..b2b366b 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -201,6 +201,7 @@
 	struct device_node *node = pdev->dev.of_node;
 	struct pwm_bl_data *pb;
 	int initial_blank = FB_BLANK_UNBLANK;
+	struct pwm_args pargs;
 	int ret;
 
 	if (!data) {
@@ -307,16 +308,21 @@
 	dev_dbg(&pdev->dev, "got pwm for backlight\n");
 
 	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to
+	 * the atomic PWM API.
+	 */
+	pwm_apply_args(pb->pwm);
+
+	/*
 	 * The DT case will set the pwm_period_ns field to 0 and store the
 	 * period, parsed from the DT, in the PWM device. For the non-DT case,
 	 * set the period from platform data if it has not already been set
 	 * via the PWM lookup table.
 	 */
-	pb->period = pwm_get_period(pb->pwm);
-	if (!pb->period && (data->pwm_period_ns > 0)) {
+	pwm_get_args(pb->pwm, &pargs);
+	pb->period = pargs.period;
+	if (!pb->period && (data->pwm_period_ns > 0))
 		pb->period = data->pwm_period_ns;
-		pwm_set_period(pb->pwm, data->pwm_period_ns);
-	}
 
 	pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
 
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 57721c7..74b5bca 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -164,7 +164,7 @@
 	.set_page_dirty = fb_deferred_io_set_page_dirty,
 };
 
-static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	vma->vm_ops = &fb_deferred_io_vm_ops;
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
@@ -173,6 +173,7 @@
 	vma->vm_private_data = info;
 	return 0;
 }
+EXPORT_SYMBOL(fb_deferred_io_mmap);
 
 /* workqueue callback */
 static void fb_deferred_io_work(struct work_struct *work)
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d8d583d..c229b1a 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -713,7 +713,7 @@
 
 	if (par->lcdc_clk_rate != lcdc_clk_rate) {
 		ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret) {
 			dev_err(par->dev,
 				"unable to set clock rate at %u\n",
 				lcdc_clk_rate);
@@ -784,7 +784,7 @@
 	int ret = 0;
 
 	ret = da8xx_fb_calc_config_clk_divider(par, panel);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret) {
 		dev_err(par->dev, "unable to configure clock\n");
 		return ret;
 	}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index 8ea531d..bbfe7e2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -51,8 +51,8 @@
 {
 	void __iomem *base = core->base;
 	const unsigned long long iclk = 266000000;	/* DSS L3 ICLK */
-	const unsigned ss_scl_high = 4000;		/* ns */
-	const unsigned ss_scl_low = 4700;		/* ns */
+	const unsigned ss_scl_high = 4600;		/* ns */
+	const unsigned ss_scl_low = 5400;		/* ns */
 	const unsigned fs_scl_high = 600;		/* ns */
 	const unsigned fs_scl_low = 1300;		/* ns */
 	const unsigned sda_hold = 1000;			/* ns */
@@ -442,7 +442,7 @@
 
 	c = (ptr[1] >> 6) & 0x3;
 	m = (ptr[1] >> 4) & 0x3;
-	r = (ptr[1] >> 0) & 0x3;
+	r = (ptr[1] >> 0) & 0xf;
 
 	itc = (ptr[2] >> 7) & 0x1;
 	ec = (ptr[2] >> 4) & 0x7;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 21dafe5..a9c45c8 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -286,6 +286,7 @@
 {
 	int ret;
 	u32 precharge, dclk, com_invdir, compins;
+	struct pwm_args pargs;
 
 	if (par->device_info->need_pwm) {
 		par->pwm = pwm_get(&par->client->dev, NULL);
@@ -294,7 +295,15 @@
 			return PTR_ERR(par->pwm);
 		}
 
-		par->pwm_period = pwm_get_period(par->pwm);
+		/*
+		 * FIXME: pwm_apply_args() should be removed when switching to
+		 * the atomic PWM API.
+		 */
+		pwm_apply_args(par->pwm);
+
+		pwm_get_args(par->pwm, &pargs);
+
+		par->pwm_period = pargs.period;
 		/* Enable the PWM */
 		pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
 		pwm_enable(par->pwm);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7b6d74f..476c0e3 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@
 
 	/* The array of pfns we tell the Host about. */
 	unsigned int num_pfns;
-	u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
+	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
 
 	/* Memory statistics */
 	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
@@ -127,14 +127,16 @@
 
 }
 
-static void set_page_pfns(u32 pfns[], struct page *page)
+static void set_page_pfns(struct virtio_balloon *vb,
+			  __virtio32 pfns[], struct page *page)
 {
 	unsigned int i;
 
 	/* Set balloon pfns pointing at this page.
 	 * Note that the first pfn points at start of the page. */
 	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
-		pfns[i] = page_to_balloon_pfn(page) + i;
+		pfns[i] = cpu_to_virtio32(vb->vdev,
+					  page_to_balloon_pfn(page) + i);
 }
 
 static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
@@ -158,7 +160,7 @@
 			msleep(200);
 			break;
 		}
-		set_page_pfns(vb->pfns + vb->num_pfns, page);
+		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
 		if (!virtio_has_feature(vb->vdev,
 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
@@ -177,10 +179,12 @@
 static void release_pages_balloon(struct virtio_balloon *vb)
 {
 	unsigned int i;
+	struct page *page;
 
 	/* Find pfns pointing at start of each page, get pages and free them. */
 	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-		struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+		page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
+							   vb->pfns[i]));
 		if (!virtio_has_feature(vb->vdev,
 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
 			adjust_managed_page_count(page, 1);
@@ -203,7 +207,7 @@
 		page = balloon_page_dequeue(vb_dev_info);
 		if (!page)
 			break;
-		set_page_pfns(vb->pfns + vb->num_pfns, page);
+		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
 	}
 
@@ -471,13 +475,13 @@
 	__count_vm_event(BALLOON_MIGRATE);
 	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
-	set_page_pfns(vb->pfns, newpage);
+	set_page_pfns(vb, vb->pfns, newpage);
 	tell_host(vb, vb->inflate_vq);
 
 	/* balloon's page migration 2nd step -- deflate "page" */
 	balloon_page_delete(page);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
-	set_page_pfns(vb->pfns, page);
+	set_page_pfns(vb, vb->pfns, page);
 	tell_host(vb, vb->deflate_vq);
 
 	mutex_unlock(&vb->balloon_lock);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5b45e27..b54f26c 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -661,6 +661,14 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called atlas7_wdt.
 
+config RENESAS_WDT
+	tristate "Renesas WDT Watchdog"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	select WATCHDOG_CORE
+	help
+	  This driver adds watchdog support for the integrated watchdogs in the
+	  Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 9bde095..a46e7c1 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -73,6 +73,7 @@
 obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
 obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
 obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o
+obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 0200768..71ee079 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -611,9 +611,7 @@
 	}
 
 	if (p->broken) {
-		init_timer(&cpwd_timer);
-		cpwd_timer.function	= cpwd_brokentimer;
-		cpwd_timer.data		= (unsigned long) p;
+		setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p);
 		cpwd_timer.expires	= WD_BTIMEOUT;
 
 		pr_info("PLD defect workaround enabled for model %s\n",
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 016bd93..d4ba262 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -38,7 +38,7 @@
 
 #define SIO_F71808FG_LD_WDT	0x07	/* Watchdog timer logical device */
 #define SIO_UNLOCK_KEY		0x87	/* Key to enable Super-I/O */
-#define SIO_LOCK_KEY		0xAA	/* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */
 
 #define SIO_REG_LDSEL		0x07	/* Logical device select */
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
@@ -59,6 +59,7 @@
 #define SIO_F71869A_ID		0x1007	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F71889_ID		0x0723	/* Chipset ID */
+#define SIO_F81865_ID		0x0704	/* Chipset ID */
 
 #define F71808FG_REG_WDO_CONF		0xf0
 #define F71808FG_REG_WDT_CONF		0xf5
@@ -66,11 +67,14 @@
 
 #define F71808FG_FLAG_WDOUT_EN		7
 
-#define F71808FG_FLAG_WDTMOUT_STS	5
+#define F71808FG_FLAG_WDTMOUT_STS	6
 #define F71808FG_FLAG_WD_EN		5
 #define F71808FG_FLAG_WD_PULSE		4
 #define F71808FG_FLAG_WD_UNIT		3
 
+#define F81865_REG_WDO_CONF		0xfa
+#define F81865_FLAG_WDOUT_EN		0
+
 /* Default values */
 #define WATCHDOG_TIMEOUT	60	/* 1 minute default timeout */
 #define WATCHDOG_MAX_TIMEOUT	(60 * 255)
@@ -112,7 +116,7 @@
 MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
 	" given initial timeout. Zero (default) disables this feature.");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865 };
 
 static const char *f71808e_names[] = {
 	"f71808fg",
@@ -121,6 +125,7 @@
 	"f71869",
 	"f71882fg",
 	"f71889fg",
+	"f81865",
 };
 
 /* Super-I/O Function prototypes */
@@ -360,6 +365,11 @@
 			superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
 		break;
 
+	case f81865:
+		/* Set pin 70 to WDTRST# */
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5);
+		break;
+
 	default:
 		/*
 		 * 'default' label to shut up the compiler and catch
@@ -371,8 +381,13 @@
 
 	superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
 	superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0);
-	superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
-			F71808FG_FLAG_WDOUT_EN);
+
+	if (watchdog.type == f81865)
+		superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF,
+				F81865_FLAG_WDOUT_EN);
+	else
+		superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
+				F71808FG_FLAG_WDOUT_EN);
 
 	superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
 			F71808FG_FLAG_WD_EN);
@@ -655,7 +670,7 @@
 	superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
 
 	wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
-	watchdog.caused_reboot = wdt_conf & F71808FG_FLAG_WDTMOUT_STS;
+	watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
 
 	superio_exit(sioaddr);
 
@@ -770,6 +785,9 @@
 		/* Confirmed (by datasheet) not to have a watchdog. */
 		err = -ENODEV;
 		goto exit;
+	case SIO_F81865_ID:
+		watchdog.type = f81865;
+		break;
 	default:
 		pr_info("Unrecognized Fintek device: %04x\n",
 			(unsigned int)devid);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 331aed8..62f346b 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -37,6 +37,8 @@
 
 #define IMX2_WDT_WCR		0x00		/* Control Register */
 #define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WDA	(1 << 5)	/* -> External Reset WDOG_B */
+#define IMX2_WDT_WCR_SRS	(1 << 4)	/* -> Software Reset Signal */
 #define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
 #define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */
 #define IMX2_WDT_WCR_WDZST	(1 << 0)	/* -> Watchdog timer Suspend */
@@ -59,6 +61,7 @@
 	struct clk *clk;
 	struct regmap *regmap;
 	struct watchdog_device wdog;
+	bool ext_reset;
 };
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -83,6 +86,12 @@
 	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
 	unsigned int wcr_enable = IMX2_WDT_WCR_WDE;
 
+	/* Use internal reset or external - not both */
+	if (wdev->ext_reset)
+		wcr_enable |= IMX2_WDT_WCR_SRS; /* do not assert int reset */
+	else
+		wcr_enable |= IMX2_WDT_WCR_WDA; /* do not assert ext-reset */
+
 	/* Assert SRS signal */
 	regmap_write(wdev->regmap, IMX2_WDT_WCR, wcr_enable);
 	/*
@@ -112,8 +121,12 @@
 	val |= IMX2_WDT_WCR_WDZST;
 	/* Strip the old watchdog Time-Out value */
 	val &= ~IMX2_WDT_WCR_WT;
-	/* Generate reset if WDOG times out */
-	val &= ~IMX2_WDT_WCR_WRE;
+	/* Generate internal chip-level reset if WDOG times out */
+	if (!wdev->ext_reset)
+		val &= ~IMX2_WDT_WCR_WRE;
+	/* Or, if external reset is requested, assert WDOG_B only on time-out */
+	else
+		val |= IMX2_WDT_WCR_WRE;
 	/* Keep Watchdog Disabled */
 	val &= ~IMX2_WDT_WCR_WDE;
 	/* Set the watchdog's Time-Out value */
@@ -230,6 +243,8 @@
 	regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
 	wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
 
+	wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
+						"fsl,ext-reset-output");
 	wdog->timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
 	if (wdog->timeout != timeout)
 		dev_warn(&pdev->dev, "Initial timeout out of range! Clamped from %u to %u\n",
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 6a7d5c3..c8d51dd 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -160,10 +160,8 @@
 
 	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct jz4740_wdt_drvdata),
 			       GFP_KERNEL);
-	if (!drvdata) {
-		dev_err(&pdev->dev, "Unable to alloacate watchdog device\n");
+	if (!drvdata)
 		return -ENOMEM;
-	}
 
 	if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
 		heartbeat = DEFAULT_HEARTBEAT;
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 14521c8..b55981f 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -431,7 +431,7 @@
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		octeon_wdt_disable_interrupt(cpu);
 		break;
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 20563cc..a043fa4 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -21,6 +21,7 @@
 
 #define WDT_RST		0x38
 #define WDT_EN		0x40
+#define WDT_STS		0x44
 #define WDT_BITE_TIME	0x5C
 
 struct qcom_wdt {
@@ -108,7 +109,8 @@
 static const struct watchdog_info qcom_wdt_info = {
 	.options	= WDIOF_KEEPALIVEPING
 			| WDIOF_MAGICCLOSE
-			| WDIOF_SETTIMEOUT,
+			| WDIOF_SETTIMEOUT
+			| WDIOF_CARDRESET,
 	.identity	= KBUILD_MODNAME,
 };
 
@@ -171,6 +173,9 @@
 	wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
 	wdt->wdd.parent = &pdev->dev;
 
+	if (readl(wdt->base + WDT_STS) & 1)
+		wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
 	/*
 	 * If 'timeout-sec' unspecified in devicetree, assume a 30 second
 	 * default, unless the max timeout is less than 30 seconds, then use
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
new file mode 100644
index 0000000..cf61c92
--- /dev/null
+++ b/drivers/watchdog/renesas_wdt.c
@@ -0,0 +1,213 @@
+/*
+ * Watchdog driver for Renesas WDT watchdog
+ *
+ * Copyright (C) 2015-16 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/watchdog.h>
+
+#define RWTCNT		0
+#define RWTCSRA		4
+#define RWTCSRA_WOVF	BIT(4)
+#define RWTCSRA_WRFLG	BIT(5)
+#define RWTCSRA_TME	BIT(7)
+
+#define RWDT_DEFAULT_TIMEOUT 60U
+
+static const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024 };
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rwdt_priv {
+	void __iomem *base;
+	struct watchdog_device wdev;
+	struct clk *clk;
+	unsigned int clks_per_sec;
+	u8 cks;
+};
+
+static void rwdt_write(struct rwdt_priv *priv, u32 val, unsigned int reg)
+{
+	if (reg == RWTCNT)
+		val |= 0x5a5a0000;
+	else
+		val |= 0xa5a5a500;
+
+	writel_relaxed(val, priv->base + reg);
+}
+
+static int rwdt_init_timeout(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	rwdt_write(priv, 65536 - wdev->timeout * priv->clks_per_sec, RWTCNT);
+
+	return 0;
+}
+
+static int rwdt_start(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	clk_prepare_enable(priv->clk);
+
+	rwdt_write(priv, priv->cks, RWTCSRA);
+	rwdt_init_timeout(wdev);
+
+	while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
+		cpu_relax();
+
+	rwdt_write(priv, priv->cks | RWTCSRA_TME, RWTCSRA);
+
+	return 0;
+}
+
+static int rwdt_stop(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	rwdt_write(priv, priv->cks, RWTCSRA);
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev)
+{
+	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+	u16 val = readw_relaxed(priv->base + RWTCNT);
+
+	return DIV_ROUND_CLOSEST(65536 - val, priv->clks_per_sec);
+}
+
+static const struct watchdog_info rwdt_ident = {
+	.options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+	.identity = "Renesas WDT Watchdog",
+};
+
+static const struct watchdog_ops rwdt_ops = {
+	.owner = THIS_MODULE,
+	.start = rwdt_start,
+	.stop = rwdt_stop,
+	.ping = rwdt_init_timeout,
+	.get_timeleft = rwdt_get_timeleft,
+};
+
+static int rwdt_probe(struct platform_device *pdev)
+{
+	struct rwdt_priv *priv;
+	struct resource *res;
+	unsigned long rate;
+	unsigned int clks_per_sec;
+	int ret, i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	rate = clk_get_rate(priv->clk);
+	if (!rate)
+		return -ENOENT;
+
+	for (i = ARRAY_SIZE(clk_divs) - 1; i >= 0; i--) {
+		clks_per_sec = DIV_ROUND_UP(rate, clk_divs[i]);
+		if (clks_per_sec) {
+			priv->clks_per_sec = clks_per_sec;
+			priv->cks = i;
+			break;
+		}
+	}
+
+	if (!clks_per_sec) {
+		dev_err(&pdev->dev, "Can't find suitable clock divider\n");
+		return -ERANGE;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	priv->wdev.info = &rwdt_ident,
+	priv->wdev.ops = &rwdt_ops,
+	priv->wdev.parent = &pdev->dev;
+	priv->wdev.min_timeout = 1;
+	priv->wdev.max_timeout = 65536 / clks_per_sec;
+	priv->wdev.timeout = min(priv->wdev.max_timeout, RWDT_DEFAULT_TIMEOUT);
+
+	platform_set_drvdata(pdev, priv);
+	watchdog_set_drvdata(&priv->wdev, priv);
+	watchdog_set_nowayout(&priv->wdev, nowayout);
+
+	/* This overrides the default timeout only if DT configuration was found */
+	ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
+	if (ret)
+		dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n");
+
+	ret = watchdog_register_device(&priv->wdev);
+	if (ret < 0) {
+		pm_runtime_put(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rwdt_remove(struct platform_device *pdev)
+{
+	struct rwdt_priv *priv = platform_get_drvdata(pdev);
+
+	watchdog_unregister_device(&priv->wdev);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+/*
+ * This driver is also suitable for the R-Car Gen2 (r8a779[0-4]) WDT. However, for SMP
+ * to work there, one also needs a RESET (RST) driver which does not exist yet
+ * due to HW issues. This needs to be solved before adding compatibles here.
+ */
+static const struct of_device_id rwdt_ids[] = {
+	{ .compatible = "renesas,rcar-gen3-wdt", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rwdt_ids);
+
+static struct platform_driver rwdt_driver = {
+	.driver = {
+		.name = "renesas_wdt",
+		.of_match_table = rwdt_ids,
+	},
+	.probe = rwdt_probe,
+	.remove = rwdt_remove,
+};
+module_platform_driver(rwdt_driver);
+
+MODULE_DESCRIPTION("Renesas WDT Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f908121..517a733 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -275,9 +275,7 @@
 		return rc;
 	}
 
-	init_timer(&wdt->timer);
-	wdt->timer.function	= sh_wdt_ping;
-	wdt->timer.data		= (unsigned long)wdt;
+	setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
 	wdt->timer.expires	= next_ping_period(clock_division_ratio);
 
 	dev_info(&pdev->dev, "initialized.\n");
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 6467b91..028618c 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -73,6 +73,13 @@
 /*
  * Some TCO specific functions
  */
+
+static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
+{
+	return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+	       dev->revision < 0x40;
+}
+
 static void tco_timer_start(void)
 {
 	u32 val;
@@ -129,7 +136,7 @@
 {
 	int val;
 
-	if (sp5100_tco_pci->revision >= 0x40) {
+	if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
 		/* For SB800 or later */
 		/* Set the Watchdog timer resolution to 1 sec */
 		outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
@@ -342,8 +349,7 @@
 	/*
 	 * Determine type of southbridge chipset.
 	 */
-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
-	    sp5100_tco_pci->revision < 0x40) {
+	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
 		dev_name = SP5100_DEVNAME;
 		index_reg = SP5100_IO_PM_INDEX_REG;
 		data_reg = SP5100_IO_PM_DATA_REG;
@@ -388,8 +394,7 @@
 	 * Secondly, Find the watchdog timer MMIO address
 	 * from SBResource_MMIO register.
 	 */
-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
-	    sp5100_tco_pci->revision < 0x40) {
+	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
 		/* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
 		pci_read_config_dword(sp5100_tco_pci,
 				      SP5100_SB_RESOURCE_MMIO_BASE, &val);
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 981a668..7c3ba58 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -104,7 +104,7 @@
  * timeout module parameter (if it is valid value) or the timeout-sec property
  * (only if it is a valid value and the timeout_parm is out of bounds).
  * If none of them are valid then we keep the old value (which should normally
- * be the default timeout value.
+ * be the default timeout value).
  *
  * A zero is returned on success and -EINVAL for failure.
  */
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index e2c5abb..3595cff 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -736,7 +736,6 @@
 		watchdog_ping(wdd);
 	}
 
-	cancel_delayed_work_sync(&wd_data->work);
 	watchdog_update_worker(wdd);
 
 	/* make sure that /dev/watchdog can be re-opened */
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9b7a35c..030e91b 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -8,6 +8,7 @@
 CFLAGS_features.o			:= $(nostackp)
 
 CFLAGS_efi.o				+= -fshort-wchar
+LDFLAGS					+= $(call ld-option, --no-wchar-size-warning)
 
 dom0-$(CONFIG_PCI) += pci.o
 dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index cb7138c..71d49a9 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -487,7 +487,8 @@
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
+	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+	    likely(!irqd_irq_disabled(data))) {
 		int masked = test_and_set_mask(evtchn);
 
 		clear_evtchn(evtchn);
@@ -1370,7 +1371,8 @@
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
+	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+	    likely(!irqd_irq_disabled(data))) {
 		int masked = test_and_set_mask(evtchn);
 
 		clear_evtchn(evtchn);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index dc49538..6793957 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -748,7 +748,7 @@
 	return rc;
 }
 
-#define GNTDEV_COPY_BATCH 24
+#define GNTDEV_COPY_BATCH 16
 
 struct gntdev_copy_batch {
 	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ff93262..d6950e0 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1399,15 +1399,6 @@
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int scsiback_shutdown_session(struct se_session *se_sess)
-{
-	return 0;
-}
-
-static void scsiback_close_session(struct se_session *se_sess)
-{
-}
-
 static u32 scsiback_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
@@ -1841,8 +1832,6 @@
 	.tpg_get_inst_index		= scsiback_tpg_get_inst_index,
 	.check_stop_free		= scsiback_check_stop_free,
 	.release_cmd			= scsiback_release_cmd,
-	.shutdown_session		= scsiback_shutdown_session,
-	.close_session			= scsiback_close_session,
 	.sess_get_index			= scsiback_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= scsiback_write_pending,
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index eb3589e..0576eae 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -239,13 +239,13 @@
 }
 
 static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
-			      struct dentry *dentry, const char *name,
-			      const void *value, size_t size, int flags)
+			      struct dentry *dentry, struct inode *inode,
+			      const char *name, const void *value,
+			      size_t size, int flags)
 {
 	int retval;
 	struct posix_acl *acl;
 	struct v9fs_session_info *v9ses;
-	struct inode *inode = d_inode(dentry);
 
 	v9ses = v9fs_dentry2v9ses(dentry);
 	/*
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 18c62ba..a6bd349 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -147,8 +147,9 @@
 }
 
 static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
-				  struct dentry *dentry, const char *name,
-				  const void *value, size_t size, int flags)
+				  struct dentry *dentry, struct inode *inode,
+				  const char *name, const void *value,
+				  size_t size, int flags)
 {
 	const char *full_name = xattr_full_name(handler, name);
 
diff --git a/fs/Kconfig b/fs/Kconfig
index 6725f59..b8fcb41 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -52,6 +52,7 @@
 	depends on FS_DAX
 	depends on ZONE_DEVICE
 	depends on TRANSPARENT_HUGEPAGE
+	depends on BROKEN
 
 endif # BLOCK
 
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 2d0cbbd..72c0335 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -1,6 +1,7 @@
 config BINFMT_ELF
 	bool "Kernel support for ELF binaries"
 	depends on MMU && (BROKEN || !FRV)
+	select ELFCORE
 	default y
 	---help---
 	  ELF (Executable and Linkable Format) is a format for libraries and
@@ -26,6 +27,7 @@
 config COMPAT_BINFMT_ELF
 	bool
 	depends on COMPAT && BINFMT_ELF
+	select ELFCORE
 
 config ARCH_BINFMT_ELF_STATE
 	bool
@@ -34,6 +36,7 @@
 	bool "Kernel support for FDPIC ELF binaries"
 	default y
 	depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X)
+	select ELFCORE
 	help
 	  ELF FDPIC binaries are based on ELF, but allow the individual load
 	  segments of a binary to be located in memory independently of each
@@ -43,6 +46,11 @@
 
 	  It is also possible to run FDPIC ELF binaries on MMU linux also.
 
+config ELFCORE
+	bool
+	help
+	  This option enables kernel/elfcore.o.
+
 config CORE_DUMP_DEFAULT_ELF_HEADERS
 	bool "Write ELF core dumps with partial segments"
 	default y
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 2a6713b..d638486 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -528,7 +528,7 @@
 	char			*prefix = NULL;
 
 	new_opts = kstrdup(data, GFP_KERNEL);
-	if (!new_opts)
+	if (data && !new_opts)
 		return -ENOMEM;
 
 	pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
@@ -546,7 +546,8 @@
 	}
 
 	flush_delayed_work(&sbi->sb_work);
-	replace_mount_options(sb, new_opts);
+	if (new_opts)
+		replace_mount_options(sb, new_opts);
 
 	sbi->s_flags = mount_flags;
 	sbi->s_mode  = mode;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 65de439..14d506e 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -643,10 +643,6 @@
 		return 0;
 
 	result = generic_file_write_iter(iocb, from);
-	if (IS_ERR_VALUE(result)) {
-		_leave(" = %zd", result);
-		return result;
-	}
 
 	_leave(" = %zd", result);
 	return result;
diff --git a/fs/aio.c b/fs/aio.c
index a6deaa7..fb8e45b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -496,7 +496,12 @@
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
 	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem)) {
+		ctx->mmap_size = 0;
+		aio_free_ring(ctx);
+		return -EINTR;
+	}
+
 	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
 				       PROT_READ | PROT_WRITE,
 				       MAP_SHARED, 0, &unused);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 72e35b7..3ba385e 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -100,8 +100,8 @@
 	return -EIO;
 }
 
-static int bad_inode_setxattr(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags)
+static int bad_inode_setxattr(struct dentry *dentry, struct inode *inode,
+		const char *name, const void *value, size_t size, int flags)
 {
 	return -EIO;
 }
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index dde0b79..af1bc19 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -48,7 +48,7 @@
 befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
 		     befs_off_t pos, uint * off)
 {
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
 	befs_block_run run;
 	befs_blocknr_t block;	/* block coresponding to pos */
 
@@ -127,7 +127,7 @@
 {
 	befs_off_t bytes_read = 0;	/* bytes readed */
 	u16 plen;
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
 	befs_debug(sb, "---> %s length: %llu", __func__, len);
 
 	while (bytes_read < len) {
@@ -429,7 +429,7 @@
 	struct buffer_head *dbl_indir_block;
 	struct buffer_head *indir_block;
 	befs_block_run indir_run;
-	befs_disk_inode_addr *iaddr_array = NULL;
+	befs_disk_inode_addr *iaddr_array;
 	struct befs_sb_info *befs_sb = BEFS_SB(sb);
 
 	befs_blocknr_t indir_start_blk =
@@ -488,7 +488,6 @@
 	iaddr_array = (befs_disk_inode_addr *) dbl_indir_block->b_data;
 	indir_run = fsrun_to_cpu(sb, iaddr_array[dbl_block_indx]);
 	brelse(dbl_indir_block);
-	iaddr_array = NULL;
 
 	/* Read indirect block */
 	which_block = indir_indx / befs_iaddrs_per_block(sb);
@@ -513,7 +512,6 @@
 	iaddr_array = (befs_disk_inode_addr *) indir_block->b_data;
 	*run = fsrun_to_cpu(sb, iaddr_array[block_indx]);
 	brelse(indir_block);
-	iaddr_array = NULL;
 
 	blockno_at_run_start = indir_start_blk;
 	blockno_at_run_start += diblklen * dblindir_indx;
diff --git a/fs/befs/io.c b/fs/befs/io.c
index 7a5b4ec..523c8af 100644
--- a/fs/befs/io.c
+++ b/fs/befs/io.c
@@ -26,7 +26,7 @@
 struct buffer_head *
 befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
 {
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
 	befs_blocknr_t block = 0;
 	struct befs_sb_info *befs_sb = BEFS_SB(sb);
 
@@ -63,7 +63,7 @@
 struct buffer_head *
 befs_bread(struct super_block *sb, befs_blocknr_t block)
 {
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
 
 	befs_debug(sb, "---> Enter %s %lu", __func__, (unsigned long)block);
 
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 71112aa..7da05b1 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -155,7 +155,7 @@
 static struct dentry *
 befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
 {
-	struct inode *inode = NULL;
+	struct inode *inode;
 	struct super_block *sb = dir->i_sb;
 	const befs_data_stream *ds = &BEFS_I(dir)->i_data.ds;
 	befs_off_t offset;
@@ -294,10 +294,10 @@
 
 static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
 {
-	struct buffer_head *bh = NULL;
-	befs_inode *raw_inode = NULL;
+	struct buffer_head *bh;
+	befs_inode *raw_inode;
 	struct befs_sb_info *befs_sb = BEFS_SB(sb);
-	struct befs_inode_info *befs_ino = NULL;
+	struct befs_inode_info *befs_ino;
 	struct inode *inode;
 	long ret = -EIO;
 
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 4c55668..ae1b540 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -127,12 +127,8 @@
 {
 	start = PAGE_ALIGN(start);
 	end = PAGE_ALIGN(end);
-	if (end > start) {
-		unsigned long addr;
-		addr = vm_brk(start, end - start);
-		if (BAD_ADDR(addr))
-			return addr;
-	}
+	if (end > start)
+		return vm_brk(start, end - start);
 	return 0;
 }
 
@@ -275,7 +271,7 @@
 		map_size = ex.a_text+ex.a_data;
 #endif
 		error = vm_brk(text_addr & PAGE_MASK, map_size);
-		if (error != (text_addr & PAGE_MASK))
+		if (error)
 			return error;
 
 		error = read_code(bprm->file, text_addr, pos,
@@ -297,7 +293,10 @@
 		}
 
 		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
-			vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
+			error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
+			if (error)
+				return error;
+
 			read_code(bprm->file, N_TXTADDR(ex), fd_offset,
 				  ex.a_text + ex.a_data);
 			goto beyond_if;
@@ -378,8 +377,10 @@
 			       "N_TXTOFF is not page aligned. Please convert library: %pD\n",
 			       file);
 		}
-		vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
-		
+		retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
+		if (retval)
+			goto out;
+
 		read_code(file, start_addr, N_TXTOFF(ex),
 			  ex.a_text + ex.a_data);
 		retval = 0;
@@ -397,9 +398,8 @@
 	len = PAGE_ALIGN(ex.a_text + ex.a_data);
 	bss = ex.a_text + ex.a_data + ex.a_bss;
 	if (bss > len) {
-		error = vm_brk(start_addr + len, bss - len);
-		retval = error;
-		if (error != start_addr + len)
+		retval = vm_brk(start_addr + len, bss - len);
+		if (retval)
 			goto out;
 	}
 	retval = 0;
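
The binfmt_aout hunks above all rely on the same convention change: vm_brk() is treated as returning 0 on success or a negative errno rather than the mapped address. A minimal sketch of a caller under that assumption (the helper name is illustrative, not part of the patch):

static int example_map_bss(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	/* vm_brk() is assumed to return 0 or -errno, as the hunks above expect. */
	if (end > start)
		return vm_brk(start, end - start);
	return 0;
}
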
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 56224ff..a7a28110 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -96,10 +96,9 @@
 	start = ELF_PAGEALIGN(start);
 	end = ELF_PAGEALIGN(end);
 	if (end > start) {
-		unsigned long addr;
-		addr = vm_brk(start, end - start);
-		if (BAD_ADDR(addr))
-			return addr;
+		int error = vm_brk(start, end - start);
+		if (error)
+			return error;
 	}
 	current->mm->start_brk = current->mm->brk = end;
 	return 0;
@@ -629,7 +628,7 @@
 
 		/* Map the last of the bss segment */
 		error = vm_brk(elf_bss, last_bss - elf_bss);
-		if (BAD_ADDR(error))
+		if (error)
 			goto out;
 	}
 
@@ -1176,8 +1175,11 @@
 	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
 			    ELF_MIN_ALIGN - 1);
 	bss = eppnt->p_memsz + eppnt->p_vaddr;
-	if (bss > len)
-		vm_brk(len, bss - len);
+	if (bss > len) {
+		error = vm_brk(len, bss - len);
+		if (error)
+			goto out_free_ph;
+	}
 	error = 0;
 
 out_free_ph:
@@ -2273,7 +2275,7 @@
 		goto end_coredump;
 
 	/* Align to page */
-	if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+	if (!dump_skip(cprm, dataoff - cprm->pos))
 		goto end_coredump;
 
 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 71ade0e..2035893 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1787,7 +1787,7 @@
 				goto end_coredump;
 	}
 
-	if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+	if (!dump_skip(cprm, dataoff - cprm->pos))
 		goto end_coredump;
 
 	if (!elf_fdpic_dump_segments(cprm))
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index f723cd3..caf9e39 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -337,7 +337,7 @@
 					"(%d != %d)", (unsigned) r, curid, id);
 			goto failed;
 		} else if ( ! p->lib_list[id].loaded &&
-				IS_ERR_VALUE(load_flat_shared_library(id, p))) {
+				load_flat_shared_library(id, p) < 0) {
 			printk("BINFMT_FLAT: failed to load library %d", id);
 			goto failed;
 		}
@@ -837,7 +837,7 @@
 
 	res = prepare_binprm(&bprm);
 
-	if (!IS_ERR_VALUE(res))
+	if (!res)
 		res = load_flat_file(&bprm, libs, id, NULL);
 
 	abort_creds(bprm.cred);
@@ -883,7 +883,7 @@
 	stack_len += FLAT_STACK_ALIGN - 1;  /* reserve for upcoming alignment */
 	
 	res = load_flat_file(bprm, &libinfo, 0, &stack_len);
-	if (IS_ERR_VALUE(res))
+	if (res < 0)
 		return res;
 	
 	/* Update data segment pointers for all libraries */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index a063d4d..71ccab1 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -29,6 +29,7 @@
 #include <linux/log2.h>
 #include <linux/cleancache.h>
 #include <linux/dax.h>
+#include <linux/badblocks.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -50,6 +51,18 @@
 }
 EXPORT_SYMBOL(I_BDEV);
 
+void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
+	va_end(args);
+}
+
 static void bdev_write_inode(struct block_device *bdev)
 {
 	struct inode *inode = bdev->bd_inode;
@@ -488,7 +501,7 @@
 	sector += get_start_sect(bdev);
 	if (sector % (PAGE_SIZE / 512))
 		return -EINVAL;
-	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn);
+	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
 	if (!avail)
 		return -ERANGE;
 	if (avail > 0 && avail & ~PAGE_MASK)
@@ -497,6 +510,75 @@
 }
 EXPORT_SYMBOL_GPL(bdev_direct_access);
 
+/**
+ * bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @sb: The superblock of the device
+ * @blocksize: The block size of the device
+ *
+ * This is a library function for filesystems to check if the block device
+ * can be mounted with the dax option.
+ *
+ * Return: negative errno if unsupported, 0 if supported.
+ */
+int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+	struct blk_dax_ctl dax = {
+		.sector = 0,
+		.size = PAGE_SIZE,
+	};
+	int err;
+
+	if (blocksize != PAGE_SIZE) {
+		vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
+		return -EINVAL;
+	}
+
+	err = bdev_direct_access(sb->s_bdev, &dax);
+	if (err < 0) {
+		switch (err) {
+		case -EOPNOTSUPP:
+			vfs_msg(sb, KERN_ERR,
+				"error: device does not support dax");
+			break;
+		case -EINVAL:
+			vfs_msg(sb, KERN_ERR,
+				"error: unaligned partition for dax");
+			break;
+		default:
+			vfs_msg(sb, KERN_ERR,
+				"error: dax access failed (%d)", err);
+		}
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bdev_dax_supported);
+
+/**
+ * bdev_dax_capable() - Return whether the raw device is capable of dax
+ * @bdev: The device for raw block device access
+ */
+bool bdev_dax_capable(struct block_device *bdev)
+{
+	struct blk_dax_ctl dax = {
+		.size = PAGE_SIZE,
+	};
+
+	if (!IS_ENABLED(CONFIG_FS_DAX))
+		return false;
+
+	dax.sector = 0;
+	if (bdev_direct_access(bdev, &dax) < 0)
+		return false;
+
+	dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
+	if (bdev_direct_access(bdev, &dax) < 0)
+		return false;
+
+	return true;
+}
+
 /*
  * pseudo-fs
  */
@@ -1238,7 +1320,7 @@
 
 			if (!ret) {
 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
-				if (!blkdev_dax_capable(bdev))
+				if (!bdev_dax_capable(bdev))
 					bdev->bd_inode->i_flags &= ~S_DAX;
 			}
 
@@ -1275,7 +1357,7 @@
 				goto out_clear;
 			}
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
-			if (!blkdev_dax_capable(bdev))
+			if (!bdev_dax_capable(bdev))
 				bdev->bd_inode->i_flags &= ~S_DAX;
 		}
 	} else {
@@ -1720,79 +1802,13 @@
 	.is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
-#ifdef CONFIG_FS_DAX
-/*
- * In the raw block case we do not need to contend with truncation nor
- * unwritten file extents.  Without those concerns there is no need for
- * additional locking beyond the mmap_sem context that these routines
- * are already executing under.
- *
- * Note, there is no protection if the block device is dynamically
- * resized (partition grow/shrink) during a fault. A stable block device
- * size is already not enforced in the blkdev_direct_IO path.
- *
- * For DAX, it is the responsibility of the block device driver to
- * ensure the whole-disk device size is stable while requests are in
- * flight.
- *
- * Finally, unlike the filemap_page_mkwrite() case there is no
- * filesystem superblock to sync against freezing.  We still include a
- * pfn_mkwrite callback for dax drivers to receive write fault
- * notifications.
- */
-static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	return __dax_fault(vma, vmf, blkdev_get_block, NULL);
-}
-
-static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma,
-		struct vm_fault *vmf)
-{
-	return dax_pfn_mkwrite(vma, vmf);
-}
-
-static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, unsigned int flags)
-{
-	return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
-}
-
-static const struct vm_operations_struct blkdev_dax_vm_ops = {
-	.fault		= blkdev_dax_fault,
-	.pmd_fault	= blkdev_dax_pmd_fault,
-	.pfn_mkwrite	= blkdev_dax_pfn_mkwrite,
-};
-
-static const struct vm_operations_struct blkdev_default_vm_ops = {
-	.fault		= filemap_fault,
-	.map_pages	= filemap_map_pages,
-};
-
-static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	struct inode *bd_inode = bdev_file_inode(file);
-
-	file_accessed(file);
-	if (IS_DAX(bd_inode)) {
-		vma->vm_ops = &blkdev_dax_vm_ops;
-		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
-	} else {
-		vma->vm_ops = &blkdev_default_vm_ops;
-	}
-
-	return 0;
-}
-#else
-#define blkdev_mmap generic_file_mmap
-#endif
-
 const struct file_operations def_blk_fops = {
 	.open		= blkdev_open,
 	.release	= blkdev_close,
 	.llseek		= block_llseek,
 	.read_iter	= blkdev_read_iter,
 	.write_iter	= blkdev_write_iter,
-	.mmap		= blkdev_mmap,
+	.mmap		= generic_file_mmap,
 	.fsync		= blkdev_fsync,
 	.unlocked_ioctl	= block_ioctl,
 #ifdef CONFIG_COMPAT
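
A hedged sketch of how a filesystem mount path might use the helper added above; the function name and the dax_requested flag are illustrative, not taken from any in-tree caller:

static int example_mount_dax_check(struct super_block *sb, int blocksize,
				   bool dax_requested)
{
	if (!dax_requested)
		return 0;

	/* Returns 0 when DAX is usable on sb->s_bdev, a negative errno if not. */
	return bdev_dax_supported(sb, blocksize);
}
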
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index d309018..8bb3509 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1939,7 +1939,7 @@
  * from ipath->fspath->val[i].
  * when it returns, there are ipath->fspath->elem_cnt number of paths available
  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
- * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
  * have been needed to return all paths.
  */
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1da5753..4919aed 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -313,7 +313,7 @@
 	struct bio *dio_bio;
 
 	/*
-	 * The original bio may be splited to several sub-bios, this is
+	 * The original bio may be split to several sub-bios, this is
 	 * done during endio of sub-bios
 	 */
 	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 516e19d..b677a6e 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1939,7 +1939,7 @@
 		/*
 		 * Clear all references of this block. Do not free
 		 * the block itself even if is not referenced anymore
-		 * because it still carries valueable information
+		 * because it still carries valuable information
 		 * like whether it was ever written and IO completed.
 		 */
 		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index decd0a3..427c36b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -156,7 +156,7 @@
 
 		/*
 		 * RCU really hurts here, we could free up the root node because
-		 * it was cow'ed but we may not get the new root node yet so do
+		 * it was COWed but we may not get the new root node yet so do
 		 * the inc_not_zero dance and if it doesn't work then
 		 * synchronize_rcu and try again.
 		 */
@@ -955,7 +955,7 @@
 			      struct extent_buffer *buf)
 {
 	/*
-	 * Tree blocks not in refernece counted trees and tree roots
+	 * Tree blocks not in reference counted trees and tree roots
 	 * are never shared. If a block was allocated after the last
 	 * snapshot and the block was not allocated by tree relocation,
 	 * we know the block is not shared.
@@ -1270,7 +1270,7 @@
 
 /*
  * tm is a pointer to the first operation to rewind within eb. then, all
- * previous operations will be rewinded (until we reach something older than
+ * previous operations will be rewound (until we reach something older than
  * time_seq).
  */
 static void
@@ -1345,7 +1345,7 @@
 }
 
 /*
- * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
  * is returned. If rewind operations happen, a fresh buffer is returned. The
  * returned buffer is always read-locked. If the returned buffer is not the
  * input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@
 	 * 3) the root is not forced COW.
 	 *
 	 * What is forced COW:
-	 *    when we create snapshot during commiting the transaction,
+	 *    when we create snapshot during committing the transaction,
 	 *    after we've finished coping src root, we must COW the shared
 	 *    block to ensure the metadata consistency.
 	 */
@@ -1531,7 +1531,7 @@
 
 /*
  * cows a single block, see __btrfs_cow_block for the real work.
- * This version of it has extra checks so that a block isn't cow'd more than
+ * This version of it has extra checks so that a block isn't COWed more than
  * once per transaction, as long as it hasn't been written yet
  */
 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@
 		btrfs_unlock_up_safe(p, level + 1);
 
 		/*
-		 * Since we can unwind eb's we want to do a real search every
+		 * Since we can unwind ebs we want to do a real search every
 		 * time.
 		 */
 		prev_cmp = -1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ddcc58f..101c3cf 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -89,7 +89,7 @@
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0
 
-/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
 #define REQ_GET_READ_MIRRORS	(1 << 30)
 
 /* ioprio of readahead is set to idle */
@@ -431,7 +431,7 @@
 	 * bytes_pinned does not reflect the bytes that will be pinned once the
 	 * delayed refs are flushed, so this counter is inc'ed every time we
 	 * call btrfs_free_extent so it is a realtime count of what will be
-	 * freed once the transaction is committed.  It will be zero'ed every
+	 * freed once the transaction is committed.  It will be zeroed every
 	 * time the transaction commits.
 	 */
 	struct percpu_counter total_bytes_pinned;
@@ -1401,7 +1401,7 @@
 	token->kaddr = NULL;
 }
 
-/* some macros to generate set/get funcs for the struct fields.  This
+/* some macros to generate set/get functions for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
  */
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index c24b653..5fca953 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -188,7 +188,7 @@
 
 	/*
 	 * To make qgroup to skip given root.
-	 * This is for snapshot, as btrfs_qgroup_inherit() will manully
+	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
 	 * modify counters for snapshot and its source, so we should skip
 	 * the snapshot in new_root/old_roots or it will get calculated twice
 	 */
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 85f12e6..63ef9cd 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -450,7 +450,7 @@
 }
 
 /*
- * blocked until all flighting bios are finished.
+ * blocked until all in-flight bio operations are finished.
  */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 91d1239..6628fca 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -384,7 +384,7 @@
 	/*
 	 * Things reading via commit roots that don't have normal protection,
 	 * like send, can have a really old block in cache that may point at a
-	 * block that has been free'd and re-allocated.  So don't clear uptodate
+	 * block that has been freed and re-allocated.  So don't clear uptodate
 	 * if we find an eb that is under IO (dirty/writeback) because we could
 	 * end up reading in the stale data and then writing it back out and
 	 * making everybody very sad.
@@ -418,7 +418,7 @@
 		/*
 		 * The super_block structure does not span the whole
 		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
-		 * is filled with zeros and is included in the checkum.
+		 * is filled with zeros and is included in the checksum.
 		 */
 		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
 				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
@@ -600,7 +600,7 @@
 
 		/*
 		 * Check to make sure that we don't point outside of the leaf,
-		 * just incase all the items are consistent to eachother, but
+		 * just in case all the items are consistent to each other, but
 		 * all point outside of the leaf.
 		 */
 		if (btrfs_item_end_nr(leaf, slot) >
@@ -3022,7 +3022,7 @@
 	}
 
 	/*
-	 * Mount does not set all options immediatelly, we can do it now and do
+	 * Mount does not set all options immediately, we can do it now and do
 	 * not have to wait for transaction commit
 	 */
 	btrfs_apply_pending_changes(fs_info);
@@ -3255,7 +3255,7 @@
 		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
 				"lost page write due to IO error on %s",
 					  rcu_str_deref(device->name));
-		/* note, we dont' set_buffer_write_io_error because we have
+		/* note, we don't set_buffer_write_io_error because we have
 		 * our own ways of dealing with the IO errors
 		 */
 		clear_buffer_uptodate(bh);
@@ -4367,7 +4367,7 @@
 		if (ret)
 			break;
 
-		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+		clear_extent_bits(dirty_pages, start, end, mark);
 		while (start <= end) {
 			eb = btrfs_find_tree_block(root->fs_info, start);
 			start += root->nodesize;
@@ -4402,7 +4402,7 @@
 		if (ret)
 			break;
 
-		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		clear_extent_dirty(unpin, start, end);
 		btrfs_error_unpin_extent_range(root, start, end);
 		cond_resched();
 	}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9424864..689d25a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -231,9 +231,9 @@
 {
 	u64 end = start + num_bytes - 1;
 	set_extent_bits(&root->fs_info->freed_extents[0],
-			start, end, EXTENT_UPTODATE, GFP_NOFS);
+			start, end, EXTENT_UPTODATE);
 	set_extent_bits(&root->fs_info->freed_extents[1],
-			start, end, EXTENT_UPTODATE, GFP_NOFS);
+			start, end, EXTENT_UPTODATE);
 	return 0;
 }
 
@@ -246,9 +246,9 @@
 	end = start + cache->key.offset - 1;
 
 	clear_extent_bits(&root->fs_info->freed_extents[0],
-			  start, end, EXTENT_UPTODATE, GFP_NOFS);
+			  start, end, EXTENT_UPTODATE);
 	clear_extent_bits(&root->fs_info->freed_extents[1],
-			  start, end, EXTENT_UPTODATE, GFP_NOFS);
+			  start, end, EXTENT_UPTODATE);
 }
 
 static int exclude_super_stripes(struct btrfs_root *root,
@@ -980,7 +980,7 @@
  * event that tree block loses its owner tree's reference and do the
  * back refs conversion.
  *
- * When a tree block is COW'd through a tree, there are four cases:
+ * When a tree block is COWed through a tree, there are four cases:
  *
  * The reference count of the block is one and the tree is the block's
  * owner tree. Nothing to do in this case.
@@ -2042,6 +2042,11 @@
 	struct btrfs_bio *bbio = NULL;
 
 
+	/*
+	 * Avoid races with device replace and make sure our bbio has devices
+	 * associated with its stripes that don't go away while we are discarding.
+	 */
+	btrfs_bio_counter_inc_blocked(root->fs_info);
 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
 			      bytenr, &num_bytes, &bbio, 0);
@@ -2074,6 +2079,7 @@
 		}
 		btrfs_put_bbio(bbio);
 	}
+	btrfs_bio_counter_dec(root->fs_info);
 
 	if (actual_bytes)
 		*actual_bytes = discarded_bytes;
@@ -2595,7 +2601,7 @@
 			}
 
 			/*
-			 * Need to drop our head ref lock and re-aqcuire the
+			 * Need to drop our head ref lock and re-acquire the
 			 * delayed ref lock and then re-check to make sure
 			 * nobody got added.
 			 */
@@ -2747,7 +2753,7 @@
 
 	/*
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
-	 * closer to what we're really going to want to ouse.
+	 * closer to what we're really going to want to use.
 	 */
 	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
@@ -2851,7 +2857,7 @@
 	}
 
 	/*
-	 * trans->sync means that when we call end_transaciton, we won't
+	 * trans->sync means that when we call end_transaction, we won't
 	 * wait on delayed refs
 	 */
 	trans->sync = true;
@@ -4296,7 +4302,7 @@
  * Called if we need to clear a data reservation for this inode
  * Normally in a error case.
  *
- * This one will handle the per-indoe data rsv map for accurate reserved
+ * This one will handle the per-inode data rsv map for accurate reserved
  * space framework.
  */
 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
@@ -4967,7 +4973,7 @@
  * @orig_bytes - the number of bytes we want
  * @flush - whether or not we can flush to make our reservation
  *
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
  * with the block_rsv.  If there is not enough space it will make an attempt to
  * flush out space to make room.  It will do this by flushing delalloc if
  * possible or committing the transaction.  If flush is 0 then no attempts to
@@ -5572,7 +5578,7 @@
  * common file/directory operations, they change two fs/file trees
  * and root tree, the number of items that the qgroup reserves is
  * different with the free space reservation. So we can not use
- * the space reseravtion mechanism in start_transaction().
+ * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 				     struct btrfs_block_rsv *rsv,
@@ -5621,7 +5627,7 @@
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're relaseing.
+ * @num_bytes: the number of bytes we're releasing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written.  This will return the number of
@@ -5647,7 +5653,7 @@
 		drop_inode_space = 1;
 
 	/*
-	 * If we have more or the same amount of outsanding extents than we have
+	 * If we have at least as many outstanding extents as we have
 	 * reserved then we need to leave the reserved extents count alone.
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
@@ -5661,8 +5667,8 @@
 }
 
 /**
- * calc_csum_metadata_size - return the amount of metada space that must be
- *	reserved/free'd for the given bytes.
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ *	reserved/freed for the given bytes.
  * @inode: the inode we're manipulating
  * @num_bytes: the number of bytes in question
  * @reserve: 1 if we are reserving space, 0 if we are freeing space
@@ -5814,7 +5820,7 @@
 
 		/*
 		 * This is tricky, but first we need to figure out how much we
-		 * free'd from any free-ers that occurred during this
+		 * freed from any free-ers that occurred during this
 		 * reservation, so we reset ->csum_bytes to the csum_bytes
 		 * before we dropped our lock, and then call the free for the
 		 * number of bytes that were freed while we were trying our
@@ -5836,7 +5842,7 @@
 
 		/*
 		 * Now reset ->csum_bytes to what it should be.  If bytes is
-		 * more than to_free then we would have free'd more space had we
+		 * more than to_free then we would have freed more space had we
 		 * not had an artificially high ->csum_bytes, so we need to free
 		 * the remainder.  If bytes is the same or less then we don't
 		 * need to do anything, the other free-ers did the correct
@@ -6515,7 +6521,7 @@
 			ret = btrfs_discard_extent(root, start,
 						   end + 1 - start, NULL);
 
-		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		clear_extent_dirty(unpin, start, end);
 		unpin_extent_range(root, start, end, true);
 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 		cond_resched();
@@ -7578,7 +7584,7 @@
 		if (loop == LOOP_CACHING_NOWAIT) {
 			/*
 			 * We want to skip the LOOP_CACHING_WAIT step if we
-			 * don't have any unached bgs and we've alrelady done a
+			 * don't have any uncached bgs and we've already done a
 			 * full search through.
 			 */
 			if (orig_have_caching_bg || !full_search)
@@ -7982,7 +7988,7 @@
 
 	/*
 	 * Mixed block groups will exclude before processing the log so we only
-	 * need to do the exlude dance if this fs isn't mixed.
+	 * need to do the exclude dance if this fs isn't mixed.
 	 */
 	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
@@ -8032,7 +8038,7 @@
 					buf->start + buf->len - 1, GFP_NOFS);
 		else
 			set_extent_new(&root->dirty_log_pages, buf->start,
-					buf->start + buf->len - 1, GFP_NOFS);
+					buf->start + buf->len - 1);
 	} else {
 		buf->log_index = -1;
 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
@@ -9426,7 +9432,7 @@
 	u64 free_bytes = 0;
 	int factor;
 
-	/* It's df, we don't care if it's racey */
+	/* It's df, we don't care if it's racy */
 	if (list_empty(&sinfo->ro_bgs))
 		return 0;
 
@@ -10635,14 +10641,14 @@
 		 */
 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
 		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
-				  EXTENT_DIRTY, GFP_NOFS);
+				  EXTENT_DIRTY);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 			btrfs_dec_block_group_ro(root, block_group);
 			goto end_trans;
 		}
 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
-				  EXTENT_DIRTY, GFP_NOFS);
+				  EXTENT_DIRTY);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 			btrfs_dec_block_group_ro(root, block_group);
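
The btrfs_bio_counter_inc_blocked()/btrfs_bio_counter_dec() pair added around btrfs_map_block() above (and again in the read-repair path in extent_io.c below) follows one bracketing pattern. A condensed, illustrative sketch:

static int example_mapped_write(struct btrfs_fs_info *fs_info, u64 logical,
				u64 *length, struct btrfs_bio **bbio)
{
	int ret;

	/* Keep device replace from removing the stripe devices we are about to use. */
	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_block(fs_info, WRITE, logical, length, bbio, 0);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	/* ... submit I/O against (*bbio)->stripes here ... */

	btrfs_bio_counter_dec(fs_info);
	return 0;
}
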
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2f83448..6e953de 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -726,14 +726,6 @@
 	start = last_end + 1;
 	if (start <= end && state && !need_resched())
 		goto hit_next;
-	goto search_again;
-
-out:
-	spin_unlock(&tree->lock);
-	if (prealloc)
-		free_extent_state(prealloc);
-
-	return 0;
 
 search_again:
 	if (start > end)
@@ -742,6 +734,14 @@
 	if (gfpflags_allow_blocking(mask))
 		cond_resched();
 	goto again;
+
+out:
+	spin_unlock(&tree->lock);
+	if (prealloc)
+		free_extent_state(prealloc);
+
+	return 0;
+
 }
 
 static void wait_on_state(struct extent_io_tree *tree,
@@ -873,8 +873,14 @@
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
 	if (!prealloc && gfpflags_allow_blocking(mask)) {
+		/*
+		 * Don't care for allocation failure here because we might end
+		 * up not needing the pre-allocated extent state at all, which
+		 * is the case if we only have in the tree extent states that
+		 * cover our input range and don't cover any other range.
+		 * If we end up needing a new extent state we allocate it later.
+		 */
 		prealloc = alloc_extent_state(mask);
-		BUG_ON(!prealloc);
 	}
 
 	spin_lock(&tree->lock);
@@ -1037,7 +1043,13 @@
 		goto out;
 	}
 
-	goto search_again;
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	if (gfpflags_allow_blocking(mask))
+		cond_resched();
+	goto again;
 
 out:
 	spin_unlock(&tree->lock);
@@ -1046,13 +1058,6 @@
 
 	return err;
 
-search_again:
-	if (start > end)
-		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
-	goto again;
 }
 
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1073,17 +1078,18 @@
  * @bits:	the bits to set in this range
  * @clear_bits:	the bits to clear in this range
  * @cached_state:	state that we're going to cache
- * @mask:	the allocation mask
  *
  * This will go through and set bits for the given range.  If any states exist
  * already in this range they are set with the given bit and cleared of the
  * clear_bits.  This is only meant to be used by things that are mergeable, ie
  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
  * boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask)
+		       struct extent_state **cached_state)
 {
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
@@ -1098,7 +1104,7 @@
 	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
-	if (!prealloc && gfpflags_allow_blocking(mask)) {
+	if (!prealloc) {
 		/*
 		 * Best effort, don't worry if extent state allocation fails
 		 * here for the first iteration. We might have a cached state
@@ -1106,7 +1112,7 @@
 		 * extent state allocations are needed. We'll only know this
 		 * after locking the tree.
 		 */
-		prealloc = alloc_extent_state(mask);
+		prealloc = alloc_extent_state(GFP_NOFS);
 		if (!prealloc && !first_iteration)
 			return -ENOMEM;
 	}
@@ -1263,7 +1269,13 @@
 		goto out;
 	}
 
-	goto search_again;
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	cond_resched();
+	first_iteration = false;
+	goto again;
 
 out:
 	spin_unlock(&tree->lock);
@@ -1271,21 +1283,11 @@
 		free_extent_state(prealloc);
 
 	return err;
-
-search_again:
-	if (start > end)
-		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
-	first_iteration = false;
-	goto again;
 }
 
 /* wrappers around set/clear extent bit */
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, gfp_t mask,
-			   struct extent_changeset *changeset)
+			   unsigned bits, struct extent_changeset *changeset)
 {
 	/*
 	 * We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1295,7 +1297,7 @@
 	 */
 	BUG_ON(bits & EXTENT_LOCKED);
 
-	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
+	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
 				changeset);
 }
 
@@ -1308,8 +1310,7 @@
 }
 
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			     unsigned bits, gfp_t mask,
-			     struct extent_changeset *changeset)
+		unsigned bits, struct extent_changeset *changeset)
 {
 	/*
 	 * Don't support EXTENT_LOCKED case, same reason as
@@ -1317,7 +1318,7 @@
 	 */
 	BUG_ON(bits & EXTENT_LOCKED);
 
-	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
+	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
 				  changeset);
 }
 
@@ -1975,13 +1976,13 @@
 	set_state_failrec(failure_tree, rec->start, NULL);
 	ret = clear_extent_bits(failure_tree, rec->start,
 				rec->start + rec->len - 1,
-				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+				EXTENT_LOCKED | EXTENT_DIRTY);
 	if (ret)
 		err = ret;
 
 	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
 				rec->start + rec->len - 1,
-				EXTENT_DAMAGED, GFP_NOFS);
+				EXTENT_DAMAGED);
 	if (ret && !err)
 		err = ret;
 
@@ -2024,9 +2025,16 @@
 	bio->bi_iter.bi_size = 0;
 	map_length = length;
 
+	/*
+	 * Avoid races with device replace and make sure our bbio has devices
+	 * associated with its stripes that don't go away while we are doing the
+	 * read repair operation.
+	 */
+	btrfs_bio_counter_inc_blocked(fs_info);
 	ret = btrfs_map_block(fs_info, WRITE, logical,
 			      &map_length, &bbio, mirror_num);
 	if (ret) {
+		btrfs_bio_counter_dec(fs_info);
 		bio_put(bio);
 		return -EIO;
 	}
@@ -2036,6 +2044,7 @@
 	dev = bbio->stripes[mirror_num-1].dev;
 	btrfs_put_bbio(bbio);
 	if (!dev || !dev->bdev || !dev->writeable) {
+		btrfs_bio_counter_dec(fs_info);
 		bio_put(bio);
 		return -EIO;
 	}
@@ -2044,6 +2053,7 @@
 
 	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
 		/* try to remap that extent elsewhere? */
+		btrfs_bio_counter_dec(fs_info);
 		bio_put(bio);
 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 		return -EIO;
@@ -2053,6 +2063,7 @@
 		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
 				  btrfs_ino(inode), start,
 				  rcu_str_deref(dev->name), sector);
+	btrfs_bio_counter_dec(fs_info);
 	bio_put(bio);
 	return 0;
 }
@@ -2232,13 +2243,12 @@
 
 		/* set the bits in the private failure tree */
 		ret = set_extent_bits(failure_tree, start, end,
-					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+					EXTENT_LOCKED | EXTENT_DIRTY);
 		if (ret >= 0)
 			ret = set_state_failrec(failure_tree, start, failrec);
 		/* set the bits in the inode's tree */
 		if (ret >= 0)
-			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
-						GFP_NOFS);
+			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
 		if (ret < 0) {
 			kfree(failrec);
 			return ret;
@@ -4389,8 +4399,12 @@
 	if (ret < 0) {
 		btrfs_free_path(path);
 		return ret;
+	} else {
+		WARN_ON(!ret);
+		if (ret == 1)
+			ret = 0;
 	}
-	WARN_ON(!ret);
+
 	path->slots[0]--;
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = found_key.type;
@@ -4601,7 +4615,7 @@
 		if (mapped)
 			spin_unlock(&page->mapping->private_lock);
 
-		/* One for when we alloced the page */
+		/* One for when we allocated the page */
 		put_page(page);
 	} while (index != 0);
 }
@@ -5761,7 +5775,7 @@
 	struct extent_buffer *eb;
 
 	/*
-	 * We need to make sure noboody is attaching this page to an eb right
+	 * We need to make sure nobody is attaching this page to an eb right
 	 * now.
 	 */
 	spin_lock(&page->mapping->private_lock);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 981f402..1baf19c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -220,8 +220,7 @@
 		   unsigned bits, int filled,
 		   struct extent_state *cached_state);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			     unsigned bits, gfp_t mask,
-			     struct extent_changeset *changeset);
+		unsigned bits, struct extent_changeset *changeset);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		     unsigned bits, int wake, int delete,
 		     struct extent_state **cached, gfp_t mask);
@@ -240,27 +239,27 @@
 }
 
 static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
-		u64 end, unsigned bits, gfp_t mask)
+		u64 end, unsigned bits)
 {
 	int wake = 0;
 
 	if (bits & EXTENT_LOCKED)
 		wake = 1;
 
-	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
+	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
+			GFP_NOFS);
 }
 
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, gfp_t mask,
-			   struct extent_changeset *changeset);
+			   unsigned bits, struct extent_changeset *changeset);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   unsigned bits, u64 *failed_start,
 		   struct extent_state **cached_state, gfp_t mask);
 
 static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
-		u64 end, unsigned bits, gfp_t mask)
+		u64 end, unsigned bits)
 {
-	return set_extent_bit(tree, start, end, bits, NULL, NULL, mask);
+	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
 }
 
 static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -278,37 +277,38 @@
 }
 
 static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
-		u64 end, gfp_t mask)
+		u64 end)
 {
 	return clear_extent_bit(tree, start, end,
 				EXTENT_DIRTY | EXTENT_DELALLOC |
-				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
+				EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
 }
 
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask);
+		       struct extent_state **cached_state);
 
 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-		u64 end, struct extent_state **cached_state, gfp_t mask)
+		u64 end, struct extent_state **cached_state)
 {
 	return set_extent_bit(tree, start, end,
 			      EXTENT_DELALLOC | EXTENT_UPTODATE,
-			      NULL, cached_state, mask);
+			      NULL, cached_state, GFP_NOFS);
 }
 
 static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
-		u64 end, struct extent_state **cached_state, gfp_t mask)
+		u64 end, struct extent_state **cached_state)
 {
 	return set_extent_bit(tree, start, end,
 			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
-			      NULL, cached_state, mask);
+			      NULL, cached_state, GFP_NOFS);
 }
 
 static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
-		u64 end, gfp_t mask)
+		u64 end)
 {
-	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask);
+	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
+			GFP_NOFS);
 }
 
 static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
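
The wrappers above drop their gfp_t argument; allocation context is now fixed to GFP_NOFS inside the helpers, which is why every caller hunk in this series loses its trailing GFP_NOFS. An illustrative call site under the new signatures:

static void example_mark_range_uptodate(struct extent_io_tree *tree,
					u64 start, u64 end)
{
	/* Previously: set_extent_bits(tree, start, end, EXTENT_UPTODATE, GFP_NOFS); */
	set_extent_bits(tree, start, end, EXTENT_UPTODATE);
}
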
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 318b048..e0715fc 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -62,7 +62,7 @@
 
 /**
  * free_extent_map - drop reference count of an extent_map
- * @em:		extent map being releasead
+ * @em:		extent map being released
  *
  * Drops the reference out on @em by one and free the structure
  * if the reference count hits zero.
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 7a7d6e2..62a81ee 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -248,7 +248,7 @@
 				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 					set_extent_bits(io_tree, offset,
 						offset + root->sectorsize - 1,
-						EXTENT_NODATASUM, GFP_NOFS);
+						EXTENT_NODATASUM);
 				} else {
 					btrfs_info(BTRFS_I(inode)->root->fs_info,
 						   "no csum found for inode %llu start %llu",
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c98805c..e0c9bd3 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1596,6 +1596,13 @@
 
 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
 
+		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						reserve_bytes);
+		dirty_sectors = round_up(copied + sector_offset,
+					root->sectorsize);
+		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						dirty_sectors);
+
 		/*
 		 * if we have trouble faulting in the pages, fall
 		 * back to one page at a time
@@ -1605,6 +1612,7 @@
 
 		if (copied == 0) {
 			force_page_uptodate = true;
+			dirty_sectors = 0;
 			dirty_pages = 0;
 		} else {
 			force_page_uptodate = false;
@@ -1615,20 +1623,19 @@
 		/*
 		 * If we had a short copy we need to release the excess delaloc
 		 * bytes we reserved.  We need to increment outstanding_extents
-		 * because btrfs_delalloc_release_space will decrement it, but
+		 * because btrfs_delalloc_release_space and
+		 * btrfs_delalloc_release_metadata will decrement it, but
 		 * we still have an outstanding extent for the chunk we actually
 		 * managed to copy.
 		 */
-		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						reserve_bytes);
-		dirty_sectors = round_up(copied + sector_offset,
-					root->sectorsize);
-		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						dirty_sectors);
-
 		if (num_sectors > dirty_sectors) {
-			release_bytes = (write_bytes - copied)
-				& ~((u64)root->sectorsize - 1);
+			/*
+			 * we round down because we don't want to count
+			 * any partial blocks actually sent through the
+			 * IO machines
+			 */
+			release_bytes = round_down(release_bytes - copied,
+				      root->sectorsize);
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
@@ -2022,7 +2029,7 @@
 	     BTRFS_I(inode)->last_trans
 	     <= root->fs_info->last_trans_committed)) {
 		/*
-		 * We'v had everything committed since the last time we were
+		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
 		 * reason, it's no longer relevant.
 		 */
@@ -2370,7 +2377,7 @@
 
 	/* Check the aligned pages after the first unaligned page,
 	 * if offset != orig_start, which means the first unaligned page
-	 * including serveral following pages are already in holes,
+	 * including several following pages are already in holes,
 	 * the extra check can be skipped */
 	if (offset == orig_start) {
 		/* after truncate page, check hole again */
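
The short-copy hunk above releases the unused reservation rounded down to a sector boundary, so partial blocks that still reach the IO path stay reserved. A small worked sketch of that arithmetic (the names and the 4096-byte sector size are assumptions for illustration):

static u64 example_release_after_short_copy(u64 reserve_bytes, u64 copied,
					    u32 sectorsize)
{
	/*
	 * e.g. reserve_bytes = 12288 (3 x 4096), copied = 5000:
	 * 12288 - 5000 = 7288, rounded down to 4096. The two sectors that
	 * still hold copied data remain reserved.
	 */
	return round_down(reserve_bytes - copied, sectorsize);
}
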
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5e6062c..c6dc118 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1983,7 +1983,7 @@
 		/*
 		 * If this block group has some small extents we don't want to
 		 * use up all of our free slots in the cache with them, we want
-		 * to reserve them to larger extents, however if we have plent
+		 * to reserve them to larger extents, however if we have plenty
 		 * of cache left then go ahead an dadd them, no sense in adding
 		 * the overhead of a bitmap if we don't have to.
 		 */
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 33178c4..3af651c 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -123,7 +123,7 @@
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 			   u64 *trimmed, u64 start, u64 end, u64 minlen);
 
-/* Support functions for runnint our sanity tests */
+/* Support functions for running our sanity tests */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
 			      u64 offset, u64 bytes, bool bitmap);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 91419ef..8b1212e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -455,7 +455,7 @@
 
 	/*
 	 * skip compression for a small file range(<=blocksize) that
-	 * isn't an inline extent, since it dosen't save disk space at all.
+	 * isn't an inline extent, since it doesn't save disk space at all.
 	 */
 	if (total_compressed <= blocksize &&
 	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
@@ -1978,7 +1978,7 @@
 {
 	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-				   cached_state, GFP_NOFS);
+				   cached_state);
 }
 
 /* see btrfs_writepage_start_hook for details on why this is required */
@@ -3119,8 +3119,7 @@
 
 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
-		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
-				  GFP_NOFS);
+		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
 		return 0;
 	}
 
@@ -3722,7 +3721,7 @@
 	 * and doesn't have an inode ref with the name "bar" anymore.
 	 *
 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
-	 * but it guarantees correctness at the expense of ocassional full
+	 * but it guarantees correctness at the expense of occasional full
 	 * transaction commits on fsync if our inode is a directory, or if our
 	 * inode is not a directory, logging its parent unnecessarily.
 	 */
@@ -4978,7 +4977,7 @@
 		 * be instantly completed which will give us extents that need
 		 * to be truncated.  If we fail to get an orphan inode down we
 		 * could have left over extents that were never meant to live,
-		 * so we need to garuntee from this point on that everything
+		 * so we need to guarantee from this point on that everything
 		 * will be consistent.
 		 */
 		ret = btrfs_orphan_add(trans, inode);
@@ -5248,7 +5247,7 @@
 		}
 
 		/*
-		 * We can't just steal from the global reserve, we need tomake
+		 * We can't just steal from the global reserve, we need to make
 		 * sure there is room to do it, if not we need to commit and try
 		 * again.
 		 */
@@ -6980,7 +6979,18 @@
 		 * existing will always be non-NULL, since there must be
 		 * extent causing the -EEXIST.
 		 */
-		if (start >= extent_map_end(existing) ||
+		if (existing->start == em->start &&
+		    extent_map_end(existing) == extent_map_end(em) &&
+		    em->block_start == existing->block_start) {
+			/*
+			 * these two extents are the same, it happens
+			 * with inlines especially
+			 */
+			free_extent_map(em);
+			em = existing;
+			err = 0;
+
+		} else if (start >= extent_map_end(existing) ||
 		    start <= existing->start) {
 			/*
 			 * The existing extent map is the one nearest to
@@ -7433,7 +7443,7 @@
 				 cached_state);
 		/*
 		 * We're concerned with the entire range that we're going to be
-		 * doing DIO to, so we need to make sure theres no ordered
+		 * doing DIO to, so we need to make sure there's no ordered
 		 * extents in this range.
 		 */
 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
@@ -7595,7 +7605,7 @@
 	if (current->journal_info) {
 		/*
 		 * Need to pull our outstanding extents and set journal_info to NULL so
-		 * that anything that needs to check if there's a transction doesn't get
+		 * that anything that needs to check if there's a transaction doesn't get
 		 * confused.
 		 */
 		dio_data = current->journal_info;
@@ -7628,7 +7638,7 @@
 	 * decompress it, so there will be buffering required no matter what we
 	 * do, so go ahead and fallback to buffered.
 	 *
-	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
+	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
 	 * to buffered IO.  Don't blame me, this is the price we pay for using
 	 * the generic code.
 	 */
@@ -9041,7 +9051,7 @@
 		return ret;
 
 	/*
-	 * Yes ladies and gentelment, this is indeed ugly.  The fact is we have
+	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
 	 * 3 things going on here
 	 *
 	 * 1) We need to reserve space for our orphan item and the space to
@@ -9055,15 +9065,15 @@
 	 * space reserved in case it uses space during the truncate (thank you
 	 * very much snapshotting).
 	 *
-	 * And we need these to all be seperate.  The fact is we can use alot of
+	 * And we need these to all be separate.  The fact is we can use a lot of
 	 * space doing the truncate, and we have no earthly idea how much space
-	 * we will use, so we need the truncate reservation to be seperate so it
+	 * we will use, so we need the truncate reservation to be separate so it
 	 * doesn't end up using space reserved for updating the inode or
 	 * removing the orphan item.  We also need to be able to stop the
 	 * transaction and start a new one, which means we need to be able to
 	 * update the inode several times, and we have no idea of knowing how
 	 * many times that will be, so we can't just reserve 1 item for the
-	 * entirety of the opration, so that has to be done seperately as well.
+	 * entirety of the operation, so that has to be done separately as well.
 	 * Then there is the orphan item, which does indeed need to be held on
 	 * to for the whole operation, and we need nobody to touch this reserved
 	 * space except the orphan code.
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4e70069..0517356 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -296,7 +296,7 @@
 		}
 	} else {
 		/*
-		 * Revert back under same assuptions as above
+		 * Revert back under same assumptions as above
 		 */
 		if (S_ISREG(mode)) {
 			if (inode->i_size == 0)
@@ -465,7 +465,7 @@
 
 	/*
 	 * Don't create subvolume whose level is not zero. Or qgroup will be
-	 * screwed up since it assume subvolme qgroup's level to be 0.
+	 * screwed up since it assumes subvolume qgroup's level to be 0.
 	 */
 	if (btrfs_qgroup_level(objectid)) {
 		ret = -ENOSPC;
@@ -780,7 +780,7 @@
  *	a. be owner of dir, or
  *	b. be owner of victim, or
  *	c. have CAP_FOWNER capability
- *  6. If the victim is append-only or immutable we can't do antyhing with
+ *  6. If the victim is append-only or immutable we can't do anything with
  *     links pointing to it.
  *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
  *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
@@ -846,11 +846,9 @@
 	struct dentry *dentry;
 	int error;
 
-	inode_lock_nested(dir, I_MUTEX_PARENT);
-	// XXX: should've been
-	// mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
-	// if (error == -EINTR)
-	//	return error;
+	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
+	if (error == -EINTR)
+		return error;
 
 	dentry = lookup_one_len(name, parent->dentry, namelen);
 	error = PTR_ERR(dentry);
@@ -1239,7 +1237,7 @@
 
 
 	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
-			  &cached_state, GFP_NOFS);
+			  &cached_state);
 
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 			     page_start, page_end - 1, &cached_state,
@@ -2377,11 +2375,9 @@
 		goto out;
 
 
-	inode_lock_nested(dir, I_MUTEX_PARENT);
-	// XXX: should've been
-	// err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
-	// if (err == -EINTR)
-	//	goto out_drop_write;
+	err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
+	if (err == -EINTR)
+		goto out_drop_write;
 	dentry = lookup_one_len(vol_args->name, parent, namelen);
 	if (IS_ERR(dentry)) {
 		err = PTR_ERR(dentry);
@@ -2571,7 +2567,7 @@
 	dput(dentry);
 out_unlock_dir:
 	inode_unlock(dir);
-//out_drop_write:
+out_drop_write:
 	mnt_drop_write_file(file);
 out:
 	kfree(vol_args);
@@ -4654,7 +4650,7 @@
 	}
 
 	/*
-	 * mut. excl. ops lock is locked.  Three possibilites:
+	 * mut. excl. ops lock is locked.  Three possibilities:
 	 *   (1) some other op is running
 	 *   (2) balance is running
 	 *   (3) balance is paused -- special case (think resume)
@@ -5571,7 +5567,7 @@
 		ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
 		/*
 		 * The transaction thread may want to do more work,
-		 * namely it pokes the cleaner ktread that will start
+		 * namely it pokes the cleaner kthread that will start
 		 * processing uncleaned subvols.
 		 */
 		wake_up_process(root->fs_info->transaction_kthread);
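
Both ioctl.c hunks replace the uninterruptible inode_lock_nested() with a killable acquisition of the parent directory lock. A sketch of that pattern, with placeholder comments where the lookup would go:

static int example_killable_parent_lock(struct inode *dir)
{
	int error;

	/*
	 * A fatal signal while waiting for i_rwsem returns -EINTR, which is
	 * propagated instead of blocking the task uninterruptibly.
	 */
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	/* ... lookup_one_len() and the actual work would go here ... */

	inode_unlock(dir);
	return 0;
}
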
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 5591704..e96634a 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -718,12 +718,13 @@
 	return count;
 }
 
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
 			      const u64 range_start, const u64 range_len)
 {
 	struct btrfs_root *root;
 	struct list_head splice;
 	int done;
+	int total_done = 0;
 
 	INIT_LIST_HEAD(&splice);
 
@@ -742,6 +743,7 @@
 		done = btrfs_wait_ordered_extents(root, nr,
 						  range_start, range_len);
 		btrfs_put_fs_root(root);
+		total_done += done;
 
 		spin_lock(&fs_info->ordered_root_lock);
 		if (nr != -1) {
@@ -752,6 +754,8 @@
 	list_splice_tail(&splice, &fs_info->ordered_roots);
 	spin_unlock(&fs_info->ordered_root_lock);
 	mutex_unlock(&fs_info->ordered_operations_mutex);
+
+	return total_done;
 }
 
 /*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8ef1262..4515077 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -58,7 +58,7 @@
 
 #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */
 
-#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
+#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */
 
 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
 
@@ -199,7 +199,7 @@
 			   u32 *sum, int len);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 			       const u64 range_start, const u64 range_len);
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
 			      const u64 range_start, const u64 range_len);
 void btrfs_get_logged_extents(struct inode *inode,
 			      struct list_head *logged_list,
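
btrfs_wait_ordered_roots() now reports how many ordered extents were waited on, per the .c and .h hunks above. A hypothetical caller that uses the count to tell whether the flush made progress:

static void example_flush_ordered(struct btrfs_fs_info *fs_info)
{
	int done;

	/* nr == -1 waits on all roots; the range spans the whole address space. */
	done = btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
	if (!done)
		pr_debug("btrfs: no ordered extents were pending\n");
}
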
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9e11955..9d4c05b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -85,7 +85,7 @@
 
 	/*
 	 * temp variables for accounting operations
-	 * Refer to qgroup_shared_accouting() for details.
+	 * Refer to qgroup_shared_accounting() for details.
 	 */
 	u64 old_refcnt;
 	u64 new_refcnt;
@@ -499,7 +499,7 @@
 	}
 	/*
 	 * we call btrfs_free_qgroup_config() when umounting
-	 * filesystem and disabling quota, so we set qgroup_ulit
+	 * filesystem and disabling quota, so we set qgroup_ulist
 	 * to be null here to avoid double free.
 	 */
 	ulist_free(fs_info->qgroup_ulist);
@@ -1036,7 +1036,7 @@
 
 /*
  * The easy accounting, if we are adding/removing the only ref for an extent
- * then this qgroup and all of the parent qgroups get their refrence and
+ * then this qgroup and all of the parent qgroups get their reference and
  * exclusive counts adjusted.
  *
  * Caller should hold fs_info->qgroup_lock.
@@ -1436,7 +1436,7 @@
 
 	/*
 	 * No need to do lock, since this function will only be called in
-	 * btrfs_commmit_transaction().
+	 * btrfs_commit_transaction().
 	 */
 	node = rb_first(&delayed_refs->dirty_extent_root);
 	while (node) {
@@ -1557,7 +1557,7 @@
  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
- * !B:	cur_new_roots == nr_new_roots	(possible exclsuive now)
+ * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
  *
  * Results:
  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
@@ -1851,7 +1851,7 @@
 }
 
 /*
- * Copy the acounting information between qgroups. This is necessary
+ * Copy the accounting information between qgroups. This is necessary
  * when a snapshot or a subvolume is created. Throwing an error will
  * cause a transaction abort so we take extra care here to only error
  * when a readonly fs is a reasonable outcome.
@@ -2340,7 +2340,7 @@
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
 
 	/*
-	 * only update status, since the previous part has alreay updated the
+	 * only update status, since the previous part has already updated the
 	 * qgroup info.
 	 */
 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
@@ -2542,8 +2542,7 @@
 	changeset.bytes_changed = 0;
 	changeset.range_changed = ulist_alloc(GFP_NOFS);
 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-			start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
-			&changeset);
+			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
 	trace_btrfs_qgroup_reserve_data(inode, start, len,
 					changeset.bytes_changed,
 					QGROUP_RESERVE);
@@ -2580,8 +2579,7 @@
 		return -ENOMEM;
 
 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, 
-			start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
-			&changeset);
+			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
 	if (ret < 0)
 		goto out;
 
@@ -2672,7 +2670,7 @@
 }
 
 /*
- * Check qgroup reserved space leaking, normally at destory inode
+ * Check qgroup reserved space leaking, normally at destroy inode
  * time
  */
 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
@@ -2688,7 +2686,7 @@
 		return;
 
 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
-			EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset);
+			EXTENT_QGROUP_RESERVED, &changeset);
 
 	WARN_ON(ret < 0);
 	if (WARN_ON(changeset.bytes_changed)) {
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0b7792e..f8b6d41 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -576,7 +576,7 @@
 	 * we can't merge with cached rbios, since the
 	 * idea is that when we merge the destination
 	 * rbio is going to run our IO for us.  We can
-	 * steal from cached rbio's though, other functions
+	 * steal from cached rbios though, other functions
 	 * handle that.
 	 */
 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
@@ -2368,7 +2368,7 @@
 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}
 
-		/* Check scrubbing pairty and repair it */
+		/* Check scrubbing parity and repair it */
 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 		parity = kmap(p);
 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
@@ -2493,7 +2493,7 @@
 		/*
 		 * Here means we got one corrupted data stripe and one
 		 * corrupted parity on RAID6, if the corrupted parity
-		 * is scrubbing parity, luckly, use the other one to repair
+		 * is scrubbing parity, luckily, use the other one to repair
 		 * the data, or we can not repair the data stripe.
 		 */
 		if (failp != rbio->scrubp)
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 298631ea..8428db7 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -761,12 +761,14 @@
 
 	do {
 		enqueued = 0;
+		mutex_lock(&fs_devices->device_list_mutex);
 		list_for_each_entry(device, &fs_devices->devices, dev_list) {
 			if (atomic_read(&device->reada_in_flight) <
 			    MAX_IN_FLIGHT)
 				enqueued += reada_start_machine_dev(fs_info,
 								    device);
 		}
+		mutex_unlock(&fs_devices->device_list_mutex);
 		total += enqueued;
 	} while (enqueued && total < 10000);
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 1cfd35c..0477dca 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -668,8 +668,8 @@
  * roots of b-trees that reference the tree block.
  *
  * the basic idea of this function is check backrefs of a given block
- * to find upper level blocks that refernece the block, and then check
- * bakcrefs of these upper level blocks recursively. the recursion stop
+ * to find upper level blocks that reference the block, and then check
+ * backrefs of these upper level blocks recursively. the recursion stop
  * when tree root is reached or backrefs for the block is cached.
  *
  * NOTE: if we find backrefs for a block are cached, we know backrefs
@@ -1160,7 +1160,7 @@
 			if (!RB_EMPTY_NODE(&upper->rb_node))
 				continue;
 
-			/* Add this guy's upper edges to the list to proces */
+			/* Add this guy's upper edges to the list to process */
 			list_for_each_entry(edge, &upper->upper, list[LOWER])
 				list_add_tail(&edge->list[UPPER], &list);
 			if (list_empty(&upper->upper))
@@ -2396,7 +2396,7 @@
 		}
 
 		/*
-		 * we keep the old last snapshod transid in rtranid when we
+		 * we keep the old last snapshot transid in rtranid when we
 		 * created the relocation tree.
 		 */
 		last_snap = btrfs_root_rtransid(&reloc_root->root_item);
@@ -2616,7 +2616,7 @@
 			 * only one thread can access block_rsv at this point,
 			 * so we don't need hold lock to protect block_rsv.
 			 * we expand more reservation size here to allow enough
-			 * space for relocation and we will return eailer in
+			 * space for relocation and we will return earlier in
 			 * enospc case.
 			 */
 			rc->block_rsv->size = tmp + rc->extent_root->nodesize *
@@ -2814,7 +2814,7 @@
 				 u64 bytenr, u32 blocksize)
 {
 	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
-			EXTENT_DIRTY, GFP_NOFS);
+			EXTENT_DIRTY);
 }
 
 static void __mark_block_processed(struct reloc_control *rc,
@@ -3182,7 +3182,7 @@
 		    page_start + offset == cluster->boundary[nr]) {
 			set_extent_bits(&BTRFS_I(inode)->io_tree,
 					page_start, page_end,
-					EXTENT_BOUNDARY, GFP_NOFS);
+					EXTENT_BOUNDARY);
 			nr++;
 		}
 
@@ -4059,8 +4059,7 @@
 	}
 
 	btrfs_release_path(path);
-	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
-			  GFP_NOFS);
+	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
 
 	if (trans) {
 		btrfs_end_transaction_throttle(trans, rc->extent_root);
@@ -4591,7 +4590,7 @@
 
 /*
  * called before creating snapshot. it calculates metadata reservation
- * requried for relocating tree blocks in the snapshot
+ * required for relocating tree blocks in the snapshot
  */
 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index b2b14e7..f1c3086 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,9 +71,9 @@
  * search_key: the key to search
  * path: the path we search
  * root_item: the root item of the tree we look for
- * root_key: the reak key of the tree we look for
+ * root_key: the root key of the tree we look for
  *
- * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset
+ * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
  * of the search key, just lookup the root with the highest offset for a
  * given objectid.
  *
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fa35cdc..70427ef 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -745,7 +745,7 @@
 		 * sure we read the bad mirror.
 		 */
 		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-					EXTENT_DAMAGED, GFP_NOFS);
+					EXTENT_DAMAGED);
 		if (ret) {
 			/* set_extent_bits should give proper error */
 			WARN_ON(ret > 0);
@@ -763,7 +763,7 @@
 						end, EXTENT_DAMAGED, 0, NULL);
 		if (!corrected)
 			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-						EXTENT_DAMAGED, GFP_NOFS);
+						EXTENT_DAMAGED);
 	}
 
 out:
@@ -1044,7 +1044,7 @@
 
 		/*
 		 * !is_metadata and !have_csum, this means that the data
-		 * might not be COW'ed, that it might be modified
+		 * might not be COWed, that it might be modified
 		 * concurrently. The general strategy to work on the
 		 * commit root does not help in the case when COW is not
 		 * used.
@@ -1125,7 +1125,7 @@
 	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 	 * of mirror #2 is readable but the final checksum test fails,
 	 * then the 2nd page of mirror #3 could be tried, whether now
-	 * the final checksum succeedes. But this would be a rare
+	 * the final checksum succeeds. But this would be a rare
 	 * exception and is therefore not implemented. At least it is
 	 * avoided that the good copy is overwritten.
 	 * A more useful improvement would be to pick the sectors
@@ -2181,7 +2181,7 @@
 	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
 	u64 length = sblock->page_count * PAGE_SIZE;
 	u64 logical = sblock->pagev[0]->logical;
-	struct btrfs_bio *bbio;
+	struct btrfs_bio *bbio = NULL;
 	struct bio *bio;
 	struct btrfs_raid_bio *rbio;
 	int ret;
@@ -2982,6 +2982,7 @@
 						       extent_len);
 
 			mapped_length = extent_len;
+			bbio = NULL;
 			ret = btrfs_map_block(fs_info, READ, extent_logical,
 					      &mapped_length, &bbio, 0);
 			if (!ret) {
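
The hunks above declare bbio as NULL and reset it before each btrfs_map_block() call, presumably so the shared error/cleanup path can release it safely even when the mapping call fails before assigning it. A minimal userspace sketch of the same idiom, with a hypothetical fill_buf() helper standing in for the mapping call (not a btrfs API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical helper: may fail before touching *out, like a mapping
     * call that errors out early. */
    static int fill_buf(char **out, int fail)
    {
    	if (fail)
    		return -1;		/* *out left untouched on this path */
    	*out = malloc(16);
    	if (!*out)
    		return -1;
    	strcpy(*out, "mapped");
    	return 0;
    }

    int main(void)
    {
    	char *buf = NULL;		/* reset before the call that may fail */
    	int ret = fill_buf(&buf, 1);

    	if (ret)
    		fprintf(stderr, "fill_buf failed\n");
    	free(buf);			/* safe: still NULL on the failure path */
    	return 0;
    }
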
@@ -3581,6 +3582,46 @@
 		 */
 		scrub_pause_on(fs_info);
 		ret = btrfs_inc_block_group_ro(root, cache);
+		if (!ret && is_dev_replace) {
+			/*
+			 * If we are doing a device replace wait for any tasks
+			 * that started delalloc right before we set the block
+			 * group to RO mode, as they might have just allocated
+			 * an extent from it or decided they could do a nocow
+			 * write. And if any such tasks did that, wait for their
+			 * ordered extents to complete and then commit the
+			 * current transaction, so that we can later see the new
+			 * extent items in the extent tree - the ordered extents
+			 * create delayed data references (for cow writes) when
+			 * they complete, which will be run and insert the
+			 * corresponding extent items into the extent tree when
+			 * we commit the transaction they used when running
+			 * inode.c:btrfs_finish_ordered_io(). We later use
+			 * the commit root of the extent tree to find extents
+			 * to copy from the srcdev into the tgtdev, and we don't
+			 * want to miss any new extents.
+			 */
+			btrfs_wait_block_group_reservations(cache);
+			btrfs_wait_nocow_writers(cache);
+			ret = btrfs_wait_ordered_roots(fs_info, -1,
+						       cache->key.objectid,
+						       cache->key.offset);
+			if (ret > 0) {
+				struct btrfs_trans_handle *trans;
+
+				trans = btrfs_join_transaction(root);
+				if (IS_ERR(trans))
+					ret = PTR_ERR(trans);
+				else
+					ret = btrfs_commit_transaction(trans,
+								       root);
+				if (ret) {
+					scrub_pause_off(fs_info);
+					btrfs_put_block_group(cache);
+					break;
+				}
+			}
+		}
 		scrub_pause_off(fs_info);
 
 		if (ret == 0) {
@@ -3601,9 +3642,11 @@
 			break;
 		}
 
+		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
 		dev_replace->cursor_right = found_key.offset + length;
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
+		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
 				  found_key.offset, cache, is_dev_replace);
 
@@ -3639,6 +3682,11 @@
 
 		scrub_pause_off(fs_info);
 
+		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
+		dev_replace->cursor_left = dev_replace->cursor_right;
+		dev_replace->item_needs_writeback = 1;
+		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
+
 		if (ro_set)
 			btrfs_dec_block_group_ro(root, cache);
 
@@ -3676,9 +3724,6 @@
 			ret = -ENOMEM;
 			break;
 		}
-
-		dev_replace->cursor_left = dev_replace->cursor_right;
-		dev_replace->item_needs_writeback = 1;
 skip:
 		key.offset = found_key.offset + length;
 		btrfs_release_path(path);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6a8c860..b71dd29 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1831,7 +1831,7 @@
 
 	/*
 	 * If we have a parent root we need to verify that the parent dir was
-	 * not delted and then re-created, if it was then we have no overwrite
+	 * not deleted and then re-created, if it was then we have no overwrite
 	 * and we can just unlink this entry.
 	 */
 	if (sctx->parent_root) {
@@ -4192,9 +4192,9 @@
 		return -ENOMEM;
 
 	/*
-	 * This hack is needed because empty acl's are stored as zero byte
+	 * This hack is needed because empty acls are stored as zero byte
 	 * data in xattrs. Problem with that is, that receiving these zero byte
-	 * acl's will fail later. To fix this, we send a dummy acl list that
+	 * acls will fail later. To fix this, we send a dummy acl list that
 	 * only contains the version number and no entries.
 	 */
 	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index e05619f..875c757 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -36,7 +36,7 @@
  *
  * The end result is that anyone who #includes ctree.h gets a
  * declaration for the btrfs_set_foo functions and btrfs_foo functions,
- * which are wappers of btrfs_set_token_#bits functions and
+ * which are wrappers of btrfs_set_token_#bits functions and
  * btrfs_get_token_#bits functions, which are defined in this file.
  *
  * These setget functions do all the extent_buffer related mapping
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index bf71071..4e59a91 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -112,7 +112,7 @@
 		 * Note that a running device replace operation is not
 		 * canceled here although there is no way to update
 		 * the progress. It would add the risk of a deadlock,
-		 * therefore the canceling is ommited. The only penalty
+		 * therefore the canceling is omitted. The only penalty
 		 * is that some I/O remains active until the procedure
 		 * completes. The next time when the filesystem is
 		 * mounted writeable again, the device replace
@@ -1877,7 +1877,7 @@
 	int ret;
 
 	/*
-	 * We aren't under the device list lock, so this is racey-ish, but good
+	 * We aren't under the device list lock, so this is racy-ish, but good
 	 * enough for our purposes.
 	 */
 	nr_devices = fs_info->fs_devices->open_devices;
@@ -1896,7 +1896,7 @@
 	if (!devices_info)
 		return -ENOMEM;
 
-	/* calc min stripe number for data space alloction */
+	/* calc min stripe number for data space allocation */
 	type = btrfs_get_alloc_profile(root, 1);
 	if (type & BTRFS_BLOCK_GROUP_RAID0) {
 		min_stripes = 2;
@@ -1932,7 +1932,7 @@
 		avail_space *= BTRFS_STRIPE_LEN;
 
 		/*
-		 * In order to avoid overwritting the superblock on the drive,
+		 * In order to avoid overwriting the superblock on the drive,
 		 * btrfs starts at an offset of at least 1MB when doing chunk
 		 * allocation.
 		 */
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 70948b1..5572460 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -113,7 +113,7 @@
 	 * |--- delalloc ---|
 	 * |---  search  ---|
 	 */
-	set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
+	set_extent_delalloc(&tmp, 0, 4095, NULL);
 	start = 0;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -144,7 +144,7 @@
 		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
 	}
-	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
+	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
 	start = test_start;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -176,7 +176,7 @@
 	locked_page = find_lock_page(inode->i_mapping, test_start >>
 				     PAGE_SHIFT);
 	if (!locked_page) {
-		test_msg("Could'nt find the locked page\n");
+		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
 	}
 	start = test_start;
@@ -199,7 +199,7 @@
 	 *
 	 * We are re-using our test_start from above since it works out well.
 	 */
-	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
+	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
 	start = test_start;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -262,7 +262,7 @@
 	}
 	ret = 0;
 out_bits:
-	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
+	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
 out:
 	if (locked_page)
 		put_page(locked_page);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 5142475..0eeb8f3 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -25,7 +25,7 @@
 #define BITS_PER_BITMAP		(PAGE_SIZE * 8)
 
 /*
- * This test just does basic sanity checking, making sure we can add an exten
+ * This test just does basic sanity checking, making sure we can add an extent
  * entry and remove space from either end and the middle, and make sure we can
  * remove space that covers adjacent extent entries.
  */
@@ -396,8 +396,9 @@
  * wasn't optimal as they could be spread all over the block group while under
  * concurrency (extra overhead and fragmentation).
  *
- * This stealing approach is benefical, since we always prefer to allocate from
- * extent entries, both for clustered and non-clustered allocation requests.
+ * This stealing approach is beneficial, since we always prefer to allocate
+ * from extent entries, both for clustered and non-clustered allocation
+ * requests.
  */
 static int
 test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 863a6a3..8a25fe8 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -264,7 +264,7 @@
 
 	/*
 	 * We will just free a dummy node if it's ref count is 2 so we need an
-	 * extra ref so our searches don't accidently release our page.
+	 * extra ref so our searches don't accidentally release our page.
 	 */
 	extent_buffer_get(root->node);
 	btrfs_set_header_nritems(root->node, 0);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 8ea5d34..8aa4ded3 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -234,7 +234,7 @@
 	}
 
 	/*
-	 * Since the test trans doesn't havee the complicated delayed refs,
+	 * Since the test trans doesn't have the complicated delayed refs,
 	 * we can only call btrfs_qgroup_account_extent() directly to test
 	 * quota.
 	 */
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b0b758..f6e24cb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -944,7 +944,7 @@
 
 		err = convert_extent_bit(dirty_pages, start, end,
 					 EXTENT_NEED_WAIT,
-					 mark, &cached_state, GFP_NOFS);
+					 mark, &cached_state);
 		/*
 		 * convert_extent_bit can return -ENOMEM, which is most of the
 		 * time a temporary error. So when it happens, ignore the error
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 72be51f..9fe0ec2 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -144,7 +144,7 @@
 	/* block reservation for the operation */
 	struct btrfs_block_rsv block_rsv;
 	u64 qgroup_reserved;
-	/* extra metadata reseration for relocation */
+	/* extra metadata reservation for relocation */
 	int error;
 	bool readonly;
 	struct list_head list;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8aaca5c..b7665af 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2330,7 +2330,7 @@
 				break;
 
 			/* for regular files, make sure corresponding
-			 * orhpan item exist. extents past the new EOF
+			 * orphan item exist. extents past the new EOF
 			 * will be truncated later by orphan cleanup.
 			 */
 			if (S_ISREG(mode)) {
@@ -3001,7 +3001,7 @@
 			break;
 
 		clear_extent_bits(&log->dirty_log_pages, start, end,
-				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+				  EXTENT_DIRTY | EXTENT_NEW);
 	}
 
 	/*
@@ -4914,7 +4914,7 @@
  * the actual unlink operation, so if we do this check before a concurrent task
  * sets last_unlink_trans it means we've logged a consistent version/state of
  * all the inode items, otherwise we are not sure and must do a transaction
- * commit (the concurrent task migth have only updated last_unlink_trans before
+ * commit (the concurrent task might have only updated last_unlink_trans before
  * we logged the inode or it might have also done the unlink).
  */
 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
@@ -4973,7 +4973,7 @@
 	while (1) {
 		/*
 		 * If we are logging a directory then we start with our inode,
-		 * not our parents inode, so we need to skipp setting the
+		 * not our parent's inode, so we need to skip setting the
 		 * logged_trans so that further down in the log code we don't
 		 * think this inode has already been logged.
 		 */
@@ -5357,7 +5357,7 @@
 		log_dentries = true;
 
 	/*
-	 * On unlink we must make sure all our current and old parent directores
+	 * On unlink we must make sure all our current and old parent directory
 	 * inodes are fully logged. This is to prevent leaving dangling
 	 * directory index entries in directories that were our parents but are
 	 * not anymore. Not doing this results in old parent directory being
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 91feb2b..b1434bb 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -28,7 +28,7 @@
  * }
  * ulist_free(ulist);
  *
- * This assumes the graph nodes are adressable by u64. This stems from the
+ * This assumes the graph nodes are addressable by u64. This stems from the
  * usage for tree enumeration in btrfs, where the logical addresses are
  * 64 bit.
  *
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2b88127..da9e003 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2190,7 +2190,7 @@
 }
 
 /*
- * strore the expected generation for seed devices in device items.
+ * Store the expected generation for seed devices in device items.
  */
 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root)
@@ -2761,6 +2761,7 @@
 	u64 dev_extent_len = 0;
 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
 	int i, ret = 0;
+	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 
 	/* Just in case */
 	root = root->fs_info->chunk_root;
@@ -2787,12 +2788,19 @@
 	check_system_chunk(trans, extent_root, map->type);
 	unlock_chunks(root->fs_info->chunk_root);
 
+	/*
+	 * Take the device list mutex to prevent races with the final phase of
+	 * a device replace operation that replaces the device object associated
+	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+	 */
+	mutex_lock(&fs_devices->device_list_mutex);
 	for (i = 0; i < map->num_stripes; i++) {
 		struct btrfs_device *device = map->stripes[i].dev;
 		ret = btrfs_free_dev_extent(trans, device,
 					    map->stripes[i].physical,
 					    &dev_extent_len);
 		if (ret) {
+			mutex_unlock(&fs_devices->device_list_mutex);
 			btrfs_abort_transaction(trans, root, ret);
 			goto out;
 		}
@@ -2811,11 +2819,14 @@
 		if (map->stripes[i].dev) {
 			ret = btrfs_update_device(trans, map->stripes[i].dev);
 			if (ret) {
+				mutex_unlock(&fs_devices->device_list_mutex);
 				btrfs_abort_transaction(trans, root, ret);
 				goto out;
 			}
 		}
 	}
+	mutex_unlock(&fs_devices->device_list_mutex);
+
 	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
 	if (ret) {
 		btrfs_abort_transaction(trans, root, ret);
@@ -3387,7 +3398,7 @@
 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
 		/*
 		 * Same logic as the 'limit' filter; the minimum cannot be
-		 * determined here because we do not have the global informatoin
+		 * determined here because we do not have the global information
 		 * about the count of all chunks that satisfy the filters.
 		 */
 		if (bargs->limit_max == 0)
@@ -5762,20 +5773,17 @@
 			}
 		}
 		if (found) {
-			if (physical_of_found + map->stripe_len <=
-			    dev_replace->cursor_left) {
-				struct btrfs_bio_stripe *tgtdev_stripe =
-					bbio->stripes + num_stripes;
+			struct btrfs_bio_stripe *tgtdev_stripe =
+				bbio->stripes + num_stripes;
 
-				tgtdev_stripe->physical = physical_of_found;
-				tgtdev_stripe->length =
-					bbio->stripes[index_srcdev].length;
-				tgtdev_stripe->dev = dev_replace->tgtdev;
-				bbio->tgtdev_map[index_srcdev] = num_stripes;
+			tgtdev_stripe->physical = physical_of_found;
+			tgtdev_stripe->length =
+				bbio->stripes[index_srcdev].length;
+			tgtdev_stripe->dev = dev_replace->tgtdev;
+			bbio->tgtdev_map[index_srcdev] = num_stripes;
 
-				tgtdev_indexes++;
-				num_stripes++;
-			}
+			tgtdev_indexes++;
+			num_stripes++;
 		}
 	}
 
@@ -6076,7 +6084,7 @@
 {
 	atomic_inc(&bbio->error);
 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
-		/* Shoud be the original bio. */
+		/* Should be the original bio. */
 		WARN_ON(bio != bbio->orig_bio);
 
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -6560,7 +6568,7 @@
 	set_extent_buffer_uptodate(sb);
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
 	/*
-	 * The sb extent buffer is artifical and just used to read the system array.
+	 * The sb extent buffer is artificial and just used to read the system array.
 	 * set_extent_buffer_uptodate() call does not properly mark all its
 	 * pages up-to-date when the page is larger: extent does not cover the
 	 * whole page and consequently check_page_uptodate does not find all
@@ -6630,13 +6638,13 @@
 		sb_array_offset += len;
 		cur_offset += len;
 	}
-	free_extent_buffer(sb);
+	free_extent_buffer_stale(sb);
 	return ret;
 
 out_short_read:
 	printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
 			len, cur_offset);
-	free_extent_buffer(sb);
+	free_extent_buffer_stale(sb);
 	return -EIO;
 }
 
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 3bfb252..d1a177a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -380,23 +380,21 @@
 }
 
 static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   const void *buffer, size_t size,
-				   int flags)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, const void *buffer,
+				   size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
-
 	name = xattr_full_name(handler, name);
 	return __btrfs_setxattr(NULL, inode, name, buffer, size, flags);
 }
 
 static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
-					struct dentry *dentry,
+					struct dentry *unused, struct inode *inode,
 					const char *name, const void *value,
 					size_t size, int flags)
 {
 	name = xattr_full_name(handler, name);
-	return btrfs_set_prop(d_inode(dentry), name, value, size, flags);
+	return btrfs_set_prop(inode, name, value, size, flags);
 }
 
 static const struct xattr_handler btrfs_security_xattr_handler = {
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 861d611..ce5f345 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -380,7 +380,7 @@
  * check if the backing cache is updated to FS-Cache
  * - called by FS-Cache when evaluates if need to invalidate the cache
  */
-static bool cachefiles_check_consistency(struct fscache_operation *op)
+static int cachefiles_check_consistency(struct fscache_operation *op)
 {
 	struct cachefiles_object *object;
 	struct cachefiles_cache *cache;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 43098cd..26a9d10 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -257,12 +257,12 @@
 /*
  * Finish an async read(ahead) op.
  */
-static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
+static void finish_read(struct ceph_osd_request *req)
 {
 	struct inode *inode = req->r_inode;
 	struct ceph_osd_data *osd_data;
-	int rc = req->r_result;
-	int bytes = le32_to_cpu(msg->hdr.data_len);
+	int rc = req->r_result <= 0 ? req->r_result : 0;
+	int bytes = req->r_result >= 0 ? req->r_result : 0;
 	int num_pages;
 	int i;
 
@@ -276,8 +276,10 @@
 	for (i = 0; i < num_pages; i++) {
 		struct page *page = osd_data->pages[i];
 
-		if (rc < 0 && rc != -ENOENT)
+		if (rc < 0 && rc != -ENOENT) {
+			ceph_fscache_readpage_cancel(inode, page);
 			goto unlock;
+		}
 		if (bytes < (int)PAGE_SIZE) {
 			/* zero (remainder of) page */
 			int s = bytes < 0 ? 0 : bytes;
@@ -376,8 +378,6 @@
 	req->r_callback = finish_read;
 	req->r_inode = inode;
 
-	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
-
 	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
 	ret = ceph_osdc_start_request(osdc, req, false);
 	if (ret < 0)
@@ -537,8 +537,6 @@
 	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
 		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
 
-	ceph_readpage_to_fscache(inode, page);
-
 	set_page_writeback(page);
 	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
 				   &ci->i_layout, snapc,
@@ -546,11 +544,21 @@
 				   truncate_seq, truncate_size,
 				   &inode->i_mtime, &page, 1);
 	if (err < 0) {
-		dout("writepage setting page/mapping error %d %p\n", err, page);
+		struct writeback_control tmp_wbc;
+		if (!wbc)
+			wbc = &tmp_wbc;
+		if (err == -ERESTARTSYS) {
+			/* killed by SIGKILL */
+			dout("writepage interrupted page %p\n", page);
+			redirty_page_for_writepage(wbc, page);
+			end_page_writeback(page);
+			goto out;
+		}
+		dout("writepage setting page/mapping error %d %p\n",
+		     err, page);
 		SetPageError(page);
 		mapping_set_error(&inode->i_data, err);
-		if (wbc)
-			wbc->pages_skipped++;
+		wbc->pages_skipped++;
 	} else {
 		dout("writepage cleaned page %p\n", page);
 		err = 0;  /* vfs expects us to return 0 */
@@ -571,12 +579,16 @@
 	BUG_ON(!inode);
 	ihold(inode);
 	err = writepage_nounlock(page, wbc);
+	if (err == -ERESTARTSYS) {
+		/* direct memory reclaimer was killed by SIGKILL. return 0
+		 * to prevent caller from setting mapping/page error */
+		err = 0;
+	}
 	unlock_page(page);
 	iput(inode);
 	return err;
 }
 
-
 /*
  * lame release_pages helper.  release_pages() isn't exported to
  * modules.
@@ -600,8 +612,7 @@
  * If we get an error, set the mapping error bit, but not the individual
  * page error bits.
  */
-static void writepages_finish(struct ceph_osd_request *req,
-			      struct ceph_msg *msg)
+static void writepages_finish(struct ceph_osd_request *req)
 {
 	struct inode *inode = req->r_inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
@@ -615,7 +626,6 @@
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	bool remove_page;
 
-
 	dout("writepages_finish %p rc %d\n", inode, rc);
 	if (rc < 0)
 		mapping_set_error(mapping, rc);
@@ -650,6 +660,9 @@
 				clear_bdi_congested(&fsc->backing_dev_info,
 						    BLK_RW_ASYNC);
 
+			if (rc < 0)
+				SetPageError(page);
+
 			ceph_put_snap_context(page_snap_context(page));
 			page->private = 0;
 			ClearPagePrivate(page);
@@ -718,8 +731,11 @@
 	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
 
 	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
-		pr_warn("writepage_start %p on forced umount\n", inode);
-		truncate_pagecache(inode, 0);
+		if (ci->i_wrbuffer_ref > 0) {
+			pr_warn_ratelimited(
+				"writepage_start %p %lld forced umount\n",
+				inode, ceph_ino(inode));
+		}
 		mapping_set_error(mapping, -EIO);
 		return -EIO; /* we're in a forced umount, don't write! */
 	}
@@ -1063,10 +1079,7 @@
 			pages = NULL;
 		}
 
-		vino = ceph_vino(inode);
-		ceph_osdc_build_request(req, offset, snapc, vino.snap,
-					&inode->i_mtime);
-
+		req->r_mtime = inode->i_mtime;
 		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
 		BUG_ON(rc);
 		req = NULL;
@@ -1099,8 +1112,7 @@
 		mapping->writeback_index = index;
 
 out:
-	if (req)
-		ceph_osdc_put_request(req);
+	ceph_osdc_put_request(req);
 	ceph_put_snap_context(snapc);
 	dout("writepages done, rc = %d\n", rc);
 	return rc;
@@ -1134,6 +1146,7 @@
 			    struct page *page)
 {
 	struct inode *inode = file_inode(file);
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	loff_t page_off = pos & PAGE_MASK;
 	int pos_in_page = pos & ~PAGE_MASK;
@@ -1142,6 +1155,12 @@
 	int r;
 	struct ceph_snap_context *snapc, *oldest;
 
+	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+		dout(" page %p forced umount\n", page);
+		unlock_page(page);
+		return -EIO;
+	}
+
 retry_locked:
 	/* writepages currently holds page lock, but if we change that later, */
 	wait_on_page_writeback(page);
@@ -1165,7 +1184,7 @@
 			snapc = ceph_get_snap_context(snapc);
 			unlock_page(page);
 			ceph_queue_writeback(inode);
-			r = wait_event_interruptible(ci->i_cap_wq,
+			r = wait_event_killable(ci->i_cap_wq,
 			       context_is_writeable_or_written(inode, snapc));
 			ceph_put_snap_context(snapc);
 			if (r == -ERESTARTSYS)
@@ -1311,6 +1330,17 @@
 	.direct_IO = ceph_direct_io,
 };
 
+static void ceph_block_sigs(sigset_t *oldset)
+{
+	sigset_t mask;
+	siginitsetinv(&mask, sigmask(SIGKILL));
+	sigprocmask(SIG_BLOCK, &mask, oldset);
+}
+
+static void ceph_restore_sigs(sigset_t *oldset)
+{
+	sigprocmask(SIG_SETMASK, oldset, NULL);
+}
 
 /*
  * vm ops
@@ -1323,6 +1353,9 @@
 	struct page *pinned_page = NULL;
 	loff_t off = vmf->pgoff << PAGE_SHIFT;
 	int want, got, ret;
+	sigset_t oldset;
+
+	ceph_block_sigs(&oldset);
 
 	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
 	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
@@ -1330,17 +1363,12 @@
 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
 	else
 		want = CEPH_CAP_FILE_CACHE;
-	while (1) {
-		got = 0;
-		ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want,
-				    -1, &got, &pinned_page);
-		if (ret == 0)
-			break;
-		if (ret != -ERESTARTSYS) {
-			WARN_ON(1);
-			return VM_FAULT_SIGBUS;
-		}
-	}
+
+	got = 0;
+	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
+	if (ret < 0)
+		goto out_restore;
+
 	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
 	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
@@ -1357,7 +1385,7 @@
 	ceph_put_cap_refs(ci, got);
 
 	if (ret != -EAGAIN)
-		return ret;
+		goto out_restore;
 
 	/* read inline data */
 	if (off >= PAGE_SIZE) {
@@ -1371,15 +1399,18 @@
 						~__GFP_FS));
 		if (!page) {
 			ret = VM_FAULT_OOM;
-			goto out;
+			goto out_inline;
 		}
 		ret1 = __ceph_do_getattr(inode, page,
 					 CEPH_STAT_CAP_INLINE_DATA, true);
 		if (ret1 < 0 || off >= i_size_read(inode)) {
 			unlock_page(page);
 			put_page(page);
-			ret = VM_FAULT_SIGBUS;
-			goto out;
+			if (ret1 < 0)
+				ret = ret1;
+			else
+				ret = VM_FAULT_SIGBUS;
+			goto out_inline;
 		}
 		if (ret1 < PAGE_SIZE)
 			zero_user_segment(page, ret1, PAGE_SIZE);
@@ -1388,10 +1419,15 @@
 		SetPageUptodate(page);
 		vmf->page = page;
 		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
+out_inline:
+		dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
+		     inode, off, (size_t)PAGE_SIZE, ret);
 	}
-out:
-	dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
-	     inode, off, (size_t)PAGE_SIZE, ret);
+out_restore:
+	ceph_restore_sigs(&oldset);
+	if (ret < 0)
+		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+
 	return ret;
 }
 
@@ -1409,10 +1445,13 @@
 	loff_t size = i_size_read(inode);
 	size_t len;
 	int want, got, ret;
+	sigset_t oldset;
 
 	prealloc_cf = ceph_alloc_cap_flush();
 	if (!prealloc_cf)
-		return VM_FAULT_SIGBUS;
+		return VM_FAULT_OOM;
+
+	ceph_block_sigs(&oldset);
 
 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
 		struct page *locked_page = NULL;
@@ -1423,10 +1462,8 @@
 		ret = ceph_uninline_data(vma->vm_file, locked_page);
 		if (locked_page)
 			unlock_page(locked_page);
-		if (ret < 0) {
-			ret = VM_FAULT_SIGBUS;
+		if (ret < 0)
 			goto out_free;
-		}
 	}
 
 	if (off + PAGE_SIZE <= size)
@@ -1440,45 +1477,36 @@
 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
 	else
 		want = CEPH_CAP_FILE_BUFFER;
-	while (1) {
-		got = 0;
-		ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
-				    &got, NULL);
-		if (ret == 0)
-			break;
-		if (ret != -ERESTARTSYS) {
-			WARN_ON(1);
-			ret = VM_FAULT_SIGBUS;
-			goto out_free;
-		}
-	}
+
+	got = 0;
+	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+			    &got, NULL);
+	if (ret < 0)
+		goto out_free;
+
 	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
 	     inode, off, len, ceph_cap_string(got));
 
 	/* Update time before taking page lock */
 	file_update_time(vma->vm_file);
 
-	lock_page(page);
+	do {
+		lock_page(page);
 
-	ret = VM_FAULT_NOPAGE;
-	if ((off > size) ||
-	    (page->mapping != inode->i_mapping)) {
-		unlock_page(page);
-		goto out;
-	}
+		if ((off > size) || (page->mapping != inode->i_mapping)) {
+			unlock_page(page);
+			ret = VM_FAULT_NOPAGE;
+			break;
+		}
 
-	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
-	if (ret >= 0) {
-		/* success.  we'll keep the page locked. */
-		set_page_dirty(page);
-		ret = VM_FAULT_LOCKED;
-	} else {
-		if (ret == -ENOMEM)
-			ret = VM_FAULT_OOM;
-		else
-			ret = VM_FAULT_SIGBUS;
-	}
-out:
+		ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
+		if (ret >= 0) {
+			/* success.  we'll keep the page locked. */
+			set_page_dirty(page);
+			ret = VM_FAULT_LOCKED;
+		}
+	} while (ret == -EAGAIN);
+
 	if (ret == VM_FAULT_LOCKED ||
 	    ci->i_inline_version != CEPH_INLINE_NONE) {
 		int dirty;
@@ -1495,8 +1523,10 @@
 	     inode, off, len, ceph_cap_string(got), ret);
 	ceph_put_cap_refs(ci, got);
 out_free:
+	ceph_restore_sigs(&oldset);
 	ceph_free_cap_flush(prealloc_cf);
-
+	if (ret < 0)
+		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 	return ret;
 }
 
@@ -1614,7 +1644,7 @@
 		goto out;
 	}
 
-	ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+	req->r_mtime = inode->i_mtime;
 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!err)
 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1657,7 +1687,7 @@
 			goto out_put;
 	}
 
-	ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+	req->r_mtime = inode->i_mtime;
 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!err)
 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1758,9 +1788,11 @@
 	rd_req->r_flags = CEPH_OSD_FLAG_READ;
 	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
 	rd_req->r_base_oloc.pool = pool;
-	snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name),
-		 "%llx.00000000", ci->i_vino.ino);
-	rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
+	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
+
+	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
+	if (err)
+		goto out_unlock;
 
 	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
 					 1, false, GFP_NOFS);
@@ -1769,11 +1801,14 @@
 		goto out_unlock;
 	}
 
-	wr_req->r_flags = CEPH_OSD_FLAG_WRITE |
-			  CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
+	wr_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ACK;
 	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
-	wr_req->r_base_oloc.pool = pool;
-	wr_req->r_base_oid = rd_req->r_base_oid;
+	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
+	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
+
+	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
+	if (err)
+		goto out_unlock;
 
 	/* one page should be large enough for STAT data */
 	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
@@ -1784,12 +1819,9 @@
 
 	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
 				     0, false, true);
-	ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
-				&ci->vfs_inode.i_mtime);
 	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
 
-	ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
-				&ci->vfs_inode.i_mtime);
+	wr_req->r_mtime = ci->vfs_inode.i_mtime;
 	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
 
 	if (!err)
@@ -1823,10 +1855,8 @@
 out_unlock:
 	up_write(&mdsc->pool_perm_rwsem);
 
-	if (rd_req)
-		ceph_osdc_put_request(rd_req);
-	if (wr_req)
-		ceph_osdc_put_request(wr_req);
+	ceph_osdc_put_request(rd_req);
+	ceph_osdc_put_request(wr_req);
 out:
 	if (!err)
 		err = have;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a351480..238c55b 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -25,6 +25,7 @@
 #include "cache.h"
 
 struct ceph_aux_inode {
+	u64 		version;
 	struct timespec	mtime;
 	loff_t          size;
 };
@@ -69,15 +70,8 @@
 	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
 					      &ceph_fscache_fsid_object_def,
 					      fsc, true);
-
-	if (fsc->fscache == NULL) {
+	if (!fsc->fscache)
 		pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
-		return 0;
-	}
-
-	fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
-	if (fsc->revalidate_wq == NULL)
-		return -ENOMEM;
 
 	return 0;
 }
@@ -105,6 +99,7 @@
 	const struct inode* inode = &ci->vfs_inode;
 
 	memset(&aux, 0, sizeof(aux));
+	aux.version = ci->i_version;
 	aux.mtime = inode->i_mtime;
 	aux.size = i_size_read(inode);
 
@@ -131,6 +126,7 @@
 		return FSCACHE_CHECKAUX_OBSOLETE;
 
 	memset(&aux, 0, sizeof(aux));
+	aux.version = ci->i_version;
 	aux.mtime = inode->i_mtime;
 	aux.size = i_size_read(inode);
 
@@ -181,32 +177,26 @@
 	.now_uncached	= ceph_fscache_inode_now_uncached,
 };
 
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-					struct ceph_inode_info* ci)
+void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
-	struct inode* inode = &ci->vfs_inode;
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 
 	/* No caching for filesystem */
 	if (fsc->fscache == NULL)
 		return;
 
 	/* Only cache for regular files that are read only */
-	if ((ci->vfs_inode.i_mode & S_IFREG) == 0)
+	if (!S_ISREG(inode->i_mode))
 		return;
 
-	/* Avoid multiple racing open requests */
-	inode_lock(inode);
-
-	if (ci->fscache)
-		goto done;
-
-	ci->fscache = fscache_acquire_cookie(fsc->fscache,
-					     &ceph_fscache_inode_object_def,
-					     ci, true);
-	fscache_check_consistency(ci->fscache);
-done:
+	inode_lock_nested(inode, I_MUTEX_CHILD);
+	if (!ci->fscache) {
+		ci->fscache = fscache_acquire_cookie(fsc->fscache,
+					&ceph_fscache_inode_object_def,
+					ci, false);
+	}
 	inode_unlock(inode);
-
 }
 
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
@@ -222,6 +212,34 @@
 	fscache_relinquish_cookie(cookie, 0);
 }
 
+static bool ceph_fscache_can_enable(void *data)
+{
+	struct inode *inode = data;
+	return !inode_is_open_for_write(inode);
+}
+
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+
+	if (!fscache_cookie_valid(ci->fscache))
+		return;
+
+	if (inode_is_open_for_write(inode)) {
+		dout("fscache_file_set_cookie %p %p disabling cache\n",
+		     inode, filp);
+		fscache_disable_cookie(ci->fscache, false);
+		fscache_uncache_all_inode_pages(ci->fscache, inode);
+	} else {
+		fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
+				inode);
+		if (fscache_cookie_enabled(ci->fscache)) {
+			dout("fscache_file_set_cookie %p %p enabling cache\n",
+			     inode, filp);
+		}
+	}
+}
+
 static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
 {
 	if (!error)
@@ -236,10 +254,9 @@
 	unlock_page(page);
 }
 
-static inline int cache_valid(struct ceph_inode_info *ci)
+static inline bool cache_valid(struct ceph_inode_info *ci)
 {
-	return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
-		(ci->i_fscache_gen == ci->i_rdcache_gen));
+	return ci->i_fscache_gen == ci->i_rdcache_gen;
 }
 
 
@@ -332,69 +349,27 @@
 
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
-	if (fsc->revalidate_wq)
-		destroy_workqueue(fsc->revalidate_wq);
-
 	fscache_relinquish_cookie(fsc->fscache, 0);
 	fsc->fscache = NULL;
 }
 
-static void ceph_revalidate_work(struct work_struct *work)
+/*
+ * caller should hold CEPH_CAP_FILE_{RD,CACHE}
+ */
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
-	int issued;
-	u32 orig_gen;
-	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-						  i_revalidate_work);
-	struct inode *inode = &ci->vfs_inode;
-
-	spin_lock(&ci->i_ceph_lock);
-	issued = __ceph_caps_issued(ci, NULL);
-	orig_gen = ci->i_rdcache_gen;
-	spin_unlock(&ci->i_ceph_lock);
-
-	if (!(issued & CEPH_CAP_FILE_CACHE)) {
-		dout("revalidate_work lost cache before validation %p\n",
-		     inode);
-		goto out;
-	}
-
-	if (!fscache_check_consistency(ci->fscache))
-		fscache_invalidate(ci->fscache);
-
-	spin_lock(&ci->i_ceph_lock);
-	/* Update the new valid generation (backwards sanity check too) */
-	if (orig_gen > ci->i_fscache_gen) {
-		ci->i_fscache_gen = orig_gen;
-	}
-	spin_unlock(&ci->i_ceph_lock);
-
-out:
-	iput(&ci->vfs_inode);
-}
-
-void ceph_queue_revalidate(struct inode *inode)
-{
-	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-	struct ceph_inode_info *ci = ceph_inode(inode);
-
-	if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
+	if (cache_valid(ci))
 		return;
 
-	ihold(inode);
-
-	if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq,
-		       &ci->i_revalidate_work)) {
-		dout("ceph_queue_revalidate %p\n", inode);
-	} else {
-		dout("ceph_queue_revalidate %p failed\n)", inode);
-		iput(inode);
+	/* reuse i_truncate_mutex. There should be no pending
+	 * truncate while the caller holds CEPH_CAP_FILE_RD */
+	mutex_lock(&ci->i_truncate_mutex);
+	if (!cache_valid(ci)) {
+		if (fscache_check_consistency(ci->fscache))
+			fscache_invalidate(ci->fscache);
+		spin_lock(&ci->i_ceph_lock);
+		ci->i_fscache_gen = ci->i_rdcache_gen;
+		spin_unlock(&ci->i_ceph_lock);
 	}
-}
-
-void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-	ci->fscache = NULL;
-	/* The first load is verifed cookie open time */
-	ci->i_fscache_gen = 1;
-	INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
+	mutex_unlock(&ci->i_truncate_mutex);
 }
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 5ac591b..7e72c75 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -34,10 +34,10 @@
 int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
 
-void ceph_fscache_inode_init(struct ceph_inode_info *ci);
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-					struct ceph_inode_info* ci);
+void ceph_fscache_register_inode_cookie(struct inode *inode);
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
 
 int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
 int ceph_readpages_from_fscache(struct inode *inode,
@@ -46,12 +46,11 @@
 				unsigned *nr_pages);
 void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
-void ceph_queue_revalidate(struct inode *inode);
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
+static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	fscache_attr_changed(ci->fscache);
+	ci->fscache = NULL;
+	ci->i_fscache_gen = 0;
 }
 
 static inline void ceph_fscache_invalidate(struct inode *inode)
@@ -88,6 +87,11 @@
 	return fscache_readpages_cancel(ci->fscache, pages);
 }
 
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
+{
+	ci->i_fscache_gen = ci->i_rdcache_gen - 1;
+}
+
 #else
 
 static inline int ceph_fscache_register(void)
@@ -112,8 +116,20 @@
 {
 }
 
-static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc,
-						      struct ceph_inode_info* ci)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+{
+}
+
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+{
+}
+
+static inline void ceph_fscache_file_set_cookie(struct inode *inode,
+						struct file *filp)
+{
+}
+
+static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
 }
 
@@ -141,10 +157,6 @@
 {
 }
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
-{
-}
-
 static inline void ceph_fscache_invalidate(struct inode *inode)
 {
 }
@@ -154,10 +166,6 @@
 {
 }
 
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
-{
-}
-
 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
 {
 	return 1;
@@ -173,7 +181,7 @@
 {
 }
 
-static inline void ceph_queue_revalidate(struct inode *inode)
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
 {
 }
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index cfaeef1..6f60d0a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1656,7 +1656,7 @@
 	 */
 	if ((!is_delayed || mdsc->stopping) &&
 	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
-	    ci->i_wrbuffer_ref == 0 &&		/* no dirty pages... */
+	    !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&   /* no dirty pages... */
 	    inode->i_data.nrpages &&		/* have cached pages */
 	    (revoking & (CEPH_CAP_FILE_CACHE|
 			 CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
@@ -1698,8 +1698,8 @@
 
 		revoking = cap->implemented & ~cap->issued;
 		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
-		     cap->mds, cap, ceph_cap_string(cap->issued),
-		     ceph_cap_string(cap_used),
+		     cap->mds, cap, ceph_cap_string(cap_used),
+		     ceph_cap_string(cap->issued),
 		     ceph_cap_string(cap->implemented),
 		     ceph_cap_string(revoking));
 
@@ -2317,7 +2317,7 @@
 
 	/* make sure file is actually open */
 	file_wanted = __ceph_caps_file_wanted(ci);
-	if ((file_wanted & need) == 0) {
+	if ((file_wanted & need) != need) {
 		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
 		     ceph_cap_string(need), ceph_cap_string(file_wanted));
 		*err = -EBADF;
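
The check above changes from rejecting only when file_wanted and need have no overlap at all to rejecting whenever some needed cap is not among what the open file wants. A tiny standalone example with illustrative flag values (not the real ceph cap bit layout) shows the difference when the caller needs two bits but the file wants only one:

    #include <stdio.h>

    /* Illustrative flag values only, not the real ceph cap bits. */
    #define CAP_FILE_RD	0x1
    #define CAP_FILE_CACHE	0x2

    int main(void)
    {
    	unsigned int file_wanted = CAP_FILE_RD;			/* open for read only */
    	unsigned int need = CAP_FILE_RD | CAP_FILE_CACHE;	/* caller needs both */

    	/* Old test: only rejects when there is no overlap at all. */
    	printf("old check rejects: %d\n", (file_wanted & need) == 0);	/* 0 */

    	/* New test: rejects unless every needed bit is wanted. */
    	printf("new check rejects: %d\n", (file_wanted & need) != need);	/* 1 */
    	return 0;
    }
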
@@ -2393,6 +2393,9 @@
 				snap_rwsem_locked = true;
 			}
 			*got = need | (have & want);
+			if ((need & CEPH_CAP_FILE_RD) &&
+			    !(*got & CEPH_CAP_FILE_CACHE))
+				ceph_disable_fscache_readpage(ci);
 			__take_cap_refs(ci, *got, true);
 			ret = 1;
 		}
@@ -2412,12 +2415,26 @@
 			goto out_unlock;
 		}
 
-		if (!__ceph_is_any_caps(ci) &&
-		    ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
-			dout("get_cap_refs %p forced umount\n", inode);
-			*err = -EIO;
-			ret = 1;
-			goto out_unlock;
+		if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
+			int mds_wanted;
+			if (ACCESS_ONCE(mdsc->fsc->mount_state) ==
+			    CEPH_MOUNT_SHUTDOWN) {
+				dout("get_cap_refs %p forced umount\n", inode);
+				*err = -EIO;
+				ret = 1;
+				goto out_unlock;
+			}
+			mds_wanted = __ceph_caps_mds_wanted(ci);
+			if ((mds_wanted & need) != need) {
+				dout("get_cap_refs %p caps were dropped"
+				     " (session killed?)\n", inode);
+				*err = -ESTALE;
+				ret = 1;
+				goto out_unlock;
+			}
+			if ((mds_wanted & file_wanted) ==
+			    (file_wanted & (CEPH_CAP_FILE_RD|CEPH_CAP_FILE_WR)))
+				ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
 		}
 
 		dout("get_cap_refs %p have %s needed %s\n", inode,
@@ -2487,7 +2504,7 @@
 			if (err == -EAGAIN)
 				continue;
 			if (err < 0)
-				return err;
+				ret = err;
 		} else {
 			ret = wait_event_interruptible(ci->i_cap_wq,
 					try_get_cap_refs(ci, need, want, endoff,
@@ -2496,8 +2513,15 @@
 				continue;
 			if (err < 0)
 				ret = err;
-			if (ret < 0)
-				return ret;
+		}
+		if (ret < 0) {
+			if (err == -ESTALE) {
+				/* session was killed, try renew caps */
+				ret = ceph_renew_caps(&ci->vfs_inode);
+				if (ret == 0)
+					continue;
+			}
+			return ret;
 		}
 
 		if (ci->i_inline_version != CEPH_INLINE_NONE &&
@@ -2533,6 +2557,9 @@
 		break;
 	}
 
+	if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
+		ceph_fscache_revalidate_cookie(ci);
+
 	*got = _got;
 	return 0;
 }
@@ -2774,7 +2801,6 @@
 	bool writeback = false;
 	bool queue_trunc = false;
 	bool queue_invalidate = false;
-	bool queue_revalidate = false;
 	bool deleted_inode = false;
 	bool fill_inline = false;
 
@@ -2807,7 +2833,7 @@
 	if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
 	    ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
 	    (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
-	    !ci->i_wrbuffer_ref) {
+	    !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
 		if (try_nonblocking_invalidate(inode)) {
 			/* there were locked pages.. invalidate later
 			   in a separate thread. */
@@ -2816,8 +2842,6 @@
 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
 			}
 		}
-
-		ceph_fscache_invalidate(inode);
 	}
 
 	/* side effects now are allowed */
@@ -2859,11 +2883,6 @@
 		}
 	}
 
-	/* Do we need to revalidate our fscache cookie. Don't bother on the
-	 * first cache cap as we already validate at cookie creation time. */
-	if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
-		queue_revalidate = true;
-
 	if (newcaps & CEPH_CAP_ANY_RD) {
 		/* ctime/mtime/atime? */
 		ceph_decode_timespec(&mtime, &grant->mtime);
@@ -2972,11 +2991,8 @@
 	if (fill_inline)
 		ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
 
-	if (queue_trunc) {
+	if (queue_trunc)
 		ceph_queue_vmtruncate(inode);
-		ceph_queue_revalidate(inode);
-	} else if (queue_revalidate)
-		ceph_queue_revalidate(inode);
 
 	if (writeback)
 		/*
@@ -3178,10 +3194,8 @@
 					  truncate_seq, truncate_size, size);
 	spin_unlock(&ci->i_ceph_lock);
 
-	if (queue_trunc) {
+	if (queue_trunc)
 		ceph_queue_vmtruncate(inode);
-		ceph_fscache_invalidate(inode);
-	}
 }
 
 /*
@@ -3226,6 +3240,8 @@
 
 	if (target < 0) {
 		__ceph_remove_cap(cap, false);
+		if (!ci->i_auth_cap)
+			ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
 		goto out_unlock;
 	}
 
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 31f8314..39ff678 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -109,7 +109,7 @@
 				   path ? path : "");
 			spin_unlock(&req->r_old_dentry->d_lock);
 			kfree(path);
-		} else if (req->r_path2) {
+		} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
 			if (req->r_ino2.ino)
 				seq_printf(s, " #%llx/%s", req->r_ino2.ino,
 					   req->r_path2);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 3ab1192..6e0fedf 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -70,16 +70,42 @@
 }
 
 /*
- * for readdir, we encode the directory frag and offset within that
- * frag into f_pos.
+ * for f_pos for readdir:
+ * - hash order:
+ *	(0xff << 52) | ((24 bits hash) << 28) |
+ *	(the nth entry has hash collision);
+ * - frag+name order;
+ *	((frag value) << 28) | (the nth entry in frag);
  */
+#define OFFSET_BITS	28
+#define OFFSET_MASK	((1 << OFFSET_BITS) - 1)
+#define HASH_ORDER	(0xffull << (OFFSET_BITS + 24))
+loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
+{
+	loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
+	if (hash_order)
+		fpos |= HASH_ORDER;
+	return fpos;
+}
+
+static bool is_hash_order(loff_t p)
+{
+	return (p & HASH_ORDER) == HASH_ORDER;
+}
+
 static unsigned fpos_frag(loff_t p)
 {
-	return p >> 32;
+	return p >> OFFSET_BITS;
 }
+
+static unsigned fpos_hash(loff_t p)
+{
+	return ceph_frag_value(fpos_frag(p));
+}
+
 static unsigned fpos_off(loff_t p)
 {
-	return p & 0xffffffff;
+	return p & OFFSET_MASK;
 }
 
 static int fpos_cmp(loff_t l, loff_t r)
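
The new f_pos layout packs a 28-bit entry index in the low bits, the frag value or a 24-bit hash above it, and an all-ones top byte to flag hash order. A standalone sketch of the encode/decode arithmetic with arbitrary example values (same bit layout as ceph_make_fpos() above, but not the kernel helpers themselves):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Low 28 bits: entry index.  Above that: frag value or 24-bit hash.
     * Top byte all ones: hash-order flag. */
    #define OFFSET_BITS	28
    #define OFFSET_MASK	((1ULL << OFFSET_BITS) - 1)
    #define HASH_ORDER	(0xffULL << (OFFSET_BITS + 24))

    static uint64_t make_fpos(uint64_t high, uint64_t off, bool hash_order)
    {
    	uint64_t fpos = (high << OFFSET_BITS) | off;

    	if (hash_order)
    		fpos |= HASH_ORDER;
    	return fpos;
    }

    int main(void)
    {
    	uint64_t frag_pos = make_fpos(0x2a, 5, false);		/* frag 0x2a, 6th entry */
    	uint64_t hash_pos = make_fpos(0xabcdef, 3, true);	/* 24-bit hash, 4th entry */

    	printf("frag order: frag=%" PRIx64 " off=%" PRIu64 " hash_order=%d\n",
    	       frag_pos >> OFFSET_BITS, frag_pos & OFFSET_MASK,
    	       (frag_pos & HASH_ORDER) == HASH_ORDER);
    	printf("hash order: hash=%" PRIx64 " off=%" PRIu64 " hash_order=%d\n",
    	       (hash_pos >> OFFSET_BITS) & 0xffffff, hash_pos & OFFSET_MASK,
    	       (hash_pos & HASH_ORDER) == HASH_ORDER);
    	return 0;
    }
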
@@ -111,6 +137,50 @@
 	return 0;
 }
 
+
+static struct dentry *
+__dcache_find_get_entry(struct dentry *parent, u64 idx,
+			struct ceph_readdir_cache_control *cache_ctl)
+{
+	struct inode *dir = d_inode(parent);
+	struct dentry *dentry;
+	unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
+	loff_t ptr_pos = idx * sizeof(struct dentry *);
+	pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
+
+	if (ptr_pos >= i_size_read(dir))
+		return NULL;
+
+	if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
+		ceph_readdir_cache_release(cache_ctl);
+		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
+		if (!cache_ctl->page) {
+			dout(" page %lu not found\n", ptr_pgoff);
+			return ERR_PTR(-EAGAIN);
+		}
+		/* reading/filling the cache are serialized by
+		   i_mutex, no need to use page lock */
+		unlock_page(cache_ctl->page);
+		cache_ctl->dentries = kmap(cache_ctl->page);
+	}
+
+	cache_ctl->index = idx & idx_mask;
+
+	rcu_read_lock();
+	spin_lock(&parent->d_lock);
+	/* check i_size again here, because empty directory can be
+	 * marked as complete while not holding the i_mutex. */
+	if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
+		dentry = cache_ctl->dentries[cache_ctl->index];
+	else
+		dentry = NULL;
+	spin_unlock(&parent->d_lock);
+	if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
+		dentry = NULL;
+	rcu_read_unlock();
+	return dentry ? : ERR_PTR(-EAGAIN);
+}
+
 /*
  * When possible, we try to satisfy a readdir by peeking at the
  * dcache.  We make this work by carefully ordering dentries on
@@ -130,75 +200,68 @@
 	struct inode *dir = d_inode(parent);
 	struct dentry *dentry, *last = NULL;
 	struct ceph_dentry_info *di;
-	unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
-	int err = 0;
-	loff_t ptr_pos = 0;
 	struct ceph_readdir_cache_control cache_ctl = {};
+	u64 idx = 0;
+	int err = 0;
 
-	dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);
+	dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);
 
-	/* we can calculate cache index for the first dirfrag */
-	if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
-		cache_ctl.index = fpos_off(ctx->pos) - 2;
-		BUG_ON(cache_ctl.index < 0);
-		ptr_pos = cache_ctl.index * sizeof(struct dentry *);
+	/* search start position */
+	if (ctx->pos > 2) {
+		u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
+		while (count > 0) {
+			u64 step = count >> 1;
+			dentry = __dcache_find_get_entry(parent, idx + step,
+							 &cache_ctl);
+			if (!dentry) {
+				/* use linear search */
+				idx = 0;
+				break;
+			}
+			if (IS_ERR(dentry)) {
+				err = PTR_ERR(dentry);
+				goto out;
+			}
+			di = ceph_dentry(dentry);
+			spin_lock(&dentry->d_lock);
+			if (fpos_cmp(di->offset, ctx->pos) < 0) {
+				idx += step + 1;
+				count -= step + 1;
+			} else {
+				count = step;
+			}
+			spin_unlock(&dentry->d_lock);
+			dput(dentry);
+		}
+
+		dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
 	}
 
-	while (true) {
-		pgoff_t pgoff;
-		bool emit_dentry;
 
-		if (ptr_pos >= i_size_read(dir)) {
+	for (;;) {
+		bool emit_dentry = false;
+		dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
+		if (!dentry) {
 			fi->flags |= CEPH_F_ATEND;
 			err = 0;
 			break;
 		}
-
-		err = -EAGAIN;
-		pgoff = ptr_pos >> PAGE_SHIFT;
-		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
-			ceph_readdir_cache_release(&cache_ctl);
-			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
-			if (!cache_ctl.page) {
-				dout(" page %lu not found\n", pgoff);
-				break;
-			}
-			/* reading/filling the cache are serialized by
-			 * i_mutex, no need to use page lock */
-			unlock_page(cache_ctl.page);
-			cache_ctl.dentries = kmap(cache_ctl.page);
+		if (IS_ERR(dentry)) {
+			err = PTR_ERR(dentry);
+			goto out;
 		}
 
-		rcu_read_lock();
-		spin_lock(&parent->d_lock);
-		/* check i_size again here, because empty directory can be
-		 * marked as complete while not holding the i_mutex. */
-		if (ceph_dir_is_complete_ordered(dir) &&
-		    ptr_pos < i_size_read(dir))
-			dentry = cache_ctl.dentries[cache_ctl.index % nsize];
-		else
-			dentry = NULL;
-		spin_unlock(&parent->d_lock);
-		if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
-			dentry = NULL;
-		rcu_read_unlock();
-		if (!dentry)
-			break;
-
-		emit_dentry = false;
 		di = ceph_dentry(dentry);
 		spin_lock(&dentry->d_lock);
 		if (di->lease_shared_gen == shared_gen &&
 		    d_really_is_positive(dentry) &&
-		    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
-		    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
 		    fpos_cmp(ctx->pos, di->offset) <= 0) {
 			emit_dentry = true;
 		}
 		spin_unlock(&dentry->d_lock);
 
 		if (emit_dentry) {
-			dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
+			dout(" %llx dentry %p %pd %p\n", di->offset,
 			     dentry, dentry, d_inode(dentry));
 			ctx->pos = di->offset;
 			if (!dir_emit(ctx, dentry->d_name.name,
@@ -218,10 +281,8 @@
 		} else {
 			dput(dentry);
 		}
-
-		cache_ctl.index++;
-		ptr_pos += sizeof(struct dentry *);
 	}
+out:
 	ceph_readdir_cache_release(&cache_ctl);
 	if (last) {
 		int ret;
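
For positions beyond the "." and ".." entries, the rewritten __dcache_readdir() now binary-searches the cached dentry pointers for the first entry whose offset is not below the requested position, instead of walking from the start. A standalone lower-bound sketch over a plain array (the kernel additionally maps the right cache page via ptr_pos >> PAGE_SHIFT and idx & idx_mask in __dcache_find_get_entry()):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Lower-bound search in the spirit of the new start-position code:
     * find the first cached entry whose readdir offset is not below pos.
     * The offsets array stands in for the per-inode dentry pointer cache. */
    static uint64_t find_start(const uint64_t *offsets, uint64_t count, uint64_t pos)
    {
    	uint64_t idx = 0;

    	while (count > 0) {
    		uint64_t step = count >> 1;

    		if (offsets[idx + step] < pos) {
    			idx += step + 1;
    			count -= step + 1;
    		} else {
    			count = step;
    		}
    	}
    	return idx;
    }

    int main(void)
    {
    	/* Cached entries, sorted by offset as the readdir cache keeps them. */
    	uint64_t offsets[] = { 2, 3, 4, 7, 9, 12 };
    	uint64_t n = sizeof(offsets) / sizeof(offsets[0]);
    	uint64_t idx = find_start(offsets, n, 7);

    	printf("resume at cache index %" PRIu64 " (offset %" PRIu64 ")\n",
    	       idx, offsets[idx]);	/* index 3, offset 7 */
    	return 0;
    }
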
@@ -235,6 +296,16 @@
 	return err;
 }
 
+static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
+{
+	if (!fi->last_readdir)
+		return true;
+	if (is_hash_order(pos))
+		return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
+	else
+		return fi->frag != fpos_frag(pos);
+}
+
 static int ceph_readdir(struct file *file, struct dir_context *ctx)
 {
 	struct ceph_file_info *fi = file->private_data;
@@ -242,13 +313,12 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_mds_client *mdsc = fsc->mdsc;
-	unsigned frag = fpos_frag(ctx->pos);
-	int off = fpos_off(ctx->pos);
+	int i;
 	int err;
 	u32 ftype;
 	struct ceph_mds_reply_info_parsed *rinfo;
 
-	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
+	dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
 	if (fi->flags & CEPH_F_ATEND)
 		return 0;
 
@@ -260,7 +330,6 @@
 			    inode->i_mode >> 12))
 			return 0;
 		ctx->pos = 1;
-		off = 1;
 	}
 	if (ctx->pos == 1) {
 		ino_t ino = parent_ino(file->f_path.dentry);
@@ -270,7 +339,6 @@
 			    inode->i_mode >> 12))
 			return 0;
 		ctx->pos = 2;
-		off = 2;
 	}
 
 	/* can we use the dcache? */
@@ -285,8 +353,6 @@
 		err = __dcache_readdir(file, ctx, shared_gen);
 		if (err != -EAGAIN)
 			return err;
-		frag = fpos_frag(ctx->pos);
-		off = fpos_off(ctx->pos);
 	} else {
 		spin_unlock(&ci->i_ceph_lock);
 	}
@@ -294,8 +360,9 @@
 	/* proceed with a normal readdir */
 more:
 	/* do we have the correct frag content buffered? */
-	if (fi->frag != frag || fi->last_readdir == NULL) {
+	if (need_send_readdir(fi, ctx->pos)) {
 		struct ceph_mds_request *req;
+		unsigned frag;
 		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
 			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
 
@@ -305,6 +372,13 @@
 			fi->last_readdir = NULL;
 		}
 
+		if (is_hash_order(ctx->pos)) {
+			frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
+						NULL, NULL);
+		} else {
+			frag = fpos_frag(ctx->pos);
+		}
+
 		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
 		     ceph_vinop(inode), frag, fi->last_name);
 		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -331,6 +405,8 @@
 		req->r_readdir_cache_idx = fi->readdir_cache_idx;
 		req->r_readdir_offset = fi->next_offset;
 		req->r_args.readdir.frag = cpu_to_le32(frag);
+		req->r_args.readdir.flags =
+				cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
 
 		req->r_inode = inode;
 		ihold(inode);
@@ -340,22 +416,26 @@
 			ceph_mdsc_put_request(req);
 			return err;
 		}
-		dout("readdir got and parsed readdir result=%d"
-		     " on frag %x, end=%d, complete=%d\n", err, frag,
+		dout("readdir got and parsed readdir result=%d on "
+		     "frag %x, end=%d, complete=%d, hash_order=%d\n",
+		     err, frag,
 		     (int)req->r_reply_info.dir_end,
-		     (int)req->r_reply_info.dir_complete);
+		     (int)req->r_reply_info.dir_complete,
+		     (int)req->r_reply_info.hash_order);
 
-
-		/* note next offset and last dentry name */
 		rinfo = &req->r_reply_info;
 		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
 			frag = le32_to_cpu(rinfo->dir_dir->frag);
-			off = req->r_readdir_offset;
-			fi->next_offset = off;
+			if (!rinfo->hash_order) {
+				fi->next_offset = req->r_readdir_offset;
+				/* adjust ctx->pos to beginning of frag */
+				ctx->pos = ceph_make_fpos(frag,
+							  fi->next_offset,
+							  false);
+			}
 		}
 
 		fi->frag = frag;
-		fi->offset = fi->next_offset;
 		fi->last_readdir = req;
 
 		if (req->r_did_prepopulate) {
@@ -363,7 +443,8 @@
 			if (fi->readdir_cache_idx < 0) {
 				/* preclude from marking dir ordered */
 				fi->dir_ordered_count = 0;
-			} else if (ceph_frag_is_leftmost(frag) && off == 2) {
+			} else if (ceph_frag_is_leftmost(frag) &&
+				   fi->next_offset == 2) {
 				/* note dir version at start of readdir so
 				 * we can tell if any dentries get dropped */
 				fi->dir_release_count = req->r_dir_release_cnt;
@@ -377,65 +458,87 @@
 			fi->dir_release_count = 0;
 		}
 
-		if (req->r_reply_info.dir_end) {
-			kfree(fi->last_name);
-			fi->last_name = NULL;
-			if (ceph_frag_is_rightmost(frag))
-				fi->next_offset = 2;
-			else
-				fi->next_offset = 0;
-		} else {
-			err = note_last_dentry(fi,
-				       rinfo->dir_dname[rinfo->dir_nr-1],
-				       rinfo->dir_dname_len[rinfo->dir_nr-1],
-				       fi->next_offset + rinfo->dir_nr);
+		/* note next offset and last dentry name */
+		if (rinfo->dir_nr > 0) {
+			struct ceph_mds_reply_dir_entry *rde =
+					rinfo->dir_entries + (rinfo->dir_nr-1);
+			unsigned next_offset = req->r_reply_info.dir_end ?
+					2 : (fpos_off(rde->offset) + 1);
+			err = note_last_dentry(fi, rde->name, rde->name_len,
+					       next_offset);
 			if (err)
 				return err;
+		} else if (req->r_reply_info.dir_end) {
+			fi->next_offset = 2;
+			/* keep last name */
 		}
 	}
 
 	rinfo = &fi->last_readdir->r_reply_info;
-	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
-	     rinfo->dir_nr, off, fi->offset);
+	dout("readdir frag %x num %d pos %llx chunk first %llx\n",
+	     fi->frag, rinfo->dir_nr, ctx->pos,
+	     rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
 
-	ctx->pos = ceph_make_fpos(frag, off);
-	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
-		struct ceph_mds_reply_inode *in =
-			rinfo->dir_in[off - fi->offset].in;
+	i = 0;
+	/* search start position */
+	if (rinfo->dir_nr > 0) {
+		int step, nr = rinfo->dir_nr;
+		while (nr > 0) {
+			step = nr >> 1;
+			if (rinfo->dir_entries[i + step].offset < ctx->pos) {
+				i +=  step + 1;
+				nr -= step + 1;
+			} else {
+				nr = step;
+			}
+		}
+	}
+	for (; i < rinfo->dir_nr; i++) {
+		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
 		struct ceph_vino vino;
 		ino_t ino;
 
-		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
-		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
-		     rinfo->dir_dname_len[off - fi->offset],
-		     rinfo->dir_dname[off - fi->offset], in);
-		BUG_ON(!in);
-		ftype = le32_to_cpu(in->mode) >> 12;
-		vino.ino = le64_to_cpu(in->ino);
-		vino.snap = le64_to_cpu(in->snapid);
+		BUG_ON(rde->offset < ctx->pos);
+
+		ctx->pos = rde->offset;
+		dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
+		     i, rinfo->dir_nr, ctx->pos,
+		     rde->name_len, rde->name, &rde->inode.in);
+
+		BUG_ON(!rde->inode.in);
+		ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
+		vino.ino = le64_to_cpu(rde->inode.in->ino);
+		vino.snap = le64_to_cpu(rde->inode.in->snapid);
 		ino = ceph_vino_to_ino(vino);
-		if (!dir_emit(ctx,
-			    rinfo->dir_dname[off - fi->offset],
-			    rinfo->dir_dname_len[off - fi->offset],
-			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
+
+		if (!dir_emit(ctx, rde->name, rde->name_len,
+			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
 			dout("filldir stopping us...\n");
 			return 0;
 		}
-		off++;
 		ctx->pos++;
 	}
 
-	if (fi->last_name) {
+	if (fi->next_offset > 2) {
 		ceph_mdsc_put_request(fi->last_readdir);
 		fi->last_readdir = NULL;
 		goto more;
 	}
 
 	/* more frags? */
-	if (!ceph_frag_is_rightmost(frag)) {
-		frag = ceph_frag_next(frag);
-		off = 0;
-		ctx->pos = ceph_make_fpos(frag, off);
+	if (!ceph_frag_is_rightmost(fi->frag)) {
+		unsigned frag = ceph_frag_next(fi->frag);
+		if (is_hash_order(ctx->pos)) {
+			loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
+							fi->next_offset, true);
+			if (new_pos > ctx->pos)
+				ctx->pos = new_pos;
+			/* keep last_name */
+		} else {
+			ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
+			kfree(fi->last_name);
+			fi->last_name = NULL;
+		}
 		dout("readdir next frag is %x\n", frag);
 		goto more;
 	}
@@ -467,7 +570,7 @@
 	return 0;
 }
 
-static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
+static void reset_readdir(struct ceph_file_info *fi)
 {
 	if (fi->last_readdir) {
 		ceph_mdsc_put_request(fi->last_readdir);
@@ -477,18 +580,38 @@
 	fi->last_name = NULL;
 	fi->dir_release_count = 0;
 	fi->readdir_cache_idx = -1;
-	if (ceph_frag_is_leftmost(frag))
-		fi->next_offset = 2;  /* compensate for . and .. */
-	else
-		fi->next_offset = 0;
+	fi->next_offset = 2;  /* compensate for . and .. */
 	fi->flags &= ~CEPH_F_ATEND;
 }
 
+/*
+ * discard buffered readdir content on seekdir(0), or seek to new frag,
+ * or seek prior to current chunk
+ */
+static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
+{
+	struct ceph_mds_reply_info_parsed *rinfo;
+	loff_t chunk_offset;
+	if (new_pos == 0)
+		return true;
+	if (is_hash_order(new_pos)) {
+		/* no need to reset last_name for a forward seek when
+		 * dentries are sorted in hash order */
+	} else if (fi->frag != fpos_frag(new_pos)) {
+		return true;
+	}
+	rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
+	if (!rinfo || !rinfo->dir_nr)
+		return true;
+	chunk_offset = rinfo->dir_entries[0].offset;
+	return new_pos < chunk_offset ||
+	       is_hash_order(new_pos) != is_hash_order(chunk_offset);
+}
+
 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
 {
 	struct ceph_file_info *fi = file->private_data;
 	struct inode *inode = file->f_mapping->host;
-	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
 	loff_t retval;
 
 	inode_lock(inode);
@@ -505,25 +628,22 @@
 	}
 
 	if (offset >= 0) {
+		if (need_reset_readdir(fi, offset)) {
+			dout("dir_llseek dropping %p content\n", file);
+			reset_readdir(fi);
+		} else if (is_hash_order(offset) && offset > file->f_pos) {
+			/* for hash offset, we don't know if a forward seek
+			 * is within same frag */
+			fi->dir_release_count = 0;
+			fi->readdir_cache_idx = -1;
+		}
+
 		if (offset != file->f_pos) {
 			file->f_pos = offset;
 			file->f_version = 0;
 			fi->flags &= ~CEPH_F_ATEND;
 		}
 		retval = offset;
-
-		if (offset == 0 ||
-		    fpos_frag(offset) != fi->frag ||
-		    fpos_off(offset) < fi->offset) {
-			/* discard buffered readdir content on seekdir(0), or
-			 * seek to new frag, or seek prior to current chunk */
-			dout("dir_llseek dropping %p content\n", file);
-			reset_readdir(fi, fpos_frag(offset));
-		} else if (fpos_cmp(offset, old_offset) > 0) {
-			/* reset dir_release_count if we did a forward seek */
-			fi->dir_release_count = 0;
-			fi->readdir_cache_idx = -1;
-		}
 	}
 out:
 	inode_unlock(inode);
@@ -591,7 +711,7 @@
 	return dentry;
 }
 
-static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
+static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
 {
 	return ceph_ino(inode) == CEPH_INO_ROOT &&
 		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4f1dc71..ce2f579 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -137,23 +137,11 @@
 {
 	struct ceph_file_info *cf;
 	int ret = 0;
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-	struct ceph_mds_client *mdsc = fsc->mdsc;
 
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFREG:
-		/* First file open request creates the cookie, we want to keep
-		 * this cookie around for the filetime of the inode as not to
-		 * have to worry about fscache register / revoke / operation
-		 * races.
-		 *
-		 * Also, if we know the operation is going to invalidate data
-		 * (non readonly) just nuke the cache right away.
-		 */
-		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
-		if ((fmode & CEPH_FILE_MODE_WR))
-			ceph_fscache_invalidate(inode);
+		ceph_fscache_register_inode_cookie(inode);
+		ceph_fscache_file_set_cookie(inode, file);
 	case S_IFDIR:
 		dout("init_file %p %p 0%o (regular)\n", inode, file,
 		     inode->i_mode);
@@ -192,6 +180,59 @@
 }
 
 /*
+ * try to renew caps after the session gets killed.
+ */
+int ceph_renew_caps(struct inode *inode)
+{
+	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_mds_request *req;
+	int err, flags, wanted;
+
+	spin_lock(&ci->i_ceph_lock);
+	wanted = __ceph_caps_file_wanted(ci);
+	if (__ceph_is_any_real_caps(ci) &&
+	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
+		int issued = __ceph_caps_issued(ci, NULL);
+		spin_unlock(&ci->i_ceph_lock);
+		dout("renew caps %p want %s issued %s updating mds_wanted\n",
+		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
+		ceph_check_caps(ci, 0, NULL);
+		return 0;
+	}
+	spin_unlock(&ci->i_ceph_lock);
+
+	flags = 0;
+	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
+		flags = O_RDWR;
+	else if (wanted & CEPH_CAP_FILE_RD)
+		flags = O_RDONLY;
+	else if (wanted & CEPH_CAP_FILE_WR)
+		flags = O_WRONLY;
+#ifdef O_LAZY
+	if (wanted & CEPH_CAP_FILE_LAZYIO)
+		flags |= O_LAZY;
+#endif
+
+	req = prepare_open_request(inode->i_sb, flags, 0);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto out;
+	}
+
+	req->r_inode = inode;
+	ihold(inode);
+	req->r_num_caps = 1;
+	req->r_fmode = -1;
+
+	err = ceph_mdsc_do_request(mdsc, NULL, req);
+	ceph_mdsc_put_request(req);
+out:
+	dout("renew caps %p open result=%d\n", inode, err);
+	return err < 0 ? err : 0;
+}
+
+/*
  * If we already have the requisite capabilities, we can satisfy
  * the open request locally (no need to request new caps from the
  * MDS).  We do, however, need to inform the MDS (asynchronously)
@@ -616,8 +657,7 @@
 	kfree(aio_req);
 }
 
-static void ceph_aio_complete_req(struct ceph_osd_request *req,
-				  struct ceph_msg *msg)
+static void ceph_aio_complete_req(struct ceph_osd_request *req)
 {
 	int rc = req->r_result;
 	struct inode *inode = req->r_inode;
@@ -714,14 +754,21 @@
 	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
 			CEPH_OSD_FLAG_ONDISK |
 			CEPH_OSD_FLAG_WRITE;
-	req->r_base_oloc = orig_req->r_base_oloc;
-	req->r_base_oid = orig_req->r_base_oid;
+	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
+	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
+
+	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
+	if (ret) {
+		ceph_osdc_put_request(req);
+		req = orig_req;
+		goto out;
+	}
 
 	req->r_ops[0] = orig_req->r_ops[0];
 	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 
-	ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
-				snapc, CEPH_NOSNAP, &aio_req->mtime);
+	req->r_mtime = aio_req->mtime;
+	req->r_data_offset = req->r_ops[0].extent.offset;
 
 	ceph_osdc_put_request(orig_req);
 
@@ -733,7 +780,7 @@
 out:
 	if (ret < 0) {
 		req->r_result = ret;
-		ceph_aio_complete_req(req, NULL);
+		ceph_aio_complete_req(req);
 	}
 
 	ceph_put_snap_context(snapc);
@@ -764,6 +811,8 @@
 		list_add_tail(&req->r_unsafe_item,
 			      &ci->i_unsafe_writes);
 		spin_unlock(&ci->i_unsafe_lock);
+
+		complete_all(&req->r_completion);
 	} else {
 		spin_lock(&ci->i_unsafe_lock);
 		list_del_init(&req->r_unsafe_item);
@@ -875,14 +924,12 @@
 					(pos+len) | (PAGE_SIZE - 1));
 
 			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
+			req->r_mtime = mtime;
 		}
 
-
 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
 						 false, false);
 
-		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
-
 		if (aio_req) {
 			aio_req->total_len += len;
 			aio_req->num_reqs++;
@@ -956,7 +1003,7 @@
 							      req, false);
 			if (ret < 0) {
 				req->r_result = ret;
-				ceph_aio_complete_req(req, NULL);
+				ceph_aio_complete_req(req);
 			}
 		}
 		return -EIOCBQUEUED;
@@ -1067,9 +1114,7 @@
 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 						false, true);
 
-		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
-		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
-
+		req->r_mtime = mtime;
 		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 		if (!ret)
 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1292,7 +1337,7 @@
 	}
 
 retry_snap:
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
 		err = -ENOSPC;
 		goto out;
 	}
@@ -1350,7 +1395,6 @@
 			iov_iter_advance(from, written);
 		ceph_put_snap_context(snapc);
 	} else {
-		loff_t old_size = i_size_read(inode);
 		/*
 		 * No need to acquire the i_truncate_mutex. Because
 		 * the MDS revokes Fwb caps before sending truncate
@@ -1361,8 +1405,6 @@
 		written = generic_perform_write(file, from, pos);
 		if (likely(written >= 0))
 			iocb->ki_pos = pos + written;
-		if (i_size_read(inode) > old_size)
-			ceph_fscache_update_objectsize(inode);
 		inode_unlock(inode);
 	}
 
@@ -1383,7 +1425,7 @@
 	ceph_put_cap_refs(ci, got);
 
 	if (written >= 0) {
-		if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
+		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
 			iocb->ki_flags |= IOCB_DSYNC;
 
 		written = generic_write_sync(iocb, written);
@@ -1524,9 +1566,7 @@
 		goto out;
 	}
 
-	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
-				&inode->i_mtime);
-
+	req->r_mtime = inode->i_mtime;
 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!ret) {
 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1617,8 +1657,8 @@
 		goto unlock;
 	}
 
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
-		!(mode & FALLOC_FL_PUNCH_HOLE)) {
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
+	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
 		ret = -ENOSPC;
 		goto unlock;
 	}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e669cfa..f059b59 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -11,6 +11,7 @@
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/random.h>
+#include <linux/sort.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -254,6 +255,9 @@
 		diri_auth = ci->i_auth_cap->mds;
 	spin_unlock(&ci->i_ceph_lock);
 
+	if (mds == -1) /* CDIR_AUTH_PARENT */
+		mds = diri_auth;
+
 	mutex_lock(&ci->i_fragtree_mutex);
 	if (ndist == 0 && mds == diri_auth) {
 		/* no delegation info needed. */
@@ -300,20 +304,38 @@
 	return err;
 }
 
+static int frag_tree_split_cmp(const void *l, const void *r)
+{
+	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
+	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
+	return ceph_frag_compare(ls->frag, rs->frag);
+}
+
+static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
+{
+	if (!frag)
+		return f == ceph_frag_make(0, 0);
+	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
+		return false;
+	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
+}
+
 static int ceph_fill_fragtree(struct inode *inode,
 			      struct ceph_frag_tree_head *fragtree,
 			      struct ceph_mds_reply_dirfrag *dirinfo)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_inode_frag *frag;
+	struct ceph_inode_frag *frag, *prev_frag = NULL;
 	struct rb_node *rb_node;
-	int i;
-	u32 id, nsplits;
+	unsigned i, split_by, nsplits;
+	u32 id;
 	bool update = false;
 
 	mutex_lock(&ci->i_fragtree_mutex);
 	nsplits = le32_to_cpu(fragtree->nsplits);
-	if (nsplits) {
+	if (nsplits != ci->i_fragtree_nsplits) {
+		update = true;
+	} else if (nsplits) {
 		i = prandom_u32() % nsplits;
 		id = le32_to_cpu(fragtree->splits[i].frag);
 		if (!__ceph_find_frag(ci, id))
@@ -332,10 +354,22 @@
 	if (!update)
 		goto out_unlock;
 
+	if (nsplits > 1) {
+		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
+		     frag_tree_split_cmp, NULL);
+	}
+
 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
 	rb_node = rb_first(&ci->i_fragtree);
 	for (i = 0; i < nsplits; i++) {
 		id = le32_to_cpu(fragtree->splits[i].frag);
+		split_by = le32_to_cpu(fragtree->splits[i].by);
+		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
+			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
+			       "frag %x split by %d\n", ceph_vinop(inode),
+			       i, nsplits, id, split_by);
+			continue;
+		}
 		frag = NULL;
 		while (rb_node) {
 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
@@ -347,8 +381,14 @@
 				break;
 			}
 			rb_node = rb_next(rb_node);
-			rb_erase(&frag->node, &ci->i_fragtree);
-			kfree(frag);
+			/* delete stale split/leaf node */
+			if (frag->split_by > 0 ||
+			    !is_frag_child(frag->frag, prev_frag)) {
+				rb_erase(&frag->node, &ci->i_fragtree);
+				if (frag->split_by > 0)
+					ci->i_fragtree_nsplits--;
+				kfree(frag);
+			}
 			frag = NULL;
 		}
 		if (!frag) {
@@ -356,14 +396,23 @@
 			if (IS_ERR(frag))
 				continue;
 		}
-		frag->split_by = le32_to_cpu(fragtree->splits[i].by);
+		if (frag->split_by == 0)
+			ci->i_fragtree_nsplits++;
+		frag->split_by = split_by;
 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
+		prev_frag = frag;
 	}
 	while (rb_node) {
 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
 		rb_node = rb_next(rb_node);
-		rb_erase(&frag->node, &ci->i_fragtree);
-		kfree(frag);
+		/* delete stale split/leaf node */
+		if (frag->split_by > 0 ||
+		    !is_frag_child(frag->frag, prev_frag)) {
+			rb_erase(&frag->node, &ci->i_fragtree);
+			if (frag->split_by > 0)
+				ci->i_fragtree_nsplits--;
+			kfree(frag);
+		}
 	}
 out_unlock:
 	mutex_unlock(&ci->i_fragtree_mutex);
@@ -513,6 +562,7 @@
 		rb_erase(n, &ci->i_fragtree);
 		kfree(frag);
 	}
+	ci->i_fragtree_nsplits = 0;
 
 	__ceph_destroy_xattrs(ci);
 	if (ci->i_xattrs.blob)
@@ -533,6 +583,11 @@
 	return 1;
 }
 
+static inline blkcnt_t calc_inode_blocks(u64 size)
+{
+	return (size + (1<<9) - 1) >> 9;
+}
+
 /*
  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
  * careful because either the client or MDS may have more up to date
@@ -555,7 +610,7 @@
 			size = 0;
 		}
 		i_size_write(inode, size);
-		inode->i_blocks = (size + (1<<9) - 1) >> 9;
+		inode->i_blocks = calc_inode_blocks(size);
 		ci->i_reported_size = size;
 		if (truncate_seq != ci->i_truncate_seq) {
 			dout("truncate_seq %u -> %u\n",
@@ -814,9 +869,13 @@
 
 			spin_unlock(&ci->i_ceph_lock);
 
-			err = -EINVAL;
-			if (WARN_ON(symlen != i_size_read(inode)))
-				goto out;
+			if (symlen != i_size_read(inode)) {
+				pr_err("fill_inode %llx.%llx BAD symlink "
+					"size %lld\n", ceph_vinop(inode),
+					i_size_read(inode));
+				i_size_write(inode, symlen);
+				inode->i_blocks = calc_inode_blocks(symlen);
+			}
 
 			err = -ENOMEM;
 			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
@@ -1309,12 +1368,13 @@
 	int i, err = 0;
 
 	for (i = 0; i < rinfo->dir_nr; i++) {
+		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
 		struct ceph_vino vino;
 		struct inode *in;
 		int rc;
 
-		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
-		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+		vino.ino = le64_to_cpu(rde->inode.in->ino);
+		vino.snap = le64_to_cpu(rde->inode.in->snapid);
 
 		in = ceph_get_inode(req->r_dentry->d_sb, vino);
 		if (IS_ERR(in)) {
@@ -1322,14 +1382,14 @@
 			dout("new_inode badness got %d\n", err);
 			continue;
 		}
-		rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
+		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
 				req->r_request_started, -1,
 				&req->r_caps_reservation);
 		if (rc < 0) {
 			pr_err("fill_inode badness on %p got %d\n", in, rc);
 			err = rc;
-			continue;
 		}
+		iput(in);
 	}
 
 	return err;
@@ -1387,6 +1447,7 @@
 			     struct ceph_mds_session *session)
 {
 	struct dentry *parent = req->r_dentry;
+	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
 	struct qstr dname;
 	struct dentry *dn;
@@ -1394,22 +1455,27 @@
 	int err = 0, skipped = 0, ret, i;
 	struct inode *snapdir = NULL;
 	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
-	struct ceph_dentry_info *di;
 	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+	u32 last_hash = 0;
+	u32 fpos_offset;
 	struct ceph_readdir_cache_control cache_ctl = {};
 
 	if (req->r_aborted)
 		return readdir_prepopulate_inodes_only(req, session);
 
+	if (rinfo->hash_order && req->r_path2) {
+		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+					  req->r_path2, strlen(req->r_path2));
+		last_hash = ceph_frag_value(last_hash);
+	}
+
 	if (rinfo->dir_dir &&
 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
 		dout("readdir_prepopulate got new frag %x -> %x\n",
 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
 		frag = le32_to_cpu(rinfo->dir_dir->frag);
-		if (ceph_frag_is_leftmost(frag))
+		if (!rinfo->hash_order)
 			req->r_readdir_offset = 2;
-		else
-			req->r_readdir_offset = 0;
 	}
 
 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
@@ -1427,24 +1493,37 @@
 	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
 		/* note dir version at start of readdir so we can tell
 		 * if any dentries get dropped */
-		struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
 		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
 		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
 		req->r_readdir_cache_idx = 0;
 	}
 
 	cache_ctl.index = req->r_readdir_cache_idx;
+	fpos_offset = req->r_readdir_offset;
 
 	/* FIXME: release caps/leases if error occurs */
 	for (i = 0; i < rinfo->dir_nr; i++) {
+		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
 		struct ceph_vino vino;
 
-		dname.name = rinfo->dir_dname[i];
-		dname.len = rinfo->dir_dname_len[i];
+		dname.name = rde->name;
+		dname.len = rde->name_len;
 		dname.hash = full_name_hash(dname.name, dname.len);
 
-		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
-		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+		vino.ino = le64_to_cpu(rde->inode.in->ino);
+		vino.snap = le64_to_cpu(rde->inode.in->snapid);
+
+		if (rinfo->hash_order) {
+			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+						 rde->name, rde->name_len);
+			hash = ceph_frag_value(hash);
+			if (hash != last_hash)
+				fpos_offset = 2;
+			last_hash = hash;
+			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
+		} else {
+			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
+		}
 
 retry_lookup:
 		dn = d_lookup(parent, &dname);
@@ -1490,7 +1569,7 @@
 			}
 		}
 
-		ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
+		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
 				 req->r_request_started, -1,
 				 &req->r_caps_reservation);
 		if (ret < 0) {
@@ -1523,11 +1602,9 @@
 			dn = realdn;
 		}
 
-		di = dn->d_fsdata;
-		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
+		ceph_dentry(dn)->offset = rde->offset;
 
-		update_dentry_lease(dn, rinfo->dir_dlease[i],
-				    req->r_session,
+		update_dentry_lease(dn, rde->lease, req->r_session,
 				    req->r_request_started);
 
 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
@@ -1562,7 +1639,7 @@
 	spin_lock(&ci->i_ceph_lock);
 	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
 	i_size_write(inode, size);
-	inode->i_blocks = (size + (1 << 9) - 1) >> 9;
+	inode->i_blocks = calc_inode_blocks(size);
 
 	/* tell the MDS if we are approaching max_size */
 	if ((size << 1) >= ci->i_max_size &&
@@ -1624,10 +1701,21 @@
 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
 						  i_pg_inv_work);
 	struct inode *inode = &ci->vfs_inode;
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	u32 orig_gen;
 	int check = 0;
 
 	mutex_lock(&ci->i_truncate_mutex);
+
+	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
+				    inode, ceph_ino(inode));
+		mapping_set_error(inode->i_mapping, -EIO);
+		truncate_pagecache(inode, 0);
+		mutex_unlock(&ci->i_truncate_mutex);
+		goto out;
+	}
+
 	spin_lock(&ci->i_ceph_lock);
 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
@@ -1641,7 +1729,9 @@
 	orig_gen = ci->i_rdcache_gen;
 	spin_unlock(&ci->i_ceph_lock);
 
-	truncate_pagecache(inode, 0);
+	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
+		pr_err("invalidate_pages %p fails\n", inode);
+	}
 
 	spin_lock(&ci->i_ceph_lock);
 	if (orig_gen == ci->i_rdcache_gen &&
@@ -1920,8 +2010,7 @@
 		if ((issued & CEPH_CAP_FILE_EXCL) &&
 		    attr->ia_size > inode->i_size) {
 			i_size_write(inode, attr->ia_size);
-			inode->i_blocks =
-				(attr->ia_size + (1 << 9) - 1) >> 9;
+			inode->i_blocks = calc_inode_blocks(attr->ia_size);
 			inode->i_ctime = attr->ia_ctime;
 			ci->i_reported_size = attr->ia_size;
 			dirtied |= CEPH_CAP_FILE_EXCL;
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index f851d8d..be6b165 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -193,12 +193,12 @@
 	if (copy_from_user(&dl, arg, sizeof(dl)))
 		return -EFAULT;
 
-	down_read(&osdc->map_sem);
+	down_read(&osdc->lock);
 	r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
 					  &dl.object_no, &dl.object_offset,
 					  &olen);
 	if (r < 0) {
-		up_read(&osdc->map_sem);
+		up_read(&osdc->lock);
 		return -EIO;
 	}
 	dl.file_offset -= dl.object_offset;
@@ -213,15 +213,15 @@
 		 ceph_ino(inode), dl.object_no);
 
 	oloc.pool = ceph_file_layout_pg_pool(ci->i_layout);
-	ceph_oid_set_name(&oid, dl.object_name);
+	ceph_oid_printf(&oid, "%s", dl.object_name);
 
-	r = ceph_oloc_oid_to_pg(osdc->osdmap, &oloc, &oid, &pgid);
+	r = ceph_object_locator_to_pg(osdc->osdmap, &oid, &oloc, &pgid);
 	if (r < 0) {
-		up_read(&osdc->map_sem);
+		up_read(&osdc->lock);
 		return r;
 	}
 
-	dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
+	dl.osd = ceph_pg_to_acting_primary(osdc->osdmap, &pgid);
 	if (dl.osd >= 0) {
 		struct ceph_entity_addr *a =
 			ceph_osd_addr(osdc->osdmap, dl.osd);
@@ -230,7 +230,7 @@
 	} else {
 		memset(&dl.osd_addr, 0, sizeof(dl.osd_addr));
 	}
-	up_read(&osdc->map_sem);
+	up_read(&osdc->lock);
 
 	/* send result back to user */
 	if (copy_to_user(arg, &dl, sizeof(dl)))
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 85b8517..2103b82 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -181,17 +181,18 @@
 
 	ceph_decode_need(p, end, sizeof(num) + 2, bad);
 	num = ceph_decode_32(p);
-	info->dir_end = ceph_decode_8(p);
-	info->dir_complete = ceph_decode_8(p);
+	{
+		u16 flags = ceph_decode_16(p);
+		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
+		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
+		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
+	}
 	if (num == 0)
 		goto done;
 
-	BUG_ON(!info->dir_in);
-	info->dir_dname = (void *)(info->dir_in + num);
-	info->dir_dname_len = (void *)(info->dir_dname + num);
-	info->dir_dlease = (void *)(info->dir_dname_len + num);
-	if ((unsigned long)(info->dir_dlease + num) >
-	    (unsigned long)info->dir_in + info->dir_buf_size) {
+	BUG_ON(!info->dir_entries);
+	if ((unsigned long)(info->dir_entries + num) >
+	    (unsigned long)info->dir_entries + info->dir_buf_size) {
 		pr_err("dir contents are larger than expected\n");
 		WARN_ON(1);
 		goto bad;
@@ -199,21 +200,23 @@
 
 	info->dir_nr = num;
 	while (num) {
+		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
 		/* dentry */
 		ceph_decode_need(p, end, sizeof(u32)*2, bad);
-		info->dir_dname_len[i] = ceph_decode_32(p);
-		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
-		info->dir_dname[i] = *p;
-		*p += info->dir_dname_len[i];
-		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
-		     info->dir_dname[i]);
-		info->dir_dlease[i] = *p;
+		rde->name_len = ceph_decode_32(p);
+		ceph_decode_need(p, end, rde->name_len, bad);
+		rde->name = *p;
+		*p += rde->name_len;
+		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
+		rde->lease = *p;
 		*p += sizeof(struct ceph_mds_reply_lease);
 
 		/* inode */
-		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
+		err = parse_reply_info_in(p, end, &rde->inode, features);
 		if (err < 0)
 			goto out_bad;
+		/* ceph_readdir_prepopulate() will update it */
+		rde->offset = 0;
 		i++;
 		num--;
 	}
@@ -345,9 +348,9 @@
 
 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
 {
-	if (!info->dir_in)
+	if (!info->dir_entries)
 		return;
-	free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
+	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
 }
 
 
@@ -567,51 +570,23 @@
 	kfree(req);
 }
 
+DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
+
 /*
  * lookup request, bump ref if found.
  *
  * called under mdsc->mutex.
  */
-static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
-					     u64 tid)
+static struct ceph_mds_request *
+lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
 {
 	struct ceph_mds_request *req;
-	struct rb_node *n = mdsc->request_tree.rb_node;
 
-	while (n) {
-		req = rb_entry(n, struct ceph_mds_request, r_node);
-		if (tid < req->r_tid)
-			n = n->rb_left;
-		else if (tid > req->r_tid)
-			n = n->rb_right;
-		else {
-			ceph_mdsc_get_request(req);
-			return req;
-		}
-	}
-	return NULL;
-}
+	req = lookup_request(&mdsc->request_tree, tid);
+	if (req)
+		ceph_mdsc_get_request(req);
 
-static void __insert_request(struct ceph_mds_client *mdsc,
-			     struct ceph_mds_request *new)
-{
-	struct rb_node **p = &mdsc->request_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct ceph_mds_request *req = NULL;
-
-	while (*p) {
-		parent = *p;
-		req = rb_entry(parent, struct ceph_mds_request, r_node);
-		if (new->r_tid < req->r_tid)
-			p = &(*p)->rb_left;
-		else if (new->r_tid > req->r_tid)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&new->r_node, parent, p);
-	rb_insert_color(&new->r_node, &mdsc->request_tree);
+	return req;
 }
 
 /*
@@ -630,7 +605,7 @@
 				  req->r_num_caps);
 	dout("__register_request %p tid %lld\n", req, req->r_tid);
 	ceph_mdsc_get_request(req);
-	__insert_request(mdsc, req);
+	insert_request(&mdsc->request_tree, req);
 
 	req->r_uid = current_fsuid();
 	req->r_gid = current_fsgid();
@@ -663,8 +638,7 @@
 		}
 	}
 
-	rb_erase(&req->r_node, &mdsc->request_tree);
-	RB_CLEAR_NODE(&req->r_node);
+	erase_request(&mdsc->request_tree, req);
 
 	if (req->r_unsafe_dir && req->r_got_unsafe) {
 		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
@@ -868,12 +842,14 @@
 	int metadata_bytes = 0;
 	int metadata_key_count = 0;
 	struct ceph_options *opt = mdsc->fsc->client->options;
+	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
 	void *p;
 
 	const char* metadata[][2] = {
 		{"hostname", utsname()->nodename},
 		{"kernel_version", utsname()->release},
-		{"entity_id", opt->name ? opt->name : ""},
+		{"entity_id", opt->name ? : ""},
+		{"root", fsopt->server_path ? : "/"},
 		{NULL, NULL}
 	};
 
@@ -1149,9 +1125,11 @@
 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 				  void *arg)
 {
+	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	LIST_HEAD(to_remove);
-	int drop = 0;
+	bool drop = false;
+	bool invalidate = false;
 
 	dout("removing cap %p, ci is %p, inode is %p\n",
 	     cap, ci, &ci->vfs_inode);
@@ -1159,8 +1137,13 @@
 	__ceph_remove_cap(cap, false);
 	if (!ci->i_auth_cap) {
 		struct ceph_cap_flush *cf;
-		struct ceph_mds_client *mdsc =
-			ceph_sb_to_client(inode->i_sb)->mdsc;
+		struct ceph_mds_client *mdsc = fsc->mdsc;
+
+		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
+
+		if (ci->i_wrbuffer_ref > 0 &&
+		    ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
+			invalidate = true;
 
 		while (true) {
 			struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
@@ -1183,7 +1166,7 @@
 				inode, ceph_ino(inode));
 			ci->i_dirty_caps = 0;
 			list_del_init(&ci->i_dirty_item);
-			drop = 1;
+			drop = true;
 		}
 		if (!list_empty(&ci->i_flushing_item)) {
 			pr_warn_ratelimited(
@@ -1193,7 +1176,7 @@
 			ci->i_flushing_caps = 0;
 			list_del_init(&ci->i_flushing_item);
 			mdsc->num_cap_flushing--;
-			drop = 1;
+			drop = true;
 		}
 		spin_unlock(&mdsc->cap_dirty_lock);
 
@@ -1210,7 +1193,11 @@
 		list_del(&cf->list);
 		ceph_free_cap_flush(cf);
 	}
-	while (drop--)
+
+	wake_up_all(&ci->i_cap_wq);
+	if (invalidate)
+		ceph_queue_invalidate(inode);
+	if (drop)
 		iput(inode);
 	return 0;
 }
@@ -1220,12 +1207,13 @@
  */
 static void remove_session_caps(struct ceph_mds_session *session)
 {
+	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
+	struct super_block *sb = fsc->sb;
 	dout("remove_session_caps on %p\n", session);
-	iterate_session_caps(session, remove_session_caps_cb, NULL);
+	iterate_session_caps(session, remove_session_caps_cb, fsc);
 
 	spin_lock(&session->s_cap_lock);
 	if (session->s_nr_caps > 0) {
-		struct super_block *sb = session->s_mdsc->fsc->sb;
 		struct inode *inode;
 		struct ceph_cap *cap, *prev = NULL;
 		struct ceph_vino vino;
@@ -1270,13 +1258,13 @@
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	wake_up_all(&ci->i_cap_wq);
 	if (arg) {
 		spin_lock(&ci->i_ceph_lock);
 		ci->i_wanted_max_size = 0;
 		ci->i_requested_max_size = 0;
 		spin_unlock(&ci->i_ceph_lock);
 	}
+	wake_up_all(&ci->i_cap_wq);
 	return 0;
 }
 
@@ -1671,8 +1659,7 @@
 	struct ceph_inode_info *ci = ceph_inode(dir);
 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
 	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
-	size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
-		      sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
+	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
 	int order, num_entries;
 
 	spin_lock(&ci->i_ceph_lock);
@@ -1683,14 +1670,14 @@
 
 	order = get_order(size * num_entries);
 	while (order >= 0) {
-		rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL |
-							__GFP_NOWARN,
-							order);
-		if (rinfo->dir_in)
+		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
+							     __GFP_NOWARN,
+							     order);
+		if (rinfo->dir_entries)
 			break;
 		order--;
 	}
-	if (!rinfo->dir_in)
+	if (!rinfo->dir_entries)
 		return -ENOMEM;
 
 	num_entries = (PAGE_SIZE << order) / size;
@@ -1722,6 +1709,7 @@
 	INIT_LIST_HEAD(&req->r_unsafe_target_item);
 	req->r_fmode = -1;
 	kref_init(&req->r_kref);
+	RB_CLEAR_NODE(&req->r_node);
 	INIT_LIST_HEAD(&req->r_wait);
 	init_completion(&req->r_completion);
 	init_completion(&req->r_safe_completion);
@@ -2414,7 +2402,7 @@
 	/* get request, session */
 	tid = le64_to_cpu(msg->hdr.tid);
 	mutex_lock(&mdsc->mutex);
-	req = __lookup_request(mdsc, tid);
+	req = lookup_get_request(mdsc, tid);
 	if (!req) {
 		dout("handle_reply on unknown tid %llu\n", tid);
 		mutex_unlock(&mdsc->mutex);
@@ -2604,7 +2592,7 @@
 	fwd_seq = ceph_decode_32(&p);
 
 	mutex_lock(&mdsc->mutex);
-	req = __lookup_request(mdsc, tid);
+	req = lookup_get_request(mdsc, tid);
 	if (!req) {
 		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
 		goto out;  /* dup reply? */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ee69a53..e7d38aa 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -47,6 +47,14 @@
 	u32 pool_ns_len;
 };
 
+struct ceph_mds_reply_dir_entry {
+	char                          *name;
+	u32                           name_len;
+	struct ceph_mds_reply_lease   *lease;
+	struct ceph_mds_reply_info_in inode;
+	loff_t			      offset;
+};
+
 /*
  * parsed info about an mds reply, including information about
  * either: 1) the target inode and/or its parent directory and dentry,
@@ -73,11 +81,10 @@
 			struct ceph_mds_reply_dirfrag *dir_dir;
 			size_t			      dir_buf_size;
 			int                           dir_nr;
-			char                          **dir_dname;
-			u32                           *dir_dname_len;
-			struct ceph_mds_reply_lease   **dir_dlease;
-			struct ceph_mds_reply_info_in *dir_in;
-			u8                            dir_complete, dir_end;
+			bool			      dir_complete;
+			bool			      dir_end;
+			bool			      hash_order;
+			struct ceph_mds_reply_dir_entry  *dir_entries;
 		};
 
 		/* for create results */
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 261531e..8c3591a 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -54,16 +54,21 @@
 	const void *start = *p;
 	int i, j, n;
 	int err = -EINVAL;
-	u16 version;
+	u8 mdsmap_v, mdsmap_cv;
 
 	m = kzalloc(sizeof(*m), GFP_NOFS);
 	if (m == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	ceph_decode_16_safe(p, end, version, bad);
-	if (version > 3) {
-		pr_warn("got mdsmap version %d > 3, failing", version);
-		goto bad;
+	ceph_decode_need(p, end, 1 + 1, bad);
+	mdsmap_v = ceph_decode_8(p);
+	mdsmap_cv = ceph_decode_8(p);
+	if (mdsmap_v >= 4) {
+		u32 mdsmap_len;
+		ceph_decode_32_safe(p, end, mdsmap_len, bad);
+		if (end < *p + mdsmap_len)
+			goto bad;
+		end = *p + mdsmap_len;
 	}
 
 	ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
@@ -87,16 +92,29 @@
 		u32 namelen;
 		s32 mds, inc, state;
 		u64 state_seq;
-		u8 infoversion;
+		u8 info_v;
+		void *info_end = NULL;
 		struct ceph_entity_addr addr;
 		u32 num_export_targets;
 		void *pexport_targets = NULL;
 		struct ceph_timespec laggy_since;
 		struct ceph_mds_info *info;
 
-		ceph_decode_need(p, end, sizeof(u64)*2 + 1 + sizeof(u32), bad);
+		ceph_decode_need(p, end, sizeof(u64) + 1, bad);
 		global_id = ceph_decode_64(p);
-		infoversion = ceph_decode_8(p);
+		info_v = ceph_decode_8(p);
+		if (info_v >= 4) {
+			u32 info_len;
+			u8 info_cv;
+			ceph_decode_need(p, end, 1 + sizeof(u32), bad);
+			info_cv = ceph_decode_8(p);
+			info_len = ceph_decode_32(p);
+			info_end = *p + info_len;
+			if (info_end > end)
+				goto bad;
+		}
+
+		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
 		*p += sizeof(u64);
 		namelen = ceph_decode_32(p);  /* skip mds name */
 		*p += namelen;
@@ -115,7 +133,7 @@
 		*p += sizeof(u32);
 		ceph_decode_32_safe(p, end, namelen, bad);
 		*p += namelen;
-		if (infoversion >= 2) {
+		if (info_v >= 2) {
 			ceph_decode_32_safe(p, end, num_export_targets, bad);
 			pexport_targets = *p;
 			*p += num_export_targets * sizeof(u32);
@@ -123,6 +141,12 @@
 			num_export_targets = 0;
 		}
 
+		if (info_end && *p != info_end) {
+			if (*p > info_end)
+				goto bad;
+			*p = info_end;
+		}
+
 		dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
 		     i+1, n, global_id, mds, inc,
 		     ceph_pr_addr(&addr.in_addr),
@@ -163,6 +187,7 @@
 	m->m_cas_pg_pool = ceph_decode_64(p);
 
 	/* ok, we don't care about the rest. */
+	*p = end;
 	dout("mdsmap_decode success epoch %u\n", m->m_epoch);
 	return m;
 
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f12d5e2..91e0248 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -108,6 +108,7 @@
  * mount options
  */
 enum {
+	Opt_mds_namespace,
 	Opt_wsize,
 	Opt_rsize,
 	Opt_rasize,
@@ -143,6 +144,7 @@
 };
 
 static match_table_t fsopt_tokens = {
+	{Opt_mds_namespace, "mds_namespace=%d"},
 	{Opt_wsize, "wsize=%d"},
 	{Opt_rsize, "rsize=%d"},
 	{Opt_rasize, "rasize=%d"},
@@ -212,6 +214,9 @@
 		break;
 
 		/* misc */
+	case Opt_mds_namespace:
+		fsopt->mds_namespace = intval;
+		break;
 	case Opt_wsize:
 		fsopt->wsize = intval;
 		break;
@@ -297,6 +302,7 @@
 {
 	dout("destroy_mount_options %p\n", args);
 	kfree(args->snapdir_name);
+	kfree(args->server_path);
 	kfree(args);
 }
 
@@ -328,14 +334,17 @@
 	if (ret)
 		return ret;
 
+	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
+	if (ret)
+		return ret;
+
 	return ceph_compare_options(new_opt, fsc->client);
 }
 
 static int parse_mount_options(struct ceph_mount_options **pfsopt,
 			       struct ceph_options **popt,
 			       int flags, char *options,
-			       const char *dev_name,
-			       const char **path)
+			       const char *dev_name)
 {
 	struct ceph_mount_options *fsopt;
 	const char *dev_name_end;
@@ -367,6 +376,7 @@
 	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
 	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
 	fsopt->congestion_kb = default_congestion_kb();
+	fsopt->mds_namespace = CEPH_FS_CLUSTER_ID_NONE;
 
 	/*
 	 * Distinguish the server list from the path in "dev_name".
@@ -380,12 +390,13 @@
 	 */
 	dev_name_end = strchr(dev_name, '/');
 	if (dev_name_end) {
-		/* skip over leading '/' for path */
-		*path = dev_name_end + 1;
+		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+		if (!fsopt->server_path) {
+			err = -ENOMEM;
+			goto out;
+		}
 	} else {
-		/* path is empty */
 		dev_name_end = dev_name + strlen(dev_name);
-		*path = dev_name_end;
 	}
 	err = -EINVAL;
 	dev_name_end--;		/* back up to ':' separator */
@@ -395,7 +406,8 @@
 		goto out;
 	}
 	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
-	dout("server path '%s'\n", *path);
+	if (fsopt->server_path)
+		dout("server path '%s'\n", fsopt->server_path);
 
 	*popt = ceph_parse_options(options, dev_name, dev_name_end,
 				 parse_fsopt_token, (void *)fsopt);
@@ -457,6 +469,8 @@
 		seq_puts(m, ",noacl");
 #endif
 
+	if (fsopt->mds_namespace != CEPH_FS_CLUSTER_ID_NONE)
+		seq_printf(m, ",mds_namespace=%d", fsopt->mds_namespace);
 	if (fsopt->wsize)
 		seq_printf(m, ",wsize=%d", fsopt->wsize);
 	if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -511,9 +525,8 @@
 {
 	struct ceph_fs_client *fsc;
 	const u64 supported_features =
-		CEPH_FEATURE_FLOCK |
-		CEPH_FEATURE_DIRLAYOUTHASH |
-		CEPH_FEATURE_MDS_INLINE_DATA;
+		CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH |
+		CEPH_FEATURE_MDSENC | CEPH_FEATURE_MDS_INLINE_DATA;
 	const u64 required_features = 0;
 	int page_count;
 	size_t size;
@@ -530,6 +543,7 @@
 		goto fail;
 	}
 	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
+	fsc->client->monc.fs_cluster_id = fsopt->mds_namespace;
 	ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true);
 
 	fsc->mount_options = fsopt;
@@ -785,8 +799,7 @@
 /*
  * mount: join the ceph cluster, and open root directory.
  */
-static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
-		      const char *path)
+static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
 {
 	int err;
 	unsigned long started = jiffies;  /* note the start time */
@@ -815,11 +828,12 @@
 			goto fail;
 	}
 
-	if (path[0] == 0) {
+	if (!fsc->mount_options->server_path) {
 		root = fsc->sb->s_root;
 		dget(root);
 	} else {
-		dout("mount opening base mountpoint\n");
+		const char *path = fsc->mount_options->server_path + 1;
+		dout("mount opening path %s\n", path);
 		root = open_root_dentry(fsc, path, started);
 		if (IS_ERR(root)) {
 			err = PTR_ERR(root);
@@ -935,7 +949,6 @@
 	struct dentry *res;
 	int err;
 	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
-	const char *path = NULL;
 	struct ceph_mount_options *fsopt = NULL;
 	struct ceph_options *opt = NULL;
 
@@ -944,7 +957,7 @@
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	flags |= MS_POSIXACL;
 #endif
-	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
+	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
 	if (err < 0) {
 		res = ERR_PTR(err);
 		goto out_final;
@@ -987,7 +1000,7 @@
 		}
 	}
 
-	res = ceph_real_mount(fsc, path);
+	res = ceph_real_mount(fsc);
 	if (IS_ERR(res))
 		goto out_splat;
 	dout("root %p inode %p ino %llx.%llx\n", res,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 7b99eb7..0168b49 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -62,6 +62,7 @@
 	int cap_release_safety;
 	int max_readdir;       /* max readdir result (entries) */
 	int max_readdir_bytes; /* max readdir result (bytes) */
+	int mds_namespace;
 
 	/*
 	 * everything above this point can be memcmp'd; everything below
@@ -69,6 +70,7 @@
 	 */
 
 	char *snapdir_name;   /* default ".snap" */
+	char *server_path;    /* default  "/" */
 };
 
 struct ceph_fs_client {
@@ -101,7 +103,6 @@
 
 #ifdef CONFIG_CEPH_FSCACHE
 	struct fscache_cookie *fscache;
-	struct workqueue_struct *revalidate_wq;
 #endif
 };
 
@@ -295,6 +296,7 @@
 	u64 i_files, i_subdirs;
 
 	struct rb_root i_fragtree;
+	int i_fragtree_nsplits;
 	struct mutex i_fragtree_mutex;
 
 	struct ceph_inode_xattrs_info i_xattrs;
@@ -357,8 +359,7 @@
 
 #ifdef CONFIG_CEPH_FSCACHE
 	struct fscache_cookie *fscache;
-	u32 i_fscache_gen; /* sequence, for delayed fscache validate */
-	struct work_struct i_revalidate_work;
+	u32 i_fscache_gen;
 #endif
 	struct inode vfs_inode; /* at end */
 };
@@ -469,6 +470,7 @@
 #define CEPH_I_POOL_RD		(1 << 5)  /* can read from pool */
 #define CEPH_I_POOL_WR		(1 << 6)  /* can write to pool */
 #define CEPH_I_SEC_INITED	(1 << 7)  /* security initialized */
+#define CEPH_I_CAP_DROPPED	(1 << 8)  /* caps were forcibly dropped */
 
 static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
 					   long long release_count,
@@ -537,11 +539,6 @@
 	return (struct ceph_dentry_info *)dentry->d_fsdata;
 }
 
-static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
-{
-	return ((loff_t)frag << 32) | (loff_t)off;
-}
-
 /*
  * caps helpers
  */
@@ -632,7 +629,6 @@
 	struct ceph_mds_request *last_readdir;
 
 	/* readdir: position within a frag */
-	unsigned offset;       /* offset of last chunk, adjusted for . and .. */
 	unsigned next_offset;  /* offset of next chunk (last_name's + 1) */
 	char *last_name;       /* last entry in previous chunk */
 	long long dir_release_count;
@@ -927,6 +923,7 @@
 /* file.c */
 extern const struct file_operations ceph_file_fops;
 
+extern int ceph_renew_caps(struct inode *inode);
 extern int ceph_open(struct inode *inode, struct file *file);
 extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 			    struct file *file, unsigned flags, umode_t mode,
@@ -942,6 +939,7 @@
 extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
 	ceph_snapdir_dentry_ops;
 
+extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
 extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
 extern int ceph_handle_snapdir(struct ceph_mds_request *req,
 			       struct dentry *dentry, int err);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 0d66722..4870b29 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -77,7 +77,7 @@
 	char buf[128];
 
 	dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
-	down_read(&osdc->map_sem);
+	down_read(&osdc->lock);
 	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
 	if (pool_name) {
 		size_t len = strlen(pool_name);
@@ -109,7 +109,7 @@
 				ret = -ERANGE;
 		}
 	}
-	up_read(&osdc->map_sem);
+	up_read(&osdc->lock);
 	return ret;
 }
 
@@ -143,13 +143,13 @@
 	s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
 	const char *pool_name;
 
-	down_read(&osdc->map_sem);
+	down_read(&osdc->lock);
 	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
 	if (pool_name)
 		ret = snprintf(val, size, "%s", pool_name);
 	else
 		ret = snprintf(val, size, "%lld", (unsigned long long)pool);
-	up_read(&osdc->map_sem);
+	up_read(&osdc->lock);
 	return ret;
 }
 
@@ -862,6 +862,7 @@
 	struct ceph_mds_request *req;
 	struct ceph_mds_client *mdsc = fsc->mdsc;
 	struct ceph_pagelist *pagelist = NULL;
+	int op = CEPH_MDS_OP_SETXATTR;
 	int err;
 
 	if (size > 0) {
@@ -875,20 +876,21 @@
 		if (err)
 			goto out;
 	} else if (!value) {
-		flags |= CEPH_XATTR_REMOVE;
+		if (flags & CEPH_XATTR_REPLACE)
+			op = CEPH_MDS_OP_RMXATTR;
+		else
+			flags |= CEPH_XATTR_REMOVE;
 	}
 
 	dout("setxattr value=%.*s\n", (int)size, value);
 
 	/* do request */
-	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
-				       USE_AUTH_MDS);
+	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto out;
 	}
 
-	req->r_args.setxattr.flags = cpu_to_le32(flags);
 	req->r_path2 = kstrdup(name, GFP_NOFS);
 	if (!req->r_path2) {
 		ceph_mdsc_put_request(req);
@@ -896,8 +898,11 @@
 		goto out;
 	}
 
-	req->r_pagelist = pagelist;
-	pagelist = NULL;
+	if (op == CEPH_MDS_OP_SETXATTR) {
+		req->r_args.setxattr.flags = cpu_to_le32(flags);
+		req->r_pagelist = pagelist;
+		pagelist = NULL;
+	}
 
 	req->r_inode = inode;
 	ihold(inode);
@@ -1051,12 +1056,13 @@
 }
 
 static int ceph_set_xattr_handler(const struct xattr_handler *handler,
-				  struct dentry *dentry, const char *name,
-				  const void *value, size_t size, int flags)
+				  struct dentry *unused, struct inode *inode,
+				  const char *name, const void *value,
+				  size_t size, int flags)
 {
 	if (!ceph_is_valid_xattr(name))
 		return -EOPNOTSUPP;
-	return __ceph_setxattr(d_inode(dentry), name, value, size, flags);
+	return __ceph_setxattr(inode, name, value, size, flags);
 }
 
 const struct xattr_handler ceph_other_xattr_handler = {
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index c8b77aa..5e23f64 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -39,8 +39,9 @@
 enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
 
 static int cifs_xattr_set(const struct xattr_handler *handler,
-			  struct dentry *dentry, const char *name,
-			  const void *value, size_t size, int flags)
+			  struct dentry *dentry, struct inode *inode,
+			  const char *name, const void *value,
+			  size_t size, int flags)
 {
 	int rc = -EOPNOTSUPP;
 	unsigned int xid;
@@ -99,12 +100,12 @@
 			if (value &&
 			    pTcon->ses->server->ops->set_acl)
 				rc = pTcon->ses->server->ops->set_acl(pacl,
-						size, d_inode(dentry),
+						size, inode,
 						full_path, CIFS_ACL_DACL);
 			else
 				rc = -EOPNOTSUPP;
 			if (rc == 0) /* force revalidate of the inode */
-				CIFS_I(d_inode(dentry))->time = 0;
+				CIFS_I(inode)->time = 0;
 			kfree(pacl);
 		}
 #endif /* CONFIG_CIFS_ACL */
diff --git a/fs/compat.c b/fs/compat.c
index 8754e9a..be6e48b 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -936,6 +936,8 @@
 	}
 	dirent = buf->previous;
 	if (dirent) {
+		if (signal_pending(current))
+			return -EINTR;
 		if (__put_user(offset, &dirent->d_off))
 			goto efault;
 	}
@@ -1020,6 +1022,8 @@
 	dirent = buf->previous;
 
 	if (dirent) {
+		if (signal_pending(current))
+			return -EINTR;
 		if (__put_user_unaligned(offset, &dirent->d_off))
 			goto efault;
 	}
diff --git a/fs/coredump.c b/fs/coredump.c
index 492c2db..281b768 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -413,7 +413,9 @@
 	core_state->dumper.task = tsk;
 	core_state->dumper.next = NULL;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	if (!mm->core_state)
 		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
 	up_write(&mm->mmap_sem);
@@ -792,6 +794,7 @@
 			return 0;
 		file->f_pos = pos;
 		cprm->written += n;
+		cprm->pos += n;
 		nr -= n;
 	}
 	return 1;
@@ -806,6 +809,7 @@
 		if (dump_interrupted() ||
 		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
 			return 0;
+		cprm->pos += nr;
 		return 1;
 	} else {
 		while (nr > PAGE_SIZE) {
@@ -820,7 +824,7 @@
 
 int dump_align(struct coredump_params *cprm, int align)
 {
-	unsigned mod = cprm->file->f_pos & (align - 1);
+	unsigned mod = cprm->pos & (align - 1);
 	if (align & (align - 1))
 		return 0;
 	return mod ? dump_skip(cprm, align - mod) : 1;
diff --git a/fs/dax.c b/fs/dax.c
index a345c16..761495b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -32,14 +32,43 @@
 #include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
-#define RADIX_DAX_MASK	0xf
-#define RADIX_DAX_SHIFT	4
-#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
-#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
-#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
+/*
+ * We use lowest available bit in exceptional entry for locking, other two
+ * bits to determine entry type. In total 3 special bits.
+ */
+#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
+#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
+#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
 #define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
 #define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
-		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
+		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
+		RADIX_TREE_EXCEPTIONAL_ENTRY))
+
+/* We choose 4096 entries - same as per-zone page wait tables */
+#define DAX_WAIT_TABLE_BITS 12
+#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
+
+wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
+
+static int __init init_dax_wait_table(void)
+{
+	int i;
+
+	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
+		init_waitqueue_head(wait_table + i);
+	return 0;
+}
+fs_initcall(init_dax_wait_table);
+
+static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+					      pgoff_t index)
+{
+	unsigned long hash = hash_long((unsigned long)mapping ^ index,
+				       DAX_WAIT_TABLE_BITS);
+	return wait_table + hash;
+}
 
 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
 {
@@ -87,50 +116,6 @@
 	return page;
 }
 
-/*
- * dax_clear_sectors() is called from within transaction context from XFS,
- * and hence this means the stack from this point must follow GFP_NOFS
- * semantics for all operations.
- */
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
-{
-	struct blk_dax_ctl dax = {
-		.sector = _sector,
-		.size = _size,
-	};
-
-	might_sleep();
-	do {
-		long count, sz;
-
-		count = dax_map_atomic(bdev, &dax);
-		if (count < 0)
-			return count;
-		sz = min_t(long, count, SZ_128K);
-		clear_pmem(dax.addr, sz);
-		dax.size -= sz;
-		dax.sector += sz / 512;
-		dax_unmap_atomic(bdev, &dax);
-		cond_resched();
-	} while (dax.size);
-
-	wmb_pmem();
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dax_clear_sectors);
-
-/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
-static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
-		loff_t pos, loff_t end)
-{
-	loff_t final = end - pos + first; /* The final byte of the buffer */
-
-	if (first > 0)
-		clear_pmem(addr, first);
-	if (final < size)
-		clear_pmem(addr + final, size - final);
-}
-
 static bool buffer_written(struct buffer_head *bh)
 {
 	return buffer_mapped(bh) && !buffer_unwritten(bh);
@@ -169,6 +154,9 @@
 	struct blk_dax_ctl dax = {
 		.addr = (void __pmem *) ERR_PTR(-EIO),
 	};
+	unsigned blkbits = inode->i_blkbits;
+	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
+								>> blkbits;
 
 	if (rw == READ)
 		end = min(end, i_size_read(inode));
@@ -176,7 +164,6 @@
 	while (pos < end) {
 		size_t len;
 		if (pos == max) {
-			unsigned blkbits = inode->i_blkbits;
 			long page = pos >> PAGE_SHIFT;
 			sector_t block = page << (PAGE_SHIFT - blkbits);
 			unsigned first = pos - (block << blkbits);
@@ -192,6 +179,13 @@
 					bh->b_size = 1 << blkbits;
 				bh_max = pos - first + bh->b_size;
 				bdev = bh->b_bdev;
+				/*
+				 * We allow uninitialized buffers for writes
+				 * beyond EOF as those cannot race with faults
+				 */
+				WARN_ON_ONCE(
+					(buffer_new(bh) && block < file_blks) ||
+					(rw == WRITE && buffer_unwritten(bh)));
 			} else {
 				unsigned done = bh->b_size -
 						(bh_max - (pos - first));
@@ -211,11 +205,6 @@
 					rc = map_len;
 					break;
 				}
-				if (buffer_unwritten(bh) || buffer_new(bh)) {
-					dax_new_buf(dax.addr, map_len, first,
-							pos, end);
-					need_wmb = true;
-				}
 				dax.addr += first;
 				size = map_len - first;
 			}
@@ -276,15 +265,8 @@
 	memset(&bh, 0, sizeof(bh));
 	bh.b_bdev = inode->i_sb->s_bdev;
 
-	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
-		struct address_space *mapping = inode->i_mapping;
+	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
 		inode_lock(inode);
-		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
-		if (retval) {
-			inode_unlock(inode);
-			goto out;
-		}
-	}
 
 	/* Protects against truncate */
 	if (!(flags & DIO_SKIP_DIO_COUNT))
@@ -305,12 +287,268 @@
 
 	if (!(flags & DIO_SKIP_DIO_COUNT))
 		inode_dio_end(inode);
- out:
 	return retval;
 }
 EXPORT_SYMBOL_GPL(dax_do_io);
 
 /*
+ * DAX radix tree locking
+ */
+struct exceptional_entry_key {
+	struct address_space *mapping;
+	unsigned long index;
+};
+
+struct wait_exceptional_entry_queue {
+	wait_queue_t wait;
+	struct exceptional_entry_key key;
+};
+
+static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
+				       int sync, void *keyp)
+{
+	struct exceptional_entry_key *key = keyp;
+	struct wait_exceptional_entry_queue *ewait =
+		container_of(wait, struct wait_exceptional_entry_queue, wait);
+
+	if (key->mapping != ewait->key.mapping ||
+	    key->index != ewait->key.index)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, NULL);
+}
+
+/*
+ * Check whether the given slot is locked. The function must be called with
+ * mapping->tree_lock held
+ */
+static inline int slot_locked(struct address_space *mapping, void **slot)
+{
+	unsigned long entry = (unsigned long)
+		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+	return entry & RADIX_DAX_ENTRY_LOCK;
+}
+
+/*
+ * Mark the given slot as locked. The function must be called with
+ * mapping->tree_lock held
+ */
+static inline void *lock_slot(struct address_space *mapping, void **slot)
+{
+	unsigned long entry = (unsigned long)
+		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+
+	entry |= RADIX_DAX_ENTRY_LOCK;
+	radix_tree_replace_slot(slot, (void *)entry);
+	return (void *)entry;
+}
+
+/*
+ * Mark the given slot as unlocked. The function must be called with
+ * mapping->tree_lock held
+ */
+static inline void *unlock_slot(struct address_space *mapping, void **slot)
+{
+	unsigned long entry = (unsigned long)
+		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+
+	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
+	radix_tree_replace_slot(slot, (void *)entry);
+	return (void *)entry;
+}
+
+/*
+ * Look up an entry in the radix tree, wait for it to become unlocked if it
+ * is an exceptional entry, and return it. The caller must call
+ * put_unlocked_mapping_entry() if it decides not to lock the entry, or
+ * put_locked_mapping_entry() once it has locked the entry and wants to
+ * unlock it.
+ *
+ * The function must be called with mapping->tree_lock held.
+ */
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+					pgoff_t index, void ***slotp)
+{
+	void *ret, **slot;
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+	ewait.key.mapping = mapping;
+	ewait.key.index = index;
+
+	for (;;) {
+		ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+					  &slot);
+		if (!ret || !radix_tree_exceptional_entry(ret) ||
+		    !slot_locked(mapping, slot)) {
+			if (slotp)
+				*slotp = slot;
+			return ret;
+		}
+		prepare_to_wait_exclusive(wq, &ewait.wait,
+					  TASK_UNINTERRUPTIBLE);
+		spin_unlock_irq(&mapping->tree_lock);
+		schedule();
+		finish_wait(wq, &ewait.wait);
+		spin_lock_irq(&mapping->tree_lock);
+	}
+}
+
+/*
+ * Find the radix tree entry at the given index. If it points to a page,
+ * return with the page locked. If it points to an exceptional entry, return
+ * with the radix tree entry locked. If the radix tree doesn't contain the
+ * given index, create an empty exceptional entry for the index and return
+ * with it locked.
+ *
+ * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
+ * persistent memory the benefit is doubtful. We can add that later if we can
+ * show it helps.
+ */
+static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+	void *ret, **slot;
+
+restart:
+	spin_lock_irq(&mapping->tree_lock);
+	ret = get_unlocked_mapping_entry(mapping, index, &slot);
+	/* No entry for given index? Make sure radix tree is big enough. */
+	if (!ret) {
+		int err;
+
+		spin_unlock_irq(&mapping->tree_lock);
+		err = radix_tree_preload(
+				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
+		if (err)
+			return ERR_PTR(err);
+		ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+			       RADIX_DAX_ENTRY_LOCK);
+		spin_lock_irq(&mapping->tree_lock);
+		err = radix_tree_insert(&mapping->page_tree, index, ret);
+		radix_tree_preload_end();
+		if (err) {
+			spin_unlock_irq(&mapping->tree_lock);
+			/* Someone already created the entry? */
+			if (err == -EEXIST)
+				goto restart;
+			return ERR_PTR(err);
+		}
+		/* Good, we have inserted empty locked entry into the tree. */
+		mapping->nrexceptional++;
+		spin_unlock_irq(&mapping->tree_lock);
+		return ret;
+	}
+	/* Normal page in radix tree? */
+	if (!radix_tree_exceptional_entry(ret)) {
+		struct page *page = ret;
+
+		get_page(page);
+		spin_unlock_irq(&mapping->tree_lock);
+		lock_page(page);
+		/* Page got truncated? Retry... */
+		if (unlikely(page->mapping != mapping)) {
+			unlock_page(page);
+			put_page(page);
+			goto restart;
+		}
+		return page;
+	}
+	ret = lock_slot(mapping, slot);
+	spin_unlock_irq(&mapping->tree_lock);
+	return ret;
+}
+
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+				   pgoff_t index, bool wake_all)
+{
+	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+
+	/*
+	 * Checking for locked entry and prepare_to_wait_exclusive() happens
+	 * under mapping->tree_lock, ditto for entry handling in our callers.
+	 * So at this point all tasks that could have seen our entry locked
+	 * must be in the waitqueue and the following check will see them.
+	 */
+	if (waitqueue_active(wq)) {
+		struct exceptional_entry_key key;
+
+		key.mapping = mapping;
+		key.index = index;
+		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+	}
+}
+
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+	void *ret, **slot;
+
+	spin_lock_irq(&mapping->tree_lock);
+	ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
+			 !slot_locked(mapping, slot))) {
+		spin_unlock_irq(&mapping->tree_lock);
+		return;
+	}
+	unlock_slot(mapping, slot);
+	spin_unlock_irq(&mapping->tree_lock);
+	dax_wake_mapping_entry_waiter(mapping, index, false);
+}
+
+static void put_locked_mapping_entry(struct address_space *mapping,
+				     pgoff_t index, void *entry)
+{
+	if (!radix_tree_exceptional_entry(entry)) {
+		unlock_page(entry);
+		put_page(entry);
+	} else {
+		dax_unlock_mapping_entry(mapping, index);
+	}
+}
+
+/*
+ * Called when we are done with a radix tree entry we looked up via
+ * get_unlocked_mapping_entry() and which we didn't lock in the end.
+ */
+static void put_unlocked_mapping_entry(struct address_space *mapping,
+				       pgoff_t index, void *entry)
+{
+	if (!radix_tree_exceptional_entry(entry))
+		return;
+
+	/* We have to wake up next waiter for the radix tree entry lock */
+	dax_wake_mapping_entry_waiter(mapping, index, false);
+}
+
+/*
+ * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
+ * entry to get unlocked before deleting it.
+ */
+int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+	void *entry;
+
+	spin_lock_irq(&mapping->tree_lock);
+	entry = get_unlocked_mapping_entry(mapping, index, NULL);
+	/*
+	 * This gets called from truncate / punch_hole path. As such, the caller
+	 * must hold locks protecting against concurrent modifications of the
+	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
+	 * caller has seen exceptional entry for this index, we better find it
+	 * at that index as well...
+	 */
+	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
+		spin_unlock_irq(&mapping->tree_lock);
+		return 0;
+	}
+	radix_tree_delete(&mapping->page_tree, index);
+	mapping->nrexceptional--;
+	spin_unlock_irq(&mapping->tree_lock);
+	dax_wake_mapping_entry_waiter(mapping, index, true);
+
+	return 1;
+}
+
+/*
  * The user has performed a load from a hole in the file.  Allocating
  * a new page in the file would cause excessive storage usage for
  * workloads with sparse files.  We allocate a page cache page instead.
@@ -318,24 +556,24 @@
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, struct page *page,
-							struct vm_fault *vmf)
+static int dax_load_hole(struct address_space *mapping, void *entry,
+			 struct vm_fault *vmf)
 {
-	unsigned long size;
-	struct inode *inode = mapping->host;
-	if (!page)
-		page = find_or_create_page(mapping, vmf->pgoff,
-						GFP_KERNEL | __GFP_ZERO);
-	if (!page)
-		return VM_FAULT_OOM;
-	/* Recheck i_size under page lock to avoid truncate race */
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (vmf->pgoff >= size) {
-		unlock_page(page);
-		put_page(page);
-		return VM_FAULT_SIGBUS;
+	struct page *page;
+
+	/* Hole page already exists? Return it...  */
+	if (!radix_tree_exceptional_entry(entry)) {
+		vmf->page = entry;
+		return VM_FAULT_LOCKED;
 	}
 
+	/* This will replace locked radix tree entry with a hole page */
+	page = find_or_create_page(mapping, vmf->pgoff,
+				   vmf->gfp_mask | __GFP_ZERO);
+	if (!page) {
+		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+		return VM_FAULT_OOM;
+	}
 	vmf->page = page;
 	return VM_FAULT_LOCKED;
 }
@@ -359,77 +597,72 @@
 	return 0;
 }
 
-#define NO_SECTOR -1
 #define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 
-static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
-		sector_t sector, bool pmd_entry, bool dirty)
+static void *dax_insert_mapping_entry(struct address_space *mapping,
+				      struct vm_fault *vmf,
+				      void *entry, sector_t sector)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	pgoff_t pmd_index = DAX_PMD_INDEX(index);
-	int type, error = 0;
-	void *entry;
+	int error = 0;
+	bool hole_fill = false;
+	void *new_entry;
+	pgoff_t index = vmf->pgoff;
 
-	WARN_ON_ONCE(pmd_entry && !dirty);
-	if (dirty)
+	if (vmf->flags & FAULT_FLAG_WRITE)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-	spin_lock_irq(&mapping->tree_lock);
-
-	entry = radix_tree_lookup(page_tree, pmd_index);
-	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
-		index = pmd_index;
-		goto dirty;
+	/* Replacing hole page with block mapping? */
+	if (!radix_tree_exceptional_entry(entry)) {
+		hole_fill = true;
+		/*
+		 * Unmap the page now before we remove it from page cache below.
+		 * The page is locked so it cannot be faulted in again.
+		 */
+		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
+				    PAGE_SIZE, 0);
+		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
+		if (error)
+			return ERR_PTR(error);
 	}
 
-	entry = radix_tree_lookup(page_tree, index);
-	if (entry) {
-		type = RADIX_DAX_TYPE(entry);
-		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
-					type != RADIX_DAX_PMD)) {
-			error = -EIO;
+	spin_lock_irq(&mapping->tree_lock);
+	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
+		       RADIX_DAX_ENTRY_LOCK);
+	if (hole_fill) {
+		__delete_from_page_cache(entry, NULL);
+		/* Drop pagecache reference */
+		put_page(entry);
+		error = radix_tree_insert(page_tree, index, new_entry);
+		if (error) {
+			new_entry = ERR_PTR(error);
 			goto unlock;
 		}
+		mapping->nrexceptional++;
+	} else {
+		void **slot;
+		void *ret;
 
-		if (!pmd_entry || type == RADIX_DAX_PMD)
-			goto dirty;
-
-		/*
-		 * We only insert dirty PMD entries into the radix tree.  This
-		 * means we don't need to worry about removing a dirty PTE
-		 * entry and inserting a clean PMD entry, thus reducing the
-		 * range we would flush with a follow-up fsync/msync call.
-		 */
-		radix_tree_delete(&mapping->page_tree, index);
-		mapping->nrexceptional--;
+		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
+		WARN_ON_ONCE(ret != entry);
+		radix_tree_replace_slot(slot, new_entry);
 	}
-
-	if (sector == NO_SECTOR) {
-		/*
-		 * This can happen during correct operation if our pfn_mkwrite
-		 * fault raced against a hole punch operation.  If this
-		 * happens the pte that was hole punched will have been
-		 * unmapped and the radix tree entry will have been removed by
-		 * the time we are called, but the call will still happen.  We
-		 * will return all the way up to wp_pfn_shared(), where the
-		 * pte_same() check will fail, eventually causing page fault
-		 * to be retried by the CPU.
-		 */
-		goto unlock;
-	}
-
-	error = radix_tree_insert(page_tree, index,
-			RADIX_DAX_ENTRY(sector, pmd_entry));
-	if (error)
-		goto unlock;
-
-	mapping->nrexceptional++;
- dirty:
-	if (dirty)
+	if (vmf->flags & FAULT_FLAG_WRITE)
 		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
  unlock:
 	spin_unlock_irq(&mapping->tree_lock);
-	return error;
+	if (hole_fill) {
+		radix_tree_preload_end();
+		/*
+		 * We don't need the hole page anymore; it has been replaced
+		 * with a locked radix tree entry now.
+		 */
+		if (mapping->a_ops->freepage)
+			mapping->a_ops->freepage(entry);
+		unlock_page(entry);
+		put_page(entry);
+	}
+	return new_entry;
 }
 
 static int dax_writeback_one(struct block_device *bdev,
@@ -555,56 +788,29 @@
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
-static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
+static int dax_insert_mapping(struct address_space *mapping,
+			struct buffer_head *bh, void **entryp,
 			struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
-	struct address_space *mapping = inode->i_mapping;
 	struct block_device *bdev = bh->b_bdev;
 	struct blk_dax_ctl dax = {
-		.sector = to_sector(bh, inode),
+		.sector = to_sector(bh, mapping->host),
 		.size = bh->b_size,
 	};
-	pgoff_t size;
-	int error;
+	void *ret;
+	void *entry = *entryp;
 
-	i_mmap_lock_read(mapping);
-
-	/*
-	 * Check truncate didn't happen while we were allocating a block.
-	 * If it did, this block may or may not be still allocated to the
-	 * file.  We can't tell the filesystem to free it because we can't
-	 * take i_mutex here.  In the worst case, the file still has blocks
-	 * allocated past the end of the file.
-	 */
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (unlikely(vmf->pgoff >= size)) {
-		error = -EIO;
-		goto out;
-	}
-
-	if (dax_map_atomic(bdev, &dax) < 0) {
-		error = PTR_ERR(dax.addr);
-		goto out;
-	}
-
-	if (buffer_unwritten(bh) || buffer_new(bh)) {
-		clear_pmem(dax.addr, PAGE_SIZE);
-		wmb_pmem();
-	}
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
 	dax_unmap_atomic(bdev, &dax);
 
-	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
-			vmf->flags & FAULT_FLAG_WRITE);
-	if (error)
-		goto out;
+	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+	*entryp = ret;
 
-	error = vm_insert_mixed(vma, vaddr, dax.pfn);
-
- out:
-	i_mmap_unlock_read(mapping);
-
-	return error;
+	return vm_insert_mixed(vma, vaddr, dax.pfn);
 }
 
 /**
@@ -612,24 +818,18 @@
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @get_block: The filesystem method used to translate file offsets to blocks
- * @complete_unwritten: The filesystem method used to convert unwritten blocks
- *	to written so the data written to them is exposed. This is required for
- *	required by write faults for filesystems that will return unwritten
- *	extent mappings from @get_block, but it is optional for reads as
- *	dax_insert_mapping() will always zero unwritten blocks. If the fs does
- *	not support unwritten extents, the it should pass NULL.
  *
  * When a page fault occurs, filesystems may call this helper in their
  * fault handler for DAX files. __dax_fault() assumes the caller has done all
  * the necessary locking for the page fault to proceed successfully.
  */
 int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-			get_block_t get_block, dax_iodone_t complete_unwritten)
+			get_block_t get_block)
 {
 	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	struct page *page;
+	void *entry;
 	struct buffer_head bh;
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 	unsigned blkbits = inode->i_blkbits;
@@ -638,6 +838,11 @@
 	int error;
 	int major = 0;
 
+	/*
+	 * Check whether the offset isn't beyond the end of file now. The caller
+	 * is supposed to hold locks serializing us with truncate / punch hole,
+	 * so this is a reliable test.
+	 */
 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (vmf->pgoff >= size)
 		return VM_FAULT_SIGBUS;
@@ -647,49 +852,17 @@
 	bh.b_bdev = inode->i_sb->s_bdev;
 	bh.b_size = PAGE_SIZE;
 
- repeat:
-	page = find_get_page(mapping, vmf->pgoff);
-	if (page) {
-		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-			put_page(page);
-			return VM_FAULT_RETRY;
-		}
-		if (unlikely(page->mapping != mapping)) {
-			unlock_page(page);
-			put_page(page);
-			goto repeat;
-		}
-		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		if (unlikely(vmf->pgoff >= size)) {
-			/*
-			 * We have a struct page covering a hole in the file
-			 * from a read fault and we've raced with a truncate
-			 */
-			error = -EIO;
-			goto unlock_page;
-		}
+	entry = grab_mapping_entry(mapping, vmf->pgoff);
+	if (IS_ERR(entry)) {
+		error = PTR_ERR(entry);
+		goto out;
 	}
 
 	error = get_block(inode, block, &bh, 0);
 	if (!error && (bh.b_size < PAGE_SIZE))
 		error = -EIO;		/* fs corruption? */
 	if (error)
-		goto unlock_page;
-
-	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
-		if (vmf->flags & FAULT_FLAG_WRITE) {
-			error = get_block(inode, block, &bh, 1);
-			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-			major = VM_FAULT_MAJOR;
-			if (!error && (bh.b_size < PAGE_SIZE))
-				error = -EIO;
-			if (error)
-				goto unlock_page;
-		} else {
-			return dax_load_hole(mapping, page, vmf);
-		}
-	}
+		goto unlock_entry;
 
 	if (vmf->cow_page) {
 		struct page *new_page = vmf->cow_page;
@@ -698,53 +871,35 @@
 		else
 			clear_user_highpage(new_page, vaddr);
 		if (error)
-			goto unlock_page;
-		vmf->page = page;
-		if (!page) {
-			i_mmap_lock_read(mapping);
-			/* Check we didn't race with truncate */
-			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
-								PAGE_SHIFT;
-			if (vmf->pgoff >= size) {
-				i_mmap_unlock_read(mapping);
-				error = -EIO;
-				goto out;
-			}
+			goto unlock_entry;
+		if (!radix_tree_exceptional_entry(entry)) {
+			vmf->page = entry;
+			return VM_FAULT_LOCKED;
 		}
-		return VM_FAULT_LOCKED;
+		vmf->entry = entry;
+		return VM_FAULT_DAX_LOCKED;
 	}
 
-	/* Check we didn't race with a read fault installing a new page */
-	if (!page && major)
-		page = find_lock_page(mapping, vmf->pgoff);
-
-	if (page) {
-		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-							PAGE_SIZE, 0);
-		delete_from_page_cache(page);
-		unlock_page(page);
-		put_page(page);
-		page = NULL;
+	if (!buffer_mapped(&bh)) {
+		if (vmf->flags & FAULT_FLAG_WRITE) {
+			error = get_block(inode, block, &bh, 1);
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			major = VM_FAULT_MAJOR;
+			if (!error && (bh.b_size < PAGE_SIZE))
+				error = -EIO;
+			if (error)
+				goto unlock_entry;
+		} else {
+			return dax_load_hole(mapping, entry, vmf);
+		}
 	}
 
-	/*
-	 * If we successfully insert the new mapping over an unwritten extent,
-	 * we need to ensure we convert the unwritten extent. If there is an
-	 * error inserting the mapping, the filesystem needs to leave it as
-	 * unwritten to prevent exposure of the stale underlying data to
-	 * userspace, but we still need to call the completion function so
-	 * the private resources on the mapping buffer can be released. We
-	 * indicate what the callback should do via the uptodate variable, same
-	 * as for normal BH based IO completions.
-	 */
-	error = dax_insert_mapping(inode, &bh, vma, vmf);
-	if (buffer_unwritten(&bh)) {
-		if (complete_unwritten)
-			complete_unwritten(&bh, !error);
-		else
-			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
-	}
-
+	/* Filesystem should not return unwritten buffers to us! */
+	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
+	error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
+ unlock_entry:
+	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
@@ -752,13 +907,6 @@
 	if ((error < 0) && (error != -EBUSY))
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
-
- unlock_page:
-	if (page) {
-		unlock_page(page);
-		put_page(page);
-	}
-	goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
 
@@ -772,7 +920,7 @@
  * fault handler for DAX files.
  */
 int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-	      get_block_t get_block, dax_iodone_t complete_unwritten)
+	      get_block_t get_block)
 {
 	int result;
 	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
@@ -781,7 +929,7 @@
 		sb_start_pagefault(sb);
 		file_update_time(vma->vm_file);
 	}
-	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
+	result = __dax_fault(vma, vmf, get_block);
 	if (vmf->flags & FAULT_FLAG_WRITE)
 		sb_end_pagefault(sb);
 
@@ -789,7 +937,7 @@
 }
 EXPORT_SYMBOL_GPL(dax_fault);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
 /*
  * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
  * more often than one might expect in the below function.
@@ -815,8 +963,7 @@
 #define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
 
 int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
-		pmd_t *pmd, unsigned int flags, get_block_t get_block,
-		dax_iodone_t complete_unwritten)
+		pmd_t *pmd, unsigned int flags, get_block_t get_block)
 {
 	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
@@ -828,7 +975,7 @@
 	struct block_device *bdev;
 	pgoff_t size, pgoff;
 	sector_t block;
-	int error, result = 0;
+	int result = 0;
 	bool alloc = false;
 
 	/* dax pmd mappings require pfn_t_devmap() */
@@ -875,6 +1022,7 @@
 		if (get_block(inode, block, &bh, 1) != 0)
 			return VM_FAULT_SIGBUS;
 		alloc = true;
+		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
 	}
 
 	bdev = bh.b_bdev;
@@ -900,26 +1048,7 @@
 		truncate_pagecache_range(inode, lstart, lend);
 	}
 
-	i_mmap_lock_read(mapping);
-
-	/*
-	 * If a truncate happened while we were allocating blocks, we may
-	 * leave blocks allocated to the file that are beyond EOF.  We can't
-	 * take i_mutex here, so just leave them hanging; they'll be freed
-	 * when the file is deleted.
-	 */
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (pgoff >= size) {
-		result = VM_FAULT_SIGBUS;
-		goto out;
-	}
-	if ((pgoff | PG_PMD_COLOUR) >= size) {
-		dax_pmd_dbg(&bh, address,
-				"offset + huge page size > file size");
-		goto fallback;
-	}
-
-	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
+	if (!write && !buffer_mapped(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
 		struct page *zero_page = get_huge_zero_page();
@@ -954,8 +1083,8 @@
 		long length = dax_map_atomic(bdev, &dax);
 
 		if (length < 0) {
-			result = VM_FAULT_SIGBUS;
-			goto out;
+			dax_pmd_dbg(&bh, address, "dax-error fallback");
+			goto fallback;
 		}
 		if (length < PMD_SIZE) {
 			dax_pmd_dbg(&bh, address, "dax-length too small");
@@ -973,14 +1102,6 @@
 			dax_pmd_dbg(&bh, address, "pfn not in memmap");
 			goto fallback;
 		}
-
-		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-			clear_pmem(dax.addr, PMD_SIZE);
-			wmb_pmem();
-			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-			result |= VM_FAULT_MAJOR;
-		}
 		dax_unmap_atomic(bdev, &dax);
 
 		/*
@@ -999,13 +1120,10 @@
 		 * the write to insert a dirty entry.
 		 */
 		if (write) {
-			error = dax_radix_entry(mapping, pgoff, dax.sector,
-					true, true);
-			if (error) {
-				dax_pmd_dbg(&bh, address,
-						"PMD radix insertion failed");
-				goto fallback;
-			}
+			/*
+			 * We should insert radix-tree entry and dirty it here.
+			 * For now this is broken...
+			 */
 		}
 
 		dev_dbg(part_to_dev(bdev->bd_part),
@@ -1018,11 +1136,6 @@
 	}
 
  out:
-	i_mmap_unlock_read(mapping);
-
-	if (buffer_unwritten(&bh))
-		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
-
 	return result;
 
  fallback:
@@ -1042,8 +1155,7 @@
  * pmd_fault handler for DAX files.
  */
 int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
-			pmd_t *pmd, unsigned int flags, get_block_t get_block,
-			dax_iodone_t complete_unwritten)
+			pmd_t *pmd, unsigned int flags, get_block_t get_block)
 {
 	int result;
 	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
@@ -1052,8 +1164,7 @@
 		sb_start_pagefault(sb);
 		file_update_time(vma->vm_file);
 	}
-	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
-				complete_unwritten);
+	result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
 	if (flags & FAULT_FLAG_WRITE)
 		sb_end_pagefault(sb);
 
@@ -1070,27 +1181,59 @@
 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct file *file = vma->vm_file;
-	int error;
+	struct address_space *mapping = file->f_mapping;
+	void *entry;
+	pgoff_t index = vmf->pgoff;
 
-	/*
-	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
-	 * RADIX_DAX_PTE entry already exists in the radix tree from a
-	 * previous call to __dax_fault().  We just want to look up that PTE
-	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
-	 * saves us from having to make a call to get_block() here to look
-	 * up the sector.
-	 */
-	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
-			true);
-
-	if (error == -ENOMEM)
-		return VM_FAULT_OOM;
-	if (error)
-		return VM_FAULT_SIGBUS;
+	spin_lock_irq(&mapping->tree_lock);
+	entry = get_unlocked_mapping_entry(mapping, index, NULL);
+	if (!entry || !radix_tree_exceptional_entry(entry))
+		goto out;
+	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
+	put_unlocked_mapping_entry(mapping, index, entry);
+out:
+	spin_unlock_irq(&mapping->tree_lock);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 
+static bool dax_range_is_aligned(struct block_device *bdev,
+				 unsigned int offset, unsigned int length)
+{
+	unsigned short sector_size = bdev_logical_block_size(bdev);
+
+	if (!IS_ALIGNED(offset, sector_size))
+		return false;
+	if (!IS_ALIGNED(length, sector_size))
+		return false;
+
+	return true;
+}
+
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+		unsigned int offset, unsigned int length)
+{
+	struct blk_dax_ctl dax = {
+		.sector		= sector,
+		.size		= PAGE_SIZE,
+	};
+
+	if (dax_range_is_aligned(bdev, offset, length)) {
+		sector_t start_sector = dax.sector + (offset >> 9);
+
+		return blkdev_issue_zeroout(bdev, start_sector,
+				length >> 9, GFP_NOFS, true);
+	} else {
+		if (dax_map_atomic(bdev, &dax) < 0)
+			return PTR_ERR(dax.addr);
+		clear_pmem(dax.addr + offset, length);
+		wmb_pmem();
+		dax_unmap_atomic(bdev, &dax);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__dax_zero_page_range);
+
 /**
  * dax_zero_page_range - zero a range within a page of a DAX file
  * @inode: The file being truncated
@@ -1102,12 +1245,6 @@
  * page in a DAX file.  This is intended for hole-punch operations.  If
  * you are truncating a file, the helper function dax_truncate_page() may be
  * more convenient.
- *
- * We work in terms of PAGE_SIZE here for commonality with
- * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
- * took care of disposing of the unnecessary blocks.  Even if the filesystem
- * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
- * since the file might be mmapped.
  */
 int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 							get_block_t get_block)
@@ -1126,23 +1263,11 @@
 	bh.b_bdev = inode->i_sb->s_bdev;
 	bh.b_size = PAGE_SIZE;
 	err = get_block(inode, index, &bh, 0);
-	if (err < 0)
+	if (err < 0 || !buffer_written(&bh))
 		return err;
-	if (buffer_written(&bh)) {
-		struct block_device *bdev = bh.b_bdev;
-		struct blk_dax_ctl dax = {
-			.sector = to_sector(&bh, inode),
-			.size = PAGE_SIZE,
-		};
 
-		if (dax_map_atomic(bdev, &dax) < 0)
-			return PTR_ERR(dax.addr);
-		clear_pmem(dax.addr + offset, length);
-		wmb_pmem();
-		dax_unmap_atomic(bdev, &dax);
-	}
-
-	return 0;
+	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
+			offset, length);
 }
 EXPORT_SYMBOL_GPL(dax_zero_page_range);
 
@@ -1154,12 +1279,6 @@
  *
  * Similar to block_truncate_page(), this function can be called by a
  * filesystem when it is truncating a DAX file to handle the partial page.
- *
- * We work in terms of PAGE_SIZE here for commonality with
- * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
- * took care of disposing of the unnecessary blocks.  Even if the filesystem
- * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
- * since the file might be mmapped.
  */
 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
 {
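
One detail worth calling out from the new __dax_zero_page_range() above: when both the offset and the length are multiples of the device's logical block size, zeroing is handed to blkdev_issue_zeroout() instead of mapping and clearing pmem directly. A hedged standalone sketch of that path selection (the 512-byte sector size is an assumption standing in for bdev_logical_block_size()):

#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Mirror of dax_range_is_aligned(): both ends of the range must fall on
 * logical block boundaries for the block layer to zero it for us. */
static bool range_is_aligned(unsigned int offset, unsigned int length,
			     unsigned short sector_size)
{
	return IS_ALIGNED(offset, sector_size) &&
	       IS_ALIGNED(length, sector_size);
}

int main(void)
{
	unsigned short sector_size = 512;	/* assumed logical block size */

	printf("whole page: %s\n", range_is_aligned(0, 4096, sector_size) ?
	       "blkdev_issue_zeroout" : "clear_pmem");
	printf("sub-sector: %s\n", range_is_aligned(100, 412, sector_size) ?
	       "blkdev_issue_zeroout" : "clear_pmem");
	return 0;
}
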
diff --git a/fs/dcache.c b/fs/dcache.c
index c622872..817c243 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1636,7 +1636,7 @@
 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
 	if (!dentry)
 		return NULL;
-
+	dentry->d_flags |= DCACHE_RCUACCESS;
 	spin_lock(&parent->d_lock);
 	/*
 	 * don't need child lock because it is not subject
@@ -1670,8 +1670,7 @@
 	struct qstr q;
 
 	q.name = name;
-	q.len = strlen(name);
-	q.hash = full_name_hash(q.name, q.len);
+	q.hash_len = hashlen_string(name);
 	return d_alloc(parent, &q);
 }
 EXPORT_SYMBOL(d_alloc_name);
@@ -2359,7 +2358,6 @@
 {
 	BUG_ON(!d_unhashed(entry));
 	hlist_bl_lock(b);
-	entry->d_flags |= DCACHE_RCUACCESS;
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
 	hlist_bl_unlock(b);
 }
@@ -2844,6 +2842,7 @@
 	/* ... and switch them in the tree */
 	if (IS_ROOT(dentry)) {
 		/* splicing a tree */
+		dentry->d_flags |= DCACHE_RCUACCESS;
 		dentry->d_parent = target->d_parent;
 		target->d_parent = target;
 		list_del_init(&target->d_child);
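
Two independent dcache fixes are folded in here: DCACHE_RCUACCESS is now set when the dentry is attached to a parent (and when a tree is spliced) rather than at hashing time, and d_alloc_name() computes the qstr length and hash in one pass via hashlen_string(). A rough sketch of the hash_len packing, assuming the usual layout of length in the high 32 bits and hash in the low 32 bits (the toy hash below is not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Pack (hash, len) the way hashlen_create() is assumed to: length high, hash low. */
static uint64_t hashlen_create(uint32_t hash, uint32_t len)
{
	return ((uint64_t)len << 32) | hash;
}

/* Single-pass stand-in for hashlen_string(); the real function mixes whole
 * words of the name, this only illustrates combining length and hash. */
static uint64_t toy_hashlen_string(const char *name)
{
	uint32_t hash = 0, len = 0;

	for (; name[len]; len++)
		hash = hash * 31 + (unsigned char)name[len];
	return hashlen_create(hash, len);
}

int main(void)
{
	uint64_t hl = toy_hashlen_string("devpts");

	printf("len = %u, hash = %#x\n", (uint32_t)(hl >> 32), (uint32_t)hl);
	return 0;
}
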
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 0b2954d..37c134a 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -95,8 +95,6 @@
 
 static DEFINE_MUTEX(allocated_ptys_lock);
 
-static struct vfsmount *devpts_mnt;
-
 struct pts_mount_opts {
 	int setuid;
 	int setgid;
@@ -104,7 +102,7 @@
 	kgid_t   gid;
 	umode_t mode;
 	umode_t ptmxmode;
-	int newinstance;
+	int reserve;
 	int max;
 };
 
@@ -117,11 +115,9 @@
 	{Opt_uid, "uid=%u"},
 	{Opt_gid, "gid=%u"},
 	{Opt_mode, "mode=%o"},
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 	{Opt_ptmxmode, "ptmxmode=%o"},
 	{Opt_newinstance, "newinstance"},
 	{Opt_max, "max=%d"},
-#endif
 	{Opt_err, NULL}
 };
 
@@ -137,15 +133,48 @@
 	return sb->s_fs_info;
 }
 
-static inline struct super_block *pts_sb_from_inode(struct inode *inode)
+struct pts_fs_info *devpts_acquire(struct file *filp)
 {
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-	if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-		return inode->i_sb;
-#endif
-	if (!devpts_mnt)
-		return NULL;
-	return devpts_mnt->mnt_sb;
+	struct pts_fs_info *result;
+	struct path path;
+	struct super_block *sb;
+	int err;
+
+	path = filp->f_path;
+	path_get(&path);
+
+	/* Has the devpts filesystem already been found? */
+	sb = path.mnt->mnt_sb;
+	if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
+		/* Is a devpts filesystem at "pts" in the same directory? */
+		err = path_pts(&path);
+		if (err) {
+			result = ERR_PTR(err);
+			goto out;
+		}
+
+		/* Is the path the root of a devpts filesystem? */
+		result = ERR_PTR(-ENODEV);
+		sb = path.mnt->mnt_sb;
+		if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
+		    (path.mnt->mnt_root != sb->s_root))
+			goto out;
+	}
+
+	/*
+	 * pty code needs to hold extra references in case of last /dev/tty close
+	 */
+	atomic_inc(&sb->s_active);
+	result = DEVPTS_SB(sb);
+
+out:
+	path_put(&path);
+	return result;
+}
+
+void devpts_release(struct pts_fs_info *fsi)
+{
+	deactivate_super(fsi->sb);
 }
 
 #define PARSE_MOUNT	0
@@ -154,9 +183,7 @@
 /*
  * parse_mount_options():
  *	Set @opts to mount options specified in @data. If an option is not
- *	specified in @data, set it to its default value. The exception is
- *	'newinstance' option which can only be set/cleared on a mount (i.e.
- *	cannot be changed during remount).
+ *	specified in @data, set it to its default value.
  *
  * Note: @data may be NULL (in which case all options are set to default).
  */
@@ -174,9 +201,12 @@
 	opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
 	opts->max     = NR_UNIX98_PTY_MAX;
 
-	/* newinstance makes sense only on initial mount */
+	/* Only allow instances mounted from the initial mount
+	 * namespace to tap the reserve pool of ptys.
+	 */
 	if (op == PARSE_MOUNT)
-		opts->newinstance = 0;
+		opts->reserve =
+			(current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
 
 	while ((p = strsep(&data, ",")) != NULL) {
 		substring_t args[MAX_OPT_ARGS];
@@ -211,16 +241,12 @@
 				return -EINVAL;
 			opts->mode = option & S_IALLUGO;
 			break;
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 		case Opt_ptmxmode:
 			if (match_octal(&args[0], &option))
 				return -EINVAL;
 			opts->ptmxmode = option & S_IALLUGO;
 			break;
 		case Opt_newinstance:
-			/* newinstance makes sense only on initial mount */
-			if (op == PARSE_MOUNT)
-				opts->newinstance = 1;
 			break;
 		case Opt_max:
 			if (match_int(&args[0], &option) ||
@@ -228,7 +254,6 @@
 				return -EINVAL;
 			opts->max = option;
 			break;
-#endif
 		default:
 			pr_err("called with bogus options\n");
 			return -EINVAL;
@@ -238,7 +263,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 static int mknod_ptmx(struct super_block *sb)
 {
 	int mode;
@@ -305,12 +329,6 @@
 		inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
 	}
 }
-#else
-static inline void update_ptmx_mode(struct pts_fs_info *fsi)
-{
-	return;
-}
-#endif
 
 static int devpts_remount(struct super_block *sb, int *flags, char *data)
 {
@@ -344,11 +362,9 @@
 		seq_printf(seq, ",gid=%u",
 			   from_kgid_munged(&init_user_ns, opts->gid));
 	seq_printf(seq, ",mode=%03o", opts->mode);
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 	seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
 	if (opts->max < NR_UNIX98_PTY_MAX)
 		seq_printf(seq, ",max=%d", opts->max);
-#endif
 
 	return 0;
 }
@@ -410,40 +426,11 @@
 	return -ENOMEM;
 }
 
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-static int compare_init_pts_sb(struct super_block *s, void *p)
-{
-	if (devpts_mnt)
-		return devpts_mnt->mnt_sb == s;
-	return 0;
-}
-
 /*
  * devpts_mount()
  *
- *     If the '-o newinstance' mount option was specified, mount a new
- *     (private) instance of devpts.  PTYs created in this instance are
- *     independent of the PTYs in other devpts instances.
- *
- *     If the '-o newinstance' option was not specified, mount/remount the
- *     initial kernel mount of devpts.  This type of mount gives the
- *     legacy, single-instance semantics.
- *
- *     The 'newinstance' option is needed to support multiple namespace
- *     semantics in devpts while preserving backward compatibility of the
- *     current 'single-namespace' semantics. i.e all mounts of devpts
- *     without the 'newinstance' mount option should bind to the initial
- *     kernel mount, like mount_single().
- *
- *     Mounts with 'newinstance' option create a new, private namespace.
- *
- *     NOTE:
- *
- *     For single-mount semantics, devpts cannot use mount_single(),
- *     because mount_single()/sget() find and use the super-block from
- *     the most recent mount of devpts. But that recent mount may be a
- *     'newinstance' mount and mount_single() would pick the newinstance
- *     super-block instead of the initial super-block.
+ *     Mount a new (private) instance of devpts.  PTYs created in this
+ *     instance are independent of the PTYs in other devpts instances.
  */
 static struct dentry *devpts_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
@@ -456,18 +443,7 @@
 	if (error)
 		return ERR_PTR(error);
 
-	/* Require newinstance for all user namespace mounts to ensure
-	 * the mount options are not changed.
-	 */
-	if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
-		return ERR_PTR(-EINVAL);
-
-	if (opts.newinstance)
-		s = sget(fs_type, NULL, set_anon_super, flags, NULL);
-	else
-		s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
-			 NULL);
-
+	s = sget(fs_type, NULL, set_anon_super, flags, NULL);
 	if (IS_ERR(s))
 		return ERR_CAST(s);
 
@@ -491,18 +467,6 @@
 	return ERR_PTR(error);
 }
 
-#else
-/*
- * This supports only the legacy single-instance semantics (no
- * multiple-instance semantics)
- */
-static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
-		const char *dev_name, void *data)
-{
-	return mount_single(fs_type, flags, data, devpts_fill_super);
-}
-#endif
-
 static void devpts_kill_sb(struct super_block *sb)
 {
 	struct pts_fs_info *fsi = DEVPTS_SB(sb);
@@ -516,9 +480,7 @@
 	.name		= "devpts",
 	.mount		= devpts_mount,
 	.kill_sb	= devpts_kill_sb,
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 	.fs_flags	= FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
-#endif
 };
 
 /*
@@ -531,16 +493,13 @@
 	int index;
 	int ida_ret;
 
-	if (!fsi)
-		return -ENODEV;
-
 retry:
 	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
 		return -ENOMEM;
 
 	mutex_lock(&allocated_ptys_lock);
-	if (pty_count >= pty_limit -
-			(fsi->mount_opts.newinstance ? pty_reserve : 0)) {
+	if (pty_count >= (pty_limit -
+			  (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
 		mutex_unlock(&allocated_ptys_lock);
 		return -ENOSPC;
 	}
@@ -571,30 +530,6 @@
 	mutex_unlock(&allocated_ptys_lock);
 }
 
-/*
- * pty code needs to hold extra references in case of last /dev/tty close
- */
-struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
-{
-	struct super_block *sb;
-	struct pts_fs_info *fsi;
-
-	sb = pts_sb_from_inode(ptmx_inode);
-	if (!sb)
-		return NULL;
-	fsi = DEVPTS_SB(sb);
-	if (!fsi)
-		return NULL;
-
-	atomic_inc(&sb->s_active);
-	return fsi;
-}
-
-void devpts_put_ref(struct pts_fs_info *fsi)
-{
-	deactivate_super(fsi->sb);
-}
-
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
@@ -607,16 +542,12 @@
 struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
 	struct dentry *dentry;
-	struct super_block *sb;
+	struct super_block *sb = fsi->sb;
 	struct inode *inode;
 	struct dentry *root;
 	struct pts_mount_opts *opts;
 	char s[12];
 
-	if (!fsi)
-		return ERR_PTR(-ENODEV);
-
-	sb = fsi->sb;
 	root = sb->s_root;
 	opts = &fsi->mount_opts;
 
@@ -676,20 +607,8 @@
 static int __init init_devpts_fs(void)
 {
 	int err = register_filesystem(&devpts_fs_type);
-	struct ctl_table_header *table;
-
 	if (!err) {
-		struct vfsmount *mnt;
-
-		table = register_sysctl_table(pty_root_table);
-		mnt = kern_mount(&devpts_fs_type);
-		if (IS_ERR(mnt)) {
-			err = PTR_ERR(mnt);
-			unregister_filesystem(&devpts_fs_type);
-			unregister_sysctl_table(table);
-		} else {
-			devpts_mnt = mnt;
-		}
+		register_sysctl_table(pty_root_table);
 	}
 	return err;
 }
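
With multi-instance behaviour now unconditional, the old "newinstance" accounting becomes a reserve pool: only devpts instances mounted from the initial mount namespace may dip into the last pty_reserve ptys; everyone else is capped at pty_limit minus the reserve. A standalone sketch of that gate with made-up limits (the real values come from the pty sysctls):

#include <stdbool.h>
#include <stdio.h>

static int pty_limit = 4096;		/* assumed sysctl values */
static int pty_reserve = 1024;

/* Mirror of the new check in devpts_new_index(): 'reserve' is true only for
 * instances mounted from the initial mount namespace. */
static bool pty_available(int pty_count, bool reserve)
{
	return pty_count < (pty_limit - (reserve ? 0 : pty_reserve));
}

int main(void)
{
	printf("container ns at 3500 ptys: %s\n",
	       pty_available(3500, false) ? "allocate" : "-ENOSPC");
	printf("initial ns at 3500 ptys:   %s\n",
	       pty_available(3500, true) ? "allocate" : "-ENOSPC");
	return 0;
}
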
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3bf3f20..f3b4408 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -628,11 +628,11 @@
 		map_bh->b_size = fs_count << i_blkbits;
 
 		/*
-		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
-		 * forbid block creations: only overwrites are permitted.
-		 * We will return early to the caller once we see an
-		 * unmapped buffer head returned, and the caller will fall
-		 * back to buffered I/O.
+		 * For writes that could fill holes inside i_size on a
+		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
+		 * overwrites are permitted. We will return early to the caller
+		 * once we see an unmapped buffer head returned, and the caller
+		 * will fall back to buffered I/O.
 		 *
 		 * Otherwise the decision is left to the get_blocks method,
 		 * which may decide to handle it or also return an unmapped
@@ -640,8 +640,8 @@
 		 */
 		create = dio->rw & WRITE;
 		if (dio->flags & DIO_SKIP_HOLES) {
-			if (sdio->block_in_file < (i_size_read(dio->inode) >>
-							sdio->blkbits))
+			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
+							i_blkbits))
 				create = 0;
 		}
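
The DIO_SKIP_HOLES change above is easy to misread: the old test compared positions in dio-block units with a strict less-than, so a write landing in the filesystem block that contains EOF (when i_size is not block-aligned) was still allowed to allocate blocks; the new test works in filesystem-block units and includes that last block. A hedged arithmetic sketch with made-up sizes:

#include <stdio.h>

int main(void)
{
	long long i_size = 4097;	/* file size in bytes, not block-aligned */
	unsigned i_blkbits = 12;	/* 4 KiB filesystem blocks */
	unsigned dio_blkbits = 9;	/* 512-byte dio blocks */
	long long offset = 4096;	/* write starts in the block holding EOF */

	long long block_in_file = offset >> dio_blkbits;
	long long fs_startblk = offset >> i_blkbits;

	/* Old check: misses the partial block that still lies inside i_size. */
	int old_forbid = block_in_file < (i_size >> dio_blkbits);
	/* New check: any block up to and including the one with the last byte. */
	int new_forbid = fs_startblk <= ((i_size - 1) >> i_blkbits);

	printf("old check forbids block creation: %d\n", old_forbid);	/* 0 */
	printf("new check forbids block creation: %d\n", new_forbid);	/* 1 */
	return 0;
}
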
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index ebd40f4..0d8eb34 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1141,12 +1141,13 @@
 
 static int
 ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
+				 struct inode *ecryptfs_inode,
 				 char *page_virt, size_t size)
 {
 	int rc;
 
-	rc = ecryptfs_setxattr(ecryptfs_dentry, ECRYPTFS_XATTR_NAME, page_virt,
-			       size, 0);
+	rc = ecryptfs_setxattr(ecryptfs_dentry, ecryptfs_inode,
+			       ECRYPTFS_XATTR_NAME, page_virt, size, 0);
 	return rc;
 }
 
@@ -1215,8 +1216,8 @@
 		goto out_free;
 	}
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
-		rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
-						      size);
+		rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, ecryptfs_inode,
+						      virt, size);
 	else
 		rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
 							 virt_len);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 3ec495d..4ba1547 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -609,8 +609,8 @@
 ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
 			const char *name, void *value, size_t size);
 int
-ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
-		  size_t size, int flags);
+ecryptfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
+		  const void *value, size_t size, int flags);
 int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
 #ifdef CONFIG_ECRYPT_FS_MESSAGING
 int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 318b046..9d153b6 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1001,7 +1001,8 @@
 }
 
 int
-ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
+		  const char *name, const void *value,
 		  size_t size, int flags)
 {
 	int rc = 0;
@@ -1014,8 +1015,8 @@
 	}
 
 	rc = vfs_setxattr(lower_dentry, name, value, size, flags);
-	if (!rc && d_really_is_positive(dentry))
-		fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry));
+	if (!rc && inode)
+		fsstack_copy_attr_all(inode, d_inode(lower_dentry));
 out:
 	return rc;
 }
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 148d11b..9c3437c 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -442,7 +442,8 @@
 	if (size < 0)
 		size = 8;
 	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
-	rc = lower_inode->i_op->setxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
+	rc = lower_inode->i_op->setxattr(lower_dentry, lower_inode,
+					 ECRYPTFS_XATTR_NAME,
 					 xattr_virt, size, 0);
 	inode_unlock(lower_inode);
 	if (rc)
diff --git a/fs/exec.c b/fs/exec.c
index e92419f..887c1c9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -243,10 +243,6 @@
 	put_page(page);
 }
 
-static void free_arg_page(struct linux_binprm *bprm, int i)
-{
-}
-
 static void free_arg_pages(struct linux_binprm *bprm)
 {
 }
@@ -267,7 +263,10 @@
 	if (!vma)
 		return -ENOMEM;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem)) {
+		err = -EINTR;
+		goto err_free;
+	}
 	vma->vm_mm = mm;
 
 	/*
@@ -294,6 +293,7 @@
 	return 0;
 err:
 	up_write(&mm->mmap_sem);
+err_free:
 	bprm->vma = NULL;
 	kmem_cache_free(vm_area_cachep, vma);
 	return err;
@@ -700,7 +700,9 @@
 		bprm->loader -= stack_shift;
 	bprm->exec -= stack_shift;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	vm_flags = VM_STACK_FLAGS;
 
 	/*
@@ -1499,9 +1501,6 @@
 
 		kunmap_atomic(kaddr);
 		put_arg_page(page);
-
-		if (offset == PAGE_SIZE)
-			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
 	} while (offset == PAGE_SIZE);
 
 	bprm->p++;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index c1400b1..868c023 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -51,7 +51,7 @@
 	}
 	down_read(&ei->dax_sem);
 
-	ret = __dax_fault(vma, vmf, ext2_get_block, NULL);
+	ret = __dax_fault(vma, vmf, ext2_get_block);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
@@ -72,7 +72,7 @@
 	}
 	down_read(&ei->dax_sem);
 
-	ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL);
+	ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
 
 	up_read(&ei->dax_sem);
 	if (flags & FAULT_FLAG_WRITE)
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b675610..fcbe586 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -26,6 +26,7 @@
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
 #include <linux/dax.h>
+#include <linux/blkdev.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
@@ -737,19 +738,18 @@
 		 * so that it's not found by another thread before it's
 		 * initialised
 		 */
-		err = dax_clear_sectors(inode->i_sb->s_bdev,
-				le32_to_cpu(chain[depth-1].key) <<
-				(inode->i_blkbits - 9),
-				1 << inode->i_blkbits);
+		err = sb_issue_zeroout(inode->i_sb,
+				le32_to_cpu(chain[depth-1].key), count,
+				GFP_NOFS);
 		if (err) {
 			mutex_unlock(&ei->truncate_mutex);
 			goto cleanup;
 		}
-	}
+	} else
+		set_buffer_new(bh_result);
 
 	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
 	mutex_unlock(&ei->truncate_mutex);
-	set_buffer_new(bh_result);
 got_it:
 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 	if (count > blocks_to_boundary)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index b78caf2..1d93795 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -922,16 +922,9 @@
 	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
 	if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
-		if (blocksize != PAGE_SIZE) {
-			ext2_msg(sb, KERN_ERR,
-					"error: unsupported blocksize for dax");
+		err = bdev_dax_supported(sb, blocksize);
+		if (err)
 			goto failed_mount;
-		}
-		if (!sb->s_bdev->bd_disk->fops->direct_access) {
-			ext2_msg(sb, KERN_ERR,
-					"error: device does not support dax");
-			goto failed_mount;
-		}
 	}
 
 	/* If the blocksize doesn't match, re-read the thing.. */
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 7fd3b86..7b9e9c1 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -18,10 +18,11 @@
 
 static int
 ext2_xattr_security_set(const struct xattr_handler *handler,
-			struct dentry *dentry, const char *name,
-			const void *value, size_t size, int flags)
+			struct dentry *unused, struct inode *inode,
+			const char *name, const void *value,
+			size_t size, int flags)
 {
-	return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name,
+	return ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, name,
 			      value, size, flags);
 }
 
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 0f85705..65049b7 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -25,10 +25,11 @@
 
 static int
 ext2_xattr_trusted_set(const struct xattr_handler *handler,
-		       struct dentry *dentry, const char *name,
-		       const void *value, size_t size, int flags)
+		       struct dentry *unused, struct inode *inode,
+		       const char *name, const void *value,
+		       size_t size, int flags)
 {
-	return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name,
+	return ext2_xattr_set(inode, EXT2_XATTR_INDEX_TRUSTED, name,
 			      value, size, flags);
 }
 
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 1fafd27..fb2f992 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -29,13 +29,14 @@
 
 static int
 ext2_xattr_user_set(const struct xattr_handler *handler,
-		    struct dentry *dentry, const char *name,
-		    const void *value, size_t size, int flags)
+		    struct dentry *unused, struct inode *inode,
+		    const char *name, const void *value,
+		    size_t size, int flags)
 {
-	if (!test_opt(dentry->d_sb, XATTR_USER))
+	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
 
-	return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_USER,
+	return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER,
 			      name, value, size, flags);
 }
 
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index fe1f50f..3020fd7 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -610,7 +610,8 @@
 
 	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
 
-	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+	jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+	return 1;
 }
 
 /*
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 5d00bf0..68323e3 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -150,6 +150,11 @@
 	while (ctx->pos < inode->i_size) {
 		struct ext4_map_blocks map;
 
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			goto errout;
+		}
+		cond_resched();
 		map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
 		map.m_len = 1;
 		err = ext4_map_blocks(NULL, inode, &map, 0);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 72f4c9e..b84aa1c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -33,6 +33,7 @@
 #include <linux/ratelimit.h>
 #include <crypto/hash.h>
 #include <linux/falloc.h>
+#include <linux/percpu-rwsem.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
 #endif
@@ -581,6 +582,9 @@
 #define EXT4_GET_BLOCKS_ZERO			0x0200
 #define EXT4_GET_BLOCKS_CREATE_ZERO		(EXT4_GET_BLOCKS_CREATE |\
 					EXT4_GET_BLOCKS_ZERO)
+	/* Caller will submit data before dropping transaction handle. This
+	 * allows jbd2 to avoid submitting data before commit. */
+#define EXT4_GET_BLOCKS_IO_SUBMIT		0x0400
 
 /*
  * The bit position of these flags must not overlap with any of the
@@ -1505,6 +1509,9 @@
 	struct ratelimit_state s_err_ratelimit_state;
 	struct ratelimit_state s_warning_ratelimit_state;
 	struct ratelimit_state s_msg_ratelimit_state;
+
+	/* Barrier between changing inodes' journal flags and writepages ops. */
+	struct percpu_rw_semaphore s_journal_flag_rwsem;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1549,7 +1556,6 @@
 	EXT4_STATE_DIOREAD_LOCK,	/* Disable support for dio read
 					   nolocking */
 	EXT4_STATE_MAY_INLINE_DATA,	/* may have in-inode data */
-	EXT4_STATE_ORDERED_MODE,	/* data=ordered mode */
 	EXT4_STATE_EXT_PRECACHED,	/* extents have been precached */
 };
 
@@ -2521,8 +2527,8 @@
 struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 			     struct buffer_head *bh_result, int create);
-int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
-			    struct buffer_head *bh_result, int create);
+int ext4_dax_get_block(struct inode *inode, sector_t iblock,
+		       struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
 		   struct buffer_head *bh_result, int create);
 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
@@ -2581,7 +2587,6 @@
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 				struct ext4_map_blocks *map, int flags);
-extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
@@ -3329,6 +3334,13 @@
 	}
 }
 
+static inline bool ext4_aligned_io(struct inode *inode, loff_t off, loff_t len)
+{
+	int blksize = 1 << inode->i_blkbits;
+
+	return IS_ALIGNED(off, blksize) && IS_ALIGNED(len, blksize);
+}
+
 #endif	/* __KERNEL__ */
 
 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 5f58462..09c1ef3 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -359,10 +359,21 @@
 	return 0;
 }
 
-static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
+static inline int ext4_jbd2_inode_add_write(handle_t *handle,
+					    struct inode *inode)
 {
 	if (ext4_handle_valid(handle))
-		return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
+		return jbd2_journal_inode_add_write(handle,
+						    EXT4_I(inode)->jinode);
+	return 0;
+}
+
+static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
+					   struct inode *inode)
+{
+	if (ext4_handle_valid(handle))
+		return jbd2_journal_inode_add_wait(handle,
+						   EXT4_I(inode)->jinode);
 	return 0;
 }
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 95bf467..2a2eef9 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -120,9 +120,14 @@
 
 	if (!ext4_handle_valid(handle))
 		return 0;
-	if (handle->h_buffer_credits > needed)
+	if (handle->h_buffer_credits >= needed)
 		return 0;
-	err = ext4_journal_extend(handle, needed);
+	/*
+	 * If we need to extend the journal, get a few extra blocks
+	 * while we're at it for efficiency's sake.
+	 */
+	needed += 3;
+	err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
 	if (err <= 0)
 		return err;
 	err = ext4_truncate_restart_trans(handle, inode, needed);
@@ -907,13 +912,6 @@
 
 		eh = ext_block_hdr(bh);
 		ppos++;
-		if (unlikely(ppos > depth)) {
-			put_bh(bh);
-			EXT4_ERROR_INODE(inode,
-					 "ppos %d > depth %d", ppos, depth);
-			ret = -EFSCORRUPTED;
-			goto err;
-		}
 		path[ppos].p_bh = bh;
 		path[ppos].p_hdr = eh;
 	}
@@ -2583,7 +2581,7 @@
 		}
 	} else
 		ext4_error(sbi->s_sb, "strange request: removal(2) "
-			   "%u-%u from %u:%u\n",
+			   "%u-%u from %u:%u",
 			   from, to, le32_to_cpu(ex->ee_block), ee_len);
 	return 0;
 }
@@ -3738,7 +3736,7 @@
 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
 #ifdef EXT4_DEBUG
 		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
-			     " len %u; IO logical block %llu, len %u\n",
+			     " len %u; IO logical block %llu, len %u",
 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
 			     (unsigned long long)map->m_lblk, map->m_len);
 #endif
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index e38b987a..37e0592 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -707,7 +707,7 @@
 	    (status & EXTENT_STATUS_WRITTEN)) {
 		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
 				" delayed and written which can potentially "
-				" cause data loss.\n", lblk, len);
+				" cause data loss.", lblk, len);
 		WARN_ON(1);
 	}
 
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 00ff691..df44c87 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -202,7 +202,7 @@
 	if (IS_ERR(handle))
 		result = VM_FAULT_SIGBUS;
 	else
-		result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);
+		result = __dax_fault(vma, vmf, ext4_dax_get_block);
 
 	if (write) {
 		if (!IS_ERR(handle))
@@ -238,7 +238,7 @@
 		result = VM_FAULT_SIGBUS;
 	else
 		result = __dax_pmd_fault(vma, addr, pmd, flags,
-				ext4_dax_mmap_get_block, NULL);
+					 ext4_dax_get_block);
 
 	if (write) {
 		if (!IS_ERR(handle))
@@ -373,7 +373,7 @@
 	if (ext4_encrypted_inode(d_inode(dir)) &&
 	    !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
 		ext4_warning(inode->i_sb,
-			     "Inconsistent encryption contexts: %lu/%lu\n",
+			     "Inconsistent encryption contexts: %lu/%lu",
 			     (unsigned long) d_inode(dir)->i_ino,
 			     (unsigned long) inode->i_ino);
 		dput(dir);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 237b877..3da4cf8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1150,25 +1150,20 @@
 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
 	ext4_group_t block_group;
 	int bit;
-	struct buffer_head *bitmap_bh;
+	struct buffer_head *bitmap_bh = NULL;
 	struct inode *inode = NULL;
-	long err = -EIO;
+	int err = -EFSCORRUPTED;
 
-	/* Error cases - e2fsck has already cleaned up for us */
-	if (ino > max_ino) {
-		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
-		err = -EFSCORRUPTED;
-		goto error;
-	}
+	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
+		goto bad_orphan;
 
 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 	if (IS_ERR(bitmap_bh)) {
-		err = PTR_ERR(bitmap_bh);
-		ext4_warning(sb, "inode bitmap error %ld for orphan %lu",
-			     ino, err);
-		goto error;
+		ext4_error(sb, "inode bitmap error %ld for orphan %lu",
+			   ino, PTR_ERR(bitmap_bh));
+		return (struct inode *) bitmap_bh;
 	}
 
 	/* Having the inode bit set should be a 100% indicator that this
@@ -1179,15 +1174,21 @@
 		goto bad_orphan;
 
 	inode = ext4_iget(sb, ino);
-	if (IS_ERR(inode))
-		goto iget_failed;
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
+			   ino, err);
+		return inode;
+	}
 
 	/*
-	 * If the orphans has i_nlinks > 0 then it should be able to be
-	 * truncated, otherwise it won't be removed from the orphan list
-	 * during processing and an infinite loop will result.
+	 * If the orphan has i_nlink > 0 then it should be able to
+	 * be truncated, otherwise it won't be removed from the orphan
+	 * list during processing and an infinite loop will result.
+	 * Similarly, it must not be a bad inode.
 	 */
-	if (inode->i_nlink && !ext4_can_truncate(inode))
+	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
+	    is_bad_inode(inode))
 		goto bad_orphan;
 
 	if (NEXT_ORPHAN(inode) > max_ino)
@@ -1195,29 +1196,25 @@
 	brelse(bitmap_bh);
 	return inode;
 
-iget_failed:
-	err = PTR_ERR(inode);
-	inode = NULL;
 bad_orphan:
-	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
-	printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
-	       bit, (unsigned long long)bitmap_bh->b_blocknr,
-	       ext4_test_bit(bit, bitmap_bh->b_data));
-	printk(KERN_WARNING "inode=%p\n", inode);
+	ext4_error(sb, "bad orphan inode %lu", ino);
+	if (bitmap_bh)
+		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+		       bit, (unsigned long long)bitmap_bh->b_blocknr,
+		       ext4_test_bit(bit, bitmap_bh->b_data));
 	if (inode) {
-		printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
+		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
 		       is_bad_inode(inode));
-		printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
+		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
 		       NEXT_ORPHAN(inode));
-		printk(KERN_WARNING "max_ino=%lu\n", max_ino);
-		printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
+		printk(KERN_ERR "max_ino=%lu\n", max_ino);
+		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
 		/* Avoid freeing blocks if we got a bad deleted inode */
 		if (inode->i_nlink == 0)
 			inode->i_blocks = 0;
 		iput(inode);
 	}
 	brelse(bitmap_bh);
-error:
 	return ERR_PTR(err);
 }
 
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 627b7e8..bc15c2c 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -649,133 +649,6 @@
 }
 
 /*
- * O_DIRECT for ext3 (or indirect map) based files
- *
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list.  So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- * If the O_DIRECT write is intantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file. But current
- * VFS code falls back into buffered path in that case so we are safe.
- */
-ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
-	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	loff_t offset = iocb->ki_pos;
-	handle_t *handle;
-	ssize_t ret;
-	int orphan = 0;
-	size_t count = iov_iter_count(iter);
-	int retries = 0;
-
-	if (iov_iter_rw(iter) == WRITE) {
-		loff_t final_size = offset + count;
-
-		if (final_size > inode->i_size) {
-			/* Credits for sb + inode write */
-			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-			if (IS_ERR(handle)) {
-				ret = PTR_ERR(handle);
-				goto out;
-			}
-			ret = ext4_orphan_add(handle, inode);
-			if (ret) {
-				ext4_journal_stop(handle);
-				goto out;
-			}
-			orphan = 1;
-			ei->i_disksize = inode->i_size;
-			ext4_journal_stop(handle);
-		}
-	}
-
-retry:
-	if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
-		/*
-		 * Nolock dioread optimization may be dynamically disabled
-		 * via ext4_inode_block_unlocked_dio(). Check inode's state
-		 * while holding extra i_dio_count ref.
-		 */
-		inode_dio_begin(inode);
-		smp_mb();
-		if (unlikely(ext4_test_inode_state(inode,
-						    EXT4_STATE_DIOREAD_LOCK))) {
-			inode_dio_end(inode);
-			goto locked;
-		}
-		if (IS_DAX(inode))
-			ret = dax_do_io(iocb, inode, iter,
-					ext4_dio_get_block, NULL, 0);
-		else
-			ret = __blockdev_direct_IO(iocb, inode,
-						   inode->i_sb->s_bdev, iter,
-						   ext4_dio_get_block,
-						   NULL, NULL, 0);
-		inode_dio_end(inode);
-	} else {
-locked:
-		if (IS_DAX(inode))
-			ret = dax_do_io(iocb, inode, iter,
-					ext4_dio_get_block, NULL, DIO_LOCKING);
-		else
-			ret = blockdev_direct_IO(iocb, inode, iter,
-						 ext4_dio_get_block);
-
-		if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
-			loff_t isize = i_size_read(inode);
-			loff_t end = offset + count;
-
-			if (end > isize)
-				ext4_truncate_failed_write(inode);
-		}
-	}
-	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-
-	if (orphan) {
-		int err;
-
-		/* Credits for sb + inode write */
-		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-		if (IS_ERR(handle)) {
-			/* This is really bad luck. We've written the data
-			 * but cannot extend i_size. Bail out and pretend
-			 * the write failed... */
-			ret = PTR_ERR(handle);
-			if (inode->i_nlink)
-				ext4_orphan_del(NULL, inode);
-
-			goto out;
-		}
-		if (inode->i_nlink)
-			ext4_orphan_del(handle, inode);
-		if (ret > 0) {
-			loff_t end = offset + ret;
-			if (end > inode->i_size) {
-				ei->i_disksize = end;
-				i_size_write(inode, end);
-				/*
-				 * We're going to return a positive `ret'
-				 * here due to non-zero-length I/O, so there's
-				 * no way of reporting error returns from
-				 * ext4_mark_inode_dirty() to userspace.  So
-				 * ignore it.
-				 */
-				ext4_mark_inode_dirty(handle, inode);
-			}
-		}
-		err = ext4_journal_stop(handle);
-		if (ret == 0)
-			ret = err;
-	}
-out:
-	return ret;
-}
-
-/*
  * Calculate the number of metadata blocks needed to reserve
  * to allocate a new block at @lblocks for a non-extent (indirect-mapped) file
  */
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7bc6c85..ff7538c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1780,7 +1780,7 @@
 			ext4_warning(dir->i_sb,
 				     "bad inline directory (dir #%lu) - "
 				     "inode %u, rec_len %u, name_len %d"
-				     "inline size %d\n",
+				     "inline size %d",
 				     dir->i_ino, le32_to_cpu(de->inode),
 				     le16_to_cpu(de->rec_len), de->name_len,
 				     inline_size);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 79b298d..f7140ca6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -684,6 +684,24 @@
 		ret = check_block_validity(inode, map);
 		if (ret != 0)
 			return ret;
+
+		/*
+		 * Inodes with freshly allocated blocks where contents will be
+		 * visible after transaction commit must be on transaction's
+		 * ordered data list.
+		 */
+		if (map->m_flags & EXT4_MAP_NEW &&
+		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
+		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
+		    !IS_NOQUOTA(inode) &&
+		    ext4_should_order_data(inode)) {
+			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
+				ret = ext4_jbd2_inode_add_wait(handle, inode);
+			else
+				ret = ext4_jbd2_inode_add_write(handle, inode);
+			if (ret)
+				return ret;
+		}
 	}
 	return retval;
 }
@@ -1289,15 +1307,6 @@
 	int i_size_changed = 0;
 
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
-		ret = ext4_jbd2_file_inode(handle, inode);
-		if (ret) {
-			unlock_page(page);
-			put_page(page);
-			goto errout;
-		}
-	}
-
 	if (ext4_has_inline_data(inode)) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
@@ -2313,7 +2322,8 @@
 	 * the data was copied into the page cache.
 	 */
 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
-			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
+			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
+			   EXT4_GET_BLOCKS_IO_SUBMIT;
 	dioread_nolock = ext4_should_dioread_nolock(inode);
 	if (dioread_nolock)
 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
@@ -2602,11 +2612,14 @@
 	struct blk_plug plug;
 	bool give_up_on_write = false;
 
+	percpu_down_read(&sbi->s_journal_flag_rwsem);
 	trace_ext4_writepages(inode, wbc);
 
-	if (dax_mapping(mapping))
-		return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
-						   wbc);
+	if (dax_mapping(mapping)) {
+		ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
+						  wbc);
+		goto out_writepages;
+	}
 
 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
@@ -2776,6 +2789,7 @@
 out_writepages:
 	trace_ext4_writepages_result(inode, wbc, ret,
 				     nr_to_write - wbc->nr_to_write);
+	percpu_up_read(&sbi->s_journal_flag_rwsem);
 	return ret;
 }
 
@@ -3215,75 +3229,52 @@
 }
 
 #ifdef CONFIG_FS_DAX
-int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
-			    struct buffer_head *bh_result, int create)
+/*
+ * Get block function for DAX IO and mmap faults. It takes care of converting
+ * unwritten extents to written ones and initializes new / converted blocks
+ * to zeros.
+ */
+int ext4_dax_get_block(struct inode *inode, sector_t iblock,
+		       struct buffer_head *bh_result, int create)
 {
-	int ret, err;
-	int credits;
-	struct ext4_map_blocks map;
-	handle_t *handle = NULL;
-	int flags = 0;
+	int ret;
 
-	ext4_debug("ext4_dax_mmap_get_block: inode %lu, create flag %d\n",
-		   inode->i_ino, create);
-	map.m_lblk = iblock;
-	map.m_len = bh_result->b_size >> inode->i_blkbits;
-	credits = ext4_chunk_trans_blocks(inode, map.m_len);
-	if (create) {
-		flags |= EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_CREATE_ZERO;
-		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
+	ext4_debug("inode %lu, create flag %d\n", inode->i_ino, create);
+	if (!create)
+		return _ext4_get_block(inode, iblock, bh_result, 0);
+
+	ret = ext4_get_block_trans(inode, iblock, bh_result,
+				   EXT4_GET_BLOCKS_PRE_IO |
+				   EXT4_GET_BLOCKS_CREATE_ZERO);
+	if (ret < 0)
+		return ret;
+
+	if (buffer_unwritten(bh_result)) {
+		/*
+		 * We are protected by i_mmap_sem or i_mutex so we know block
+		 * cannot go away from under us even though we dropped
+		 * i_data_sem. Convert extent to written and write zeros there.
+		 */
+		ret = ext4_get_block_trans(inode, iblock, bh_result,
+					   EXT4_GET_BLOCKS_CONVERT |
+					   EXT4_GET_BLOCKS_CREATE_ZERO);
+		if (ret < 0)
 			return ret;
-		}
 	}
-
-	ret = ext4_map_blocks(handle, inode, &map, flags);
-	if (create) {
-		err = ext4_journal_stop(handle);
-		if (ret >= 0 && err < 0)
-			ret = err;
-	}
-	if (ret <= 0)
-		goto out;
-	if (map.m_flags & EXT4_MAP_UNWRITTEN) {
-		int err2;
-
-		/*
-		 * We are protected by i_mmap_sem so we know block cannot go
-		 * away from under us even though we dropped i_data_sem.
-		 * Convert extent to written and write zeros there.
-		 *
-		 * Note: We may get here even when create == 0.
-		 */
-		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto out;
-		}
-
-		err = ext4_map_blocks(handle, inode, &map,
-		      EXT4_GET_BLOCKS_CONVERT | EXT4_GET_BLOCKS_CREATE_ZERO);
-		if (err < 0)
-			ret = err;
-		err2 = ext4_journal_stop(handle);
-		if (err2 < 0 && ret > 0)
-			ret = err2;
-	}
-out:
-	WARN_ON_ONCE(ret == 0 && create);
-	if (ret > 0) {
-		map_bh(bh_result, inode->i_sb, map.m_pblk);
-		/*
-		 * At least for now we have to clear BH_New so that DAX code
-		 * doesn't attempt to zero blocks again in a racy way.
-		 */
-		map.m_flags &= ~EXT4_MAP_NEW;
-		ext4_update_bh_state(bh_result, map.m_flags);
-		bh_result->b_size = map.m_len << inode->i_blkbits;
-		ret = 0;
-	}
-	return ret;
+	/*
+	 * At least for now we have to clear BH_New so that DAX code
+	 * doesn't attempt to zero blocks again in a racy way.
+	 */
+	clear_buffer_new(bh_result);
+	return 0;
+}
+#else
+/* Just define empty function, it will never get called. */
+int ext4_dax_get_block(struct inode *inode, sector_t iblock,
+		       struct buffer_head *bh_result, int create)
+{
+	BUG();
+	return 0;
 }
 #endif
 
@@ -3316,7 +3307,9 @@
 }
 
 /*
- * For ext4 extent files, ext4 will do direct-io write to holes,
+ * Handling of direct IO writes.
+ *
+ * For ext4 extent files, ext4 will do direct-io write even to holes,
  * preallocated extents, and writes that extend the file; no need to
  * fall back to buffered IO.
  *
@@ -3334,10 +3327,11 @@
  * if the machine crashes during the write.
  *
  */
-static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	ssize_t ret;
 	loff_t offset = iocb->ki_pos;
 	size_t count = iov_iter_count(iter);
@@ -3345,10 +3339,25 @@
 	get_block_t *get_block_func = NULL;
 	int dio_flags = 0;
 	loff_t final_size = offset + count;
+	int orphan = 0;
+	handle_t *handle;
 
-	/* Use the old path for reads and writes beyond i_size. */
-	if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
-		return ext4_ind_direct_IO(iocb, iter);
+	if (final_size > inode->i_size) {
+		/* Credits for sb + inode write */
+		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			goto out;
+		}
+		ret = ext4_orphan_add(handle, inode);
+		if (ret) {
+			ext4_journal_stop(handle);
+			goto out;
+		}
+		orphan = 1;
+		ei->i_disksize = inode->i_size;
+		ext4_journal_stop(handle);
+	}
 
 	BUG_ON(iocb->private == NULL);
 
@@ -3357,8 +3366,7 @@
 	 * conversion. This also disallows race between truncate() and
 	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
 	 */
-	if (iov_iter_rw(iter) == WRITE)
-		inode_dio_begin(inode);
+	inode_dio_begin(inode);
 
 	/* If we do a overwrite dio, i_mutex locking can be released */
 	overwrite = *((int *)iocb->private);
@@ -3367,7 +3375,7 @@
 		inode_unlock(inode);
 
 	/*
-	 * We could direct write to holes and fallocate.
+	 * For extent mapped files we could direct write to holes and fallocate.
 	 *
 	 * Allocated blocks to fill the hole are marked as unwritten to prevent
 	 * parallel buffered read to expose the stale data before DIO complete
@@ -3389,7 +3397,23 @@
 	iocb->private = NULL;
 	if (overwrite)
 		get_block_func = ext4_dio_get_block_overwrite;
-	else if (is_sync_kiocb(iocb)) {
+	else if (IS_DAX(inode)) {
+		/*
+		 * We can avoid zeroing for aligned DAX writes beyond EOF. Other
+		 * writes need zeroing either because they can race with page
+		 * faults or because they use partial blocks.
+		 */
+		if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
+		    ext4_aligned_io(inode, offset, count))
+			get_block_func = ext4_dio_get_block;
+		else
+			get_block_func = ext4_dax_get_block;
+		dio_flags = DIO_LOCKING;
+	} else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+		   round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
+		get_block_func = ext4_dio_get_block;
+		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
+	} else if (is_sync_kiocb(iocb)) {
 		get_block_func = ext4_dio_get_block_unwritten_sync;
 		dio_flags = DIO_LOCKING;
 	} else {
@@ -3399,10 +3423,10 @@
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
 #endif
-	if (IS_DAX(inode))
+	if (IS_DAX(inode)) {
 		ret = dax_do_io(iocb, inode, iter, get_block_func,
 				ext4_end_io_dio, dio_flags);
-	else
+	} else
 		ret = __blockdev_direct_IO(iocb, inode,
 					   inode->i_sb->s_bdev, iter,
 					   get_block_func,
@@ -3422,12 +3446,86 @@
 		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 	}
 
-	if (iov_iter_rw(iter) == WRITE)
-		inode_dio_end(inode);
+	inode_dio_end(inode);
 	/* take i_mutex locking again if we do an overwrite dio */
 	if (overwrite)
 		inode_lock(inode);
 
+	if (ret < 0 && final_size > inode->i_size)
+		ext4_truncate_failed_write(inode);
+
+	/* Handle extending of i_size after direct IO write */
+	if (orphan) {
+		int err;
+
+		/* Credits for sb + inode write */
+		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+		if (IS_ERR(handle)) {
+			/* This is really bad luck. We've written the data
+			 * but cannot extend i_size. Bail out and pretend
+			 * the write failed... */
+			ret = PTR_ERR(handle);
+			if (inode->i_nlink)
+				ext4_orphan_del(NULL, inode);
+
+			goto out;
+		}
+		if (inode->i_nlink)
+			ext4_orphan_del(handle, inode);
+		if (ret > 0) {
+			loff_t end = offset + ret;
+			if (end > inode->i_size) {
+				ei->i_disksize = end;
+				i_size_write(inode, end);
+				/*
+				 * We're going to return a positive `ret'
+				 * here due to non-zero-length I/O, so there's
+				 * no way of reporting error returns from
+				 * ext4_mark_inode_dirty() to userspace.  So
+				 * ignore it.
+				 */
+				ext4_mark_inode_dirty(handle, inode);
+			}
+		}
+		err = ext4_journal_stop(handle);
+		if (ret == 0)
+			ret = err;
+	}
+out:
+	return ret;
+}
+
+static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
+{
+	int unlocked = 0;
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
+	ssize_t ret;
+
+	if (ext4_should_dioread_nolock(inode)) {
+		/*
+		 * Nolock dioread optimization may be dynamically disabled
+		 * via ext4_inode_block_unlocked_dio(). Check inode's state
+		 * while holding extra i_dio_count ref.
+		 */
+		inode_dio_begin(inode);
+		smp_mb();
+		if (unlikely(ext4_test_inode_state(inode,
+						    EXT4_STATE_DIOREAD_LOCK)))
+			inode_dio_end(inode);
+		else
+			unlocked = 1;
+	}
+	if (IS_DAX(inode)) {
+		ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block,
+				NULL, unlocked ? 0 : DIO_LOCKING);
+	} else {
+		ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+					   iter, ext4_dio_get_block,
+					   NULL, NULL,
+					   unlocked ? 0 : DIO_LOCKING);
+	}
+	if (unlocked)
+		inode_dio_end(inode);
 	return ret;
 }
 
@@ -3455,10 +3553,10 @@
 		return 0;
 
 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		ret = ext4_ext_direct_IO(iocb, iter);
+	if (iov_iter_rw(iter) == READ)
+		ret = ext4_direct_IO_read(iocb, iter);
 	else
-		ret = ext4_ind_direct_IO(iocb, iter);
+		ret = ext4_direct_IO_write(iocb, iter);
 	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
 	return ret;
 }
@@ -3534,10 +3632,7 @@
 {
 	switch (ext4_inode_journal_mode(inode)) {
 	case EXT4_INODE_ORDERED_DATA_MODE:
-		ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
-		break;
 	case EXT4_INODE_WRITEBACK_DATA_MODE:
-		ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
 		break;
 	case EXT4_INODE_JOURNAL_DATA_MODE:
 		inode->i_mapping->a_ops = &ext4_journalled_aops;
@@ -3630,8 +3725,8 @@
 	} else {
 		err = 0;
 		mark_buffer_dirty(bh);
-		if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
-			err = ext4_jbd2_file_inode(handle, inode);
+		if (ext4_should_order_data(inode))
+			err = ext4_jbd2_inode_add_write(handle, inode);
 	}
 
 unlock:
@@ -5429,6 +5524,7 @@
 	journal_t *journal;
 	handle_t *handle;
 	int err;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
 	/*
 	 * We have to be very careful here: changing a data block's
@@ -5445,22 +5541,30 @@
 		return 0;
 	if (is_journal_aborted(journal))
 		return -EROFS;
-	/* We have to allocate physical blocks for delalloc blocks
-	 * before flushing journal. otherwise delalloc blocks can not
-	 * be allocated any more. even more truncate on delalloc blocks
-	 * could trigger BUG by flushing delalloc blocks in journal.
-	 * There is no delalloc block in non-journal data mode.
-	 */
-	if (val && test_opt(inode->i_sb, DELALLOC)) {
-		err = ext4_alloc_da_blocks(inode);
-		if (err < 0)
-			return err;
-	}
 
 	/* Wait for all existing dio workers */
 	ext4_inode_block_unlocked_dio(inode);
 	inode_dio_wait(inode);
 
+	/*
+	 * Before flushing the journal and switching inode's aops, we have
+	 * to flush all dirty data the inode has. There can be outstanding
+	 * delayed allocations, there can be unwritten extents created by
+	 * fallocate or buffered writes in dioread_nolock mode covered by
+	 * dirty data which can be converted only after flushing the dirty
+	 * data (and journalled aops don't know how to handle these cases).
+	 */
+	if (val) {
+		down_write(&EXT4_I(inode)->i_mmap_sem);
+		err = filemap_write_and_wait(inode->i_mapping);
+		if (err < 0) {
+			up_write(&EXT4_I(inode)->i_mmap_sem);
+			ext4_inode_resume_unlocked_dio(inode);
+			return err;
+		}
+	}
+
+	percpu_down_write(&sbi->s_journal_flag_rwsem);
 	jbd2_journal_lock_updates(journal);
 
 	/*
@@ -5477,6 +5581,7 @@
 		err = jbd2_journal_flush(journal);
 		if (err < 0) {
 			jbd2_journal_unlock_updates(journal);
+			percpu_up_write(&sbi->s_journal_flag_rwsem);
 			ext4_inode_resume_unlocked_dio(inode);
 			return err;
 		}
@@ -5485,6 +5590,10 @@
 	ext4_set_aops(inode);
 
 	jbd2_journal_unlock_updates(journal);
+	percpu_up_write(&sbi->s_journal_flag_rwsem);
+
+	if (val)
+		up_write(&EXT4_I(inode)->i_mmap_sem);
 	ext4_inode_resume_unlocked_dio(inode);
 
 	/* Finally we can mark the inode as dirty. */
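The inode.c hunks above also introduce a per-filesystem percpu rw-semaphore, s_journal_flag_rwsem: ext4_writepages() holds it for reading while switching an inode between journalling modes takes it for writing, so a mode change can never race with in-flight writeback. A rough sketch of that locking pattern only, not of the actual ext4 call chains (the function names below are illustrative):

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore journal_flag_rwsem;

static int init_journal_flag_lock(void)
{
	return percpu_init_rwsem(&journal_flag_rwsem);
}

/* Frequent path: many writeback calls can run concurrently as readers. */
static void writeback_pages(void)
{
	percpu_down_read(&journal_flag_rwsem);
	/* ... submit dirty pages using the current address_space ops ... */
	percpu_up_read(&journal_flag_rwsem);
}

/* Rare path: excludes all readers while the journalling mode (and thus
 * the address_space ops) is switched. */
static void change_journal_mode(void)
{
	percpu_down_write(&journal_flag_rwsem);
	/* ... flush the journal, then switch inode->i_mapping->a_ops ... */
	percpu_up_write(&journal_flag_rwsem);
}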
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 7497f50..28cc412 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -365,7 +365,7 @@
 		struct dquot *transfer_to[MAXQUOTAS] = { };
 
 		transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
-		if (transfer_to[PRJQUOTA]) {
+		if (!IS_ERR(transfer_to[PRJQUOTA])) {
 			err = __dquot_transfer(inode, transfer_to);
 			dqput(transfer_to[PRJQUOTA]);
 			if (err)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index eeeade7..c1ab3ec 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1266,6 +1266,7 @@
 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
 {
 	int order = 1;
+	int bb_incr = 1 << (e4b->bd_blkbits - 1);
 	void *bb;
 
 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
@@ -1278,7 +1279,8 @@
 			/* this block is part of buddy of order 'order' */
 			return order;
 		}
-		bb += 1 << (e4b->bd_blkbits - order);
+		bb += bb_incr;
+		bb_incr >>= 1;
 		order++;
 	}
 	return 0;
@@ -2583,7 +2585,7 @@
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	unsigned i, j;
-	unsigned offset;
+	unsigned offset, offset_incr;
 	unsigned max;
 	int ret;
 
@@ -2612,11 +2614,13 @@
 
 	i = 1;
 	offset = 0;
+	offset_incr = 1 << (sb->s_blocksize_bits - 1);
 	max = sb->s_blocksize << 2;
 	do {
 		sbi->s_mb_offsets[i] = offset;
 		sbi->s_mb_maxs[i] = max;
-		offset += 1 << (sb->s_blocksize_bits - i);
+		offset += offset_incr;
+		offset_incr = offset_incr >> 1;
 		max = max >> 1;
 		i++;
 	} while (i <= sb->s_blocksize_bits + 1);
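Both mballoc hunks above replace a shift whose count shrinks with the loop index by a halving increment. The motivation is that 1 << (bits - i) becomes a negative-count shift (undefined behaviour) once i exceeds bits, whereas starting from 1 << (bits - 1) and halving each round yields the same increments and simply reaches zero. A small standalone sketch of the equivalence (the value of bits is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int bits = 12;			/* e.g. 4096-byte blocks */
	unsigned int incr = 1u << (bits - 1);
	unsigned int offset = 0;
	unsigned int i;

	/* Same offsets the old "offset += 1 << (bits - i)" loop produced for
	 * i = 1..bits, but with no shift count ever going negative. */
	for (i = 1; i <= bits + 1; i++) {
		printf("i=%2u offset=%u incr=%u\n", i, offset, incr);
		offset += incr;
		incr >>= 1;
	}
	return 0;
}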
@@ -4935,7 +4939,7 @@
 	 * boundary.
 	 */
 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
-		ext4_warning(sb, "too much blocks added to group %u\n",
+		ext4_warning(sb, "too much blocks added to group %u",
 			     block_group);
 		err = -EINVAL;
 		goto error_return;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 2444527..23d436d 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -121,7 +121,7 @@
 	__ext4_warning(sb, function, line, "%s", msg);
 	__ext4_warning(sb, function, line,
 		       "MMP failure info: last update time: %llu, last update "
-		       "node: %s, last update device: %s\n",
+		       "node: %s, last update device: %s",
 		       (long long unsigned int) le64_to_cpu(mmp->mmp_time),
 		       mmp->mmp_nodename, mmp->mmp_bdevname);
 }
@@ -353,7 +353,7 @@
 	 * wait for MMP interval and check mmp_seq.
 	 */
 	if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
-		ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+		ext4_warning(sb, "MMP startup interrupted, failing mount");
 		goto failed;
 	}
 
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 325cef4..a920c5d 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -400,7 +400,7 @@
 
 	/* Even in case of data=writeback it is reasonable to pin
 	 * inode to transaction, to prevent unexpected data loss */
-	*err = ext4_jbd2_file_inode(handle, orig_inode);
+	*err = ext4_jbd2_inode_add_write(handle, orig_inode);
 
 unlock_pages:
 	unlock_page(pagep[0]);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5611ec9..ec4c399 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1107,6 +1107,11 @@
 	}
 
 	while (1) {
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			goto errout;
+		}
+		cond_resched();
 		block = dx_get_block(frame->at);
 		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
 					     start_hash, start_minor_hash);
@@ -1613,7 +1618,7 @@
 			if (nokey)
 				return ERR_PTR(-ENOKEY);
 			ext4_warning(inode->i_sb,
-				     "Inconsistent encryption contexts: %lu/%lu\n",
+				     "Inconsistent encryption contexts: %lu/%lu",
 				     (unsigned long) dir->i_ino,
 				     (unsigned long) inode->i_ino);
 			return ERR_PTR(-EPERM);
@@ -2828,7 +2833,7 @@
 			 * list entries can cause panics at unmount time.
 			 */
 			mutex_lock(&sbi->s_orphan_lock);
-			list_del(&EXT4_I(inode)->i_orphan);
+			list_del_init(&EXT4_I(inode)->i_orphan);
 			mutex_unlock(&sbi->s_orphan_lock);
 		}
 	}
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index e4fc8ea..2a01df9 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -342,9 +342,7 @@
 	if (bio) {
 		int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
 			    WRITE_SYNC : WRITE;
-		bio_get(io->io_bio);
 		submit_bio(io_op, io->io_bio);
-		bio_put(io->io_bio);
 	}
 	io->io_bio = NULL;
 }
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 34038e3..cf68100 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -41,7 +41,7 @@
 	 */
 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
 		ext4_warning(sb, "There are errors in the filesystem, "
-			     "so online resizing is not allowed\n");
+			     "so online resizing is not allowed");
 		return -EPERM;
 	}
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 304c712..3822a5a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -859,6 +859,7 @@
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
 	brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
@@ -3416,16 +3417,9 @@
 	}
 
 	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
-		if (blocksize != PAGE_SIZE) {
-			ext4_msg(sb, KERN_ERR,
-					"error: unsupported blocksize for dax");
+		err = bdev_dax_supported(sb, blocksize);
+		if (err)
 			goto failed_mount;
-		}
-		if (!sb->s_bdev->bd_disk->fops->direct_access) {
-			ext4_msg(sb, KERN_ERR,
-					"error: device does not support dax");
-			goto failed_mount;
-		}
 	}
 
 	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
@@ -3930,6 +3924,9 @@
 	if (!err)
 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
 					  GFP_KERNEL);
+	if (!err)
+		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
+
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
 		goto failed_mount6;
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 123a7d0..a892111 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -22,10 +22,11 @@
 
 static int
 ext4_xattr_security_set(const struct xattr_handler *handler,
-			struct dentry *dentry, const char *name,
-			const void *value, size_t size, int flags)
+			struct dentry *unused, struct inode *inode,
+			const char *name, const void *value,
+			size_t size, int flags)
 {
-	return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY,
+	return ext4_xattr_set(inode, EXT4_XATTR_INDEX_SECURITY,
 			      name, value, size, flags);
 }
 
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 60652fa..c7765c7 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -29,10 +29,11 @@
 
 static int
 ext4_xattr_trusted_set(const struct xattr_handler *handler,
-		       struct dentry *dentry, const char *name,
-		       const void *value, size_t size, int flags)
+		       struct dentry *unused, struct inode *inode,
+		       const char *name, const void *value,
+		       size_t size, int flags)
 {
-	return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED,
+	return ext4_xattr_set(inode, EXT4_XATTR_INDEX_TRUSTED,
 			      name, value, size, flags);
 }
 
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index 17a446f..ca20e42 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -30,12 +30,13 @@
 
 static int
 ext4_xattr_user_set(const struct xattr_handler *handler,
-		    struct dentry *dentry, const char *name,
-		    const void *value, size_t size, int flags)
+		    struct dentry *unused, struct inode *inode,
+		    const char *name, const void *value,
+		    size_t size, int flags)
 {
-	if (!test_opt(dentry->d_sb, XATTR_USER))
+	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
-	return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_USER,
+	return ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER,
 			      name, value, size, flags);
 }
 
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 00ea567..e3decae 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -50,10 +50,11 @@
 }
 
 static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
-		struct dentry *dentry, const char *name, const void *value,
+		struct dentry *unused, struct inode *inode,
+		const char *name, const void *value,
 		size_t size, int flags)
 {
-	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
 	switch (handler->flags) {
 	case F2FS_XATTR_INDEX_USER:
@@ -69,7 +70,7 @@
 	default:
 		return -EINVAL;
 	}
-	return f2fs_setxattr(d_inode(dentry), handler->flags, name,
+	return f2fs_setxattr(inode, handler->flags, name,
 					value, size, NULL, flags);
 }
 
@@ -95,11 +96,10 @@
 }
 
 static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
-		struct dentry *dentry, const char *name, const void *value,
+		struct dentry *unused, struct inode *inode,
+		const char *name, const void *value,
 		size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
-
 	if (!inode_owner_or_capable(inode))
 		return -EPERM;
 	if (value == NULL)
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 3078b67..c8c4f79 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -887,6 +887,8 @@
 			put_page(results[i]);
 	}
 
+	wake_up_bit(&cookie->flags, 0);
+
 	_leave("");
 }
 
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b941905..ccd4971 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1719,10 +1719,10 @@
 	return fuse_update_attributes(inode, stat, NULL, NULL);
 }
 
-static int fuse_setxattr(struct dentry *entry, const char *name,
-			 const void *value, size_t size, int flags)
+static int fuse_setxattr(struct dentry *unused, struct inode *inode,
+			 const char *name, const void *value,
+			 size_t size, int flags)
 {
-	struct inode *inode = d_inode(entry);
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	FUSE_ARGS(args);
 	struct fuse_setxattr_in inarg;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 4a01f30..271d939 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -783,12 +783,15 @@
 		       u64 *leaf_out)
 {
 	__be64 *hash;
+	int error;
 
 	hash = gfs2_dir_get_hash_table(dip);
-	if (IS_ERR(hash))
-		return PTR_ERR(hash);
-	*leaf_out = be64_to_cpu(*(hash + index));
-	return 0;
+	error = PTR_ERR_OR_ZERO(hash);
+
+	if (!error)
+		*leaf_out = be64_to_cpu(*(hash + index));
+
+	return error;
 }
 
 static int get_first_leaf(struct gfs2_inode *dip, u32 index,
@@ -798,7 +801,7 @@
 	int error;
 
 	error = get_leaf_nr(dip, index, &leaf_no);
-	if (!IS_ERR_VALUE(error))
+	if (!error)
 		error = get_leaf(dip, leaf_no, bh_out);
 
 	return error;
@@ -1014,7 +1017,7 @@
 
 	index = name->hash >> (32 - dip->i_depth);
 	error = get_leaf_nr(dip, index, &leaf_no);
-	if (IS_ERR_VALUE(error))
+	if (error)
 		return error;
 
 	/*  Get the old leaf block  */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index f42ab53b..3a28535 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1251,10 +1251,10 @@
 }
 
 static int gfs2_xattr_set(const struct xattr_handler *handler,
-			  struct dentry *dentry, const char *name,
-			  const void *value, size_t size, int flags)
+			  struct dentry *unused, struct inode *inode,
+			  const char *name, const void *value,
+			  size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int ret;
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 064f92f..d9a8691 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -13,10 +13,10 @@
 #include "hfs_fs.h"
 #include "btree.h"
 
-int hfs_setxattr(struct dentry *dentry, const char *name,
-		 const void *value, size_t size, int flags)
+int hfs_setxattr(struct dentry *unused, struct inode *inode,
+		 const char *name, const void *value,
+		 size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
 	struct hfs_find_data fd;
 	hfs_cat_rec rec;
 	struct hfs_cat_file *file;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index fa3eed8..ee2f385 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -212,7 +212,7 @@
 extern void hfs_delete_inode(struct inode *);
 
 /* attr.c */
-extern int hfs_setxattr(struct dentry *dentry, const char *name,
+extern int hfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
 			const void *value, size_t size, int flags);
 extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode,
 			    const char *name, void *value, size_t size);
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 4f118d2..d37bb88 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -424,7 +424,7 @@
 	return len;
 }
 
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
+int hfsplus_setxattr(struct inode *inode, const char *name,
 		     const void *value, size_t size, int flags,
 		     const char *prefix, size_t prefixlen)
 {
@@ -437,8 +437,7 @@
 		return -ENOMEM;
 	strcpy(xattr_name, prefix);
 	strcpy(xattr_name + prefixlen, name);
-	res = __hfsplus_setxattr(d_inode(dentry), xattr_name, value, size,
-				 flags);
+	res = __hfsplus_setxattr(inode, xattr_name, value, size, flags);
 	kfree(xattr_name);
 	return res;
 }
@@ -864,8 +863,9 @@
 }
 
 static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
-				struct dentry *dentry, const char *name,
-				const void *buffer, size_t size, int flags)
+				struct dentry *unused, struct inode *inode,
+				const char *name, const void *buffer,
+				size_t size, int flags)
 {
 	/*
 	 * Don't allow setting properly prefixed attributes
@@ -880,7 +880,7 @@
 	 * creates), so we pass the name through unmodified (after
 	 * ensuring it doesn't conflict with another namespace).
 	 */
-	return __hfsplus_setxattr(d_inode(dentry), name, buffer, size, flags);
+	return __hfsplus_setxattr(inode, name, buffer, size, flags);
 }
 
 const struct xattr_handler hfsplus_xattr_osx_handler = {
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index d04ba6f..68f6b53 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -21,7 +21,7 @@
 int __hfsplus_setxattr(struct inode *inode, const char *name,
 			const void *value, size_t size, int flags);
 
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
+int hfsplus_setxattr(struct inode *inode, const char *name,
 				   const void *value, size_t size, int flags,
 				   const char *prefix, size_t prefixlen);
 
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index ae2ca8c..37b3efa 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -23,10 +23,11 @@
 }
 
 static int hfsplus_security_setxattr(const struct xattr_handler *handler,
-				     struct dentry *dentry, const char *name,
-				     const void *buffer, size_t size, int flags)
+				     struct dentry *unused, struct inode *inode,
+				     const char *name, const void *buffer,
+				     size_t size, int flags)
 {
-	return hfsplus_setxattr(dentry, name, buffer, size, flags,
+	return hfsplus_setxattr(inode, name, buffer, size, flags,
 				XATTR_SECURITY_PREFIX,
 				XATTR_SECURITY_PREFIX_LEN);
 }
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index eae2947..94519d6 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -21,10 +21,11 @@
 }
 
 static int hfsplus_trusted_setxattr(const struct xattr_handler *handler,
-				    struct dentry *dentry, const char *name,
-				    const void *buffer, size_t size, int flags)
+				    struct dentry *unused, struct inode *inode,
+				    const char *name, const void *buffer,
+				    size_t size, int flags)
 {
-	return hfsplus_setxattr(dentry, name, buffer, size, flags,
+	return hfsplus_setxattr(inode, name, buffer, size, flags,
 				XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
 }
 
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 3c9eec3..fae6c0e 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -21,10 +21,11 @@
 }
 
 static int hfsplus_user_setxattr(const struct xattr_handler *handler,
-				 struct dentry *dentry, const char *name,
-				 const void *buffer, size_t size, int flags)
+				 struct dentry *unused, struct inode *inode,
+				 const char *name, const void *buffer,
+				 size_t size, int flags)
 {
-	return hfsplus_setxattr(dentry, name, buffer, size, flags,
+	return hfsplus_setxattr(inode, name, buffer, size, flags,
 				XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
 }
 
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 458cf46..82067ca 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/bitmap.h>
 #include <linux/slab.h>
+#include <linux/seq_file.h>
 
 /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
 
@@ -453,10 +454,6 @@
 	int lowercase, eas, chk, errs, chkdsk, timeshift;
 	int o;
 	struct hpfs_sb_info *sbi = hpfs_sb(s);
-	char *new_opts = kstrdup(data, GFP_KERNEL);
-
-	if (!new_opts)
-		return -ENOMEM;
 
 	sync_filesystem(s);
 
@@ -493,17 +490,44 @@
 
 	if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
 
-	replace_mount_options(s, new_opts);
-
 	hpfs_unlock(s);
 	return 0;
 
 out_err:
 	hpfs_unlock(s);
-	kfree(new_opts);
 	return -EINVAL;
 }
 
+static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+	struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
+
+	seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
+	seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
+	seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
+	if (sbi->sb_lowercase)
+		seq_printf(seq, ",case=lower");
+	if (!sbi->sb_chk)
+		seq_printf(seq, ",check=none");
+	if (sbi->sb_chk == 2)
+		seq_printf(seq, ",check=strict");
+	if (!sbi->sb_err)
+		seq_printf(seq, ",errors=continue");
+	if (sbi->sb_err == 2)
+		seq_printf(seq, ",errors=panic");
+	if (!sbi->sb_chkdsk)
+		seq_printf(seq, ",chkdsk=no");
+	if (sbi->sb_chkdsk == 2)
+		seq_printf(seq, ",chkdsk=always");
+	if (!sbi->sb_eas)
+		seq_printf(seq, ",eas=no");
+	if (sbi->sb_eas == 1)
+		seq_printf(seq, ",eas=ro");
+	if (sbi->sb_timeshift)
+		seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
+	return 0;
+}
+
 /* Super operations */
 
 static const struct super_operations hpfs_sops =
@@ -514,7 +538,7 @@
 	.put_super	= hpfs_put_super,
 	.statfs		= hpfs_statfs,
 	.remount_fs	= hpfs_remount_fs,
-	.show_options	= generic_show_options,
+	.show_options	= hpfs_show_options,
 };
 
 static int hpfs_fill_super(struct super_block *s, void *options, int silent)
@@ -537,8 +561,6 @@
 
 	int o;
 
-	save_mount_options(s, options);
-
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi) {
 		return -ENOMEM;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 2ad98d6..7007809 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -219,6 +219,8 @@
 
 	spin_lock(&journal->j_list_lock);
 	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
+		if (!(jinode->i_flags & JI_WRITE_DATA))
+			continue;
 		mapping = jinode->i_vfs_inode->i_mapping;
 		jinode->i_flags |= JI_COMMIT_RUNNING;
 		spin_unlock(&journal->j_list_lock);
@@ -256,6 +258,8 @@
 	/* For locking, see the comment in journal_submit_data_buffers() */
 	spin_lock(&journal->j_list_lock);
 	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
+		if (!(jinode->i_flags & JI_WAIT_DATA))
+			continue;
 		jinode->i_flags |= JI_COMMIT_RUNNING;
 		spin_unlock(&journal->j_list_lock);
 		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 435f0b2..b31852f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -94,7 +94,8 @@
 EXPORT_SYMBOL(jbd2_journal_invalidatepage);
 EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
 EXPORT_SYMBOL(jbd2_journal_force_commit);
-EXPORT_SYMBOL(jbd2_journal_file_inode);
+EXPORT_SYMBOL(jbd2_journal_inode_add_write);
+EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 2c56c3e..1749519 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2462,7 +2462,8 @@
 /*
  * File inode in the inode list of the handle's transaction
  */
-int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
+static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
+				   unsigned long flags)
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal;
@@ -2487,12 +2488,14 @@
 	 * and if jinode->i_next_transaction == transaction, commit code
 	 * will only file the inode where we want it.
 	 */
-	if (jinode->i_transaction == transaction ||
-	    jinode->i_next_transaction == transaction)
+	if ((jinode->i_transaction == transaction ||
+	    jinode->i_next_transaction == transaction) &&
+	    (jinode->i_flags & flags) == flags)
 		return 0;
 
 	spin_lock(&journal->j_list_lock);
-
+	jinode->i_flags |= flags;
+	/* Is inode already attached where we need it? */
 	if (jinode->i_transaction == transaction ||
 	    jinode->i_next_transaction == transaction)
 		goto done;
@@ -2523,6 +2526,17 @@
 	return 0;
 }
 
+int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
+{
+	return jbd2_journal_file_inode(handle, jinode,
+				       JI_WRITE_DATA | JI_WAIT_DATA);
+}
+
+int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
+{
+	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
+}
+
 /*
  * File truncate and transaction commit interact with each other in a
  * non-trivial way.  If a transaction writing data block A is
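The transaction.c hunk above splits the old jbd2_journal_file_inode() into two exported helpers that tag the jbd2_inode with JI_WRITE_DATA and/or JI_WAIT_DATA; the commit-time loops in the jbd2/commit.c hunk earlier then skip inodes lacking the relevant flag in the submit and wait phases respectively. A rough sketch of how a filesystem is expected to choose between the two, modelled on the ext4 hunks in this series (the wrapper below is illustrative, not an existing kernel function):

#include <linux/jbd2.h>

/*
 * If the caller submits the data itself before the transaction commits
 * (as ext4 does on its EXT4_GET_BLOCKS_IO_SUBMIT path), commit only has
 * to wait for that I/O; otherwise commit must both write and wait.
 */
static int order_inode_data(handle_t *handle, struct jbd2_inode *jinode,
			    bool caller_submits_io)
{
	if (caller_submits_io)
		return jbd2_journal_inode_add_wait(handle, jinode);
	return jbd2_journal_inode_add_write(handle, jinode);
}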
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 3ed9a4b4..c2332e3 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -57,10 +57,11 @@
 }
 
 static int jffs2_security_setxattr(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   const void *buffer, size_t size, int flags)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, const void *buffer,
+				   size_t size, int flags)
 {
-	return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY,
+	return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
 				 name, buffer, size, flags);
 }
 
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 4ebecff..5d60308 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -25,10 +25,11 @@
 }
 
 static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
-				  struct dentry *dentry, const char *name,
-				  const void *buffer, size_t size, int flags)
+				  struct dentry *unused, struct inode *inode,
+				  const char *name, const void *buffer,
+				  size_t size, int flags)
 {
-	return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED,
+	return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED,
 				 name, buffer, size, flags);
 }
 
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index bce249e..9d027b4 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -25,10 +25,11 @@
 }
 
 static int jffs2_user_setxattr(const struct xattr_handler *handler,
-			       struct dentry *dentry, const char *name,
-			       const void *buffer, size_t size, int flags)
+			       struct dentry *unused, struct inode *inode,
+			       const char *name, const void *buffer,
+			       size_t size, int flags)
 {
-	return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_USER,
+	return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER,
 				 name, buffer, size, flags);
 }
 
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index beb182b..0bf3c33 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -943,11 +943,10 @@
 }
 
 static int jfs_xattr_set(const struct xattr_handler *handler,
-			 struct dentry *dentry, const char *name,
-			 const void *value, size_t size, int flags)
+			 struct dentry *unused, struct inode *inode,
+			 const char *name, const void *value,
+			 size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
-
 	name = xattr_full_name(handler, name);
 	return __jfs_xattr_set(inode, name, value, size, flags);
 }
@@ -962,11 +961,10 @@
 }
 
 static int jfs_xattr_set_os2(const struct xattr_handler *handler,
-			     struct dentry *dentry, const char *name,
-			     const void *value, size_t size, int flags)
+			     struct dentry *unused, struct inode *inode,
+			     const char *name, const void *value,
+			     size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
-
 	if (is_known_namespace(name))
 		return -EOPNOTSUPP;
 	return __jfs_xattr_set(inode, name, value, size, flags);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 1719649..63b925d 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -160,10 +160,11 @@
 	return 0;
 }
 
-int kernfs_iop_setxattr(struct dentry *dentry, const char *name,
-			const void *value, size_t size, int flags)
+int kernfs_iop_setxattr(struct dentry *unused, struct inode *inode,
+			const char *name, const void *value,
+			size_t size, int flags)
 {
-	struct kernfs_node *kn = dentry->d_fsdata;
+	struct kernfs_node *kn = inode->i_private;
 	struct kernfs_iattrs *attrs;
 	void *secdata;
 	int error;
@@ -175,11 +176,11 @@
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
 		const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
-		error = security_inode_setsecurity(d_inode(dentry), suffix,
+		error = security_inode_setsecurity(inode, suffix,
 						value, size, flags);
 		if (error)
 			return error;
-		error = security_inode_getsecctx(d_inode(dentry),
+		error = security_inode_getsecctx(inode,
 						&secdata, &secdata_len);
 		if (error)
 			return error;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 45c9192..3715923 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -81,7 +81,8 @@
 int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
 int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		       struct kstat *stat);
-int kernfs_iop_setxattr(struct dentry *dentry, const char *name, const void *value,
+int kernfs_iop_setxattr(struct dentry *dentry, struct inode *inode,
+			const char *name, const void *value,
 			size_t size, int flags);
 int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
 ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode,
diff --git a/fs/libfs.c b/fs/libfs.c
index 8765ff1..3db2721 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1118,8 +1118,9 @@
 	return -EPERM;
 }
 
-static int empty_dir_setxattr(struct dentry *dentry, const char *name,
-			      const void *value, size_t size, int flags)
+static int empty_dir_setxattr(struct dentry *dentry, struct inode *inode,
+			      const char *name, const void *value,
+			      size_t size, int flags)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/fs/namei.c b/fs/namei.c
index 5375571..70580ab 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -35,6 +35,7 @@
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
 #include <linux/hash.h>
+#include <linux/bitops.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -1415,21 +1416,28 @@
 	}
 }
 
+static int path_parent_directory(struct path *path)
+{
+	struct dentry *old = path->dentry;
+	/* rare case of legitimate dget_parent()... */
+	path->dentry = dget_parent(path->dentry);
+	dput(old);
+	if (unlikely(!path_connected(path)))
+		return -ENOENT;
+	return 0;
+}
+
 static int follow_dotdot(struct nameidata *nd)
 {
 	while(1) {
-		struct dentry *old = nd->path.dentry;
-
 		if (nd->path.dentry == nd->root.dentry &&
 		    nd->path.mnt == nd->root.mnt) {
 			break;
 		}
 		if (nd->path.dentry != nd->path.mnt->mnt_root) {
-			/* rare case of legitimate dget_parent()... */
-			nd->path.dentry = dget_parent(nd->path.dentry);
-			dput(old);
-			if (unlikely(!path_connected(&nd->path)))
-				return -ENOENT;
+			int ret = path_parent_directory(&nd->path);
+			if (ret)
+				return ret;
 			break;
 		}
 		if (!follow_up(&nd->path))
@@ -1797,74 +1805,144 @@
 
 #include <asm/word-at-a-time.h>
 
-#ifdef CONFIG_64BIT
+#ifdef HASH_MIX
 
-static inline unsigned int fold_hash(unsigned long hash)
-{
-	return hash_64(hash, 32);
-}
+/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
+
+#elif defined(CONFIG_64BIT)
+/*
+ * Register pressure in the mixing function is an issue, particularly
+ * on 32-bit x86, but almost any function requires one state value and
+ * one temporary.  Instead, use a function designed for two state values
+ * and no temporaries.
+ *
+ * This function cannot create a collision in only two iterations, so
+ * we have two iterations to achieve avalanche.  In those two iterations,
+ * we have six layers of mixing, which is enough to spread one bit's
+ * influence out to 2^6 = 64 state bits.
+ *
+ * Rotate constants are scored by considering either 64 one-bit input
+ * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
+ * probability of that delta causing a change to each of the 128 output
+ * bits, using a sample of random initial states.
+ *
+ * The Shannon entropy of the computed probabilities is then summed
+ * to produce a score.  Ideally, any input change has a 50% chance of
+ * toggling any given output bit.
+ *
+ * Mixing scores (in bits) for (12,45):
+ * Input delta: 1-bit      2-bit
+ * 1 round:     713.3    42542.6
+ * 2 rounds:   2753.7   140389.8
+ * 3 rounds:   5954.1   233458.2
+ * 4 rounds:   7862.6   256672.2
+ * Perfect:    8192     258048
+ *            (64*128) (64*63/2 * 128)
+ */
+#define HASH_MIX(x, y, a)	\
+	(	x ^= (a),	\
+	y ^= x,	x = rol64(x,12),\
+	x += y,	y = rol64(y,45),\
+	y *= 9			)
 
 /*
- * This is George Marsaglia's XORSHIFT generator.
- * It implements a maximum-period LFSR in only a few
- * instructions.  It also has the property (required
- * by hash_name()) that mix_hash(0) = 0.
+ * Fold two longs into one 32-bit hash value.  This must be fast, but
+ * latency isn't quite as critical, as there is a fair bit of additional
+ * work done before the hash value is used.
  */
-static inline unsigned long mix_hash(unsigned long hash)
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
 {
-	hash ^= hash << 13;
-	hash ^= hash >> 7;
-	hash ^= hash << 17;
-	return hash;
+	y ^= x * GOLDEN_RATIO_64;
+	y *= GOLDEN_RATIO_64;
+	return y >> 32;
 }
 
 #else	/* 32-bit case */
 
-#define fold_hash(x) (x)
+/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit      2-bit
+ * 1 round:     330.3     9201.6
+ * 2 rounds:   1246.4    25475.4
+ * 3 rounds:   1907.1    31295.1
+ * 4 rounds:   2042.3    31718.6
+ * Perfect:    2048      31744
+ *            (32*64)   (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a)	\
+	(	x ^= (a),	\
+	y ^= x,	x = rol32(x, 7),\
+	x += y,	y = rol32(y,20),\
+	y *= 9			)
 
-static inline unsigned long mix_hash(unsigned long hash)
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
 {
-	hash ^= hash << 13;
-	hash ^= hash >> 17;
-	hash ^= hash << 5;
-	return hash;
+	/* Use arch-optimized multiply if one exists */
+	return __hash_32(y ^ __hash_32(x));
 }
 
 #endif
 
-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/*
+ * Return the hash of a string of known length.  This is carefully
+ * designed to match hash_name(), which is the more critical function.
+ * In particular, we must end by hashing a final word containing 0..7
+ * payload bytes, to match the way that hash_name() iterates until it
+ * finds the delimiter after the name.
+ */
+unsigned int full_name_hash(const char *name, unsigned int len)
 {
-	unsigned long a, hash = 0;
+	unsigned long a, x = 0, y = 0;
 
 	for (;;) {
+		if (!len)
+			goto done;
 		a = load_unaligned_zeropad(name);
 		if (len < sizeof(unsigned long))
 			break;
-		hash = mix_hash(hash + a);
+		HASH_MIX(x, y, a);
 		name += sizeof(unsigned long);
 		len -= sizeof(unsigned long);
-		if (!len)
-			goto done;
 	}
-	hash += a & bytemask_from_count(len);
+	x ^= a & bytemask_from_count(len);
 done:
-	return fold_hash(hash);
+	return fold_hash(x, y);
 }
 EXPORT_SYMBOL(full_name_hash);
 
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+	unsigned long a = 0, x = 0, y = 0, adata, mask, len;
+	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+
+	len = -sizeof(unsigned long);
+	do {
+		HASH_MIX(x, y, a);
+		len += sizeof(unsigned long);
+		a = load_unaligned_zeropad(name+len);
+	} while (!has_zero(a, &adata, &constants));
+
+	adata = prep_zero_mask(a, adata, &constants);
+	mask = create_zero_mask(adata);
+	x ^= a & zero_bytemask(mask);
+
+	return hashlen_create(fold_hash(x, y), len + find_zero(mask));
+}
+EXPORT_SYMBOL(hashlen_string);
+
 /*
  * Calculate the length and hash of the path component, and
  * return the "hash_len" as the result.
  */
 static inline u64 hash_name(const char *name)
 {
-	unsigned long a, b, adata, bdata, mask, hash, len;
+	unsigned long a = 0, b, x = 0, y = 0, adata, bdata, mask, len;
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 
-	hash = a = 0;
 	len = -sizeof(unsigned long);
 	do {
-		hash = mix_hash(hash + a);
+		HASH_MIX(x, y, a);
 		len += sizeof(unsigned long);
 		a = load_unaligned_zeropad(name+len);
 		b = a ^ REPEAT_BYTE('/');
@@ -1872,25 +1950,40 @@
 
 	adata = prep_zero_mask(a, adata, &constants);
 	bdata = prep_zero_mask(b, bdata, &constants);
-
 	mask = create_zero_mask(adata | bdata);
+	x ^= a & zero_bytemask(mask);
 
-	hash += a & zero_bytemask(mask);
-	len += find_zero(mask);
-	return hashlen_create(fold_hash(hash), len);
+	return hashlen_create(fold_hash(x, y), len + find_zero(mask));
 }
 
-#else
+#else	/* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
 
-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/* Return the hash of a string of known length */
+unsigned int full_name_hash(const char *name, unsigned int len)
 {
 	unsigned long hash = init_name_hash();
 	while (len--)
-		hash = partial_name_hash(*name++, hash);
+		hash = partial_name_hash((unsigned char)*name++, hash);
 	return end_name_hash(hash);
 }
 EXPORT_SYMBOL(full_name_hash);
 
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+	unsigned long hash = init_name_hash();
+	unsigned long len = 0, c;
+
+	c = (unsigned char)*name;
+	while (c) {
+		len++;
+		hash = partial_name_hash(c, hash);
+		c = (unsigned char)name[len];
+	}
+	return hashlen_create(end_name_hash(hash), len);
+}
+EXPORT_SYMBOL(hashlen_string);
+
 /*
  * We know there's a real path component here of at least
  * one character.
@@ -1934,7 +2027,7 @@
 		int type;
 
 		err = may_lookup(nd);
- 		if (err)
+		if (err)
 			return err;
 
 		hash_len = hash_name(name);
@@ -2428,6 +2521,34 @@
 }
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
+#ifdef CONFIG_UNIX98_PTYS
+int path_pts(struct path *path)
+{
+	/* Find something mounted on "pts" in the same directory as
+	 * the input path.
+	 */
+	struct dentry *child, *parent;
+	struct qstr this;
+	int ret;
+
+	ret = path_parent_directory(path);
+	if (ret)
+		return ret;
+
+	parent = path->dentry;
+	this.name = "pts";
+	this.len = 3;
+	child = d_hash_and_lookup(parent, &this);
+	if (!child)
+		return -ENOENT;
+
+	path->dentry = child;
+	dput(parent);
+	follow_mount(path);
+	return 0;
+}
+#endif
+
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
 		 struct path *path, int *empty)
 {
@@ -2909,9 +3030,13 @@
 			}
 			if (*opened & FILE_CREATED)
 				fsnotify_create(dir, dentry);
-			path->dentry = dentry;
-			path->mnt = nd->path.mnt;
-			return 1;
+			if (unlikely(d_is_negative(dentry))) {
+				error = -ENOENT;
+			} else {
+				path->dentry = dentry;
+				path->mnt = nd->path.mnt;
+				return 1;
+			}
 		}
 	}
 	dput(dentry);
@@ -3080,9 +3205,7 @@
 	int acc_mode = op->acc_mode;
 	unsigned seq;
 	struct inode *inode;
-	struct path save_parent = { .dentry = NULL, .mnt = NULL };
 	struct path path;
-	bool retried = false;
 	int error;
 
 	nd->flags &= ~LOOKUP_PARENT;
@@ -3125,7 +3248,6 @@
 			return -EISDIR;
 	}
 
-retry_lookup:
 	if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
 		error = mnt_want_write(nd->path.mnt);
 		if (!error)
@@ -3177,6 +3299,10 @@
 		got_write = false;
 	}
 
+	error = follow_managed(&path, nd);
+	if (unlikely(error < 0))
+		return error;
+
 	if (unlikely(d_is_negative(path.dentry))) {
 		path_to_nameidata(&path, nd);
 		return -ENOENT;
@@ -3192,10 +3318,6 @@
 		return -EEXIST;
 	}
 
-	error = follow_managed(&path, nd);
-	if (unlikely(error < 0))
-		return error;
-
 	seq = 0;	/* out of RCU mode, so the value doesn't matter */
 	inode = d_backing_inode(path.dentry);
 finish_lookup:
@@ -3206,23 +3328,14 @@
 	if (unlikely(error))
 		return error;
 
-	if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
-		path_to_nameidata(&path, nd);
-	} else {
-		save_parent.dentry = nd->path.dentry;
-		save_parent.mnt = mntget(path.mnt);
-		nd->path.dentry = path.dentry;
-
-	}
+	path_to_nameidata(&path, nd);
 	nd->inode = inode;
 	nd->seq = seq;
 	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
 	error = complete_walk(nd);
-	if (error) {
-		path_put(&save_parent);
+	if (error)
 		return error;
-	}
 	audit_inode(nd->name, nd->path.dentry, 0);
 	error = -EISDIR;
 	if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
@@ -3245,13 +3358,9 @@
 		goto out;
 	BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
 	error = vfs_open(&nd->path, file, current_cred());
-	if (!error) {
-		*opened |= FILE_OPENED;
-	} else {
-		if (error == -EOPENSTALE)
-			goto stale_open;
+	if (error)
 		goto out;
-	}
+	*opened |= FILE_OPENED;
 opened:
 	error = open_check_o_direct(file);
 	if (!error)
@@ -3267,26 +3376,7 @@
 	}
 	if (got_write)
 		mnt_drop_write(nd->path.mnt);
-	path_put(&save_parent);
 	return error;
-
-stale_open:
-	/* If no saved parent or already retried then can't retry */
-	if (!save_parent.dentry || retried)
-		goto out;
-
-	BUG_ON(save_parent.dentry != dir);
-	path_put(&nd->path);
-	nd->path = save_parent;
-	nd->inode = dir->d_inode;
-	save_parent.mnt = NULL;
-	save_parent.dentry = NULL;
-	if (got_write) {
-		mnt_drop_write(nd->path.mnt);
-		got_write = false;
-	}
-	retried = true;
-	goto retry_lookup;
 }
 
 static int do_tmpfile(struct nameidata *nd, unsigned flags,
@@ -4542,7 +4632,6 @@
 out:
 	return len;
 }
-EXPORT_SYMBOL(readlink_copy);
 
 /*
  * A helper for ->readlink().  This should be used *ONLY* for symlinks that
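
The fs/namei.c changes above replace the xorshift name-hash mixer with the
HASH_MIX()/fold_hash() pair and add hashlen_string().  As a rough illustration
only, here is a minimal userspace sketch of the 64-bit variant's structure; it
assumes a little-endian build, stands in plain memcpy() for
load_unaligned_zeropad(), and the GOLDEN_RATIO_64 value is quoted from memory
of include/linux/hash.h, so treat both as illustrative rather than the kernel's
actual fast path.

/*
 * Standalone sketch (not kernel code): the 64-bit HASH_MIX()/fold_hash()
 * structure applied to a name of known length.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Golden-ratio multiplier; value assumed from include/linux/hash.h. */
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

static inline uint64_t rol64(uint64_t w, unsigned int s)
{
	return (w << s) | (w >> (64 - s));
}

/* Same shape as the kernel's 64-bit HASH_MIX() with the (12,45) rotations. */
#define HASH_MIX(x, y, a)		\
	(	x ^= (a),		\
	y ^= x,	x = rol64(x, 12),	\
	x += y,	y = rol64(y, 45),	\
	y *= 9			)

static inline unsigned int fold_hash(uint64_t x, uint64_t y)
{
	y ^= x * GOLDEN_RATIO_64;
	y *= GOLDEN_RATIO_64;
	return (unsigned int)(y >> 32);
}

/* Userspace analogue of full_name_hash(): mix whole words, then fold in
 * the 0..7 trailing payload bytes, matching the loop in the patch above. */
static unsigned int sketch_name_hash(const char *name, size_t len)
{
	uint64_t a = 0, x = 0, y = 0;

	while (len >= sizeof(a)) {
		memcpy(&a, name, sizeof(a));
		HASH_MIX(x, y, a);
		name += sizeof(a);
		len -= sizeof(a);
	}
	a = 0;
	memcpy(&a, name, len);		/* little-endian bytemask equivalent */
	x ^= a;
	return fold_hash(x, y);
}

int main(void)
{
	printf("%#x\n", sketch_name_hash("pathname", 8));
	return 0;
}
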
diff --git a/fs/namespace.c b/fs/namespace.c
index 4fb1691..a7ec92c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2409,8 +2409,10 @@
 			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
 		}
 		if (type->fs_flags & FS_USERNS_VISIBLE) {
-			if (!fs_fully_visible(type, &mnt_flags))
+			if (!fs_fully_visible(type, &mnt_flags)) {
+				put_filesystem(type);
 				return -EPERM;
+			}
 		}
 	}
 
@@ -3271,7 +3273,7 @@
 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
 			struct inode *inode = child->mnt_mountpoint->d_inode;
 			/* Only worry about locked mounts */
-			if (!(mnt_flags & MNT_LOCKED))
+			if (!(child->mnt.mnt_flags & MNT_LOCKED))
 				continue;
			/* Is the directory permanently empty? */
 			if (!is_empty_dir_inode(inode))
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 618ced3..aaa2e8d 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -217,7 +217,8 @@
 	}
 
 	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
-					&args->cbl_range)) {
+				&args->cbl_range,
+				be32_to_cpu(args->cbl_stateid.seqid))) {
 		rv = NFS4_OK;
 		goto unlock;
 	}
@@ -500,8 +501,10 @@
 	cps->slot = slot;
 
 	/* The ca_maxresponsesize_cached is 0 with no DRC */
-	if (args->csa_cachethis != 0)
-		return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+	if (args->csa_cachethis != 0) {
+		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+		goto out_unlock;
+	}
 
 	/*
 	 * Check for pending referring calls.  If a match is found, a
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 976c906..d81f96a 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -146,10 +146,16 @@
 	p = read_buf(xdr, NFS4_STATEID_SIZE);
 	if (unlikely(p == NULL))
 		return htonl(NFS4ERR_RESOURCE);
-	memcpy(stateid, p, NFS4_STATEID_SIZE);
+	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
 	return 0;
 }
 
+static __be32 decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	stateid->type = NFS4_DELEGATION_STATEID_TYPE;
+	return decode_stateid(xdr, stateid);
+}
+
 static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
 {
 	__be32 *p;
@@ -211,7 +217,7 @@
 	__be32 *p;
 	__be32 status;
 
-	status = decode_stateid(xdr, &args->stateid);
+	status = decode_delegation_stateid(xdr, &args->stateid);
 	if (unlikely(status != 0))
 		goto out;
 	p = read_buf(xdr, 4);
@@ -227,6 +233,11 @@
 }
 
 #if defined(CONFIG_NFS_V4_1)
+static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	stateid->type = NFS4_LAYOUT_STATEID_TYPE;
+	return decode_stateid(xdr, stateid);
+}
 
 static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
 				       struct xdr_stream *xdr,
@@ -263,7 +274,7 @@
 		}
 		p = xdr_decode_hyper(p, &args->cbl_range.offset);
 		p = xdr_decode_hyper(p, &args->cbl_range.length);
-		status = decode_stateid(xdr, &args->cbl_stateid);
+		status = decode_layout_stateid(xdr, &args->cbl_stateid);
 		if (unlikely(status != 0))
 			goto out;
 	} else if (args->cbl_recall_type == RETURN_FSID) {
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 5166adc..322c258 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -875,15 +875,16 @@
 
 /**
  * nfs4_copy_delegation_stateid - Copy inode's state ID information
- * @dst: stateid data structure to fill in
  * @inode: inode to check
  * @flags: delegation type requirement
+ * @dst: stateid data structure to fill in
+ * @cred: optional location in which to return the delegation credential
  *
  * Returns "true" and fills in "dst->data" * if inode had a delegation,
  * otherwise "false" is returned.
  */
-bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
-		fmode_t flags)
+bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
+		nfs4_stateid *dst, struct rpc_cred **cred)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
@@ -896,6 +897,8 @@
 	if (ret) {
 		nfs4_stateid_copy(dst, &delegation->stateid);
 		nfs_mark_delegation_referenced(delegation);
+		if (cred)
+			*cred = get_rpccred(delegation->cred);
 	}
 	rcu_read_unlock();
 	return ret;
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 333063e..64724d2 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -56,7 +56,7 @@
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
-bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
+bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
 
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 741a92c..979b3c4 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -87,6 +87,7 @@
 	int			mirror_count;
 
 	ssize_t			count,		/* bytes actually processed */
+				max_count,	/* max expected count */
 				bytes_left,	/* bytes left to be sent */
 				io_start,	/* start of IO */
 				error;		/* any reported error */
@@ -123,6 +124,8 @@
 	int i;
 	ssize_t count;
 
+	WARN_ON_ONCE(dreq->count >= dreq->max_count);
+
 	if (dreq->mirror_count == 1) {
 		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
 		dreq->count += hdr->good_bytes;
@@ -275,7 +278,7 @@
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 			      struct nfs_direct_req *dreq)
 {
-	cinfo->lock = &dreq->inode->i_lock;
+	cinfo->inode = dreq->inode;
 	cinfo->mds = &dreq->mds_cinfo;
 	cinfo->ds = &dreq->ds_cinfo;
 	cinfo->dreq = dreq;
@@ -591,7 +594,7 @@
 		goto out_unlock;
 
 	dreq->inode = inode;
-	dreq->bytes_left = count;
+	dreq->bytes_left = dreq->max_count = count;
 	dreq->io_start = iocb->ki_pos;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 	l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -630,13 +633,13 @@
 				  struct list_head *list,
 				  struct nfs_commit_info *cinfo)
 {
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 #ifdef CONFIG_NFS_V4_1
 	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
 		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
 #endif
 	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
-	spin_unlock(cinfo->lock);
+	spin_unlock(&cinfo->inode->i_lock);
 }
 
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
@@ -671,13 +674,13 @@
 		if (!nfs_pageio_add_request(&desc, req)) {
 			nfs_list_remove_request(req);
 			nfs_list_add_request(req, &failed);
-			spin_lock(cinfo.lock);
+			spin_lock(&cinfo.inode->i_lock);
 			dreq->flags = 0;
 			if (desc.pg_error < 0)
 				dreq->error = desc.pg_error;
 			else
 				dreq->error = -EIO;
-			spin_unlock(cinfo.lock);
+			spin_unlock(&cinfo.inode->i_lock);
 		}
 		nfs_release_request(req);
 	}
@@ -1023,7 +1026,7 @@
 		goto out_unlock;
 
 	dreq->inode = inode;
-	dreq->bytes_left = iov_iter_count(iter);
+	dreq->bytes_left = dreq->max_count = iov_iter_count(iter);
 	dreq->io_start = pos;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 	l_ctx = nfs_get_lock_context(dreq->ctx);
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 3384dc8..aa59757 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -795,7 +795,7 @@
 		buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
 	}
 
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 	if (cinfo->ds->nbuckets >= size)
 		goto out;
 	for (i = 0; i < cinfo->ds->nbuckets; i++) {
@@ -811,7 +811,7 @@
 	swap(cinfo->ds->buckets, buckets);
 	cinfo->ds->nbuckets = size;
 out:
-	spin_unlock(cinfo->lock);
+	spin_unlock(&cinfo->inode->i_lock);
 	kfree(buckets);
 	return 0;
 }
@@ -890,6 +890,7 @@
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_READ,
+					   false,
 					   GFP_KERNEL);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -915,6 +916,7 @@
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_RW,
+					   false,
 					   GFP_NOFS);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 0cb1abd..0e8018b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -26,6 +26,8 @@
 
 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
 
+static struct group_info	*ff_zero_group;
+
 static struct pnfs_layout_hdr *
 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
 {
@@ -53,14 +55,15 @@
 	kfree(FF_LAYOUT_FROM_HDR(lo));
 }
 
-static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
 {
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
 	if (unlikely(p == NULL))
 		return -ENOBUFS;
-	memcpy(stateid, p, NFS4_STATEID_SIZE);
+	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
+	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
 		p[0], p[1], p[2], p[3]);
 	return 0;
@@ -211,10 +214,16 @@
 
 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
 {
+	struct rpc_cred	*cred;
+
 	ff_layout_remove_mirror(mirror);
 	kfree(mirror->fh_versions);
-	if (mirror->cred)
-		put_rpccred(mirror->cred);
+	cred = rcu_access_pointer(mirror->ro_cred);
+	if (cred)
+		put_rpccred(cred);
+	cred = rcu_access_pointer(mirror->rw_cred);
+	if (cred)
+		put_rpccred(cred);
 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
 	kfree(mirror);
 }
@@ -290,6 +299,8 @@
 {
 	u64 new_end, old_end;
 
+	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
+		return false;
 	if (new->pls_range.iomode != old->pls_range.iomode)
 		return false;
 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
@@ -310,8 +321,6 @@
 			new_end);
 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
-	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
-		set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
 	return true;
 }
 
@@ -407,8 +416,9 @@
 		struct nfs4_ff_layout_mirror *mirror;
 		struct nfs4_deviceid devid;
 		struct nfs4_deviceid_node *idnode;
-		u32 ds_count;
-		u32 fh_count;
+		struct auth_cred acred = { .group_info = ff_zero_group };
+		struct rpc_cred	__rcu *cred;
+		u32 ds_count, fh_count, id;
 		int j;
 
 		rc = -EIO;
@@ -456,7 +466,7 @@
 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
 
 		/* stateid */
-		rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
+		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
 		if (rc)
 			goto out_err_free;
 
@@ -484,24 +494,49 @@
 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
 
 		/* user */
-		rc = decode_name(&stream, &fls->mirror_array[i]->uid);
+		rc = decode_name(&stream, &id);
 		if (rc)
 			goto out_err_free;
 
+		acred.uid = make_kuid(&init_user_ns, id);
+
 		/* group */
-		rc = decode_name(&stream, &fls->mirror_array[i]->gid);
+		rc = decode_name(&stream, &id);
 		if (rc)
 			goto out_err_free;
 
+		acred.gid = make_kgid(&init_user_ns, id);
+
+		/* find the cred for it */
+		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
+		if (IS_ERR(cred)) {
+			rc = PTR_ERR(cred);
+			goto out_err_free;
+		}
+
+		if (lgr->range.iomode == IOMODE_READ)
+			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
+		else
+			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
+
 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
 		if (mirror != fls->mirror_array[i]) {
+			/* swap cred ptrs so free_mirror will clean up old */
+			if (lgr->range.iomode == IOMODE_READ) {
+				cred = xchg(&mirror->ro_cred, cred);
+				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
+			} else {
+				cred = xchg(&mirror->rw_cred, cred);
+				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
+			}
 			ff_layout_free_mirror(fls->mirror_array[i]);
 			fls->mirror_array[i] = mirror;
 		}
 
-		dprintk("%s: uid %d gid %d\n", __func__,
-			fls->mirror_array[i]->uid,
-			fls->mirror_array[i]->gid);
+		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
+			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
+			from_kuid(&init_user_ns, acred.uid),
+			from_kgid(&init_user_ns, acred.gid));
 	}
 
 	p = xdr_inline_decode(&stream, 4);
@@ -745,7 +780,7 @@
 	else {
 		int i;
 
-		spin_lock(cinfo->lock);
+		spin_lock(&cinfo->inode->i_lock);
 		if (cinfo->ds->nbuckets != 0)
 			kfree(buckets);
 		else {
@@ -759,7 +794,7 @@
 					NFS_INVALID_STABLE_HOW;
 			}
 		}
-		spin_unlock(cinfo->lock);
+		spin_unlock(&cinfo->inode->i_lock);
 		return 0;
 	}
 }
@@ -786,6 +821,36 @@
 }
 
 static void
+ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
+		      struct nfs_page *req,
+		      bool strict_iomode)
+{
+retry_strict:
+	pnfs_put_lseg(pgio->pg_lseg);
+	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+					   req->wb_context,
+					   0,
+					   NFS4_MAX_UINT64,
+					   IOMODE_READ,
+					   strict_iomode,
+					   GFP_KERNEL);
+	if (IS_ERR(pgio->pg_lseg)) {
+		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+		pgio->pg_lseg = NULL;
+	}
+
+	/* If we aren't enforcing a strict iomode, got an IOMODE_RW
+	 * segment, and the server wants to avoid READs on it, then
+	 * retry with strict_iomode set.
+	 */
+	if (pgio->pg_lseg && !strict_iomode &&
+	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
+		strict_iomode = true;
+		goto retry_strict;
+	}
+}
+
+static void
 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 			struct nfs_page *req)
 {
@@ -795,26 +860,23 @@
 	int ds_idx;
 
 	/* Use full layout for now */
-	if (!pgio->pg_lseg) {
-		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-						   req->wb_context,
-						   0,
-						   NFS4_MAX_UINT64,
-						   IOMODE_READ,
-						   GFP_KERNEL);
-		if (IS_ERR(pgio->pg_lseg)) {
-			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
-			pgio->pg_lseg = NULL;
-			return;
-		}
-	}
+	if (!pgio->pg_lseg)
+		ff_layout_pg_get_read(pgio, req, false);
+	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
+		ff_layout_pg_get_read(pgio, req, true);
+
 	/* If no lseg, fall back to read through mds */
 	if (pgio->pg_lseg == NULL)
 		goto out_mds;
 
 	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
-	if (!ds)
-		goto out_mds;
+	if (!ds) {
+		if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+			goto out_pnfs;
+		else
+			goto out_mds;
+	}
+
 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
 
 	pgio->pg_mirror_idx = ds_idx;
@@ -828,6 +890,12 @@
 	pnfs_put_lseg(pgio->pg_lseg);
 	pgio->pg_lseg = NULL;
 	nfs_pageio_reset_read_mds(pgio);
+	return;
+
+out_pnfs:
+	pnfs_set_lo_fail(pgio->pg_lseg);
+	pnfs_put_lseg(pgio->pg_lseg);
+	pgio->pg_lseg = NULL;
 }
 
 static void
@@ -847,6 +915,7 @@
 						   0,
 						   NFS4_MAX_UINT64,
 						   IOMODE_RW,
+						   false,
 						   GFP_NOFS);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -870,8 +939,12 @@
 
 	for (i = 0; i < pgio->pg_mirror_count; i++) {
 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
-		if (!ds)
-			goto out_mds;
+		if (!ds) {
+			if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+				goto out_pnfs;
+			else
+				goto out_mds;
+		}
 		pgm = &pgio->pg_mirrors[i];
 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
@@ -883,6 +956,12 @@
 	pnfs_put_lseg(pgio->pg_lseg);
 	pgio->pg_lseg = NULL;
 	nfs_pageio_reset_write_mds(pgio);
+	return;
+
+out_pnfs:
+	pnfs_set_lo_fail(pgio->pg_lseg);
+	pnfs_put_lseg(pgio->pg_lseg);
+	pgio->pg_lseg = NULL;
 }
 
 static unsigned int
@@ -895,6 +974,7 @@
 						   0,
 						   NFS4_MAX_UINT64,
 						   IOMODE_RW,
+						   false,
 						   GFP_NOFS);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -1067,8 +1147,7 @@
 		rpc_wake_up(&tbl->slot_tbl_waitq);
 		/* fall through */
 	default:
-		if (ff_layout_no_fallback_to_mds(lseg) ||
-		    ff_layout_has_available_ds(lseg))
+		if (ff_layout_avoid_mds_available_ds(lseg))
 			return -NFS4ERR_RESET_TO_PNFS;
 reset:
 		dprintk("%s Retry through MDS. Error %d\n", __func__,
@@ -1215,8 +1294,6 @@
 					hdr->pgio_mirror_idx + 1,
 					&hdr->pgio_mirror_idx))
 			goto out_eagain;
-		set_bit(NFS_LAYOUT_RETURN_REQUESTED,
-			&hdr->lseg->pls_layout->plh_flags);
 		pnfs_read_resend_pnfs(hdr);
 		return task->tk_status;
 	case -NFS4ERR_RESET_TO_MDS:
@@ -1260,7 +1337,7 @@
 }
 
 static bool
-ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
+ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
 {
 	/* No mirroring for now */
 	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
@@ -1297,16 +1374,10 @@
 		rpc_exit(task, -EIO);
 		return -EIO;
 	}
-	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
-		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
-		if (ff_layout_has_available_ds(hdr->lseg))
-			pnfs_read_resend_pnfs(hdr);
-		else
-			ff_layout_reset_read(hdr);
-		rpc_exit(task, 0);
+	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
+		rpc_exit(task, -EHOSTDOWN);
 		return -EAGAIN;
 	}
-	hdr->pgio_done_cb = ff_layout_read_done_cb;
 
 	ff_layout_read_record_layoutstats_start(task, hdr);
 	return 0;
@@ -1496,14 +1567,8 @@
 		return -EIO;
 	}
 
-	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
-		bool retry_pnfs;
-
-		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
-		dprintk("%s task %u reset io to %s\n", __func__,
-			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
-		ff_layout_reset_write(hdr, retry_pnfs);
-		rpc_exit(task, 0);
+	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
+		rpc_exit(task, -EHOSTDOWN);
 		return -EAGAIN;
 	}
 
@@ -1712,7 +1777,7 @@
 		goto out_failed;
 
 	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
-	if (IS_ERR(ds_cred))
+	if (!ds_cred)
 		goto out_failed;
 
 	vers = nfs4_ff_layout_ds_version(lseg, idx);
@@ -1720,6 +1785,7 @@
 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
 		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
 
+	hdr->pgio_done_cb = ff_layout_read_done_cb;
 	atomic_inc(&ds->ds_clp->cl_count);
 	hdr->ds_clp = ds->ds_clp;
 	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
@@ -1737,11 +1803,11 @@
 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
 				      &ff_layout_read_call_ops_v4,
 			  0, RPC_TASK_SOFTCONN);
-
+	put_rpccred(ds_cred);
 	return PNFS_ATTEMPTED;
 
 out_failed:
-	if (ff_layout_has_available_ds(lseg))
+	if (ff_layout_avoid_mds_available_ds(lseg))
 		return PNFS_TRY_AGAIN;
 	return PNFS_NOT_ATTEMPTED;
 }
@@ -1769,7 +1835,7 @@
 		return PNFS_NOT_ATTEMPTED;
 
 	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
-	if (IS_ERR(ds_cred))
+	if (!ds_cred)
 		return PNFS_NOT_ATTEMPTED;
 
 	vers = nfs4_ff_layout_ds_version(lseg, idx);
@@ -1798,6 +1864,7 @@
 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
 				      &ff_layout_write_call_ops_v4,
 			  sync, RPC_TASK_SOFTCONN);
+	put_rpccred(ds_cred);
 	return PNFS_ATTEMPTED;
 }
 
@@ -1824,7 +1891,7 @@
 	struct rpc_clnt *ds_clnt;
 	struct rpc_cred *ds_cred;
 	u32 idx;
-	int vers;
+	int vers, ret;
 	struct nfs_fh *fh;
 
 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
@@ -1838,7 +1905,7 @@
 		goto out_err;
 
 	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
-	if (IS_ERR(ds_cred))
+	if (!ds_cred)
 		goto out_err;
 
 	vers = nfs4_ff_layout_ds_version(lseg, idx);
@@ -1854,10 +1921,12 @@
 	if (fh)
 		data->args.fh = fh;
 
-	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
+	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
 					       &ff_layout_commit_call_ops_v4,
 				   how, RPC_TASK_SOFTCONN);
+	put_rpccred(ds_cred);
+	return ret;
 out_err:
 	pnfs_generic_prepare_to_resend_writes(data);
 	pnfs_generic_commit_release(data);
@@ -2223,6 +2292,11 @@
 {
 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
 	       __func__);
+	if (!ff_zero_group) {
+		ff_zero_group = groups_alloc(0);
+		if (!ff_zero_group)
+			return -ENOMEM;
+	}
 	return pnfs_register_layoutdriver(&flexfilelayout_type);
 }
 
@@ -2231,6 +2305,10 @@
 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
 	       __func__);
 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
+	if (ff_zero_group) {
+		put_group_info(ff_zero_group);
+		ff_zero_group = NULL;
+	}
 }
 
 MODULE_ALIAS("nfs-layouttype4-4");
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index dd353bb..1bcdb15 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -10,7 +10,8 @@
 #define FS_NFS_NFS4FLEXFILELAYOUT_H
 
 #define FF_FLAGS_NO_LAYOUTCOMMIT 1
-#define FF_FLAGS_NO_IO_THRU_MDS 2
+#define FF_FLAGS_NO_IO_THRU_MDS  2
+#define FF_FLAGS_NO_READ_IO      4
 
 #include "../pnfs.h"
 
@@ -76,9 +77,8 @@
 	u32				fh_versions_cnt;
 	struct nfs_fh			*fh_versions;
 	nfs4_stateid			stateid;
-	u32				uid;
-	u32				gid;
-	struct rpc_cred			*cred;
+	struct rpc_cred	__rcu		*ro_cred;
+	struct rpc_cred	__rcu		*rw_cred;
 	atomic_t			ref;
 	spinlock_t			lock;
 	struct nfs4_ff_layoutstat	read_stat;
@@ -154,6 +154,12 @@
 }
 
 static inline bool
+ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
+{
+	return FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_READ_IO;
+}
+
+static inline bool
 ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
 {
 	return nfs4_test_deviceid_unavailable(node);
@@ -192,4 +198,7 @@
 struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
 				       u32 ds_idx, struct rpc_cred *mdscred);
 bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
+bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
+
 #endif /* FS_NFS_NFS4FLEXFILELAYOUT_H */
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index add0e5a..0aa36be 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -228,7 +228,8 @@
 		return e1->opnum < e2->opnum ? -1 : 1;
 	if (e1->status != e2->status)
 		return e1->status < e2->status ? -1 : 1;
-	ret = memcmp(&e1->stateid, &e2->stateid, sizeof(e1->stateid));
+	ret = memcmp(e1->stateid.data, e2->stateid.data,
+			sizeof(e1->stateid.data));
 	if (ret != 0)
 		return ret;
 	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
@@ -302,40 +303,26 @@
 	return 0;
 }
 
-/* currently we only support AUTH_NONE and AUTH_SYS */
-static rpc_authflavor_t
-nfs4_ff_layout_choose_authflavor(struct nfs4_ff_layout_mirror *mirror)
+static struct rpc_cred *
+ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
 {
-	if (mirror->uid == (u32)-1)
-		return RPC_AUTH_NULL;
-	return RPC_AUTH_UNIX;
-}
+	struct rpc_cred *cred, __rcu **pcred;
 
-/* fetch cred for NFSv3 DS */
-static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
-				      struct nfs4_pnfs_ds *ds)
-{
-	if (ds->ds_clp && !mirror->cred &&
-	    mirror->mirror_ds->ds_versions[0].version == 3) {
-		struct rpc_auth *auth = ds->ds_clp->cl_rpcclient->cl_auth;
-		struct rpc_cred *cred;
-		struct auth_cred acred = {
-			.uid = make_kuid(&init_user_ns, mirror->uid),
-			.gid = make_kgid(&init_user_ns, mirror->gid),
-		};
+	if (iomode == IOMODE_READ)
+		pcred = &mirror->ro_cred;
+	else
+		pcred = &mirror->rw_cred;
 
-		/* AUTH_NULL ignores acred */
-		cred = auth->au_ops->lookup_cred(auth, &acred, 0);
-		if (IS_ERR(cred)) {
-			dprintk("%s: lookup_cred failed with %ld\n",
-				__func__, PTR_ERR(cred));
-			return PTR_ERR(cred);
-		} else {
-			if (cmpxchg(&mirror->cred, NULL, cred))
-				put_rpccred(cred);
-		}
-	}
-	return 0;
+	rcu_read_lock();
+	do {
+		cred = rcu_dereference(*pcred);
+		if (!cred)
+			break;
+
+		cred = get_rpccred_rcu(cred);
+	} while (!cred);
+	rcu_read_unlock();
+	return cred;
 }
 
 struct nfs_fh *
@@ -356,7 +343,23 @@
 	return fh;
 }
 
-/* Upon return, either ds is connected, or ds is NULL */
+/**
+ * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
+ * @lseg: the layout segment we're operating on
+ * @ds_idx: index of the DS to use
+ * @fail_return: return layout on connect failure?
+ *
+ * Try to prepare a DS connection to accept an RPC call. This involves
+ * selecting a mirror to use and connecting the client to it if it's not
+ * already connected.
+ *
+ * Since we only need a single functioning mirror to satisfy a read, we don't
+ * want to return the layout as long as one mirror is still usable. For
+ * writes, though, any down mirror should result in a LAYOUTRETURN.
+ * @fail_return is how we distinguish between the two cases.
+ *
+ * Returns a pointer to a connected DS object on success or NULL on failure.
+ */
 struct nfs4_pnfs_ds *
 nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 			  bool fail_return)
@@ -367,7 +370,6 @@
 	struct inode *ino = lseg->pls_layout->plh_inode;
 	struct nfs_server *s = NFS_SERVER(ino);
 	unsigned int max_payload;
-	rpc_authflavor_t flavor;
 
 	if (!ff_layout_mirror_valid(lseg, mirror)) {
 		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -383,9 +385,7 @@
 	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
 	smp_rmb();
 	if (ds->ds_clp)
-		goto out_update_creds;
-
-	flavor = nfs4_ff_layout_choose_authflavor(mirror);
+		goto out;
 
 	/* FIXME: For now we assume the server sent only one version of NFS
 	 * to use for the DS.
@@ -394,7 +394,7 @@
 			     dataserver_retrans,
 			     mirror->mirror_ds->ds_versions[0].version,
 			     mirror->mirror_ds->ds_versions[0].minor_version,
-			     flavor);
+			     RPC_AUTH_UNIX);
 
 	/* connect success, check rsize/wsize limit */
 	if (ds->ds_clp) {
@@ -410,20 +410,10 @@
 					 mirror, lseg->pls_range.offset,
 					 lseg->pls_range.length, NFS4ERR_NXIO,
 					 OP_ILLEGAL, GFP_NOIO);
-		if (!fail_return) {
-			if (ff_layout_has_available_ds(lseg))
-				set_bit(NFS_LAYOUT_RETURN_REQUESTED,
-					&lseg->pls_layout->plh_flags);
-			else
-				pnfs_error_mark_layout_for_return(ino, lseg);
-		} else
+		if (fail_return || !ff_layout_has_available_ds(lseg))
 			pnfs_error_mark_layout_for_return(ino, lseg);
 		ds = NULL;
-		goto out;
 	}
-out_update_creds:
-	if (ff_layout_update_mirror_cred(mirror, ds))
-		ds = NULL;
 out:
 	return ds;
 }
@@ -433,16 +423,15 @@
 		      struct rpc_cred *mdscred)
 {
 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
-	struct rpc_cred *cred = ERR_PTR(-EINVAL);
+	struct rpc_cred *cred;
 
-	if (!nfs4_ff_layout_prepare_ds(lseg, ds_idx, true))
-		goto out;
-
-	if (mirror && mirror->cred)
-		cred = mirror->cred;
-	else
-		cred = mdscred;
-out:
+	if (mirror) {
+		cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
+		if (!cred)
+			cred = get_rpccred(mdscred);
+	} else {
+		cred = get_rpccred(mdscred);
+	}
 	return cred;
 }
 
@@ -562,6 +551,18 @@
 	return ff_rw_layout_has_available_ds(lseg);
 }
 
+bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
+{
+	return ff_layout_no_fallback_to_mds(lseg) ||
+	       ff_layout_has_available_ds(lseg);
+}
+
+bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
+{
+	return lseg->pls_range.iomode == IOMODE_RW &&
+	       ff_layout_no_read_on_rw(lseg);
+}
+
 module_param(dataserver_retrans, uint, 0644);
 MODULE_PARM_DESC(dataserver_retrans, "The  number of times the NFSv4.1 client "
 			"retries a request before it attempts further "
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index f1d1d2c..5154fa6 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -477,6 +477,7 @@
 			     u32 ds_commit_idx);
 int nfs_write_need_commit(struct nfs_pgio_header *);
 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr);
+int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf);
 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
 			    int how, struct nfs_commit_info *cinfo);
 void nfs_retry_commit(struct list_head *page_list,
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index b587ccd..b6cd153 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -13,6 +13,7 @@
 
 /* nfs4.2proc.c */
 int nfs42_proc_allocate(struct file *, loff_t, loff_t);
+ssize_t nfs42_proc_copy(struct file *, loff_t, struct file *, loff_t, size_t);
 int nfs42_proc_deallocate(struct file *, loff_t, loff_t);
 loff_t nfs42_proc_llseek(struct file *, loff_t, int);
 int nfs42_proc_layoutstats_generic(struct nfs_server *,
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index dff8346..aa03ed0 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -126,6 +126,111 @@
 	return err;
 }
 
+static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
+				struct nfs_lock_context *src_lock,
+				struct file *dst, loff_t pos_dst,
+				struct nfs_lock_context *dst_lock,
+				size_t count)
+{
+	struct nfs42_copy_args args = {
+		.src_fh		= NFS_FH(file_inode(src)),
+		.src_pos	= pos_src,
+		.dst_fh		= NFS_FH(file_inode(dst)),
+		.dst_pos	= pos_dst,
+		.count		= count,
+	};
+	struct nfs42_copy_res res;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
+	struct inode *dst_inode = file_inode(dst);
+	struct nfs_server *server = NFS_SERVER(dst_inode);
+	int status;
+
+	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+				     src_lock, FMODE_READ);
+	if (status)
+		return status;
+
+	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+				     dst_lock, FMODE_WRITE);
+	if (status)
+		return status;
+
+	status = nfs4_call_sync(server->client, server, &msg,
+				&args.seq_args, &res.seq_res, 0);
+	if (status == -ENOTSUPP)
+		server->caps &= ~NFS_CAP_COPY;
+	if (status)
+		return status;
+
+	if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
+		status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
+		if (status)
+			return status;
+	}
+
+	truncate_pagecache_range(dst_inode, pos_dst,
+				 pos_dst + res.write_res.count);
+
+	return res.write_res.count;
+}
+
+ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
+			struct file *dst, loff_t pos_dst,
+			size_t count)
+{
+	struct nfs_server *server = NFS_SERVER(file_inode(dst));
+	struct nfs_lock_context *src_lock;
+	struct nfs_lock_context *dst_lock;
+	struct nfs4_exception src_exception = { };
+	struct nfs4_exception dst_exception = { };
+	ssize_t err, err2;
+
+	if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
+		return -EOPNOTSUPP;
+
+	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
+	if (IS_ERR(src_lock))
+		return PTR_ERR(src_lock);
+
+	src_exception.inode = file_inode(src);
+	src_exception.state = src_lock->open_context->state;
+
+	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
+	if (IS_ERR(dst_lock)) {
+		err = PTR_ERR(dst_lock);
+		goto out_put_src_lock;
+	}
+
+	dst_exception.inode = file_inode(dst);
+	dst_exception.state = dst_lock->open_context->state;
+
+	do {
+		inode_lock(file_inode(dst));
+		err = _nfs42_proc_copy(src, pos_src, src_lock,
+				       dst, pos_dst, dst_lock, count);
+		inode_unlock(file_inode(dst));
+
+		if (err == -ENOTSUPP) {
+			err = -EOPNOTSUPP;
+			break;
+		}
+
+		err2 = nfs4_handle_exception(server, err, &src_exception);
+		err  = nfs4_handle_exception(server, err, &dst_exception);
+		if (!err)
+			err = err2;
+	} while (src_exception.retry || dst_exception.retry);
+
+	nfs_put_lock_context(dst_lock);
+out_put_src_lock:
+	nfs_put_lock_context(src_lock);
+	return err;
+}
+
 static loff_t _nfs42_proc_llseek(struct file *filep,
 		struct nfs_lock_context *lock, loff_t offset, int whence)
 {
@@ -232,7 +337,7 @@
 			 * with the current stateid.
 			 */
 			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
+			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
 			spin_unlock(&inode->i_lock);
 			pnfs_free_lseg_list(&head);
 		} else
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 0ca482a..6dc6f2a 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -9,9 +9,22 @@
 #define encode_fallocate_maxsz		(encode_stateid_maxsz + \
 					 2 /* offset */ + \
 					 2 /* length */)
+#define NFS42_WRITE_RES_SIZE		(1 /* wr_callback_id size */ +\
+					 XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+					 2 /* wr_count */ + \
+					 1 /* wr_committed */ + \
+					 XDR_QUADLEN(NFS4_VERIFIER_SIZE))
 #define encode_allocate_maxsz		(op_encode_hdr_maxsz + \
 					 encode_fallocate_maxsz)
 #define decode_allocate_maxsz		(op_decode_hdr_maxsz)
+#define encode_copy_maxsz		(op_encode_hdr_maxsz +          \
+					 XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+					 XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+					 2 + 2 + 2 + 1 + 1 + 1)
+#define decode_copy_maxsz		(op_decode_hdr_maxsz + \
+					 NFS42_WRITE_RES_SIZE + \
+					 1 /* cr_consecutive */ + \
+					 1 /* cr_synchronous */)
 #define encode_deallocate_maxsz		(op_encode_hdr_maxsz + \
 					 encode_fallocate_maxsz)
 #define decode_deallocate_maxsz		(op_decode_hdr_maxsz)
@@ -49,6 +62,16 @@
 					 decode_putfh_maxsz + \
 					 decode_allocate_maxsz + \
 					 decode_getattr_maxsz)
+#define NFS4_enc_copy_sz		(compound_encode_hdr_maxsz + \
+					 encode_putfh_maxsz + \
+					 encode_savefh_maxsz + \
+					 encode_putfh_maxsz + \
+					 encode_copy_maxsz)
+#define NFS4_dec_copy_sz		(compound_decode_hdr_maxsz + \
+					 decode_putfh_maxsz + \
+					 decode_savefh_maxsz + \
+					 decode_putfh_maxsz + \
+					 decode_copy_maxsz)
 #define NFS4_enc_deallocate_sz		(compound_encode_hdr_maxsz + \
 					 encode_putfh_maxsz + \
 					 encode_deallocate_maxsz + \
@@ -102,6 +125,23 @@
 	encode_fallocate(xdr, args);
 }
 
+static void encode_copy(struct xdr_stream *xdr,
+			struct nfs42_copy_args *args,
+			struct compound_hdr *hdr)
+{
+	encode_op_hdr(xdr, OP_COPY, decode_copy_maxsz, hdr);
+	encode_nfs4_stateid(xdr, &args->src_stateid);
+	encode_nfs4_stateid(xdr, &args->dst_stateid);
+
+	encode_uint64(xdr, args->src_pos);
+	encode_uint64(xdr, args->dst_pos);
+	encode_uint64(xdr, args->count);
+
+	encode_uint32(xdr, 1); /* consecutive = true */
+	encode_uint32(xdr, 1); /* synchronous = true */
+	encode_uint32(xdr, 0); /* src server list */
+}
+
 static void encode_deallocate(struct xdr_stream *xdr,
 			      struct nfs42_falloc_args *args,
 			      struct compound_hdr *hdr)
@@ -182,6 +222,26 @@
 }
 
 /*
+ * Encode COPY request
+ */
+static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
+			      struct xdr_stream *xdr,
+			      struct nfs42_copy_args *args)
+{
+	struct compound_hdr hdr = {
+		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
+	};
+
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->src_fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->dst_fh, &hdr);
+	encode_copy(xdr, args, &hdr);
+	encode_nops(&hdr);
+}
+
+/*
  * Encode DEALLOCATE request
  */
 static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
@@ -266,6 +326,62 @@
 	return decode_op_hdr(xdr, OP_ALLOCATE);
 }
 
+static int decode_write_response(struct xdr_stream *xdr,
+				 struct nfs42_write_res *res)
+{
+	__be32 *p;
+	int stateids;
+
+	p = xdr_inline_decode(xdr, 4 + 8 + 4);
+	if (unlikely(!p))
+		goto out_overflow;
+
+	stateids = be32_to_cpup(p++);
+	p = xdr_decode_hyper(p, &res->count);
+	res->verifier.committed = be32_to_cpup(p);
+	return decode_verifier(xdr, &res->verifier.verifier);
+
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int decode_copy_requirements(struct xdr_stream *xdr,
+				    struct nfs42_copy_res *res)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(!p))
+		goto out_overflow;
+
+	res->consecutive = be32_to_cpup(p++);
+	res->synchronous = be32_to_cpup(p++);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int decode_copy(struct xdr_stream *xdr, struct nfs42_copy_res *res)
+{
+	int status;
+
+	status = decode_op_hdr(xdr, OP_COPY);
+	if (status == NFS4ERR_OFFLOAD_NO_REQS) {
+		status = decode_copy_requirements(xdr, res);
+		if (status)
+			return status;
+		return NFS4ERR_OFFLOAD_NO_REQS;
+	} else if (status)
+		return status;
+
+	status = decode_write_response(xdr, &res->write_res);
+	if (status)
+		return status;
+
+	return decode_copy_requirements(xdr, res);
+}
+
 static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
 {
 	return decode_op_hdr(xdr, OP_DEALLOCATE);
@@ -331,6 +447,36 @@
 }
 
 /*
+ * Decode COPY response
+ */
+static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp,
+			     struct xdr_stream *xdr,
+			     struct nfs42_copy_res *res)
+{
+	struct compound_hdr hdr;
+	int status;
+
+	status = decode_compound_hdr(xdr, &hdr);
+	if (status)
+		goto out;
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
+	if (status)
+		goto out;
+	status = decode_putfh(xdr);
+	if (status)
+		goto out;
+	status = decode_savefh(xdr);
+	if (status)
+		goto out;
+	status = decode_putfh(xdr);
+	if (status)
+		goto out;
+	status = decode_copy(xdr, res);
+out:
+	return status;
+}
+
+/*
  * Decode DEALLOCATE request
  */
 static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4afdee4..768456f 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -438,8 +438,9 @@
 				      struct nfs41_server_scope **);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern int nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
-		fmode_t, const struct nfs_lockowner *);
+extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
+		const struct nfs_lockowner *, nfs4_stateid *,
+		struct rpc_cred **);
 
 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
 extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
@@ -496,12 +497,15 @@
 
 static inline void nfs4_stateid_copy(nfs4_stateid *dst, const nfs4_stateid *src)
 {
-	memcpy(dst, src, sizeof(*dst));
+	memcpy(dst->data, src->data, sizeof(dst->data));
+	dst->type = src->type;
 }
 
 static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_stateid *src)
 {
-	return memcmp(dst, src, sizeof(*dst)) == 0;
+	if (dst->type != src->type)
+		return false;
+	return memcmp(dst->data, src->data, sizeof(dst->data)) == 0;
 }
 
 static inline bool nfs4_stateid_match_other(const nfs4_stateid *dst, const nfs4_stateid *src)
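
The nfs4_stateid helpers above now compare the type tag as well as the opaque
bytes, and the decoders stamp the tag as they parse (decode_layout_stateid(),
decode_delegation_stateid(), decode_pnfs_stateid()).  A tiny standalone sketch
of the idea follows; the struct layout, sizes and enum values are simplified
stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for the kernel's tagged nfs4_stateid. */
enum stateid_type {
	STATEID_SPECIAL,
	STATEID_OPEN,
	STATEID_DELEGATION,
	STATEID_LAYOUT,
	STATEID_PNFS_DS,
};

struct stateid {
	uint8_t data[16];
	enum stateid_type type;
};

/* Mirror of the new nfs4_stateid_match(): equal only when both the opaque
 * bytes and the type tag agree. */
static bool stateid_match(const struct stateid *a, const struct stateid *b)
{
	if (a->type != b->type)
		return false;
	return memcmp(a->data, b->data, sizeof(a->data)) == 0;
}

int main(void)
{
	struct stateid a = { .data = { 1, 2, 3 }, .type = STATEID_LAYOUT };
	struct stateid b = a;

	b.type = STATEID_DELEGATION;	/* same bytes, different class */
	return stateid_match(&a, &b);	/* 0: not considered a match */
}
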
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index d039051..014b0e4 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -129,6 +129,28 @@
 }
 
 #ifdef CONFIG_NFS_V4_2
+static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
+				    struct file *file_out, loff_t pos_out,
+				    size_t count, unsigned int flags)
+{
+	struct inode *in_inode = file_inode(file_in);
+	struct inode *out_inode = file_inode(file_out);
+	int ret;
+
+	if (in_inode == out_inode)
+		return -EINVAL;
+
+	/* flush any pending writes */
+	ret = nfs_sync_inode(in_inode);
+	if (ret)
+		return ret;
+	ret = nfs_sync_inode(out_inode);
+	if (ret)
+		return ret;
+
+	return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
+}
+
 static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
 {
 	loff_t ret;
@@ -243,6 +265,7 @@
 	.check_flags	= nfs_check_flags,
 	.setlease	= simple_nosetlease,
 #ifdef CONFIG_NFS_V4_2
+	.copy_file_range = nfs4_copy_file_range,
 	.llseek		= nfs4_file_llseek,
 	.fallocate	= nfs42_fallocate,
 	.clone_file_range = nfs42_clone_file_range,
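
With .copy_file_range wired up above, an NFSv4.2 client can service
copy_file_range(2) with a single server-side COPY when the server advertises
NFS_CAP_COPY; otherwise the VFS may fall back to an ordinary data copy.  A
hypothetical userspace demonstration follows; the paths are invented, both
files are assumed to sit on one NFSv4.2 mount, and older C libraries may need
to issue the syscall directly instead of using the wrapper.

/* Hypothetical userspace use of the interface served by nfs4_copy_file_range(). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int in = open("/mnt/nfs/src.img", O_RDONLY);
	int out = open("/mnt/nfs/dst.img", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t copied;

	if (in < 0 || out < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* NULL offsets: copy from/to the current file positions.  On NFS the
	 * client flushes both inodes and sends a single NFSv4.2 COPY, so the
	 * data never round-trips through this process. */
	copied = copy_file_range(in, NULL, out, NULL, 1024 * 1024, 0);
	if (copied < 0)
		perror("copy_file_range");

	close(in);
	close(out);
	return copied < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
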
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 084e857..de97567 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -74,6 +74,17 @@
 #define NFS4_POLL_RETRY_MIN	(HZ/10)
 #define NFS4_POLL_RETRY_MAX	(15*HZ)
 
+/* file attributes which can be mapped to nfs attributes */
+#define NFS4_VALID_ATTRS (ATTR_MODE \
+	| ATTR_UID \
+	| ATTR_GID \
+	| ATTR_SIZE \
+	| ATTR_ATIME \
+	| ATTR_MTIME \
+	| ATTR_CTIME \
+	| ATTR_ATIME_SET \
+	| ATTR_MTIME_SET)
+
 struct nfs4_opendata;
 static int _nfs4_proc_open(struct nfs4_opendata *data);
 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
@@ -416,6 +427,7 @@
 		case -NFS4ERR_DELAY:
 			nfs_inc_server_stats(server, NFSIOS_DELAY);
 		case -NFS4ERR_GRACE:
+		case -NFS4ERR_RECALLCONFLICT:
 			exception->delay = 1;
 			return 0;
 
@@ -2558,15 +2570,20 @@
 	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
 		nfs4_exclusive_attrset(opendata, sattr, &label);
-
-		nfs_fattr_init(opendata->o_res.f_attr);
-		status = nfs4_do_setattr(state->inode, cred,
-				opendata->o_res.f_attr, sattr,
-				state, label, olabel);
-		if (status == 0) {
-			nfs_setattr_update_inode(state->inode, sattr,
-					opendata->o_res.f_attr);
-			nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
+		/*
+		 * Send the create attributes that were not set by the
+		 * open call with an extra SETATTR.
+		 */
+		if (sattr->ia_valid & NFS4_VALID_ATTRS) {
+			nfs_fattr_init(opendata->o_res.f_attr);
+			status = nfs4_do_setattr(state->inode, cred,
+					opendata->o_res.f_attr, sattr,
+					state, label, olabel);
+			if (status == 0) {
+				nfs_setattr_update_inode(state->inode, sattr,
+						opendata->o_res.f_attr);
+				nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
+			}
 		}
 	}
 	if (opened && opendata->file_created)
@@ -2676,6 +2693,7 @@
 		.rpc_resp	= &res,
 		.rpc_cred	= cred,
         };
+	struct rpc_cred *delegation_cred = NULL;
 	unsigned long timestamp = jiffies;
 	fmode_t fmode;
 	bool truncate;
@@ -2691,7 +2709,7 @@
 	truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
 	fmode = truncate ? FMODE_WRITE : FMODE_READ;
 
-	if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
+	if (nfs4_copy_delegation_stateid(inode, fmode, &arg.stateid, &delegation_cred)) {
 		/* Use that stateid */
 	} else if (truncate && state != NULL) {
 		struct nfs_lockowner lockowner = {
@@ -2700,13 +2718,17 @@
 		};
 		if (!nfs4_valid_open_stateid(state))
 			return -EBADF;
-		if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
-				&lockowner) == -EIO)
+		if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner,
+				&arg.stateid, &delegation_cred) == -EIO)
 			return -EBADF;
 	} else
 		nfs4_stateid_copy(&arg.stateid, &zero_stateid);
+	if (delegation_cred)
+		msg.rpc_cred = delegation_cred;
 
 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+
+	put_rpccred(delegation_cred);
 	if (status == 0 && state != NULL)
 		renew_lease(server, timestamp);
 	trace_nfs4_setattr(inode, &arg.stateid, status);
@@ -4285,7 +4307,7 @@
 
 	if (l_ctx != NULL)
 		lockowner = &l_ctx->lockowner;
-	return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
+	return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL);
 }
 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
 
@@ -4993,12 +5015,11 @@
 }
 
 static int
-nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
+nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
 {
 	struct nfs4_label ilabel, *olabel = NULL;
 	struct nfs_fattr fattr;
 	struct rpc_cred *cred;
-	struct inode *inode = d_inode(dentry);
 	int status;
 
 	if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
@@ -6054,6 +6075,7 @@
 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
 	struct nfs_inode *nfsi = NFS_I(state->inode);
+	struct nfs4_state_owner *sp = state->owner;
 	unsigned char fl_flags = request->fl_flags;
 	int status = -ENOLCK;
 
@@ -6068,6 +6090,7 @@
 	status = do_vfs_lock(state->inode, request);
 	if (status < 0)
 		goto out;
+	mutex_lock(&sp->so_delegreturn_mutex);
 	down_read(&nfsi->rwsem);
 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
 		/* Yes: cache locks! */
@@ -6075,9 +6098,11 @@
 		request->fl_flags = fl_flags & ~FL_SLEEP;
 		status = do_vfs_lock(state->inode, request);
 		up_read(&nfsi->rwsem);
+		mutex_unlock(&sp->so_delegreturn_mutex);
 		goto out;
 	}
 	up_read(&nfsi->rwsem);
+	mutex_unlock(&sp->so_delegreturn_mutex);
 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
 out:
 	request->fl_flags = fl_flags;
@@ -6255,11 +6280,11 @@
 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
 
 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *key,
-				   const void *buf, size_t buflen,
-				   int flags)
+				   struct dentry *unused, struct inode *inode,
+				   const char *key, const void *buf,
+				   size_t buflen, int flags)
 {
-	return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
+	return nfs4_proc_set_acl(inode, buf, buflen);
 }
 
 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
@@ -6277,12 +6302,12 @@
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
 
 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
-				     struct dentry *dentry, const char *key,
-				     const void *buf, size_t buflen,
-				     int flags)
+				     struct dentry *unused, struct inode *inode,
+				     const char *key, const void *buf,
+				     size_t buflen, int flags)
 {
 	if (security_ismaclabel(key))
-		return nfs4_set_security_label(dentry, buf, buflen);
+		return nfs4_set_security_label(inode, buf, buflen);
 
 	return -EOPNOTSUPP;
 }
@@ -7351,9 +7376,11 @@
  * always set csa_cachethis to FALSE because the current implementation
  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
  */
-static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
+static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
+				    struct rpc_clnt *clnt)
 {
 	unsigned int max_rqst_sz, max_resp_sz;
+	unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
 
 	max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
 	max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
@@ -7371,8 +7398,8 @@
 		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
 
 	/* Back channel attributes */
-	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
-	args->bc_attrs.max_resp_sz = PAGE_SIZE;
+	args->bc_attrs.max_rqst_sz = max_bc_payload;
+	args->bc_attrs.max_resp_sz = max_bc_payload;
 	args->bc_attrs.max_resp_sz_cached = 0;
 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
 	args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
@@ -7476,7 +7503,7 @@
 	};
 	int status;
 
-	nfs4_init_channel_attrs(&args);
+	nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
 
 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
@@ -7820,40 +7847,34 @@
 	struct nfs4_layoutget *lgp = calldata;
 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
 	struct nfs4_session *session = nfs4_get_session(server);
-	int ret;
 
 	dprintk("--> %s\n", __func__);
-	/* Note the is a race here, where a CB_LAYOUTRECALL can come in
-	 * right now covering the LAYOUTGET we are about to send.
-	 * However, that is not so catastrophic, and there seems
-	 * to be no way to prevent it completely.
-	 */
-	if (nfs41_setup_sequence(session, &lgp->args.seq_args,
-				&lgp->res.seq_res, task))
-		return;
-	ret = pnfs_choose_layoutget_stateid(&lgp->args.stateid,
-					  NFS_I(lgp->args.inode)->layout,
-					  &lgp->args.range,
-					  lgp->args.ctx->state);
-	if (ret < 0)
-		rpc_exit(task, ret);
+	nfs41_setup_sequence(session, &lgp->args.seq_args,
+				&lgp->res.seq_res, task);
+	dprintk("<-- %s\n", __func__);
 }
 
 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_layoutget *lgp = calldata;
+
+	dprintk("--> %s\n", __func__);
+	nfs41_sequence_done(task, &lgp->res.seq_res);
+	dprintk("<-- %s\n", __func__);
+}
+
+static int
+nfs4_layoutget_handle_exception(struct rpc_task *task,
+		struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
+{
 	struct inode *inode = lgp->args.inode;
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct pnfs_layout_hdr *lo;
-	struct nfs4_state *state = NULL;
-	unsigned long timeo, now, giveup;
+	int status = task->tk_status;
 
 	dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
 
-	if (!nfs41_sequence_done(task, &lgp->res.seq_res))
-		goto out;
-
-	switch (task->tk_status) {
+	switch (status) {
 	case 0:
 		goto out;
 
@@ -7863,57 +7884,43 @@
 	 * retry go inband.
 	 */
 	case -NFS4ERR_LAYOUTUNAVAILABLE:
-		task->tk_status = -ENODATA;
+		status = -ENODATA;
 		goto out;
 	/*
 	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
 	 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
 	 */
 	case -NFS4ERR_BADLAYOUT:
-		goto out_overflow;
+		status = -EOVERFLOW;
+		goto out;
 	/*
 	 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
 	 * (or clients) writing to the same RAID stripe except when
 	 * the minlength argument is 0 (see RFC5661 section 18.43.3).
+	 *
+	 * Treat it like we would RECALLCONFLICT -- we retry for a little
+	 * while, and then eventually give up.
 	 */
 	case -NFS4ERR_LAYOUTTRYLATER:
-		if (lgp->args.minlength == 0)
-			goto out_overflow;
-	/*
-	 * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall
-	 * existing layout before getting a new one).
-	 */
-	case -NFS4ERR_RECALLCONFLICT:
-		timeo = rpc_get_timeout(task->tk_client);
-		giveup = lgp->args.timestamp + timeo;
-		now = jiffies;
-		if (time_after(giveup, now)) {
-			unsigned long delay;
-
-			/* Delay for:
-			 * - Not less then NFS4_POLL_RETRY_MIN.
-			 * - One last time a jiffie before we give up
-			 * - exponential backoff (time_now minus start_attempt)
-			 */
-			delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
-				    min((giveup - now - 1),
-					now - lgp->args.timestamp));
-
-			dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
-				__func__, delay);
-			rpc_delay(task, delay);
-			/* Do not call nfs4_async_handle_error() */
-			goto out_restart;
+		if (lgp->args.minlength == 0) {
+			status = -EOVERFLOW;
+			goto out;
 		}
-		break;
+		/* Fallthrough */
+	case -NFS4ERR_RECALLCONFLICT:
+		nfs4_handle_exception(server, -NFS4ERR_RECALLCONFLICT,
+					exception);
+		status = -ERECALLCONFLICT;
+		goto out;
 	case -NFS4ERR_EXPIRED:
 	case -NFS4ERR_BAD_STATEID:
+		exception->timeout = 0;
 		spin_lock(&inode->i_lock);
 		if (nfs4_stateid_match(&lgp->args.stateid,
 					&lgp->args.ctx->state->stateid)) {
 			spin_unlock(&inode->i_lock);
 			/* If the open stateid was bad, then recover it. */
-			state = lgp->args.ctx->state;
+			exception->state = lgp->args.ctx->state;
 			break;
 		}
 		lo = NFS_I(inode)->layout;
@@ -7926,25 +7933,21 @@
 			 * with the current stateid.
 			 */
 			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
+			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
 			spin_unlock(&inode->i_lock);
 			pnfs_free_lseg_list(&head);
 		} else
 			spin_unlock(&inode->i_lock);
-		goto out_restart;
+		status = -EAGAIN;
+		goto out;
 	}
-	if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
-		goto out_restart;
+
+	status = nfs4_handle_exception(server, status, exception);
+	if (exception->retry)
+		status = -EAGAIN;
 out:
 	dprintk("<-- %s\n", __func__);
-	return;
-out_restart:
-	task->tk_status = 0;
-	rpc_restart_call_prepare(task);
-	return;
-out_overflow:
-	task->tk_status = -EOVERFLOW;
-	goto out;
+	return status;
 }
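
The reworked error handling above no longer restarts the RPC task itself; instead it reports -EAGAIN (roughly "the stateid was refreshed, retry at once") or -ERECALLCONFLICT ("retry with backoff until the RPC timeout window closes") back up to the caller. A hedged sketch of that retry contract follows; get_layout() is a hypothetical stand-in for the send_layoutget()/nfs4_proc_layoutget() pair, and the fixed 30-second cap stands in for the rpc_get_timeout() value the real caller uses:

/* Illustrative sketch only -- not part of the patch. */
static struct pnfs_layout_segment *get_layout(long *timeout);	/* hypothetical helper */

static struct pnfs_layout_segment *get_layout_with_retry(void)
{
	unsigned long giveup = jiffies + 30 * HZ;	/* real code: rpc_get_timeout() */
	long timeout = 0;				/* persists so the delay can grow */
	struct pnfs_layout_segment *lseg;

	for (;;) {
		lseg = get_layout(&timeout);
		if (!IS_ERR(lseg))
			return lseg;
		if (PTR_ERR(lseg) == -EAGAIN)
			continue;			/* stateid refreshed; retry now */
		if (PTR_ERR(lseg) == -ERECALLCONFLICT) {
			if (time_after(jiffies, giveup))
				return NULL;		/* give up, fall back to MDS I/O */
			continue;			/* retry until the window closes */
		}
		return lseg;				/* fatal; caller maps the error */
	}
}
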
 
 static size_t max_response_pages(struct nfs_server *server)
@@ -8013,7 +8016,7 @@
 };
 
 struct pnfs_layout_segment *
-nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
 {
 	struct inode *inode = lgp->args.inode;
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -8033,6 +8036,7 @@
 		.flags = RPC_TASK_ASYNC,
 	};
 	struct pnfs_layout_segment *lseg = NULL;
+	struct nfs4_exception exception = { .timeout = *timeout };
 	int status = 0;
 
 	dprintk("--> %s\n", __func__);
@@ -8046,7 +8050,6 @@
 		return ERR_PTR(-ENOMEM);
 	}
 	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
-	lgp->args.timestamp = jiffies;
 
 	lgp->res.layoutp = &lgp->args.layout;
 	lgp->res.seq_res.sr_slot = NULL;
@@ -8056,13 +8059,17 @@
 	if (IS_ERR(task))
 		return ERR_CAST(task);
 	status = nfs4_wait_for_completion_rpc_task(task);
-	if (status == 0)
-		status = task->tk_status;
+	if (status == 0) {
+		status = nfs4_layoutget_handle_exception(task, lgp, &exception);
+		*timeout = exception.timeout;
+	}
+
 	trace_nfs4_layoutget(lgp->args.ctx,
 			&lgp->args.range,
 			&lgp->res.range,
 			&lgp->res.stateid,
 			status);
+
 	/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
 	if (status == 0 && lgp->res.layoutp->len)
 		lseg = pnfs_layout_process(lgp);
@@ -8118,7 +8125,8 @@
 
 	dprintk("--> %s\n", __func__);
 	spin_lock(&lo->plh_inode->i_lock);
-	pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
+	pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
+			be32_to_cpu(lrp->args.stateid.seqid));
 	pnfs_mark_layout_returned_if_empty(lo);
 	if (lrp->res.lrs_present)
 		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
@@ -8653,6 +8661,9 @@
 static bool nfs41_match_stateid(const nfs4_stateid *s1,
 		const nfs4_stateid *s2)
 {
+	if (s1->type != s2->type)
+		return false;
+
 	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
 		return false;
 
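
With the type field checked above, two stateids that happen to share the same opaque "other" bytes no longer compare equal unless they also came from the same kind of operation (open, lock, delegation, layout -- see the decode_*_stateid() helpers added to nfs4xdr.c below). A minimal standalone sketch of the idea; the struct layout and names here are illustrative, not the kernel's nfs4_stateid:

#include <stdbool.h>
#include <string.h>

enum stateid_type { OPEN_STATEID, LOCK_STATEID, DELEGATION_STATEID, LAYOUT_STATEID };

struct stateid {
	enum stateid_type type;		/* set by whichever decoder produced it */
	unsigned char other[12];	/* opaque identifier from the server */
};

/* Mirrors the check added to nfs41_match_stateid(): reject the match as soon
 * as the types differ, then fall back to comparing the opaque bytes. */
static bool stateid_match(const struct stateid *a, const struct stateid *b)
{
	if (a->type != b->type)
		return false;
	return memcmp(a->other, b->other, sizeof(a->other)) == 0;
}
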
@@ -8793,6 +8804,7 @@
 		| NFS_CAP_STATEID_NFSV41
 		| NFS_CAP_ATOMIC_OPEN_V1
 		| NFS_CAP_ALLOCATE
+		| NFS_CAP_COPY
 		| NFS_CAP_DEALLOCATE
 		| NFS_CAP_SEEK
 		| NFS_CAP_LAYOUTSTATS
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index d854693..9679f47 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -65,7 +65,10 @@
 
 #define OPENOWNER_POOL_SIZE	8
 
-const nfs4_stateid zero_stateid;
+const nfs4_stateid zero_stateid = {
+	{ .data = { 0 } },
+	.type = NFS4_SPECIAL_STATEID_TYPE,
+};
 static DEFINE_MUTEX(nfs_clid_init_mutex);
 
 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
@@ -985,15 +988,20 @@
  * Byte-range lock aware utility to initialize the stateid of read/write
  * requests.
  */
-int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
-		fmode_t fmode, const struct nfs_lockowner *lockowner)
+int nfs4_select_rw_stateid(struct nfs4_state *state,
+		fmode_t fmode, const struct nfs_lockowner *lockowner,
+		nfs4_stateid *dst, struct rpc_cred **cred)
 {
-	int ret = nfs4_copy_lock_stateid(dst, state, lockowner);
+	int ret;
+
+	if (cred != NULL)
+		*cred = NULL;
+	ret = nfs4_copy_lock_stateid(dst, state, lockowner);
 	if (ret == -EIO)
 		/* A lost lock - don't even consider delegations */
 		goto out;
 	/* returns true if delegation stateid found and copied */
-	if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
+	if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) {
 		ret = 0;
 		goto out;
 	}
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 2c8d05d..9c150b1 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -1520,6 +1520,8 @@
 		{ PNFS_UPDATE_LAYOUT_FOUND_CACHED, "found cached" },	\
 		{ PNFS_UPDATE_LAYOUT_RETURN, "layoutreturn" },		\
 		{ PNFS_UPDATE_LAYOUT_BLOCKED, "layouts blocked" },	\
+		{ PNFS_UPDATE_LAYOUT_INVALID_OPEN, "invalid open" },	\
+		{ PNFS_UPDATE_LAYOUT_RETRY, "retrying" },	\
 		{ PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, "sent layoutget" })
 
 TRACE_EVENT(pnfs_update_layout,
@@ -1528,9 +1530,10 @@
 			u64 count,
 			enum pnfs_iomode iomode,
 			struct pnfs_layout_hdr *lo,
+			struct pnfs_layout_segment *lseg,
 			enum pnfs_update_layout_reason reason
 		),
-		TP_ARGS(inode, pos, count, iomode, lo, reason),
+		TP_ARGS(inode, pos, count, iomode, lo, lseg, reason),
 		TP_STRUCT__entry(
 			__field(dev_t, dev)
 			__field(u64, fileid)
@@ -1540,6 +1543,7 @@
 			__field(enum pnfs_iomode, iomode)
 			__field(int, layoutstateid_seq)
 			__field(u32, layoutstateid_hash)
+			__field(long, lseg)
 			__field(enum pnfs_update_layout_reason, reason)
 		),
 		TP_fast_assign(
@@ -1559,11 +1563,12 @@
 				__entry->layoutstateid_seq = 0;
 				__entry->layoutstateid_hash = 0;
 			}
+			__entry->lseg = (long)lseg;
 		),
 		TP_printk(
 			"fileid=%02x:%02x:%llu fhandle=0x%08x "
 			"iomode=%s pos=%llu count=%llu "
-			"layoutstateid=%d:0x%08x (%s)",
+			"layoutstateid=%d:0x%08x lseg=0x%lx (%s)",
 			MAJOR(__entry->dev), MINOR(__entry->dev),
 			(unsigned long long)__entry->fileid,
 			__entry->fhandle,
@@ -1571,6 +1576,7 @@
 			(unsigned long long)__entry->pos,
 			(unsigned long long)__entry->count,
 			__entry->layoutstateid_seq, __entry->layoutstateid_hash,
+			__entry->lseg,
 			show_pnfs_update_layout_reason(__entry->reason)
 		)
 );
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 88474a4..661e753 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4270,6 +4270,24 @@
 	return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
 }
 
+static int decode_open_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	stateid->type = NFS4_OPEN_STATEID_TYPE;
+	return decode_stateid(xdr, stateid);
+}
+
+static int decode_lock_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	stateid->type = NFS4_LOCK_STATEID_TYPE;
+	return decode_stateid(xdr, stateid);
+}
+
+static int decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	stateid->type = NFS4_DELEGATION_STATEID_TYPE;
+	return decode_stateid(xdr, stateid);
+}
+
 static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
 {
 	int status;
@@ -4278,7 +4296,7 @@
 	if (status != -EIO)
 		nfs_increment_open_seqid(status, res->seqid);
 	if (!status)
-		status = decode_stateid(xdr, &res->stateid);
+		status = decode_open_stateid(xdr, &res->stateid);
 	return status;
 }
 
@@ -4937,7 +4955,7 @@
 	if (status == -EIO)
 		goto out;
 	if (status == 0) {
-		status = decode_stateid(xdr, &res->stateid);
+		status = decode_lock_stateid(xdr, &res->stateid);
 		if (unlikely(status))
 			goto out;
 	} else if (status == -NFS4ERR_DENIED)
@@ -4966,7 +4984,7 @@
 	if (status != -EIO)
 		nfs_increment_lock_seqid(status, res->seqid);
 	if (status == 0)
-		status = decode_stateid(xdr, &res->stateid);
+		status = decode_lock_stateid(xdr, &res->stateid);
 	return status;
 }
 
@@ -5016,7 +5034,7 @@
 	__be32 *p;
 	int status;
 
-	status = decode_stateid(xdr, &res->delegation);
+	status = decode_delegation_stateid(xdr, &res->delegation);
 	if (unlikely(status))
 		return status;
 	p = xdr_inline_decode(xdr, 4);
@@ -5096,7 +5114,7 @@
 	nfs_increment_open_seqid(status, res->seqid);
 	if (status)
 		return status;
-	status = decode_stateid(xdr, &res->stateid);
+	status = decode_open_stateid(xdr, &res->stateid);
 	if (unlikely(status))
 		return status;
 
@@ -5136,7 +5154,7 @@
 	if (status != -EIO)
 		nfs_increment_open_seqid(status, res->seqid);
 	if (!status)
-		status = decode_stateid(xdr, &res->stateid);
+		status = decode_open_stateid(xdr, &res->stateid);
 	return status;
 }
 
@@ -5148,7 +5166,7 @@
 	if (status != -EIO)
 		nfs_increment_open_seqid(status, res->seqid);
 	if (!status)
-		status = decode_stateid(xdr, &res->stateid);
+		status = decode_open_stateid(xdr, &res->stateid);
 	return status;
 }
 
@@ -5838,6 +5856,12 @@
 }
 
 #if defined(CONFIG_NFS_V4_1)
+static int decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	stateid->type = NFS4_LAYOUT_STATEID_TYPE;
+	return decode_stateid(xdr, stateid);
+}
+
 static int decode_getdeviceinfo(struct xdr_stream *xdr,
 				struct nfs4_getdeviceinfo_res *res)
 {
@@ -5919,7 +5943,7 @@
 	if (unlikely(!p))
 		goto out_overflow;
 	res->return_on_close = be32_to_cpup(p);
-	decode_stateid(xdr, &res->stateid);
+	decode_layout_stateid(xdr, &res->stateid);
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
 		goto out_overflow;
@@ -5985,7 +6009,7 @@
 		goto out_overflow;
 	res->lrs_present = be32_to_cpup(p);
 	if (res->lrs_present)
-		status = decode_stateid(xdr, &res->stateid);
+		status = decode_layout_stateid(xdr, &res->stateid);
 	return status;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
@@ -7515,6 +7539,7 @@
 	PROC(DEALLOCATE,	enc_deallocate,		dec_deallocate),
 	PROC(LAYOUTSTATS,	enc_layoutstats,	dec_layoutstats),
 	PROC(CLONE,		enc_clone,		dec_clone),
+	PROC(COPY,		enc_copy,		dec_copy),
 #endif /* CONFIG_NFS_V4_2 */
 };
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1f6db42..174dd4c 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -341,8 +341,10 @@
 	 * long write-back delay. This will be adjusted in
 	 * update_nfs_request below if the region is not locked. */
 	req->wb_page    = page;
-	req->wb_index	= page_file_index(page);
-	get_page(page);
+	if (page) {
+		req->wb_index = page_file_index(page);
+		get_page(page);
+	}
 	req->wb_offset  = offset;
 	req->wb_pgbase	= offset;
 	req->wb_bytes   = count;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 89a5ef4..0c7e0d4 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -270,7 +270,7 @@
 	};
 
 	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-	return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range);
+	return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
 }
 
 static int
@@ -308,7 +308,7 @@
 
 	spin_lock(&inode->i_lock);
 	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
-	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
+	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
 	spin_unlock(&inode->i_lock);
 	pnfs_free_lseg_list(&head);
 	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
@@ -522,13 +522,35 @@
 	return rv;
 }
 
-/* Returns count of number of matching invalid lsegs remaining in list
- * after call.
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+	return (s32)(s1 - s2) > 0;
+}
+
+/**
+ * pnfs_mark_matching_lsegs_invalid - tear down lsegs, or mark them to be freed later
+ * @lo: layout header containing the lsegs
+ * @tmp_list: list head where doomed lsegs should go
+ * @recall_range: optional recall range argument to match (may be NULL)
+ * @seq: only invalidate lsegs obtained at or before this sequence (may be 0)
+ *
+ * Walk the list of lsegs in the layout header, and tear down any that should
+ * be destroyed. If "recall_range" is specified then the segment must match
+ * that range. If "seq" is non-zero, then only match segments that were handed
+ * out at or before that sequence.
+ *
+ * Returns number of matching invalid lsegs remaining in list after scanning
+ * it and purging them.
  */
 int
 pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 			    struct list_head *tmp_list,
-			    const struct pnfs_layout_range *recall_range)
+			    const struct pnfs_layout_range *recall_range,
+			    u32 seq)
 {
 	struct pnfs_layout_segment *lseg, *next;
 	int remaining = 0;
@@ -540,10 +562,12 @@
 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
 		if (!recall_range ||
 		    should_free_lseg(&lseg->pls_range, recall_range)) {
-			dprintk("%s: freeing lseg %p iomode %d "
+			if (seq && pnfs_seqid_is_newer(lseg->pls_seq, seq))
+				continue;
+			dprintk("%s: freeing lseg %p iomode %d seq %u "
 				"offset %llu length %llu\n", __func__,
-				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
-				lseg->pls_range.length);
+				lseg, lseg->pls_range.iomode, lseg->pls_seq,
+				lseg->pls_range.offset, lseg->pls_range.length);
 			if (!mark_lseg_invalid(lseg, tmp_list))
 				remaining++;
 		}
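
pnfs_seqid_is_newer() (like nfsd4_stateid_generation_after() later in this merge) relies on the standard signed-difference trick, so the comparison stays correct when the 32-bit sequence counter wraps. A standalone, runnable illustration in plain C:

#include <stdint.h>
#include <stdio.h>

/* Positive iff s1 is ahead of s2 by less than 2^31, even across a wrap. */
static int seqid_is_newer(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
	printf("%d\n", seqid_is_newer(2, 1));           /* 1: ordinary case          */
	printf("%d\n", seqid_is_newer(0, 0xffffffffu)); /* 1: newer, counter wrapped */
	printf("%d\n", seqid_is_newer(0xffffffffu, 0)); /* 0: older                  */
	return 0;
}
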
@@ -730,15 +754,6 @@
 	pnfs_destroy_layouts_byclid(clp, false);
 }
 
-/*
- * Compare 2 layout stateid sequence ids, to see which is newer,
- * taking into account wraparound issues.
- */
-static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
-{
-	return (s32)(s1 - s2) > 0;
-}
-
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
@@ -781,50 +796,22 @@
 		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 }
 
-int
-pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			      const struct pnfs_layout_range *range,
-			      struct nfs4_state *open_state)
-{
-	int status = 0;
-
-	dprintk("--> %s\n", __func__);
-	spin_lock(&lo->plh_inode->i_lock);
-	if (pnfs_layoutgets_blocked(lo)) {
-		status = -EAGAIN;
-	} else if (!nfs4_valid_open_stateid(open_state)) {
-		status = -EBADF;
-	} else if (list_empty(&lo->plh_segs) ||
-		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
-		int seq;
-
-		do {
-			seq = read_seqbegin(&open_state->seqlock);
-			nfs4_stateid_copy(dst, &open_state->stateid);
-		} while (read_seqretry(&open_state->seqlock, seq));
-	} else
-		nfs4_stateid_copy(dst, &lo->plh_stateid);
-	spin_unlock(&lo->plh_inode->i_lock);
-	dprintk("<-- %s\n", __func__);
-	return status;
-}
-
 /*
-* Get layout from server.
-*    for now, assume that whole file layouts are requested.
-*    arg->offset: 0
-*    arg->length: all ones
-*/
+ * Get layout from server.
+ *    for now, assume that whole file layouts are requested.
+ *    arg->offset: 0
+ *    arg->length: all ones
+ */
 static struct pnfs_layout_segment *
 send_layoutget(struct pnfs_layout_hdr *lo,
 	   struct nfs_open_context *ctx,
+	   nfs4_stateid *stateid,
 	   const struct pnfs_layout_range *range,
-	   gfp_t gfp_flags)
+	   long *timeout, gfp_t gfp_flags)
 {
 	struct inode *ino = lo->plh_inode;
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs4_layoutget *lgp;
-	struct pnfs_layout_segment *lseg;
 	loff_t i_size;
 
 	dprintk("--> %s\n", __func__);
@@ -834,40 +821,31 @@
 	 * store in lseg. If we race with a concurrent seqid morphing
 	 * op, then re-send the LAYOUTGET.
 	 */
-	do {
-		lgp = kzalloc(sizeof(*lgp), gfp_flags);
-		if (lgp == NULL)
-			return NULL;
+	lgp = kzalloc(sizeof(*lgp), gfp_flags);
+	if (lgp == NULL)
+		return ERR_PTR(-ENOMEM);
 
-		i_size = i_size_read(ino);
+	i_size = i_size_read(ino);
 
-		lgp->args.minlength = PAGE_SIZE;
-		if (lgp->args.minlength > range->length)
-			lgp->args.minlength = range->length;
-		if (range->iomode == IOMODE_READ) {
-			if (range->offset >= i_size)
-				lgp->args.minlength = 0;
-			else if (i_size - range->offset < lgp->args.minlength)
-				lgp->args.minlength = i_size - range->offset;
-		}
-		lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
-		pnfs_copy_range(&lgp->args.range, range);
-		lgp->args.type = server->pnfs_curr_ld->id;
-		lgp->args.inode = ino;
-		lgp->args.ctx = get_nfs_open_context(ctx);
-		lgp->gfp_flags = gfp_flags;
-		lgp->cred = lo->plh_lc_cred;
+	lgp->args.minlength = PAGE_SIZE;
+	if (lgp->args.minlength > range->length)
+		lgp->args.minlength = range->length;
+	if (range->iomode == IOMODE_READ) {
+		if (range->offset >= i_size)
+			lgp->args.minlength = 0;
+		else if (i_size - range->offset < lgp->args.minlength)
+			lgp->args.minlength = i_size - range->offset;
+	}
+	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+	pnfs_copy_range(&lgp->args.range, range);
+	lgp->args.type = server->pnfs_curr_ld->id;
+	lgp->args.inode = ino;
+	lgp->args.ctx = get_nfs_open_context(ctx);
+	nfs4_stateid_copy(&lgp->args.stateid, stateid);
+	lgp->gfp_flags = gfp_flags;
+	lgp->cred = lo->plh_lc_cred;
 
-		lseg = nfs4_proc_layoutget(lgp, gfp_flags);
-	} while (lseg == ERR_PTR(-EAGAIN));
-
-	if (IS_ERR(lseg) && !nfs_error_is_fatal(PTR_ERR(lseg)))
-		lseg = NULL;
-	else
-		pnfs_layout_clear_fail_bit(lo,
-				pnfs_iomode_to_fail_bit(range->iomode));
-
-	return lseg;
+	return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
 }
 
 static void pnfs_clear_layoutcommit(struct inode *inode,
@@ -899,6 +877,7 @@
 	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
 		return false;
 	lo->plh_return_iomode = 0;
+	lo->plh_return_seq = 0;
 	pnfs_get_layout_hdr(lo);
 	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
 	return true;
@@ -969,6 +948,7 @@
 		bool send;
 
 		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
+		stateid.seqid = cpu_to_be32(lo->plh_return_seq);
 		iomode = lo->plh_return_iomode;
 		send = pnfs_prepare_layoutreturn(lo);
 		spin_unlock(&inode->i_lock);
@@ -1012,7 +992,7 @@
 	pnfs_get_layout_hdr(lo);
 	empty = list_empty(&lo->plh_segs);
 	pnfs_clear_layoutcommit(ino, &tmp_list);
-	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
 
 	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
 		struct pnfs_layout_range range = {
@@ -1341,23 +1321,28 @@
 
 /*
  * iomode matching rules:
- * iomode	lseg	match
- * -----	-----	-----
- * ANY		READ	true
- * ANY		RW	true
- * RW		READ	false
- * RW		RW	true
- * READ		READ	true
- * READ		RW	true
+ * iomode	lseg	strict match
+ *                      iomode
+ * -----	-----	------ -----
+ * ANY		READ	N/A    true
+ * ANY		RW	N/A    true
+ * RW		READ	N/A    false
+ * RW		RW	N/A    true
+ * READ		READ	N/A    true
+ * READ		RW	true   false
+ * READ		RW	false  true
  */
 static bool
 pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
-		 const struct pnfs_layout_range *range)
+		 const struct pnfs_layout_range *range,
+		 bool strict_iomode)
 {
 	struct pnfs_layout_range range1;
 
 	if ((range->iomode == IOMODE_RW &&
 	     ls_range->iomode != IOMODE_RW) ||
+	    (range->iomode != ls_range->iomode &&
+	     strict_iomode) ||
 	    !pnfs_lseg_range_intersecting(ls_range, range))
 		return 0;
 
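
The new strict_iomode argument changes exactly one case in the table above: a READ request no longer matches a cached RW segment when the caller demands an exact iomode. A reduced sketch of just the iomode half of the decision (the helper name is illustrative; it assumes the existing enum pnfs_iomode values):

static bool iomode_matches(enum pnfs_iomode want, enum pnfs_iomode have,
			   bool strict)
{
	if (want == IOMODE_RW && have != IOMODE_RW)
		return false;		/* an RW request is never satisfied by READ */
	if (strict && want != have)
		return false;		/* strict callers need an exact match */
	return true;			/* otherwise RW covers READ, ANY matches anything */
}

/*
 * iomode_matches(IOMODE_READ, IOMODE_RW, false) -> true   (as before)
 * iomode_matches(IOMODE_READ, IOMODE_RW, true)  -> false  (new behaviour)
 */
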
@@ -1372,7 +1357,8 @@
  */
 static struct pnfs_layout_segment *
 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
-		struct pnfs_layout_range *range)
+		struct pnfs_layout_range *range,
+		bool strict_iomode)
 {
 	struct pnfs_layout_segment *lseg, *ret = NULL;
 
@@ -1381,7 +1367,8 @@
 	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
 		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
 		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
-		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
+		    pnfs_lseg_range_match(&lseg->pls_range, range,
+					  strict_iomode)) {
 			ret = pnfs_get_lseg(lseg);
 			break;
 		}
@@ -1498,6 +1485,7 @@
 		   loff_t pos,
 		   u64 count,
 		   enum pnfs_iomode iomode,
+		   bool strict_iomode,
 		   gfp_t gfp_flags)
 {
 	struct pnfs_layout_range arg = {
@@ -1505,27 +1493,30 @@
 		.offset = pos,
 		.length = count,
 	};
-	unsigned pg_offset;
+	unsigned pg_offset, seq;
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs_client *clp = server->nfs_client;
-	struct pnfs_layout_hdr *lo;
+	struct pnfs_layout_hdr *lo = NULL;
 	struct pnfs_layout_segment *lseg = NULL;
+	nfs4_stateid stateid;
+	long timeout = 0;
+	unsigned long giveup = jiffies + rpc_get_timeout(server->client);
 	bool first;
 
 	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_NO_PNFS);
 		goto out;
 	}
 
 	if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
 		goto out;
 	}
 
 	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
 		goto out;
 	}
@@ -1536,14 +1527,14 @@
 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
 	if (lo == NULL) {
 		spin_unlock(&ino->i_lock);
-		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_NOMEM);
 		goto out;
 	}
 
 	/* Do we even need to bother with this? */
 	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
 		dprintk("%s matches recall, use MDS\n", __func__);
 		goto out_unlock;
@@ -1551,14 +1542,34 @@
 
 	/* if LAYOUTGET already failed once we don't try again */
 	if (pnfs_layout_io_test_failed(lo, iomode)) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
 		goto out_unlock;
 	}
 
-	first = list_empty(&lo->plh_segs);
-	if (first) {
-		/* The first layoutget for the file. Need to serialize per
+	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
+	if (lseg) {
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+				PNFS_UPDATE_LAYOUT_FOUND_CACHED);
+		goto out_unlock;
+	}
+
+	if (!nfs4_valid_open_stateid(ctx->state)) {
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+				PNFS_UPDATE_LAYOUT_INVALID_OPEN);
+		goto out_unlock;
+	}
+
+	/*
+	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
+	 * stateid, or it has been invalidated, then we must use the open
+	 * stateid.
+	 */
+	if (lo->plh_stateid.seqid == 0 ||
+	    test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+
+		/*
+		 * The first layoutget for the file. Need to serialize per
 		 * RFC 5661 Errata 3208.
 		 */
 		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
@@ -1567,18 +1578,17 @@
 			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
 				    TASK_UNINTERRUPTIBLE);
 			pnfs_put_layout_hdr(lo);
+			dprintk("%s retrying\n", __func__);
 			goto lookup_again;
 		}
+
+		first = true;
+		do {
+			seq = read_seqbegin(&ctx->state->seqlock);
+			nfs4_stateid_copy(&stateid, &ctx->state->stateid);
+		} while (read_seqretry(&ctx->state->seqlock, seq));
 	} else {
-		/* Check to see if the layout for the given range
-		 * already exists
-		 */
-		lseg = pnfs_find_lseg(lo, &arg);
-		if (lseg) {
-			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
-					PNFS_UPDATE_LAYOUT_FOUND_CACHED);
-			goto out_unlock;
-		}
+		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
 	}
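
The do/while just above is the stock seqlock read pattern: snapshot the open stateid without taking a lock, and retry if a writer raced with the copy. For context, a sketch of both halves of that pattern; the example_* function names are illustrative, while read_seqbegin()/read_seqretry()/write_seqlock() and the seqlock guarding nfs4_state->stateid are the existing kernel pieces:

/* Writer side: publish a new open stateid atomically w.r.t. readers. */
static void example_update_stateid(struct nfs4_state *state,
				   const nfs4_stateid *new)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, new);
	write_sequnlock(&state->seqlock);
}

/* Reader side: lockless snapshot, retried if the sequence count moved. */
static void example_copy_stateid(struct nfs4_state *state, nfs4_stateid *dst)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		nfs4_stateid_copy(dst, &state->stateid);
	} while (read_seqretry(&state->seqlock, seq));
}
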
 
 	/*
@@ -1593,15 +1603,17 @@
 				pnfs_clear_first_layoutget(lo);
 			pnfs_put_layout_hdr(lo);
 			dprintk("%s retrying\n", __func__);
+			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+					lseg, PNFS_UPDATE_LAYOUT_RETRY);
 			goto lookup_again;
 		}
-		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				PNFS_UPDATE_LAYOUT_RETURN);
 		goto out_put_layout_hdr;
 	}
 
 	if (pnfs_layoutgets_blocked(lo)) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				PNFS_UPDATE_LAYOUT_BLOCKED);
 		goto out_unlock;
 	}
@@ -1626,10 +1638,36 @@
 	if (arg.length != NFS4_MAX_UINT64)
 		arg.length = PAGE_ALIGN(arg.length);
 
-	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
-	atomic_dec(&lo->plh_outstanding);
-	trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+	lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
+	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
+	if (IS_ERR(lseg)) {
+		switch (PTR_ERR(lseg)) {
+		case -ERECALLCONFLICT:
+			if (time_after(jiffies, giveup))
+				lseg = NULL;
+			/* Fallthrough */
+		case -EAGAIN:
+			pnfs_put_layout_hdr(lo);
+			if (first)
+				pnfs_clear_first_layoutget(lo);
+			if (lseg) {
+				trace_pnfs_update_layout(ino, pos, count,
+					iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
+				goto lookup_again;
+			}
+			/* Fallthrough */
+		default:
+			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
+				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+				lseg = NULL;
+			}
+		}
+	} else {
+		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+	}
+
+	atomic_dec(&lo->plh_outstanding);
 out_put_layout_hdr:
 	if (first)
 		pnfs_clear_first_layoutget(lo);
@@ -1678,38 +1716,36 @@
 	struct pnfs_layout_segment *lseg;
 	struct inode *ino = lo->plh_inode;
 	LIST_HEAD(free_me);
-	int status = -EINVAL;
 
 	if (!pnfs_sanity_check_layout_range(&res->range))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	/* Inject layout blob into I/O device driver */
 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
-	if (!lseg || IS_ERR(lseg)) {
+	if (IS_ERR_OR_NULL(lseg)) {
 		if (!lseg)
-			status = -ENOMEM;
-		else
-			status = PTR_ERR(lseg);
-		dprintk("%s: Could not allocate layout: error %d\n",
-		       __func__, status);
-		goto out;
+			lseg = ERR_PTR(-ENOMEM);
+
+		dprintk("%s: Could not allocate layout: error %ld\n",
+		       __func__, PTR_ERR(lseg));
+		return lseg;
 	}
 
 	init_lseg(lo, lseg);
 	lseg->pls_range = res->range;
+	lseg->pls_seq = be32_to_cpu(res->stateid.seqid);
 
 	spin_lock(&ino->i_lock);
 	if (pnfs_layoutgets_blocked(lo)) {
 		dprintk("%s forget reply due to state\n", __func__);
-		goto out_forget_reply;
+		goto out_forget;
 	}
 
 	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
 		/* existing state ID, make sure the sequence number matches. */
 		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
 			dprintk("%s forget reply due to sequence\n", __func__);
-			status = -EAGAIN;
-			goto out_forget_reply;
+			goto out_forget;
 		}
 		pnfs_set_layout_stateid(lo, &res->stateid, false);
 	} else {
@@ -1718,7 +1754,7 @@
 		 * inode invalid, and don't bother validating the stateid
 		 * sequence number.
 		 */
-		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);
+		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL, 0);
 
 		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
 		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
@@ -1735,18 +1771,17 @@
 	spin_unlock(&ino->i_lock);
 	pnfs_free_lseg_list(&free_me);
 	return lseg;
-out:
-	return ERR_PTR(status);
 
-out_forget_reply:
+out_forget:
 	spin_unlock(&ino->i_lock);
 	lseg->pls_layout = lo;
 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
-	goto out;
+	return ERR_PTR(-EAGAIN);
 }
 
 static void
-pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
+pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
+			 u32 seq)
 {
 	if (lo->plh_return_iomode == iomode)
 		return;
@@ -1754,6 +1789,8 @@
 		iomode = IOMODE_ANY;
 	lo->plh_return_iomode = iomode;
 	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
+		lo->plh_return_seq = seq;
 }
 
 /**
@@ -1769,7 +1806,8 @@
 int
 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
 				struct list_head *tmp_list,
-				const struct pnfs_layout_range *return_range)
+				const struct pnfs_layout_range *return_range,
+				u32 seq)
 {
 	struct pnfs_layout_segment *lseg, *next;
 	int remaining = 0;
@@ -1792,8 +1830,11 @@
 				continue;
 			remaining++;
 			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
-			pnfs_set_plh_return_iomode(lo, return_range->iomode);
 		}
+
+	if (remaining)
+		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
+
 	return remaining;
 }
 
@@ -1810,13 +1851,14 @@
 	bool return_now = false;
 
 	spin_lock(&inode->i_lock);
-	pnfs_set_plh_return_iomode(lo, range.iomode);
+	pnfs_set_plh_return_info(lo, range.iomode, lseg->pls_seq);
 	/*
 	 * mark all matching lsegs so that we are sure to have no live
 	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
 	 * for how it works.
 	 */
-	if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range)) {
+	if (!pnfs_mark_matching_lsegs_return(lo, &free_me,
+						&range, lseg->pls_seq)) {
 		nfs4_stateid stateid;
 		enum pnfs_iomode iomode = lo->plh_return_iomode;
 
@@ -1849,6 +1891,7 @@
 						   req_offset(req),
 						   rd_size,
 						   IOMODE_READ,
+						   false,
 						   GFP_KERNEL);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -1873,6 +1916,7 @@
 						   req_offset(req),
 						   wb_size,
 						   IOMODE_RW,
+						   false,
 						   GFP_NOFS);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -2143,12 +2187,15 @@
 }
 
 /* Resend all requests through pnfs. */
-int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
 {
 	struct nfs_pageio_descriptor pgio;
 
-	nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
-	return nfs_pageio_resend(&pgio, hdr);
+	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		nfs_pageio_init_read(&pgio, hdr->inode, false,
+					hdr->completion_ops);
+		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
+	}
 }
 EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
 
@@ -2158,12 +2205,11 @@
 	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
 	struct pnfs_layout_segment *lseg = desc->pg_lseg;
 	enum pnfs_try_status trypnfs;
-	int err = 0;
 
 	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
 	if (trypnfs == PNFS_TRY_AGAIN)
-		err = pnfs_read_resend_pnfs(hdr);
-	if (trypnfs == PNFS_NOT_ATTEMPTED || err)
+		pnfs_read_resend_pnfs(hdr);
+	if (trypnfs == PNFS_NOT_ATTEMPTED || hdr->task.tk_status)
 		pnfs_read_through_mds(desc, hdr);
 }
 
@@ -2405,7 +2451,7 @@
 	spin_lock(&inode->i_lock);
 	if (!NFS_I(inode)->layout) {
 		spin_unlock(&inode->i_lock);
-		goto out;
+		goto out_clear_layoutstats;
 	}
 	hdr = NFS_I(inode)->layout;
 	pnfs_get_layout_hdr(hdr);
@@ -2434,6 +2480,7 @@
 	kfree(data);
 out_put:
 	pnfs_put_layout_hdr(hdr);
+out_clear_layoutstats:
 	smp_mb__before_atomic();
 	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
 	smp_mb__after_atomic();
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 1ac1db5..b21bd0b 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -64,6 +64,7 @@
 	struct list_head pls_lc_list;
 	struct pnfs_layout_range pls_range;
 	atomic_t pls_refcount;
+	u32 pls_seq;
 	unsigned long pls_flags;
 	struct pnfs_layout_hdr *pls_layout;
 	struct work_struct pls_work;
@@ -194,6 +195,7 @@
 	unsigned long		plh_flags;
 	nfs4_stateid		plh_stateid;
 	u32			plh_barrier; /* ignore lower seqids */
+	u32			plh_return_seq;
 	enum pnfs_iomode	plh_return_iomode;
 	loff_t			plh_lwb; /* last write byte for layoutcommit */
 	struct rpc_cred		*plh_lc_cred; /* layoutcommit cred */
@@ -226,7 +228,7 @@
 extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
 				   struct pnfs_device *dev,
 				   struct rpc_cred *cred);
-extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
+extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags);
 extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
 
 /* pnfs.c */
@@ -258,16 +260,14 @@
 void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
 			     const nfs4_stateid *new,
 			     bool update_barrier);
-int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
-				  struct pnfs_layout_hdr *lo,
-				  const struct pnfs_layout_range *range,
-				  struct nfs4_state *open_state);
 int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 				struct list_head *tmp_list,
-				const struct pnfs_layout_range *recall_range);
+				const struct pnfs_layout_range *recall_range,
+				u32 seq);
 int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
 				struct list_head *tmp_list,
-				const struct pnfs_layout_range *recall_range);
+				const struct pnfs_layout_range *recall_range,
+				u32 seq);
 bool pnfs_roc(struct inode *ino);
 void pnfs_roc_release(struct inode *ino);
 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
@@ -282,12 +282,13 @@
 int pnfs_commit_and_return_layout(struct inode *);
 void pnfs_ld_write_done(struct nfs_pgio_header *);
 void pnfs_ld_read_done(struct nfs_pgio_header *);
-int pnfs_read_resend_pnfs(struct nfs_pgio_header *);
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *);
 struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
 					       struct nfs_open_context *ctx,
 					       loff_t pos,
 					       u64 count,
 					       enum pnfs_iomode iomode,
+					       bool strict_iomode,
 					       gfp_t gfp_flags);
 void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
 
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 4aaed89..0dfc476 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -61,7 +61,7 @@
 
 /* The generic layer is about to remove the req from the commit list.
  * If this will make the bucket empty, it will need to put the lseg reference.
- * Note this must be called holding the inode (/cinfo) lock
+ * Note this must be called holding i_lock
  */
 void
 pnfs_generic_clear_request_commit(struct nfs_page *req,
@@ -98,7 +98,7 @@
 		if (!nfs_lock_request(req))
 			continue;
 		kref_get(&req->wb_kref);
-		if (cond_resched_lock(cinfo->lock))
+		if (cond_resched_lock(&cinfo->inode->i_lock))
 			list_safe_reset_next(req, tmp, wb_list);
 		nfs_request_remove_commit_list(req, cinfo);
 		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
@@ -119,7 +119,7 @@
 	struct list_head *dst = &bucket->committing;
 	int ret;
 
-	lockdep_assert_held(cinfo->lock);
+	lockdep_assert_held(&cinfo->inode->i_lock);
 	ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
 	if (ret) {
 		cinfo->ds->nwritten -= ret;
@@ -142,7 +142,7 @@
 {
 	int i, rv = 0, cnt;
 
-	lockdep_assert_held(cinfo->lock);
+	lockdep_assert_held(&cinfo->inode->i_lock);
 	for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
 		cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
 						       cinfo, max);
@@ -161,16 +161,16 @@
 	struct pnfs_layout_segment *freeme;
 	int i;
 
-	lockdep_assert_held(cinfo->lock);
+	lockdep_assert_held(&cinfo->inode->i_lock);
 restart:
 	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
 		if (pnfs_generic_transfer_commit_list(&b->written, dst,
 						      cinfo, 0)) {
 			freeme = b->wlseg;
 			b->wlseg = NULL;
-			spin_unlock(cinfo->lock);
+			spin_unlock(&cinfo->inode->i_lock);
 			pnfs_put_lseg(freeme);
-			spin_lock(cinfo->lock);
+			spin_lock(&cinfo->inode->i_lock);
 			goto restart;
 		}
 	}
@@ -186,7 +186,7 @@
 	LIST_HEAD(pages);
 	int i;
 
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 	for (i = idx; i < fl_cinfo->nbuckets; i++) {
 		bucket = &fl_cinfo->buckets[i];
 		if (list_empty(&bucket->committing))
@@ -194,12 +194,12 @@
 		freeme = bucket->clseg;
 		bucket->clseg = NULL;
 		list_splice_init(&bucket->committing, &pages);
-		spin_unlock(cinfo->lock);
+		spin_unlock(&cinfo->inode->i_lock);
 		nfs_retry_commit(&pages, freeme, cinfo, i);
 		pnfs_put_lseg(freeme);
-		spin_lock(cinfo->lock);
+		spin_lock(&cinfo->inode->i_lock);
 	}
-	spin_unlock(cinfo->lock);
+	spin_unlock(&cinfo->inode->i_lock);
 }
 
 static unsigned int
@@ -238,14 +238,31 @@
 	struct pnfs_commit_bucket *bucket;
 
 	bucket = &cinfo->ds->buckets[data->ds_commit_index];
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 	list_splice_init(&bucket->committing, pages);
 	data->lseg = bucket->clseg;
 	bucket->clseg = NULL;
-	spin_unlock(cinfo->lock);
+	spin_unlock(&cinfo->inode->i_lock);
 
 }
 
+/* Helper function for pnfs_generic_commit_pagelist to catch an empty
+ * page list. This can happen when two commits race. */
+static bool
+pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
+					  struct nfs_commit_data *data,
+					  struct nfs_commit_info *cinfo)
+{
+	if (list_empty(pages)) {
+		if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
+			wake_up_atomic_t(&cinfo->mds->rpcs_out);
+		nfs_commitdata_release(data);
+		return true;
+	}
+
+	return false;
+}
+
 /* This follows nfs_commit_list pretty closely */
 int
 pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
@@ -280,6 +297,11 @@
 	list_for_each_entry_safe(data, tmp, &list, pages) {
 		list_del_init(&data->pages);
 		if (data->ds_commit_index < 0) {
+			/* another commit raced with us */
+			if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
+				data, cinfo))
+				continue;
+
 			nfs_init_commit(data, mds_pages, NULL, cinfo);
 			nfs_initiate_commit(NFS_CLIENT(inode), data,
 					    NFS_PROTO(data->inode),
@@ -288,6 +310,12 @@
 			LIST_HEAD(pages);
 
 			pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
+
+			/* another commit raced with us */
+			if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
+				data, cinfo))
+				continue;
+
 			nfs_init_commit(data, &pages, data->lseg, cinfo);
 			initiate_commit(data, how);
 		}
@@ -874,12 +902,12 @@
 	struct list_head *list;
 	struct pnfs_commit_bucket *buckets;
 
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 	buckets = cinfo->ds->buckets;
 	list = &buckets[ds_commit_idx].written;
 	if (list_empty(list)) {
 		if (!pnfs_is_valid_lseg(lseg)) {
-			spin_unlock(cinfo->lock);
+			spin_unlock(&cinfo->inode->i_lock);
 			cinfo->completion_ops->resched_write(cinfo, req);
 			return;
 		}
@@ -896,7 +924,7 @@
 	cinfo->ds->nwritten++;
 
 	nfs_request_add_commit_list_locked(req, list, cinfo);
-	spin_unlock(cinfo->lock);
+	spin_unlock(&cinfo->inode->i_lock);
 	nfs_mark_page_unstable(req->wb_page, cinfo);
 }
 EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f126828..2137e02 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -191,6 +191,7 @@
 
 enum {
 	Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
+	Opt_xprt_rdma6,
 
 	Opt_xprt_err
 };
@@ -201,6 +202,7 @@
 	{ Opt_xprt_tcp, "tcp" },
 	{ Opt_xprt_tcp6, "tcp6" },
 	{ Opt_xprt_rdma, "rdma" },
+	{ Opt_xprt_rdma6, "rdma6" },
 
 	{ Opt_xprt_err, NULL }
 };
@@ -1456,6 +1458,8 @@
 				mnt->flags |= NFS_MOUNT_TCP;
 				mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
 				break;
+			case Opt_xprt_rdma6:
+				protofamily = AF_INET6;
 			case Opt_xprt_rdma:
 				/* vector side protocols to TCP */
 				mnt->flags |= NFS_MOUNT_TCP;
@@ -2408,6 +2412,11 @@
 				     struct nfs_server *server2)
 {
 	struct sockaddr *sap1, *sap2;
+	struct rpc_xprt *xprt1 = server1->client->cl_xprt;
+	struct rpc_xprt *xprt2 = server2->client->cl_xprt;
+
+	if (!net_eq(xprt1->xprt_net, xprt2->xprt_net))
+		return 0;
 
 	sap1 = (struct sockaddr *)&server1->nfs_client->cl_addr;
 	sap2 = (struct sockaddr *)&server2->nfs_client->cl_addr;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5f4fd53..e1c74d3 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -245,8 +245,7 @@
 static int wb_priority(struct writeback_control *wbc)
 {
 	int ret = 0;
-	if (wbc->for_reclaim)
-		return FLUSH_HIGHPRI | FLUSH_COND_STABLE;
+
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		ret = FLUSH_COND_STABLE;
 	return ret;
@@ -737,7 +736,7 @@
 		head = req->wb_head;
 
 		spin_lock(&inode->i_lock);
-		if (likely(!PageSwapCache(head->wb_page))) {
+		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
 			set_page_private(head->wb_page, 0);
 			ClearPagePrivate(head->wb_page);
 			smp_mb__after_atomic();
@@ -759,7 +758,8 @@
 static void
 nfs_mark_request_dirty(struct nfs_page *req)
 {
-	__set_page_dirty_nobuffers(req->wb_page);
+	if (req->wb_page)
+		__set_page_dirty_nobuffers(req->wb_page);
 }
 
 /*
@@ -804,7 +804,7 @@
  * number of outstanding requests requiring a commit as well as
  * the MM page stats.
  *
- * The caller must hold the cinfo->lock, and the nfs_page lock.
+ * The caller must hold cinfo->inode->i_lock, and the nfs_page lock.
  */
 void
 nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
@@ -832,10 +832,11 @@
 void
 nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
 {
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
-	spin_unlock(cinfo->lock);
-	nfs_mark_page_unstable(req->wb_page, cinfo);
+	spin_unlock(&cinfo->inode->i_lock);
+	if (req->wb_page)
+		nfs_mark_page_unstable(req->wb_page, cinfo);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -864,7 +865,7 @@
 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
 				      struct inode *inode)
 {
-	cinfo->lock = &inode->i_lock;
+	cinfo->inode = inode;
 	cinfo->mds = &NFS_I(inode)->commit_info;
 	cinfo->ds = pnfs_get_ds_info(inode);
 	cinfo->dreq = NULL;
@@ -967,7 +968,7 @@
 	return cinfo->mds->ncommit;
 }
 
-/* cinfo->lock held by caller */
+/* cinfo->inode->i_lock held by caller */
 int
 nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
 		     struct nfs_commit_info *cinfo, int max)
@@ -979,7 +980,7 @@
 		if (!nfs_lock_request(req))
 			continue;
 		kref_get(&req->wb_kref);
-		if (cond_resched_lock(cinfo->lock))
+		if (cond_resched_lock(&cinfo->inode->i_lock))
 			list_safe_reset_next(req, tmp, wb_list);
 		nfs_request_remove_commit_list(req, cinfo);
 		nfs_list_add_request(req, dst);
@@ -1005,7 +1006,7 @@
 {
 	int ret = 0;
 
-	spin_lock(cinfo->lock);
+	spin_lock(&cinfo->inode->i_lock);
 	if (cinfo->mds->ncommit > 0) {
 		const int max = INT_MAX;
 
@@ -1013,7 +1014,7 @@
 					   cinfo, max);
 		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
 	}
-	spin_unlock(cinfo->lock);
+	spin_unlock(&cinfo->inode->i_lock);
 	return ret;
 }
 
@@ -1709,6 +1710,10 @@
 {
 	struct nfs_commit_data	*data;
 
+	/* another commit raced with us */
+	if (list_empty(head))
+		return 0;
+
 	data = nfs_commitdata_alloc();
 
 	if (!data)
@@ -1724,6 +1729,36 @@
 	return -ENOMEM;
 }
 
+int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf)
+{
+	struct inode *inode = file_inode(file);
+	struct nfs_open_context *open;
+	struct nfs_commit_info cinfo;
+	struct nfs_page *req;
+	int ret;
+
+	open = get_nfs_open_context(nfs_file_open_context(file));
+	req  = nfs_create_request(open, NULL, NULL, 0, i_size_read(inode));
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out_put;
+	}
+
+	nfs_init_cinfo_from_inode(&cinfo, inode);
+
+	memcpy(&req->wb_verf, verf, sizeof(struct nfs_write_verifier));
+	nfs_request_add_commit_list(req, &cinfo);
+	ret = nfs_commit_inode(inode, FLUSH_SYNC);
+	if (ret > 0)
+		ret = 0;
+
+	nfs_free_request(req);
+out_put:
+	put_nfs_open_context(open);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_commit_file);
+
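
nfs_commit_file() lets a caller that only holds a write verifier -- the NFSv4.2 COPY path enabled elsewhere in this merge (NFS_CAP_COPY, enc_copy/dec_copy) is the intended user -- force a COMMIT covering the whole file. A hedged usage sketch; the surrounding copy logic and how the verifier reaches the caller are assumptions, not code from this patch:

/* Illustrative only: after an unstable server-side copy, commit the
 * destination using the verifier the server handed back. */
static int example_commit_after_copy(struct file *dst,
				     struct nfs_write_verifier *verf)
{
	int ret = nfs_commit_file(dst, verf);

	if (ret < 0)
		return ret;	/* commit failed; caller may resend the copy */
	return 0;
}
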
 /*
  * COMMIT call returned
  */
@@ -1748,7 +1783,8 @@
 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
 		nfs_list_remove_request(req);
-		nfs_clear_page_commit(req->wb_page);
+		if (req->wb_page)
+			nfs_clear_page_commit(req->wb_page);
 
 		dprintk("NFS:       commit (%s/%llu %d@%lld)",
 			req->wb_context->dentry->d_sb->s_id,
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 93d5853..dba2ff8 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -379,7 +379,7 @@
 	 */
 	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
 	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-		- hdr;
+		+ rqstp->rq_arg.tail[0].iov_len - hdr;
 	/*
 	 * Round the length of the data which was specified up to
 	 * the next multiple of XDR units and then compare that
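
The dlen fix matters when the decoded write payload spills past the head iovec and the page list into the tail: without the tail's length the computed payload comes up short and the sanity check described in the comment above rejects a legitimate request. A worked example with purely illustrative numbers:

/*
 * Say the opaque data starts hdr = 120 bytes into the head iovec, and the
 * client wrote count = 4192 bytes:
 *
 *	head.iov_len = 200	(so 80 data bytes live in the head)
 *	page_len     = 4096	(one full page of data)
 *	tail.iov_len = 16	(the final 16 data bytes)
 *
 *	old: dlen = 200 + 4096      - 120 = 4176  -> looks 16 bytes short
 *	new: dlen = 200 + 4096 + 16 - 120 = 4192  -> matches the rounded-up count
 */
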
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 825c7bc..953c075 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -289,7 +289,7 @@
 
 		status = nfserr_bad_stateid;
 		mutex_lock(&ls->ls_mutex);
-		if (stateid->si_generation > stid->sc_stateid.si_generation)
+		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
 			goto out_unlock_stid;
 		if (layout_type != ls->ls_layout_type)
 			goto out_unlock_stid;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 0462eed..f5f82e1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4651,12 +4651,6 @@
 	return opens_in_grace(net) && mandatory_lock(inode);
 }
 
-/* Returns true iff a is later than b: */
-static bool stateid_generation_after(stateid_t *a, stateid_t *b)
-{
-	return (s32)(a->si_generation - b->si_generation) > 0;
-}
-
 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
 	/*
@@ -4670,7 +4664,7 @@
 		return nfs_ok;
 
 	/* If the client sends us a stateid from the future, it's buggy: */
-	if (stateid_generation_after(in, ref))
+	if (nfsd4_stateid_generation_after(in, ref))
 		return nfserr_bad_stateid;
 	/*
 	 * However, we could see a stateid from the past, even from a
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index c050c53..986e51e 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -573,6 +573,11 @@
 	NFSPROC4_CLNT_CB_SEQUENCE,
 };
 
+/* Returns true iff a is later than b: */
+static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
+{
+	return (s32)(a->si_generation - b->si_generation) > 0;
+}
 
 struct nfsd4_compound_state;
 struct nfsd_net;
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 2ccbf55..1a85d94 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -13,13 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Original code was written by Koji Sato <koji@osrg.net>.
- * Two allocators were unified by Ryusuke Konishi <ryusuke@osrg.net>,
- *                                Amagai Yoshiji <amagai@osrg.net>.
+ * Originally written by Koji Sato.
+ * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
 
 #include <linux/types.h>
@@ -58,7 +53,7 @@
  * @inode: inode of metadata file using this allocator
  * @entry_size: size of the persistent object
  */
-int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
+int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
 {
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
 
@@ -73,13 +68,17 @@
 	mi->mi_blocks_per_group =
 		DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode),
 			     mi->mi_entries_per_block) + 1;
-		/* Number of blocks in a group including entry blocks and
-		   a bitmap block */
+		/*
+		 * Number of blocks in a group including entry blocks
+		 * and a bitmap block
+		 */
 	mi->mi_blocks_per_desc_block =
 		nilfs_palloc_groups_per_desc_block(inode) *
 		mi->mi_blocks_per_group + 1;
-		/* Number of blocks per descriptor including the
-		   descriptor block */
+		/*
+		 * Number of blocks per descriptor including the
+		 * descriptor block
+		 */
 	return 0;
 }
 
@@ -389,7 +388,7 @@
  */
 static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
 					    unsigned long target,
-					    unsigned bsize,
+					    unsigned int bsize,
 					    spinlock_t *lock)
 {
 	int pos, end = bsize;
@@ -624,7 +623,7 @@
 
 	if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
 		nilfs_warning(inode->i_sb, __func__,
-			      "entry number %llu already freed: ino=%lu\n",
+			      "entry number %llu already freed: ino=%lu",
 			      (unsigned long long)req->pr_entry_nr,
 			      (unsigned long)inode->i_ino);
 	else
@@ -665,7 +664,7 @@
 
 	if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
 		nilfs_warning(inode->i_sb, __func__,
-			      "entry number %llu already freed: ino=%lu\n",
+			      "entry number %llu already freed: ino=%lu",
 			      (unsigned long long)req->pr_entry_nr,
 			      (unsigned long)inode->i_ino);
 	else
@@ -740,8 +739,8 @@
 	unsigned long group, group_offset;
 	__u64 group_min_nr, last_nrs[8];
 	const unsigned long epg = nilfs_palloc_entries_per_group(inode);
-	const unsigned epb = NILFS_MDT(inode)->mi_entries_per_block;
-	unsigned entry_start, end, pos;
+	const unsigned int epb = NILFS_MDT(inode)->mi_entries_per_block;
+	unsigned int entry_start, end, pos;
 	spinlock_t *lock;
 	int i, j, k, ret;
 	u32 nfree;
@@ -774,7 +773,7 @@
 			if (!nilfs_clear_bit_atomic(lock, group_offset,
 						    bitmap)) {
 				nilfs_warning(inode->i_sb, __func__,
-					      "entry number %llu already freed: ino=%lu\n",
+					      "entry number %llu already freed: ino=%lu",
 					      (unsigned long long)entry_nrs[j],
 					      (unsigned long)inode->i_ino);
 			} else {
@@ -819,7 +818,7 @@
 							      last_nrs[k]);
 			if (ret && ret != -ENOENT) {
 				nilfs_warning(inode->i_sb, __func__,
-					      "failed to delete block of entry %llu: ino=%lu, err=%d\n",
+					      "failed to delete block of entry %llu: ino=%lu, err=%d",
 					      (unsigned long long)last_nrs[k],
 					      (unsigned long)inode->i_ino, ret);
 			}
@@ -838,7 +837,7 @@
 			ret = nilfs_palloc_delete_bitmap_block(inode, group);
 			if (ret && ret != -ENOENT) {
 				nilfs_warning(inode->i_sb, __func__,
-					      "failed to delete bitmap block of group %lu: ino=%lu, err=%d\n",
+					      "failed to delete bitmap block of group %lu: ino=%lu, err=%d",
 					      group,
 					      (unsigned long)inode->i_ino, ret);
 			}
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 6e6f49a..05149e6 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -13,13 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Original code was written by Koji Sato <koji@osrg.net>.
- * Two allocators were unified by Ryusuke Konishi <ryusuke@osrg.net>,
- *                                Amagai Yoshiji <amagai@osrg.net>.
+ * Originally written by Koji Sato.
+ * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
 
 #ifndef _NILFS_ALLOC_H
@@ -42,7 +37,7 @@
 	return 1UL << (inode->i_blkbits + 3 /* log2(8 = CHAR_BITS) */);
 }
 
-int nilfs_palloc_init_blockgroup(struct inode *, unsigned);
+int nilfs_palloc_init_blockgroup(struct inode *, unsigned int);
 int nilfs_palloc_get_entry_block(struct inode *, __u64, int,
 				 struct buffer_head **);
 void *nilfs_palloc_block_get_entry(const struct inode *, __u64,
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index a9fb363..f2a7877 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #include <linux/fs.h>
@@ -46,7 +42,7 @@
 
 	if (err == -EINVAL) {
 		nilfs_error(inode->i_sb, fname,
-			    "broken bmap (inode number=%lu)\n", inode->i_ino);
+			    "broken bmap (inode number=%lu)", inode->i_ino);
 		err = -EIO;
 	}
 	return err;
@@ -97,7 +93,7 @@
 }
 
 int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
-			     unsigned maxblocks)
+			     unsigned int maxblocks)
 {
 	int ret;
 
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index bfa817c..b6a4c8f9 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #ifndef _NILFS_BMAP_H
@@ -61,7 +57,7 @@
 struct nilfs_bmap_operations {
 	int (*bop_lookup)(const struct nilfs_bmap *, __u64, int, __u64 *);
 	int (*bop_lookup_contig)(const struct nilfs_bmap *, __u64, __u64 *,
-				 unsigned);
+				 unsigned int);
 	int (*bop_insert)(struct nilfs_bmap *, __u64, __u64);
 	int (*bop_delete)(struct nilfs_bmap *, __u64);
 	void (*bop_clear)(struct nilfs_bmap *);
@@ -126,10 +122,14 @@
 
 /* pointer type */
 #define NILFS_BMAP_PTR_P	0	/* physical block number (i.e. LBN) */
-#define NILFS_BMAP_PTR_VS	1	/* virtual block number (single
-					   version) */
-#define NILFS_BMAP_PTR_VM	2	/* virtual block number (has multiple
-					   versions) */
+#define NILFS_BMAP_PTR_VS	1	/*
+					 * virtual block number (single
+					 * version)
+					 */
+#define NILFS_BMAP_PTR_VM	2	/*
+					 * virtual block number (has multiple
+					 * versions)
+					 */
 #define NILFS_BMAP_PTR_U	(-1)	/* never perform pointer operations */
 
 #define NILFS_BMAP_USE_VBN(bmap)	((bmap)->b_ptr_type > 0)
@@ -154,7 +154,7 @@
 int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *);
 int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *);
 void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *);
-int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned);
+int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned int);
 int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec);
 int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key);
 int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp);
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index e0c9daf..0576033 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -13,13 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * This file was originally written by Seiji Kihara <kihara@osrg.net>
- * and fully revised by Ryusuke Konishi <ryusuke@osrg.net> for
- * stabilization and simplification.
+ * Originally written by Seiji Kihara.
+ * Fully revised by Ryusuke Konishi for stabilization and simplification.
  *
  */
 
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index d876b56..2cc1b80 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -13,12 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Seiji Kihara <kihara@osrg.net>
- * Revised by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Seiji Kihara.
+ * Revised by Ryusuke Konishi.
  */
 
 #ifndef _NILFS_BTNODE_H
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 3a3821b..eccb1c8 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #include <linux/slab.h>
@@ -689,7 +685,8 @@
 }
 
 static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
-				     __u64 key, __u64 *ptrp, unsigned maxblocks)
+				     __u64 key, __u64 *ptrp,
+				     unsigned int maxblocks)
 {
 	struct nilfs_btree_path *path;
 	struct nilfs_btree_node *node;
@@ -1032,12 +1029,12 @@
 	if (ptr != NILFS_BMAP_INVALID_PTR)
 		/* sequential access */
 		return ptr;
-	else {
-		ptr = nilfs_btree_find_near(btree, path);
-		if (ptr != NILFS_BMAP_INVALID_PTR)
-			/* near */
-			return ptr;
-	}
+
+	ptr = nilfs_btree_find_near(btree, path);
+	if (ptr != NILFS_BMAP_INVALID_PTR)
+		/* near */
+		return ptr;
+
 	/* block group */
 	return nilfs_bmap_find_target_in_group(btree);
 }
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 22c02e3..df1a25f 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #ifndef _NILFS_BTREE_H
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index b6596ca..8a3d3b6 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #include <linux/kernel.h>
@@ -41,6 +37,7 @@
 nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
 {
 	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
+
 	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
 	return (unsigned long)tcno;
 }
@@ -50,6 +47,7 @@
 nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
 {
 	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
+
 	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
 }
 
@@ -433,7 +431,8 @@
 }
 
 static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
-					  void *buf, unsigned cisz, size_t nci)
+					  void *buf, unsigned int cisz,
+					  size_t nci)
 {
 	struct nilfs_checkpoint *cp;
 	struct nilfs_cpinfo *ci = buf;
@@ -484,7 +483,8 @@
 }
 
 static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
-					  void *buf, unsigned cisz, size_t nci)
+					  void *buf, unsigned int cisz,
+					  size_t nci)
 {
 	struct buffer_head *bh;
 	struct nilfs_cpfile_header *header;
@@ -570,7 +570,7 @@
  */
 
 ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
-				void *buf, unsigned cisz, size_t nci)
+				void *buf, unsigned int cisz, size_t nci)
 {
 	switch (mode) {
 	case NILFS_CHECKPOINT:
@@ -870,8 +870,10 @@
 	void *kaddr;
 	int ret;
 
-	/* CP number is invalid if it's zero or larger than the
-	largest	exist one.*/
+	/*
+	 * CP number is invalid if it's zero or larger than the
+	 * largest existing one.
+	 */
 	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
 		return -ENOENT;
 	down_read(&NILFS_MDT(cpfile)->mi_sem);
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index a242b9a..0249744 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #ifndef _NILFS_CPFILE_H
@@ -37,8 +33,8 @@
 int nilfs_cpfile_change_cpmode(struct inode *, __u64, int);
 int nilfs_cpfile_is_snapshot(struct inode *, __u64);
 int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *);
-ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
-				size_t);
+ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *,
+				unsigned int, size_t);
 
 int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
 		      struct nilfs_inode *raw_inode, struct inode **inodep);
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 7dc23f1..7367610 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #include <linux/types.h>
@@ -428,7 +424,7 @@
 	return ret;
 }
 
-ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
+ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
 			    size_t nvi)
 {
 	struct buffer_head *entry_bh;
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index cbd8e97..abbfdab 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #ifndef _NILFS_DAT_H
@@ -51,7 +47,7 @@
 int nilfs_dat_mark_dirty(struct inode *, __u64);
 int nilfs_dat_freev(struct inode *, __u64 *, size_t);
 int nilfs_dat_move(struct inode *, __u64, sector_t);
-ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);
+ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned int, size_t);
 
 int nilfs_dat_read(struct super_block *sb, size_t entry_size,
 		   struct nilfs_inode *raw_inode, struct inode **inodep);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 6723d45..e506f4f 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net>
+ * Modified for NILFS by Amagai Yoshiji.
  */
 /*
  *  linux/fs/ext2/dir.c
@@ -50,7 +46,7 @@
  * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
  * more robust, but we have what we have
  */
-static inline unsigned nilfs_chunk_size(struct inode *inode)
+static inline unsigned int nilfs_chunk_size(struct inode *inode)
 {
 	return inode->i_sb->s_blocksize;
 }
@@ -65,9 +61,9 @@
  * Return the offset into page `page_nr' of the last valid
  * byte in that page, plus one.
  */
-static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
+static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
-	unsigned last_byte = inode->i_size;
+	unsigned int last_byte = inode->i_size;
 
 	last_byte -= page_nr << PAGE_SHIFT;
 	if (last_byte > PAGE_SIZE)
@@ -75,20 +71,22 @@
 	return last_byte;
 }
 
-static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to)
+static int nilfs_prepare_chunk(struct page *page, unsigned int from,
+			       unsigned int to)
 {
 	loff_t pos = page_offset(page) + from;
+
 	return __block_write_begin(page, pos, to - from, nilfs_get_block);
 }
 
 static void nilfs_commit_chunk(struct page *page,
 			       struct address_space *mapping,
-			       unsigned from, unsigned to)
+			       unsigned int from, unsigned int to)
 {
 	struct inode *dir = mapping->host;
 	loff_t pos = page_offset(page) + from;
-	unsigned len = to - from;
-	unsigned nr_dirty, copied;
+	unsigned int len = to - from;
+	unsigned int nr_dirty, copied;
 	int err;
 
 	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
@@ -106,10 +104,10 @@
 {
 	struct inode *dir = page->mapping->host;
 	struct super_block *sb = dir->i_sb;
-	unsigned chunk_size = nilfs_chunk_size(dir);
+	unsigned int chunk_size = nilfs_chunk_size(dir);
 	char *kaddr = page_address(page);
-	unsigned offs, rec_len;
-	unsigned limit = PAGE_SIZE;
+	unsigned int offs, rec_len;
+	unsigned int limit = PAGE_SIZE;
 	struct nilfs_dir_entry *p;
 	char *error;
 
@@ -259,7 +257,6 @@
 	unsigned int offset = pos & ~PAGE_MASK;
 	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
-/*	unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
 
 	if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
 		return 0;
@@ -321,7 +318,7 @@
 {
 	const unsigned char *name = qstr->name;
 	int namelen = qstr->len;
-	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
+	unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
 	unsigned long start, n;
 	unsigned long npages = dir_pages(dir);
 	struct page *page = NULL;
@@ -340,6 +337,7 @@
 	n = start;
 	do {
 		char *kaddr;
+
 		page = nilfs_get_page(dir, n);
 		if (!IS_ERR(page)) {
 			kaddr = page_address(page);
@@ -410,8 +408,8 @@
 void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
 		    struct page *page, struct inode *inode)
 {
-	unsigned from = (char *) de - (char *) page_address(page);
-	unsigned to = from + nilfs_rec_len_from_disk(de->rec_len);
+	unsigned int from = (char *)de - (char *)page_address(page);
+	unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len);
 	struct address_space *mapping = page->mapping;
 	int err;
 
@@ -433,15 +431,15 @@
 	struct inode *dir = d_inode(dentry->d_parent);
 	const unsigned char *name = dentry->d_name.name;
 	int namelen = dentry->d_name.len;
-	unsigned chunk_size = nilfs_chunk_size(dir);
-	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
+	unsigned int chunk_size = nilfs_chunk_size(dir);
+	unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
 	unsigned short rec_len, name_len;
 	struct page *page = NULL;
 	struct nilfs_dir_entry *de;
 	unsigned long npages = dir_pages(dir);
 	unsigned long n;
 	char *kaddr;
-	unsigned from, to;
+	unsigned int from, to;
 	int err;
 
 	/*
@@ -533,13 +531,14 @@
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = mapping->host;
 	char *kaddr = page_address(page);
-	unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
-	unsigned to = ((char *)dir - kaddr) +
-		nilfs_rec_len_from_disk(dir->rec_len);
-	struct nilfs_dir_entry *pde = NULL;
-	struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from);
+	unsigned int from, to;
+	struct nilfs_dir_entry *de, *pde = NULL;
 	int err;
 
+	from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
+	to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len);
+	de = (struct nilfs_dir_entry *)(kaddr + from);
+
 	while ((char *)de < (char *)dir) {
 		if (de->rec_len == 0) {
 			nilfs_error(inode->i_sb, __func__,
@@ -572,7 +571,7 @@
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page = grab_cache_page(mapping, 0);
-	unsigned chunk_size = nilfs_chunk_size(inode);
+	unsigned int chunk_size = nilfs_chunk_size(inode);
 	struct nilfs_dir_entry *de;
 	int err;
 	void *kaddr;
@@ -630,8 +629,8 @@
 		while ((char *)de <= kaddr) {
 			if (de->rec_len == 0) {
 				nilfs_error(inode->i_sb, __func__,
-					    "zero-length directory entry "
-					    "(kaddr=%p, de=%p)\n", kaddr, de);
+					    "zero-length directory entry (kaddr=%p, de=%p)",
+					    kaddr, de);
 				goto not_empty;
 			}
 			if (de->inode != 0) {
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index ebf89fd..251a4492 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #include <linux/errno.h>
@@ -62,7 +58,7 @@
 
 static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
 				      __u64 key, __u64 *ptrp,
-				      unsigned maxblocks)
+				      unsigned int maxblocks)
 {
 	struct inode *dat = NULL;
 	__u64 ptr, ptr2;
@@ -83,7 +79,8 @@
 		ptr = blocknr;
 	}
 
-	maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1);
+	maxblocks = min_t(unsigned int, maxblocks,
+			  NILFS_DIRECT_KEY_MAX - key + 1);
 	for (cnt = 1; cnt < maxblocks &&
 		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
 		     NILFS_BMAP_INVALID_PTR;
@@ -110,9 +107,9 @@
 	if (ptr != NILFS_BMAP_INVALID_PTR)
 		/* sequential access */
 		return ptr;
-	else
-		/* block group */
-		return nilfs_bmap_find_target_in_group(direct);
+
+	/* block group */
+	return nilfs_bmap_find_target_in_group(direct);
 }
 
 static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h
index dc643de..3015a6e 100644
--- a/fs/nilfs2/direct.h
+++ b/fs/nilfs2/direct.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #ifndef _NILFS_DIRECT_H
diff --git a/fs/nilfs2/export.h b/fs/nilfs2/export.h
index 19ccbf9..00107fd 100644
--- a/fs/nilfs2/export.h
+++ b/fs/nilfs2/export.h
@@ -20,6 +20,6 @@
 
 	u32 parent_gen;
 	u64 parent_ino;
-} __attribute__ ((packed));
+} __packed;
 
 #endif
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 088ba00..547381f 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -13,12 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Amagai Yoshiji <amagai@osrg.net>,
- *            Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Amagai Yoshiji and Ryusuke Konishi.
  */
 
 #include <linux/fs.h>
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 0224b78..693aded 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -13,13 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>,
- *            and Ryusuke Konishi <ryusuke@osrg.net>.
- * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
+ * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
+ * Revised by Ryusuke Konishi.
  *
  */
 /*
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 6548c78..1d2b180 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -13,12 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Amagai Yoshiji <amagai@osrg.net>.
- * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
+ * Written by Amagai Yoshiji.
+ * Revised by Ryusuke Konishi.
  *
  */
 
@@ -68,8 +64,10 @@
 	struct nilfs_palloc_req req;
 	int ret;
 
-	req.pr_entry_nr = 0;  /* 0 says find free inode from beginning of
-				 a group. dull code!! */
+	req.pr_entry_nr = 0;  /*
+			       * 0 says find free inode from beginning
+			       * of a group. dull code!!
+			       */
 	req.pr_entry_bh = NULL;
 
 	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index 679674d..23ad2f0 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -13,12 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Amagai Yoshiji <amagai@osrg.net>
- * Revised by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Amagai Yoshiji.
+ * Revised by Ryusuke Konishi.
  *
  */
 
@@ -36,6 +32,7 @@
 nilfs_ifile_map_inode(struct inode *ifile, ino_t ino, struct buffer_head *ibh)
 {
 	void *kaddr = kmap(ibh->b_page);
+
 	return nilfs_palloc_block_get_entry(ifile, ino, ibh, kaddr);
 }
 
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index cfebcd2..a0ebdb1 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 
@@ -87,7 +83,7 @@
 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 	__u64 blknum = 0;
 	int err = 0, ret;
-	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
+	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
 
 	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
@@ -133,11 +129,14 @@
 		/* Error handling should be detailed */
 		set_buffer_new(bh_result);
 		set_buffer_delay(bh_result);
-		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
-						      to proper value */
+		map_bh(bh_result, inode->i_sb, 0);
+		/* Disk block number must be changed to a proper value */
+
 	} else if (ret == -ENOENT) {
-		/* not found is not error (e.g. hole); must return without
-		   the mapped state flag. */
+		/*
+		 * not found is not error (e.g. hole); must return without
+		 * the mapped state flag.
+		 */
 		;
 	} else {
 		err = ret;
@@ -167,7 +166,7 @@
  * @nr_pages - number of pages to be read
  */
 static int nilfs_readpages(struct file *file, struct address_space *mapping,
-			   struct list_head *pages, unsigned nr_pages)
+			   struct list_head *pages, unsigned int nr_pages)
 {
 	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
 }
@@ -226,7 +225,7 @@
 	int ret = __set_page_dirty_nobuffers(page);
 
 	if (page_has_buffers(page)) {
-		unsigned nr_dirty = 0;
+		unsigned int nr_dirty = 0;
 		struct buffer_head *bh, *head;
 
 		/*
@@ -249,7 +248,7 @@
 		if (nr_dirty)
 			nilfs_set_file_dirty(inode, nr_dirty);
 	} else if (ret) {
-		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
+		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 
 		nilfs_set_file_dirty(inode, nr_dirty);
 	}
@@ -291,8 +290,8 @@
 			   struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
-	unsigned start = pos & (PAGE_SIZE - 1);
-	unsigned nr_dirty;
+	unsigned int start = pos & (PAGE_SIZE - 1);
+	unsigned int nr_dirty;
 	int err;
 
 	nr_dirty = nilfs_page_count_clean_buffers(page, start,
@@ -399,23 +398,26 @@
 
 	err = nilfs_init_acl(inode, dir);
 	if (unlikely(err))
-		goto failed_after_creation; /* never occur. When supporting
-				    nilfs_init_acl(), proper cancellation of
-				    above jobs should be considered */
+		/*
+		 * This never occurs.  When supporting nilfs_init_acl(),
+		 * proper cancellation of the above jobs should be considered.
+		 */
+		goto failed_after_creation;
 
 	return inode;
 
  failed_after_creation:
 	clear_nlink(inode);
 	unlock_new_inode(inode);
-	iput(inode);  /* raw_inode will be deleted through
-			 nilfs_evict_inode() */
+	iput(inode);  /*
+		       * raw_inode will be deleted through
+		       * nilfs_evict_inode().
+		       */
 	goto failed;
 
  failed_ifile_create_inode:
 	make_bad_inode(inode);
-	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
-			 called */
+	iput(inode);
  failed:
 	return ERR_PTR(err);
 }
@@ -666,8 +668,10 @@
 	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 		raw_inode->i_device_code =
 			cpu_to_le64(huge_encode_dev(inode->i_rdev));
-	/* When extending inode, nilfs->ns_inode_size should be checked
-	   for substitutions of appended fields */
+	/*
+	 * When extending inode, nilfs->ns_inode_size should be checked
+	 * for substitutions of appended fields.
+	 */
 }
 
 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
@@ -685,9 +689,12 @@
 		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 
 	nilfs_write_inode_common(inode, raw_inode, 0);
-		/* XXX: call with has_bmap = 0 is a workaround to avoid
-		   deadlock of bmap. This delays update of i_bmap to just
-		   before writing */
+		/*
+		 * XXX: call with has_bmap = 0 is a workaround to avoid
+		 * deadlock of bmap.  This delays update of i_bmap to just
+		 * before writing.
+		 */
+
 	nilfs_ifile_unmap_inode(ifile, ino, ibh);
 }
 
@@ -752,14 +759,15 @@
 	nilfs_mark_inode_dirty(inode);
 	nilfs_set_file_dirty(inode, 0);
 	nilfs_transaction_commit(sb);
-	/* May construct a logical segment and may fail in sync mode.
-	   But truncate has no return value. */
+	/*
+	 * May construct a logical segment and may fail in sync mode.
+	 * But truncate has no return value.
+	 */
 }
 
 static void nilfs_clear_inode(struct inode *inode)
 {
 	struct nilfs_inode_info *ii = NILFS_I(inode);
-	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
 	/*
 	 * Free resources allocated in nilfs_read_inode(), here.
@@ -768,8 +776,8 @@
 	brelse(ii->i_bh);
 	ii->i_bh = NULL;
 
-	if (mdi && mdi->mi_palloc_cache)
-		nilfs_palloc_destroy_cache(inode);
+	if (nilfs_is_metadata_file_inode(inode))
+		nilfs_mdt_clear(inode);
 
 	if (test_bit(NILFS_I_BMAP, &ii->i_state))
 		nilfs_bmap_clear(ii->i_bmap);
@@ -811,8 +819,10 @@
 	if (IS_SYNC(inode))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 	nilfs_transaction_commit(sb);
-	/* May construct a logical segment and may fail in sync mode.
-	   But delete_inode has no return value. */
+	/*
+	 * May construct a logical segment and may fail in sync mode.
+	 * But delete_inode has no return value.
+	 */
 }
 
 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
@@ -856,6 +866,7 @@
 int nilfs_permission(struct inode *inode, int mask)
 {
 	struct nilfs_root *root = NILFS_I(inode)->i_root;
+
 	if ((mask & MAY_WRITE) && root &&
 	    root->cno != NILFS_CPTREE_CURRENT_CNO)
 		return -EROFS; /* snapshot is not writable */
@@ -906,7 +917,7 @@
 	return ret;
 }
 
-int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
+int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 {
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
@@ -919,17 +930,23 @@
 	spin_lock(&nilfs->ns_inode_lock);
 	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
-		/* Because this routine may race with nilfs_dispose_list(),
-		   we have to check NILFS_I_QUEUED here, too. */
+		/*
+		 * Because this routine may race with nilfs_dispose_list(),
+		 * we have to check NILFS_I_QUEUED here, too.
+		 */
 		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
-			/* This will happen when somebody is freeing
-			   this inode. */
+			/*
+			 * This will happen when somebody is freeing
+			 * this inode.
+			 */
 			nilfs_warning(inode->i_sb, __func__,
-				      "cannot get inode (ino=%lu)\n",
+				      "cannot get inode (ino=%lu)",
 				      inode->i_ino);
 			spin_unlock(&nilfs->ns_inode_lock);
-			return -EINVAL; /* NILFS_I_DIRTY may remain for
-					   freeing inode */
+			return -EINVAL; /*
+					 * NILFS_I_DIRTY may remain for
+					 * freeing inode.
+					 */
 		}
 		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 		set_bit(NILFS_I_QUEUED, &ii->i_state);
@@ -946,7 +963,7 @@
 	err = nilfs_load_inode_block(inode, &ibh);
 	if (unlikely(err)) {
 		nilfs_warning(inode->i_sb, __func__,
-			      "failed to reget inode block.\n");
+			      "failed to reget inode block.");
 		return err;
 	}
 	nilfs_update_inode(inode, ibh, flags);
@@ -973,7 +990,7 @@
 
 	if (is_bad_inode(inode)) {
 		nilfs_warning(inode->i_sb, __func__,
-			      "tried to mark bad_inode dirty. ignored.\n");
+			      "tried to mark bad_inode dirty. ignored.");
 		dump_stack();
 		return;
 	}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index e8fe248..358b57e 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #include <linux/fs.h>
@@ -783,6 +779,7 @@
 	size_t nmembs = argv->v_nmembs;
 	struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
 	struct nilfs_bdesc *bdescs = buf;
+	struct buffer_head *bh;
 	int ret, i;
 
 	for (i = 0; i < nmembs; i++) {
@@ -800,12 +797,16 @@
 			/* skip dead block */
 			continue;
 		if (bdescs[i].bd_level == 0) {
-			ret = nilfs_mdt_mark_block_dirty(nilfs->ns_dat,
-							 bdescs[i].bd_offset);
-			if (ret < 0) {
+			ret = nilfs_mdt_get_block(nilfs->ns_dat,
+						  bdescs[i].bd_offset,
+						  false, NULL, &bh);
+			if (unlikely(ret)) {
 				WARN_ON(ret == -ENOENT);
 				return ret;
 			}
+			mark_buffer_dirty(bh);
+			nilfs_mdt_mark_dirty(nilfs->ns_dat);
+			put_bh(bh);
 		} else {
 			ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset,
 					      bdescs[i].bd_level);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index f6982b9..3417d85 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  */
 
 #include <linux/buffer_head.h>
@@ -32,6 +28,7 @@
 #include "segment.h"
 #include "page.h"
 #include "mdt.h"
+#include "alloc.h"		/* nilfs_palloc_destroy_cache() */
 
 #include <trace/events/nilfs2.h>
 
@@ -393,34 +390,6 @@
 	return ret;
 }
 
-/**
- * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
- * @inode: inode of the meta data file
- * @block: block offset
- *
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - the specified block does not exist (hole block)
- */
-int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
-{
-	struct buffer_head *bh;
-	int err;
-
-	err = nilfs_mdt_read_block(inode, block, 0, &bh);
-	if (unlikely(err))
-		return err;
-	mark_buffer_dirty(bh);
-	nilfs_mdt_mark_dirty(inode);
-	brelse(bh);
-	return 0;
-}
-
 int nilfs_mdt_fetch_dirty(struct inode *inode)
 {
 	struct nilfs_inode_info *ii = NILFS_I(inode);
@@ -497,8 +466,32 @@
 	return 0;
 }
 
-void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
-			      unsigned header_size)
+/**
+ * nilfs_mdt_clear - do cleanup for the metadata file
+ * @inode: inode of the metadata file
+ */
+void nilfs_mdt_clear(struct inode *inode)
+{
+	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+
+	if (mdi->mi_palloc_cache)
+		nilfs_palloc_destroy_cache(inode);
+}
+
+/**
+ * nilfs_mdt_destroy - release resources used by the metadata file
+ * @inode: inode of the metadata file
+ */
+void nilfs_mdt_destroy(struct inode *inode)
+{
+	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+
+	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
+	kfree(mdi);
+}
+
+void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
+			      unsigned int header_size)
 {
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
 
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 03246ca..3f67f39 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  */
 
 #ifndef _NILFS_MDT_H
@@ -57,8 +53,8 @@
 struct nilfs_mdt_info {
 	struct rw_semaphore	mi_sem;
 	struct blockgroup_lock *mi_bgl;
-	unsigned		mi_entry_size;
-	unsigned		mi_first_entry_offset;
+	unsigned int		mi_entry_size;
+	unsigned int		mi_first_entry_offset;
 	unsigned long		mi_entries_per_block;
 	struct nilfs_palloc_cache *mi_palloc_cache;
 	struct nilfs_shadow_map *mi_shadow;
@@ -71,6 +67,11 @@
 	return inode->i_private;
 }
 
+static inline int nilfs_is_metadata_file_inode(const struct inode *inode)
+{
+	return inode->i_private != NULL;
+}
+
 /* Default GFP flags using highmem */
 #define NILFS_MDT_GFP      (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
 
@@ -83,11 +84,13 @@
 			 struct buffer_head **out_bh);
 int nilfs_mdt_delete_block(struct inode *, unsigned long);
 int nilfs_mdt_forget_block(struct inode *, unsigned long);
-int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
 int nilfs_mdt_fetch_dirty(struct inode *);
 
 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
-void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
+void nilfs_mdt_clear(struct inode *inode);
+void nilfs_mdt_destroy(struct inode *inode);
+
+void nilfs_mdt_set_entry_size(struct inode *, unsigned int, unsigned int);
 
 int nilfs_mdt_setup_shadow_map(struct inode *inode,
 			       struct nilfs_shadow_map *shadow);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 3b2af05..1ec8ae5 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -13,12 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net>,
- *                       Ryusuke Konishi <ryusuke@osrg.net>
+ * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
  */
 /*
  *  linux/fs/ext2/namei.c
@@ -49,6 +44,7 @@
 static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
 {
 	int err = nilfs_add_link(dentry, inode);
+
 	if (!err) {
 		d_instantiate(dentry, inode);
 		unlock_new_inode(inode);
@@ -143,7 +139,7 @@
 {
 	struct nilfs_transaction_info ti;
 	struct super_block *sb = dir->i_sb;
-	unsigned l = strlen(symname)+1;
+	unsigned int l = strlen(symname) + 1;
 	struct inode *inode;
 	int err;
 
@@ -288,7 +284,7 @@
 
 	if (!inode->i_nlink) {
 		nilfs_warning(inode->i_sb, __func__,
-			      "deleting nonexistent file (%lu), %d\n",
+			      "deleting nonexistent file (%lu), %d",
 			      inode->i_ino, inode->i_nlink);
 		set_nlink(inode, 1);
 	}
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 3857040..b1d48bc 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -13,12 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>
- *            Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Koji Sato and Ryusuke Konishi.
  */
 
 #ifndef _NILFS_H
@@ -69,8 +64,10 @@
 	 */
 	struct rw_semaphore xattr_sem;
 #endif
-	struct buffer_head *i_bh;	/* i_bh contains a new or dirty
-					   disk inode */
+	struct buffer_head *i_bh;	/*
+					 * i_bh contains a new or dirty
+					 * disk inode.
+					 */
 	struct nilfs_root *i_root;
 	struct inode vfs_inode;
 };
@@ -100,8 +97,10 @@
 	NILFS_I_NEW = 0,		/* Inode is newly created */
 	NILFS_I_DIRTY,			/* The file is dirty */
 	NILFS_I_QUEUED,			/* inode is in dirty_files list */
-	NILFS_I_BUSY,			/* inode is grabbed by a segment
-					   constructor */
+	NILFS_I_BUSY,			/*
+					 * Inode is grabbed by a segment
+					 * constructor
+					 */
 	NILFS_I_COLLECTED,		/* All dirty blocks are collected */
 	NILFS_I_UPDATED,		/* The file has been written back */
 	NILFS_I_INODE_SYNC,		/* dsync is not allowed for inode */
@@ -145,8 +144,10 @@
 struct nilfs_transaction_info {
 	u32			ti_magic;
 	void		       *ti_save;
-				/* This should never used. If this happens,
-				   one of other filesystems has a bug. */
+				/*
+				 * This should never be used.  If it happens,
+				 * one of the other filesystems has a bug.
+				 */
 	unsigned short		ti_flags;
 	unsigned short		ti_count;
 };
@@ -156,8 +157,10 @@
 
 /* ti_flags */
 #define NILFS_TI_DYNAMIC_ALLOC	0x0001  /* Allocated from slab */
-#define NILFS_TI_SYNC		0x0002	/* Force to construct segment at the
-					   end of transaction. */
+#define NILFS_TI_SYNC		0x0002	/*
+					 * Force construction of a segment at
+					 * the end of the transaction.
+					 */
 #define NILFS_TI_GC		0x0004	/* GC context */
 #define NILFS_TI_COMMIT		0x0008	/* Change happened or not */
 #define NILFS_TI_WRITER		0x0010	/* Constructor context */
@@ -279,7 +282,7 @@
 int nilfs_permission(struct inode *inode, int mask);
 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh);
 extern int nilfs_inode_dirty(struct inode *);
-int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty);
+int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty);
 extern int __nilfs_mark_inode_dirty(struct inode *, int);
 extern void nilfs_dirty_inode(struct inode *, int flags);
 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 4893915..d97ba5f 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -13,12 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>,
- *            Seiji Kihara <kihara@osrg.net>.
+ * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
 #include <linux/pagemap.h>
@@ -440,12 +435,12 @@
 	__nilfs_clear_page_dirty(page);
 }
 
-unsigned nilfs_page_count_clean_buffers(struct page *page,
-					unsigned from, unsigned to)
+unsigned int nilfs_page_count_clean_buffers(struct page *page,
+					    unsigned int from, unsigned int to)
 {
-	unsigned block_start, block_end;
+	unsigned int block_start, block_end;
 	struct buffer_head *bh, *head;
-	unsigned nc = 0;
+	unsigned int nc = 0;
 
 	for (bh = head = page_buffers(page), block_start = 0;
 	     bh != head || !block_start;
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index a43b8287..f3687c9 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -13,12 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>,
- *            Seiji Kihara <kihara@osrg.net>.
+ * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
 #ifndef _NILFS_PAGE_H
@@ -58,7 +53,8 @@
 void nilfs_clear_dirty_page(struct page *, bool);
 void nilfs_clear_dirty_pages(struct address_space *, bool);
 void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
-unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
+unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
+					    unsigned int);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
 					    sector_t start_blk,
 					    sector_t *blkoff);
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 5afa77f..d893dc9 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  */
 
 #include <linux/buffer_head.h>
@@ -47,8 +43,10 @@
 
 /* work structure for recovery */
 struct nilfs_recovery_block {
-	ino_t ino;		/* Inode number of the file that this block
-				   belongs to */
+	ino_t ino;		/*
+				 * Inode number of the file that this block
+				 * belongs to
+				 */
 	sector_t blocknr;	/* block number */
 	__u64 vblocknr;		/* virtual block number */
 	unsigned long blkoff;	/* File offset of the data block (per block) */
@@ -156,7 +154,7 @@
 
 	sr = (struct nilfs_super_root *)bh_sr->b_data;
 	if (check) {
-		unsigned bytes = le16_to_cpu(sr->sr_bytes);
+		unsigned int bytes = le16_to_cpu(sr->sr_bytes);
 
 		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
 			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
@@ -508,7 +506,7 @@
 {
 	struct inode *inode;
 	struct nilfs_recovery_block *rb, *n;
-	unsigned blocksize = nilfs->ns_blocksize;
+	unsigned int blocksize = nilfs->ns_blocksize;
 	struct page *page;
 	loff_t pos;
 	int err = 0, err2 = 0;
@@ -526,6 +524,7 @@
 					0, &page, nilfs_get_block);
 		if (unlikely(err)) {
 			loff_t isize = inode->i_size;
+
 			if (pos + blocksize > isize)
 				nilfs_write_failed(inode->i_mapping,
 							pos + blocksize);
@@ -872,9 +871,11 @@
 
 		flags = le16_to_cpu(sum->ss_flags);
 		if (!(flags & NILFS_SS_SR) && !scan_newer) {
-			/* This will never happen because a superblock
-			   (last_segment) always points to a pseg
-			   having a super root. */
+			/*
+			 * This will never happen because a superblock
+			 * (last_segment) always points to a pseg with
+			 * a super root.
+			 */
 			ret = NILFS_SEG_FAIL_CONSISTENCY;
 			goto failed;
 		}
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index f63620c..bf36df1 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 
@@ -133,7 +129,7 @@
 	return 0;
 }
 
-int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
+int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags,
 		       time_t ctime, __u64 cno)
 {
 	int err;
@@ -240,7 +236,7 @@
 {
 	struct nilfs_super_root *raw_sr;
 	struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
-	unsigned srsize;
+	unsigned int srsize;
 	u32 crc;
 
 	raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index b04f08c..7bbccc0 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 #ifndef _NILFS_SEGBUF_H
@@ -82,7 +78,7 @@
 	__u64			sb_nextnum;
 	sector_t		sb_fseg_start, sb_fseg_end;
 	sector_t		sb_pseg_start;
-	unsigned		sb_rest_blocks;
+	unsigned int		sb_rest_blocks;
 
 	/* Buffers */
 	struct list_head	sb_segsum_buffers;
@@ -124,7 +120,8 @@
 			   struct nilfs_segment_buffer *prev);
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
 				  struct the_nilfs *);
-int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t, __u64);
+int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned int, time_t,
+		       __u64);
 int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *);
 int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *,
 				struct buffer_head **);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 4317f72..e78b68a8 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 
@@ -49,18 +45,26 @@
  */
 #define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */
 
-#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
-				appended in collection retry loop */
+#define SC_MAX_SEGDELTA 64   /*
+			      * Upper limit of the number of segments
+			      * appended in collection retry loop
+			      */
 
 /* Construction mode */
 enum {
 	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
-	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
-			   a logical segment without a super root */
-	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
-			   creating a checkpoint */
-	SC_FLUSH_DAT,	/* Flush DAT file. This also creates segments without
-			   a checkpoint */
+	SC_LSEG_DSYNC,	/*
+			 * Flush data blocks of a given file and make
+			 * a logical segment without a super root.
+			 */
+	SC_FLUSH_FILE,	/*
+			 * Flush data files, leads to segment writes without
+			 * creating a checkpoint.
+			 */
+	SC_FLUSH_DAT,	/*
+			 * Flush DAT file.  This also creates segments
+			 * without a checkpoint.
+			 */
 };
 
 /* Stage numbers of dirty block collection */
@@ -154,17 +158,15 @@
 	if (cur_ti) {
 		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
 			return ++cur_ti->ti_count;
-		else {
-			/*
-			 * If journal_info field is occupied by other FS,
-			 * it is saved and will be restored on
-			 * nilfs_transaction_commit().
-			 */
-			printk(KERN_WARNING
-			       "NILFS warning: journal info from a different "
-			       "FS\n");
-			save = current->journal_info;
-		}
+
+		/*
+		 * If journal_info field is occupied by other FS,
+		 * it is saved and will be restored on
+		 * nilfs_transaction_commit().
+		 */
+		printk(KERN_WARNING
+		       "NILFS warning: journal info from a different FS\n");
+		save = current->journal_info;
 	}
 	if (!ti) {
 		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
@@ -397,10 +399,10 @@
 
 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
 					    struct nilfs_segsum_pointer *ssp,
-					    unsigned bytes)
+					    unsigned int bytes)
 {
 	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
-	unsigned blocksize = sci->sc_super->s_blocksize;
+	unsigned int blocksize = sci->sc_super->s_blocksize;
 	void *p;
 
 	if (unlikely(ssp->offset + bytes > blocksize)) {
@@ -422,8 +424,8 @@
 {
 	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
 	struct buffer_head *sumbh;
-	unsigned sumbytes;
-	unsigned flags = 0;
+	unsigned int sumbytes;
+	unsigned int flags = 0;
 	int err;
 
 	if (nilfs_doing_gc())
@@ -444,8 +446,10 @@
 {
 	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
 	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
-		return -E2BIG; /* The current segment is filled up
-				  (internal code) */
+		return -E2BIG; /*
+				* The current segment is filled up
+				* (internal code)
+				*/
 	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
 	return nilfs_segctor_reset_segment_buffer(sci);
 }
@@ -472,9 +476,9 @@
  */
 static int nilfs_segctor_segsum_block_required(
 	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
-	unsigned binfo_size)
+	unsigned int binfo_size)
 {
-	unsigned blocksize = sci->sc_super->s_blocksize;
+	unsigned int blocksize = sci->sc_super->s_blocksize;
 	/* Size of finfo and binfo is enough small against blocksize */
 
 	return ssp->offset + binfo_size +
@@ -533,7 +537,7 @@
 static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
 					struct buffer_head *bh,
 					struct inode *inode,
-					unsigned binfo_size)
+					unsigned int binfo_size)
 {
 	struct nilfs_segment_buffer *segbuf;
 	int required, err = 0;
@@ -617,7 +621,7 @@
 	*vblocknr = binfo->bi_v.bi_vblocknr;
 }
 
-static struct nilfs_sc_operations nilfs_sc_file_ops = {
+static const struct nilfs_sc_operations nilfs_sc_file_ops = {
 	.collect_data = nilfs_collect_file_data,
 	.collect_node = nilfs_collect_file_node,
 	.collect_bmap = nilfs_collect_file_bmap,
@@ -666,7 +670,7 @@
 	*binfo_dat = binfo->bi_dat;
 }
 
-static struct nilfs_sc_operations nilfs_sc_dat_ops = {
+static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
 	.collect_data = nilfs_collect_dat_data,
 	.collect_node = nilfs_collect_file_node,
 	.collect_bmap = nilfs_collect_dat_bmap,
@@ -674,7 +678,7 @@
 	.write_node_binfo = nilfs_write_dat_node_binfo,
 };
 
-static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
+static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
 	.collect_data = nilfs_collect_file_data,
 	.collect_node = NULL,
 	.collect_bmap = NULL,
@@ -777,7 +781,7 @@
 {
 	struct nilfs_inode_info *ii, *n;
 	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
-	unsigned nv = 0;
+	unsigned int nv = 0;
 
 	while (!list_empty(head)) {
 		spin_lock(&nilfs->ns_inode_lock);
@@ -875,9 +879,11 @@
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
 					  &raw_cp, &bh_cp);
 	if (likely(!err)) {
-		/* The following code is duplicated with cpfile.  But, it is
-		   needed to collect the checkpoint even if it was not newly
-		   created */
+		/*
+		 * The following code is duplicated with cpfile.  But, it is
+		 * needed to collect the checkpoint even if it was not newly
+		 * created.
+		 */
 		mark_buffer_dirty(bh_cp);
 		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
 		nilfs_cpfile_put_checkpoint(
@@ -958,7 +964,7 @@
 {
 	struct buffer_head *bh_sr;
 	struct nilfs_super_root *raw_sr;
-	unsigned isz, srsz;
+	unsigned int isz, srsz;
 
 	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
 	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
@@ -1043,7 +1049,7 @@
 
 static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
 				   struct inode *inode,
-				   struct nilfs_sc_operations *sc_ops)
+				   const struct nilfs_sc_operations *sc_ops)
 {
 	LIST_HEAD(data_buffers);
 	LIST_HEAD(node_buffers);
@@ -1406,8 +1412,10 @@
 	if (atomic_read(&segbuf->sb_err)) {
 		/* Case 1: The first segment failed */
 		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
-			/* Case 1a:  Partial segment appended into an existing
-			   segment */
+			/*
+			 * Case 1a:  Partial segment appended into an existing
+			 * segment
+			 */
 			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
 						segbuf->sb_fseg_end);
 		else /* Case 1b:  New full segment */
@@ -1550,7 +1558,7 @@
 	sector_t blocknr;
 	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
 	unsigned long nblocks = 0, ndatablk = 0;
-	struct nilfs_sc_operations *sc_op = NULL;
+	const struct nilfs_sc_operations *sc_op = NULL;
 	struct nilfs_segsum_pointer ssp;
 	struct nilfs_finfo *finfo = NULL;
 	union nilfs_binfo binfo;
@@ -1631,8 +1639,10 @@
 static void nilfs_begin_page_io(struct page *page)
 {
 	if (!page || PageWriteback(page))
-		/* For split b-tree node pages, this function may be called
-		   twice.  We ignore the 2nd or later calls by this check. */
+		/*
+		 * For split b-tree node pages, this function may be called
+		 * twice.  We ignore the 2nd or later calls by this check.
+		 */
 		return;
 
 	lock_page(page);
@@ -1942,7 +1952,7 @@
 				ifile, ii->vfs_inode.i_ino, &ibh);
 			if (unlikely(err)) {
 				nilfs_warning(sci->sc_super, __func__,
-					      "failed to get inode block.\n");
+					      "failed to get inode block.");
 				return err;
 			}
 			mark_buffer_dirty(ibh);
@@ -2395,6 +2405,7 @@
 static void nilfs_construction_timeout(unsigned long data)
 {
 	struct task_struct *p = (struct task_struct *)data;
+
 	wake_up_process(p);
 }
 
@@ -2555,10 +2566,10 @@
 
 		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
 			mode = SC_LSEG_SR;
-		else if (!sci->sc_flush_request)
-			break;
-		else
+		else if (sci->sc_flush_request)
 			mode = nilfs_segctor_flush_mode(sci);
+		else
+			break;
 
 		spin_unlock(&sci->sc_state_lock);
 		nilfs_segctor_thread_construct(sci, mode);
@@ -2684,8 +2695,10 @@
 {
 	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
 
-	/* The segctord thread was stopped and its timer was removed.
-	   But some tasks remain. */
+	/*
+	 * The segctord thread was stopped and its timer was removed.
+	 * But some tasks remain.
+	 */
 	do {
 		struct nilfs_transaction_info ti;
 
@@ -2727,13 +2740,13 @@
 
 	if (!list_empty(&sci->sc_dirty_files)) {
 		nilfs_warning(sci->sc_super, __func__,
-			      "dirty file(s) after the final construction\n");
+			      "dirty file(s) after the final construction");
 		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
 	}
 
 	if (!list_empty(&sci->sc_iput_queue)) {
 		nilfs_warning(sci->sc_super, __func__,
-			      "iput queue is not empty\n");
+			      "iput queue is not empty");
 		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
 	}
 
@@ -2810,7 +2823,7 @@
 	if (!list_empty(&nilfs->ns_dirty_files)) {
 		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
 		nilfs_warning(sb, __func__,
-			      "Hit dirty file after stopped log writer\n");
+			      "Hit dirty file after stopped log writer");
 	}
 	spin_unlock(&nilfs->ns_inode_lock);
 	up_write(&nilfs->ns_segctor_sem);
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 0408b9b..6565c10 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 #ifndef _NILFS_SEGMENT_H
@@ -75,7 +71,7 @@
  */
 struct nilfs_cstage {
 	int			scnt;
-	unsigned		flags;
+	unsigned int		flags;
 	struct nilfs_inode_info *dirty_file_ptr;
 	struct nilfs_inode_info *gc_inode_ptr;
 };
@@ -84,7 +80,7 @@
 
 struct nilfs_segsum_pointer {
 	struct buffer_head     *bh;
-	unsigned		offset; /* offset in bytes */
+	unsigned int		offset; /* offset in bytes */
 };
 
 /**
@@ -193,11 +189,15 @@
 	NILFS_SC_DIRTY,		/* One or more dirty meta-data blocks exist */
 	NILFS_SC_UNCLOSED,	/* Logical segment is not closed */
 	NILFS_SC_SUPER_ROOT,	/* The latest segment has a super root */
-	NILFS_SC_PRIOR_FLUSH,	/* Requesting immediate flush without making a
-				   checkpoint */
-	NILFS_SC_HAVE_DELTA,	/* Next checkpoint will have update of files
-				   other than DAT, cpfile, sufile, or files
-				   moved by GC */
+	NILFS_SC_PRIOR_FLUSH,	/*
+				 * Requesting immediate flush without making a
+				 * checkpoint
+				 */
+	NILFS_SC_HAVE_DELTA,	/*
+				 * Next checkpoint will have update of files
+				 * other than DAT, cpfile, sufile, or files
+				 * moved by GC.
+				 */
 };
 
 /* sc_state */
@@ -207,17 +207,23 @@
 /*
  * Constant parameters
  */
-#define NILFS_SC_CLEANUP_RETRY	    3  /* Retry count of construction when
-					  destroying segctord */
+#define NILFS_SC_CLEANUP_RETRY	    3  /*
+					* Retry count of construction when
+					* destroying segctord
+					*/
 
 /*
  * Default values of timeout, in seconds.
  */
-#define NILFS_SC_DEFAULT_TIMEOUT    5   /* Timeout value of dirty blocks.
-					   It triggers construction of a
-					   logical segment with a super root */
-#define NILFS_SC_DEFAULT_SR_FREQ    30  /* Maximum frequency of super root
-					   creation */
+#define NILFS_SC_DEFAULT_TIMEOUT    5   /*
+					 * Timeout value of dirty blocks.
+					 * It triggers construction of a
+					 * logical segment with a super root.
+					 */
+#define NILFS_SC_DEFAULT_SR_FREQ    30  /*
+					 * Maximum frequency of super root
+					 * creation
+					 */
 
 /*
  * The default threshold amount of data, in block counts.
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 52821ff..1963595 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -13,12 +13,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
- * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
+ * Written by Koji Sato.
+ * Revised by Ryusuke Konishi.
  */
 
 #include <linux/kernel.h>
@@ -61,6 +57,7 @@
 nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
 {
 	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
+
 	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
 	return (unsigned long)t;
 }
@@ -69,6 +66,7 @@
 nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
 {
 	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
+
 	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
 }
 
@@ -819,7 +817,7 @@
  * %-ENOMEM - Insufficient amount of memory available.
  */
 ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
-				unsigned sisz, size_t nsi)
+				unsigned int sisz, size_t nsi)
 {
 	struct buffer_head *su_bh;
 	struct nilfs_segment_usage *su;
@@ -897,7 +895,7 @@
  * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
  */
 ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
-				unsigned supsz, size_t nsup)
+				unsigned int supsz, size_t nsup)
 {
 	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
 	struct buffer_head *header_bh, *bh;
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index b8afd72..46e8987 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>.
+ * Written by Koji Sato.
  */
 
 #ifndef _NILFS_SUFILE_H
@@ -42,9 +38,9 @@
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 				   unsigned long nblocks, time_t modtime);
 int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
-ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned,
+ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned int,
 				size_t);
-ssize_t nilfs_sufile_set_suinfo(struct inode *, void *, unsigned , size_t);
+ssize_t nilfs_sufile_set_suinfo(struct inode *, void *, unsigned int, size_t);
 
 int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *,
 			 void (*dofunc)(struct inode *, __u64,
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 7f5d3d9..666107a 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  */
 /*
  *  linux/fs/ext2/super.c
@@ -173,12 +169,10 @@
 static void nilfs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
-	if (mdi) {
-		kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
-		kfree(mdi);
-	}
+	if (nilfs_is_metadata_file_inode(inode))
+		nilfs_mdt_destroy(inode);
+
 	kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
 }
 
@@ -279,7 +273,7 @@
 		}
 	} else if (sbp[1] &&
 		   sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
-			memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
 	}
 
 	if (flip && sbp[1])
@@ -749,6 +743,7 @@
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
+
 		if (!*p)
 			continue;
 
@@ -891,7 +886,7 @@
 	nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
 	nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);
 
-	return !parse_options(data, sb, 0) ? -EINVAL : 0 ;
+	return !parse_options(data, sb, 0) ? -EINVAL : 0;
 }
 
 int nilfs_check_feature_compatibility(struct super_block *sb,
@@ -1316,7 +1311,7 @@
 	}
 
 	if (!s->s_root) {
- 		s_new = true;
+		s_new = true;
 
 		/* New superblock instance created */
 		s->s_mode = mode;
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index bbb0dcc..8ffa42b 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -68,7 +68,7 @@
 static const struct sysfs_ops nilfs_##name##_attr_ops = { \
 	.show	= nilfs_##name##_attr_show, \
 	.store	= nilfs_##name##_attr_store, \
-};
+}
 
 #define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \
 static void nilfs_##name##_attr_release(struct kobject *kobj) \
@@ -84,7 +84,7 @@
 	.default_attrs	= nilfs_##name##_attrs, \
 	.sysfs_ops	= &nilfs_##name##_attr_ops, \
 	.release	= nilfs_##name##_attr_release, \
-};
+}
 
 #define NILFS_DEV_INT_GROUP_FNS(name, parent_name) \
 static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \
@@ -756,7 +756,7 @@
 				      struct the_nilfs *nilfs,
 				      char *buf)
 {
-	unsigned sbwcount;
+	unsigned int sbwcount;
 
 	down_read(&nilfs->ns_sem);
 	sbwcount = nilfs->ns_sbwcount;
@@ -770,7 +770,7 @@
 					    struct the_nilfs *nilfs,
 					    char *buf)
 {
-	unsigned sb_update_freq;
+	unsigned int sb_update_freq;
 
 	down_read(&nilfs->ns_sem);
 	sb_update_freq = nilfs->ns_sb_update_freq;
@@ -784,7 +784,7 @@
 					    struct the_nilfs *nilfs,
 					    const char *buf, size_t count)
 {
-	unsigned val;
+	unsigned int val;
 	int err;
 
 	err = kstrtouint(skip_spaces(buf), 0, &val);
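
As an illustration of the convention these sysfs attribute-group macros (and the matching ones in sysfs.h below) move to: the macro body now ends without a trailing semicolon, so the semicolon is supplied at the invocation site, exactly as it would be for a hand-written definition; with the semicolon inside the macro, every use produced a stray extra ';' that checkpatch flags. A minimal, self-contained sketch of that pattern, with hypothetical names rather than nilfs2 code:

#include <stdio.h>

/*
 * The macro body ends without a ';' so the invocation site supplies
 * it, just like a hand-written struct definition would.
 */
#define DEFINE_COUNTER_TYPE(name)	\
struct name##_counter {			\
	long value;			\
}					/* note: no trailing ';' here */

DEFINE_COUNTER_TYPE(demo);		/* the ';' comes from the caller */

int main(void)
{
	struct demo_counter c = { .value = 42 };

	printf("%ld\n", c.value);
	return 0;
}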
diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
index 677e3a1..648cedf 100644
--- a/fs/nilfs2/sysfs.h
+++ b/fs/nilfs2/sysfs.h
@@ -66,7 +66,7 @@
 			char *); \
 	ssize_t (*store)(struct kobject *, struct attribute *, \
 			 const char *, size_t); \
-};
+}
 
 NILFS_COMMON_ATTR_STRUCT(feature);
 
@@ -77,7 +77,7 @@
 			char *); \
 	ssize_t (*store)(struct nilfs_##name##_attr *, struct the_nilfs *, \
 			 const char *, size_t); \
-};
+}
 
 NILFS_DEV_ATTR_STRUCT(dev);
 NILFS_DEV_ATTR_STRUCT(segments);
@@ -93,7 +93,7 @@
 			char *); \
 	ssize_t (*store)(struct nilfs_##name##_attr *, struct nilfs_root *, \
 			 const char *, size_t); \
-};
+}
 
 NILFS_CP_ATTR_STRUCT(snapshot);
 
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 69bd801..809bd2d 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 
@@ -112,8 +108,8 @@
 	struct nilfs_super_root *raw_sr;
 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
 	struct nilfs_inode *rawi;
-	unsigned dat_entry_size, segment_usage_size, checkpoint_size;
-	unsigned inode_size;
+	unsigned int dat_entry_size, segment_usage_size, checkpoint_size;
+	unsigned int inode_size;
 	int err;
 
 	err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1);
@@ -621,8 +617,10 @@
 		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
 		if (err)
 			goto out;
-			/* not failed_sbh; sbh is released automatically
-			   when reloading fails. */
+			/*
+			 * Do not jump to failed_sbh; sbh is released automatically
+			 * when reloading fails.
+			 */
 	}
 	nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
 	nilfs->ns_blocksize = blocksize;
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 23778d3..79369fd 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -13,11 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Ryusuke Konishi.
  *
  */
 
@@ -118,10 +114,10 @@
 	struct buffer_head     *ns_sbh[2];
 	struct nilfs_super_block *ns_sbp[2];
 	time_t			ns_sbwtime;
-	unsigned		ns_sbwcount;
-	unsigned		ns_sbsize;
-	unsigned		ns_mount_state;
-	unsigned		ns_sb_update_freq;
+	unsigned int		ns_sbwcount;
+	unsigned int		ns_sbsize;
+	unsigned int		ns_mount_state;
+	unsigned int		ns_sb_update_freq;
 
 	/*
 	 * Following fields are dedicated to a writable FS-instance.
@@ -226,15 +222,14 @@
  * Mount option operations
  */
 #define nilfs_clear_opt(nilfs, opt)  \
-	do { (nilfs)->ns_mount_opt &= ~NILFS_MOUNT_##opt; } while (0)
+	((nilfs)->ns_mount_opt &= ~NILFS_MOUNT_##opt)
 #define nilfs_set_opt(nilfs, opt)  \
-	do { (nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt; } while (0)
+	((nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt)
 #define nilfs_test_opt(nilfs, opt) ((nilfs)->ns_mount_opt & NILFS_MOUNT_##opt)
 #define nilfs_write_opt(nilfs, mask, opt)				\
-	do { (nilfs)->ns_mount_opt =					\
+	((nilfs)->ns_mount_opt =					\
 		(((nilfs)->ns_mount_opt & ~NILFS_MOUNT_##mask) |	\
-		 NILFS_MOUNT_##opt);					\
-	} while (0)
+		 NILFS_MOUNT_##opt))					\
 
 /**
  * struct nilfs_root - nilfs root object
@@ -273,6 +268,7 @@
 static inline int nilfs_sb_need_update(struct the_nilfs *nilfs)
 {
 	u64 t = get_seconds();
+
 	return t < nilfs->ns_sbwtime ||
 		t > nilfs->ns_sbwtime + nilfs->ns_sb_update_freq;
 }
@@ -280,6 +276,7 @@
 static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
 {
 	int flip_bits = nilfs->ns_sbwcount & 0x0FL;
+
 	return (flip_bits != 0x08 && flip_bits != 0x0F);
 }
 
@@ -308,7 +305,7 @@
 
 static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
 {
-	unsigned valid_fs;
+	unsigned int valid_fs;
 
 	down_read(&nilfs->ns_sem);
 	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a8d15be..6aaf3e3 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -272,10 +272,21 @@
 	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
+	/* negotiate timer, used to negotiate extending hb timeout. */
+	struct delayed_work	hr_nego_timeout_work;
+	unsigned long		hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
 	/* Used during o2hb_check_slot to hold a copy of the block
 	 * being checked because we temporarily have to zero out the
 	 * crc field. */
 	struct o2hb_disk_heartbeat_block *hr_tmp_block;
+
+	/* Message key for negotiate timeout message. */
+	unsigned int		hr_key;
+	struct list_head	hr_handler_list;
+
+	/* last hb status, 0 for success, other value for error. */
+	int			hr_last_hb_status;
 };
 
 struct o2hb_bio_wait_ctxt {
@@ -284,6 +295,17 @@
 	int               wc_error;
 };
 
+#define O2HB_NEGO_TIMEOUT_MS (O2HB_MAX_WRITE_TIMEOUT_MS/2)
+
+enum {
+	O2HB_NEGO_TIMEOUT_MSG = 1,
+	O2HB_NEGO_APPROVE_MSG = 2,
+};
+
+struct o2hb_nego_msg {
+	u8 node_num;
+};
+
 static void o2hb_write_timeout(struct work_struct *work)
 {
 	int failed, quorum;
@@ -319,7 +341,7 @@
 	o2quo_disk_timeout();
 }
 
-static void o2hb_arm_write_timeout(struct o2hb_region *reg)
+static void o2hb_arm_timeout(struct o2hb_region *reg)
 {
 	/* Arm writeout only after thread reaches steady state */
 	if (atomic_read(&reg->hr_steady_iterations) != 0)
@@ -334,14 +356,132 @@
 		spin_unlock(&o2hb_live_lock);
 	}
 	cancel_delayed_work(&reg->hr_write_timeout_work);
-	reg->hr_last_timeout_start = jiffies;
 	schedule_delayed_work(&reg->hr_write_timeout_work,
 			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
+
+	cancel_delayed_work(&reg->hr_nego_timeout_work);
+	/* negotiate timeout must be less than write timeout. */
+	schedule_delayed_work(&reg->hr_nego_timeout_work,
+			      msecs_to_jiffies(O2HB_NEGO_TIMEOUT_MS));
+	memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
 }
 
-static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
+static void o2hb_disarm_timeout(struct o2hb_region *reg)
 {
 	cancel_delayed_work_sync(&reg->hr_write_timeout_work);
+	cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
+}
+
+static int o2hb_send_nego_msg(int key, int type, u8 target)
+{
+	struct o2hb_nego_msg msg;
+	int status, ret;
+
+	msg.node_num = o2nm_this_node();
+again:
+	ret = o2net_send_message(type, key, &msg, sizeof(msg),
+			target, &status);
+
+	if (ret == -EAGAIN || ret == -ENOMEM) {
+		msleep(100);
+		goto again;
+	}
+
+	return ret;
+}
+
+static void o2hb_nego_timeout(struct work_struct *work)
+{
+	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	int master_node, i, ret;
+	struct o2hb_region *reg;
+
+	reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
+	/* Don't negotiate the timeout if the last hb failed, since the io
+	 * itself most likely failed; let the write timeout fence this node.
+	 */
+	if (reg->hr_last_hb_status)
+		return;
+
+	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
+	/* lowest node as master node to make negotiate decision. */
+	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
+
+	if (master_node == o2nm_this_node()) {
+		if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
+			printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s).\n",
+				o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
+				config_item_name(&reg->hr_item), reg->hr_dev_name);
+			set_bit(master_node, reg->hr_nego_node_bitmap);
+		}
+		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
+				sizeof(reg->hr_nego_node_bitmap))) {
+			/* Not all live nodes have asked to extend the
+			 * timeout yet; re-check the bitmap every second.
+			 */
+			schedule_delayed_work(&reg->hr_nego_timeout_work,
+				msecs_to_jiffies(1000));
+
+			return;
+		}
+
+		printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%s) is down.\n",
+			config_item_name(&reg->hr_item), reg->hr_dev_name);
+		/* approve negotiate timeout request. */
+		o2hb_arm_timeout(reg);
+
+		i = -1;
+		while ((i = find_next_bit(live_node_bitmap,
+				O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
+			if (i == master_node)
+				continue;
+
+			mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);
+			ret = o2hb_send_nego_msg(reg->hr_key,
+					O2HB_NEGO_APPROVE_MSG, i);
+			if (ret)
+				mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",
+					i, ret);
+		}
+	} else {
+		/* negotiate timeout with master node. */
+		printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s), negotiate timeout with node %d.\n",
+			o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
+			reg->hr_dev_name, master_node);
+		ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
+				master_node);
+		if (ret)
+			mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",
+				master_node, ret);
+	}
+}
+
+static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
+				void **ret_data)
+{
+	struct o2hb_region *reg = data;
+	struct o2hb_nego_msg *nego_msg;
+
+	nego_msg = (struct o2hb_nego_msg *)msg->buf;
+	printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%s).\n",
+		nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_dev_name);
+	if (nego_msg->node_num < O2NM_MAX_NODES)
+		set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
+	else
+		mlog(ML_ERROR, "got nego timeout message from bad node.\n");
+
+	return 0;
+}
+
+static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
+				void **ret_data)
+{
+	struct o2hb_region *reg = data;
+
+	printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%s).\n",
+		config_item_name(&reg->hr_item), reg->hr_dev_name);
+	o2hb_arm_timeout(reg);
+	return 0;
 }
 
 static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
@@ -1032,7 +1172,8 @@
 	/* Skip disarming the timeout if own slot has stale/bad data */
 	if (own_slot_ok) {
 		o2hb_set_quorum_device(reg);
-		o2hb_arm_write_timeout(reg);
+		o2hb_arm_timeout(reg);
+		reg->hr_last_timeout_start = jiffies;
 	}
 
 bail:
@@ -1096,6 +1237,7 @@
 		before_hb = ktime_get_real();
 
 		ret = o2hb_do_disk_heartbeat(reg);
+		reg->hr_last_hb_status = ret;
 
 		after_hb = ktime_get_real();
 
@@ -1114,7 +1256,7 @@
 		}
 	}
 
-	o2hb_disarm_write_timeout(reg);
+	o2hb_disarm_timeout(reg);
 
 	/* unclean stop is only used in very bad situation */
 	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
@@ -1451,6 +1593,7 @@
 	list_del(&reg->hr_all_item);
 	spin_unlock(&o2hb_live_lock);
 
+	o2net_unregister_handler_list(&reg->hr_handler_list);
 	kfree(reg);
 }
 
@@ -1762,6 +1905,7 @@
 	}
 
 	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
+	INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
@@ -1995,13 +2139,37 @@
 
 	config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
 
+	/* This is the same way the dlm generates its message key.  For local
+	 * heartbeat the name is also the same, so use a different initial
+	 * crc value to avoid a message key conflict.
+	 */
+	reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS,
+		name, strlen(name));
+	INIT_LIST_HEAD(&reg->hr_handler_list);
+	ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key,
+			sizeof(struct o2hb_nego_msg),
+			o2hb_nego_timeout_handler,
+			reg, NULL, &reg->hr_handler_list);
+	if (ret)
+		goto free;
+
+	ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
+			sizeof(struct o2hb_nego_msg),
+			o2hb_nego_approve_handler,
+			reg, NULL, &reg->hr_handler_list);
+	if (ret)
+		goto unregister_handler;
+
 	ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
 	if (ret) {
 		config_item_put(&reg->hr_item);
-		goto free;
+		goto unregister_handler;
 	}
 
 	return &reg->hr_item;
+
+unregister_handler:
+	o2net_unregister_handler_list(&reg->hr_handler_list);
 free:
 	kfree(reg);
 	return ERR_PTR(ret);
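
Taken together, the heartbeat hunks above implement a small negotiation protocol: a node whose heartbeat write hangs asks the lowest-numbered live node (the master) to extend the fencing timeout, the master collects those requests in hr_nego_node_bitmap, and only when every live node has reported a hung write does it conclude the shared storage itself is unreachable, re-arm the timers and broadcast O2HB_NEGO_APPROVE_MSG. A minimal userspace model of just that decision logic, assuming at most 64 nodes and using hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* The lowest set bit of the live-node bitmap picks the master node. */
static int lowest_live_node(uint64_t live)
{
	int i;

	for (i = 0; i < 64; i++)
		if (live & ((uint64_t)1 << i))
			return i;
	return -1;
}

/* A node whose heartbeat write hung asks for a timeout extension. */
static void record_nego_request(uint64_t *nego, int node)
{
	*nego |= (uint64_t)1 << node;
}

/* Approve only once every live node has reported a hung write. */
static int all_live_nodes_requested(uint64_t live, uint64_t nego)
{
	return (nego & live) == live;
}

int main(void)
{
	uint64_t live = 0x0b;	/* nodes 0, 1 and 3 are alive */
	uint64_t nego = 0;

	printf("master node: %d\n", lowest_live_node(live));

	record_nego_request(&nego, 0);	/* the master's own write hung */
	record_nego_request(&nego, 1);
	printf("approve yet? %d\n", all_live_nodes_requested(live, nego));

	record_nego_request(&nego, 3);
	printf("approve yet? %d\n", all_live_nodes_requested(live, nego));
	return 0;
}

In the kernel the master does not block waiting for the bitmap to fill; as the hunk above shows, it reschedules hr_nego_timeout_work once a second and re-checks.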
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b95e7df..94b1836 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -44,6 +44,9 @@
  * version here in tcp_internal.h should not need to be bumped for
  * filesystem locking changes.
  *
+ * New in version 12
+ *	- Negotiate hb timeout when storage is down.
+ *
  * New in version 11
  * 	- Negotiation of filesystem locking in the dlm join.
  *
@@ -75,7 +78,7 @@
  * 	- full 64 bit i_size in the metadata lock lvbs
  * 	- introduction of "rw" lock and pushing meta/data locking down
  */
-#define O2NET_PROTOCOL_VERSION 11ULL
+#define O2NET_PROTOCOL_VERSION 12ULL
 struct o2net_handshake {
 	__be64	protocol_version;
 	__be64	connector_id;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0748777..c56a767 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -176,12 +176,7 @@
 	}
 	if (is_bad_inode(inode)) {
 		iput(inode);
-		if ((flags & OCFS2_FI_FLAG_FILECHECK_CHK) ||
-		    (flags & OCFS2_FI_FLAG_FILECHECK_FIX))
-			/* Return OCFS2_FILECHECK_ERR_XXX related errno */
-			inode = ERR_PTR(rc);
-		else
-			inode = ERR_PTR(-ESTALE);
+		inode = ERR_PTR(rc);
 		goto bail;
 	}
 
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index f4cd3c3..497a4171 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -619,7 +619,7 @@
 
 static inline int ocfs2_jbd2_file_inode(handle_t *handle, struct inode *inode)
 {
-	return jbd2_journal_file_inode(handle, &OCFS2_I(inode)->ip_jinode);
+	return jbd2_journal_inode_add_write(handle, &OCFS2_I(inode)->ip_jinode);
 }
 
 static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index ad16995..d205385 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7254,10 +7254,11 @@
 }
 
 static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
-				    struct dentry *dentry, const char *name,
-				    const void *value, size_t size, int flags)
+				    struct dentry *unused, struct inode *inode,
+				    const char *name, const void *value,
+				    size_t size, int flags)
 {
-	return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
+	return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
 			       name, value, size, flags);
 }
 
@@ -7325,10 +7326,11 @@
 }
 
 static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   const void *value, size_t size, int flags)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, const void *value,
+				   size_t size, int flags)
 {
-	return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
+	return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED,
 			       name, value, size, flags);
 }
 
@@ -7354,15 +7356,16 @@
 }
 
 static int ocfs2_xattr_user_set(const struct xattr_handler *handler,
-				struct dentry *dentry, const char *name,
-				const void *value, size_t size, int flags)
+				struct dentry *unused, struct inode *inode,
+				const char *name, const void *value,
+				size_t size, int flags)
 {
-	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
 	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
 		return -EOPNOTSUPP;
 
-	return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_USER,
+	return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER,
 			       name, value, size, flags);
 }
 
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 99c1954..5893ddd 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -448,13 +448,14 @@
 }
 
 static int orangefs_xattr_set_default(const struct xattr_handler *handler,
-				      struct dentry *dentry,
+				      struct dentry *unused,
+				      struct inode *inode,
 				      const char *name,
 				      const void *buffer,
 				      size_t size,
 				      int flags)
 {
-	return orangefs_inode_setxattr(dentry->d_inode,
+	return orangefs_inode_setxattr(inode,
 				    ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
 				    name,
 				    buffer,
@@ -478,13 +479,14 @@
 }
 
 static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
-				     struct dentry *dentry,
+				     struct dentry *unused,
+				     struct inode *inode,
 				     const char *name,
 				     const void *buffer,
 				     size_t size,
 				     int flags)
 {
-	return orangefs_inode_setxattr(dentry->d_inode,
+	return orangefs_inode_setxattr(inode,
 				    ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
 				    name,
 				    buffer,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index cc514da..80aa6f1 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -336,7 +336,6 @@
 	struct dentry *upperdir;
 	struct dentry *upperdentry;
 	const struct cred *old_cred;
-	struct cred *override_cred;
 	char *link = NULL;
 
 	if (WARN_ON(!workdir))
@@ -357,28 +356,7 @@
 			return PTR_ERR(link);
 	}
 
-	err = -ENOMEM;
-	override_cred = prepare_creds();
-	if (!override_cred)
-		goto out_free_link;
-
-	override_cred->fsuid = stat->uid;
-	override_cred->fsgid = stat->gid;
-	/*
-	 * CAP_SYS_ADMIN for copying up extended attributes
-	 * CAP_DAC_OVERRIDE for create
-	 * CAP_FOWNER for chmod, timestamp update
-	 * CAP_FSETID for chmod
-	 * CAP_CHOWN for chown
-	 * CAP_MKNOD for mknod
-	 */
-	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
-	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-	cap_raise(override_cred->cap_effective, CAP_FOWNER);
-	cap_raise(override_cred->cap_effective, CAP_FSETID);
-	cap_raise(override_cred->cap_effective, CAP_CHOWN);
-	cap_raise(override_cred->cap_effective, CAP_MKNOD);
-	old_cred = override_creds(override_cred);
+	old_cred = ovl_override_creds(dentry->d_sb);
 
 	err = -EIO;
 	if (lock_rename(workdir, upperdir) != NULL) {
@@ -401,9 +379,7 @@
 out_unlock:
 	unlock_rename(workdir, upperdir);
 	revert_creds(old_cred);
-	put_cred(override_cred);
 
-out_free_link:
 	if (link)
 		free_page((unsigned long) link);
 
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b3fc0a3..22f0253 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -405,28 +405,13 @@
 		err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
 	} else {
 		const struct cred *old_cred;
-		struct cred *override_cred;
 
-		err = -ENOMEM;
-		override_cred = prepare_creds();
-		if (!override_cred)
-			goto out_iput;
-
-		/*
-		 * CAP_SYS_ADMIN for setting opaque xattr
-		 * CAP_DAC_OVERRIDE for create in workdir, rename
-		 * CAP_FOWNER for removing whiteout from sticky dir
-		 */
-		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
-		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-		cap_raise(override_cred->cap_effective, CAP_FOWNER);
-		old_cred = override_creds(override_cred);
+		old_cred = ovl_override_creds(dentry->d_sb);
 
 		err = ovl_create_over_whiteout(dentry, inode, &stat, link,
 					       hardlink);
 
 		revert_creds(old_cred);
-		put_cred(override_cred);
 	}
 
 	if (!err)
@@ -662,32 +647,11 @@
 	if (OVL_TYPE_PURE_UPPER(type)) {
 		err = ovl_remove_upper(dentry, is_dir);
 	} else {
-		const struct cred *old_cred;
-		struct cred *override_cred;
-
-		err = -ENOMEM;
-		override_cred = prepare_creds();
-		if (!override_cred)
-			goto out_drop_write;
-
-		/*
-		 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
-		 * CAP_DAC_OVERRIDE for create in workdir, rename
-		 * CAP_FOWNER for removing whiteout from sticky dir
-		 * CAP_FSETID for chmod of opaque dir
-		 * CAP_CHOWN for chown of opaque dir
-		 */
-		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
-		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-		cap_raise(override_cred->cap_effective, CAP_FOWNER);
-		cap_raise(override_cred->cap_effective, CAP_FSETID);
-		cap_raise(override_cred->cap_effective, CAP_CHOWN);
-		old_cred = override_creds(override_cred);
+		const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
 
 		err = ovl_remove_and_whiteout(dentry, is_dir);
 
 		revert_creds(old_cred);
-		put_cred(override_cred);
 	}
 out_drop_write:
 	ovl_drop_write(dentry);
@@ -725,7 +689,6 @@
 	bool new_is_dir = false;
 	struct dentry *opaquedir = NULL;
 	const struct cred *old_cred = NULL;
-	struct cred *override_cred = NULL;
 
 	err = -EINVAL;
 	if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
@@ -794,26 +757,8 @@
 	old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
 	new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
 
-	if (old_opaque || new_opaque) {
-		err = -ENOMEM;
-		override_cred = prepare_creds();
-		if (!override_cred)
-			goto out_drop_write;
-
-		/*
-		 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
-		 * CAP_DAC_OVERRIDE for create in workdir
-		 * CAP_FOWNER for removing whiteout from sticky dir
-		 * CAP_FSETID for chmod of opaque dir
-		 * CAP_CHOWN for chown of opaque dir
-		 */
-		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
-		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-		cap_raise(override_cred->cap_effective, CAP_FOWNER);
-		cap_raise(override_cred->cap_effective, CAP_FSETID);
-		cap_raise(override_cred->cap_effective, CAP_CHOWN);
-		old_cred = override_creds(override_cred);
-	}
+	if (old_opaque || new_opaque)
+		old_cred = ovl_override_creds(old->d_sb);
 
 	if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
 		opaquedir = ovl_check_empty_and_clear(new);
@@ -943,10 +888,8 @@
 out_unlock:
 	unlock_rename(new_upperdir, old_upperdir);
 out_revert_creds:
-	if (old_opaque || new_opaque) {
+	if (old_opaque || new_opaque)
 		revert_creds(old_cred);
-		put_cred(override_cred);
-	}
 out_drop_write:
 	ovl_drop_write(old);
 out:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c7b31a0..0ed7c40 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -210,8 +210,9 @@
 	return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
 }
 
-int ovl_setxattr(struct dentry *dentry, const char *name,
-		 const void *value, size_t size, int flags)
+int ovl_setxattr(struct dentry *dentry, struct inode *inode,
+		 const char *name, const void *value,
+		 size_t size, int flags)
 {
 	int err;
 	struct dentry *upperdentry;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 99ec4b0..4bd9b5b 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -153,6 +153,7 @@
 bool ovl_dentry_is_opaque(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
 bool ovl_is_whiteout(struct dentry *dentry);
+const struct cred *ovl_override_creds(struct super_block *sb);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 			  unsigned int flags);
@@ -171,8 +172,9 @@
 /* inode.c */
 int ovl_setattr(struct dentry *dentry, struct iattr *attr);
 int ovl_permission(struct inode *inode, int mask);
-int ovl_setxattr(struct dentry *dentry, const char *name,
-		 const void *value, size_t size, int flags);
+int ovl_setxattr(struct dentry *dentry, struct inode *inode,
+		 const char *name, const void *value,
+		 size_t size, int flags);
 ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
 		     const char *name, void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index da186ee..cf37fc7 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -36,6 +36,7 @@
 
 struct ovl_readdir_data {
 	struct dir_context ctx;
+	struct dentry *dentry;
 	bool is_lowest;
 	struct rb_root root;
 	struct list_head *list;
@@ -206,21 +207,10 @@
 	struct ovl_cache_entry *p;
 	struct dentry *dentry;
 	const struct cred *old_cred;
-	struct cred *override_cred;
 
-	override_cred = prepare_creds();
-	if (!override_cred)
-		return -ENOMEM;
+	old_cred = ovl_override_creds(rdd->dentry->d_sb);
 
-	/*
-	 * CAP_DAC_OVERRIDE for lookup
-	 */
-	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-	old_cred = override_creds(override_cred);
-
-	inode_lock(dir->d_inode);
-	err = 0;
-	// XXX: err = mutex_lock_killable(&dir->d_inode->i_mutex);
+	err = down_write_killable(&dir->d_inode->i_rwsem);
 	if (!err) {
 		while (rdd->first_maybe_whiteout) {
 			p = rdd->first_maybe_whiteout;
@@ -234,7 +224,6 @@
 		inode_unlock(dir->d_inode);
 	}
 	revert_creds(old_cred);
-	put_cred(override_cred);
 
 	return err;
 }
@@ -290,6 +279,7 @@
 	struct path realpath;
 	struct ovl_readdir_data rdd = {
 		.ctx.actor = ovl_fill_merge,
+		.dentry = dentry,
 		.list = list,
 		.root = RB_ROOT,
 		.is_lowest = false,
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ed53ae0..ce02f46 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -42,6 +42,8 @@
 	long lower_namelen;
 	/* pathnames of lower and upper dirs, for show_options */
 	struct ovl_config config;
+	/* creds of the process that forced instantiation of the super block */
+	const struct cred *creator_cred;
 };
 
 struct ovl_dir_cache;
@@ -265,6 +267,13 @@
 	return inode && IS_WHITEOUT(inode);
 }
 
+const struct cred *ovl_override_creds(struct super_block *sb)
+{
+	struct ovl_fs *ofs = sb->s_fs_info;
+
+	return override_creds(ofs->creator_cred);
+}
+
 static bool ovl_is_opaquedir(struct dentry *dentry)
 {
 	int res;
@@ -603,6 +612,7 @@
 	kfree(ufs->config.lowerdir);
 	kfree(ufs->config.upperdir);
 	kfree(ufs->config.workdir);
+	put_cred(ufs->creator_cred);
 	kfree(ufs);
 }
 
@@ -1064,16 +1074,19 @@
 		/*
 		 * Upper should support d_type, else whiteouts are visible.
 		 * Given workdir and upper are on same fs, we can do
-		 * iterate_dir() on workdir.
+		 * iterate_dir() on workdir. This check requires successful
+		 * creation of workdir in the previous step.
 		 */
-		err = ovl_check_d_type_supported(&workpath);
-		if (err < 0)
-			goto out_put_workdir;
+		if (ufs->workdir) {
+			err = ovl_check_d_type_supported(&workpath);
+			if (err < 0)
+				goto out_put_workdir;
 
-		if (!err) {
-			pr_err("overlayfs: upper fs needs to support d_type.\n");
-			err = -EINVAL;
-			goto out_put_workdir;
+			if (!err) {
+				pr_err("overlayfs: upper fs needs to support d_type.\n");
+				err = -EINVAL;
+				goto out_put_workdir;
+			}
 		}
 	}
 
@@ -1108,10 +1121,14 @@
 	else
 		sb->s_d_op = &ovl_dentry_operations;
 
+	ufs->creator_cred = prepare_creds();
+	if (!ufs->creator_cred)
+		goto out_put_lower_mnt;
+
 	err = -ENOMEM;
 	oe = ovl_alloc_entry(numlower);
 	if (!oe)
-		goto out_put_lower_mnt;
+		goto out_put_cred;
 
 	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
 	if (!root_dentry)
@@ -1144,6 +1161,8 @@
 
 out_free_oe:
 	kfree(oe);
+out_put_cred:
+	put_cred(ufs->creator_cred);
 out_put_lower_mnt:
 	for (i = 0; i < ufs->numlower; i++)
 		mntput(ufs->lower_mnt[i]);
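
The overlayfs hunks above replace the per-operation prepare_creds()/cap_raise() boilerplate with a single set of credentials captured from the mounting process and stashed in ofs->creator_cred; each privileged operation temporarily installs those creds via ovl_override_creds() and restores the caller's creds afterwards. A rough userspace model of the save-at-setup / override-per-operation shape (the context structure and helpers here are stand-ins, not the kernel cred API):

#include <stdio.h>

struct ctx {
	const char *label;
};

static const struct ctx *current_ctx;

/* Swap in @newc and hand back the previously active context. */
static const struct ctx *override_ctx(const struct ctx *newc)
{
	const struct ctx *old = current_ctx;

	current_ctx = newc;
	return old;
}

static void revert_ctx(const struct ctx *old)
{
	current_ctx = old;
}

int main(void)
{
	struct ctx mounter = { "mounter" };	/* captured once, at "mount" time */
	struct ctx caller  = { "caller" };
	const struct ctx *old;

	current_ctx = &caller;

	old = override_ctx(&mounter);		/* like ovl_override_creds() */
	printf("doing privileged work as: %s\n", current_ctx->label);
	revert_ctx(old);			/* like revert_creds() */

	printf("back to: %s\n", current_ctx->label);
	return 0;
}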
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 2c60f17..8a4a266 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -822,10 +822,10 @@
 
 static int
 posix_acl_xattr_set(const struct xattr_handler *handler,
-		    struct dentry *dentry, const char *name,
-		    const void *value, size_t size, int flags)
+		    struct dentry *unused, struct inode *inode,
+		    const char *name, const void *value,
+		    size_t size, int flags)
 {
-	struct inode *inode = d_backing_inode(dentry);
 	struct posix_acl *acl = NULL;
 	int ret;
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5415835..4648c7f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1027,11 +1027,15 @@
 		};
 
 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
+			if (down_write_killable(&mm->mmap_sem)) {
+				count = -EINTR;
+				goto out_mm;
+			}
+
 			/*
 			 * Writing 5 to /proc/pid/clear_refs resets the peak
 			 * resident set size to this mm's current rss value.
 			 */
-			down_write(&mm->mmap_sem);
 			reset_mm_hiwater_rss(mm);
 			up_write(&mm->mmap_sem);
 			goto out_mm;
@@ -1043,7 +1047,10 @@
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
 				up_read(&mm->mmap_sem);
-				down_write(&mm->mmap_sem);
+				if (down_write_killable(&mm->mmap_sem)) {
+					count = -EINTR;
+					goto out_mm;
+				}
 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
 					vma->vm_flags &= ~VM_SOFTDIRTY;
 					vma_set_page_prot(vma);
diff --git a/fs/readdir.c b/fs/readdir.c
index a86c6c0..9d0212c 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -35,13 +35,13 @@
 	if (res)
 		goto out;
 
-	if (shared)
+	if (shared) {
 		inode_lock_shared(inode);
-	else
-		inode_lock(inode);
-	// res = mutex_lock_killable(&inode->i_mutex);
-	// if (res)
-	//	goto out;
+	} else {
+		res = down_write_killable(&inode->i_rwsem);
+		if (res)
+			goto out;
+	}
 
 	res = -ENOENT;
 	if (!IS_DEADDIR(inode)) {
@@ -182,6 +182,8 @@
 	}
 	dirent = buf->previous;
 	if (dirent) {
+		if (signal_pending(current))
+			return -EINTR;
 		if (__put_user(offset, &dirent->d_off))
 			goto efault;
 	}
@@ -261,6 +263,8 @@
 		return -EINVAL;
 	dirent = buf->previous;
 	if (dirent) {
+		if (signal_pending(current))
+			return -EINTR;
 		if (__put_user(offset, &dirent->d_off))
 			goto efault;
 	}
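
The clear_refs and readdir hunks above share one theme: long-running operations should be interruptible, so unconditional lock acquisition becomes down_write_killable() and the copy-out loops check signal_pending(), both bailing out with -EINTR instead of hanging uninterruptibly. A small userspace sketch of the second half of that pattern, with a signal handler standing in for the kernel's pending-signal check:

#include <errno.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t signal_pending_flag;

static void on_sigint(int sig)
{
	(void)sig;
	signal_pending_flag = 1;
}

/* Copy @n dummy entries, bailing out with -EINTR if a signal arrived. */
static long copy_entries(long n)
{
	long i;

	for (i = 0; i < n; i++) {
		if (signal_pending_flag)
			return -EINTR;
		/* ...copy one directory entry to the user buffer... */
	}
	return n;
}

int main(void)
{
	long ret;

	signal(SIGINT, on_sigint);
	ret = copy_entries(100000000L);
	printf("copy_entries() returned %ld%s\n", ret,
	       ret == -EINTR ? " (interrupted)" : "");
	return 0;
}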
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 86aeb9d..e4cbb77 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -20,13 +20,14 @@
 }
 
 static int
-security_set(const struct xattr_handler *handler, struct dentry *dentry,
-	     const char *name, const void *buffer, size_t size, int flags)
+security_set(const struct xattr_handler *handler, struct dentry *unused,
+	     struct inode *inode, const char *name, const void *buffer,
+	     size_t size, int flags)
 {
-	if (IS_PRIVATE(d_inode(dentry)))
+	if (IS_PRIVATE(inode))
 		return -EPERM;
 
-	return reiserfs_xattr_set(d_inode(dentry),
+	return reiserfs_xattr_set(inode,
 				  xattr_full_name(handler, name),
 				  buffer, size, flags);
 }
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 31837f0..f15a5f9 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -19,13 +19,14 @@
 }
 
 static int
-trusted_set(const struct xattr_handler *handler, struct dentry *dentry,
-	    const char *name, const void *buffer, size_t size, int flags)
+trusted_set(const struct xattr_handler *handler, struct dentry *unused,
+	    struct inode *inode, const char *name, const void *buffer,
+	    size_t size, int flags)
 {
-	if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
+	if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
 		return -EPERM;
 
-	return reiserfs_xattr_set(d_inode(dentry),
+	return reiserfs_xattr_set(inode,
 				  xattr_full_name(handler, name),
 				  buffer, size, flags);
 }
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index f7c3973..dc59df4 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -17,12 +17,13 @@
 }
 
 static int
-user_set(const struct xattr_handler *handler, struct dentry *dentry,
-	 const char *name, const void *buffer, size_t size, int flags)
+user_set(const struct xattr_handler *handler, struct dentry *unused,
+	 struct inode *inode, const char *name, const void *buffer,
+	 size_t size, int flags)
 {
-	if (!reiserfs_xattrs_user(dentry->d_sb))
+	if (!reiserfs_xattrs_user(inode->i_sb))
 		return -EOPNOTSUPP;
-	return reiserfs_xattr_set(d_inode(dentry),
+	return reiserfs_xattr_set(inode,
 				  xattr_full_name(handler, name),
 				  buffer, size, flags);
 }
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 595ca0d..69e287e2 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -260,7 +260,7 @@
 	pr_err("\txattr_names    %u\n", ui->xattr_names);
 	pr_err("\tdirty          %u\n", ui->dirty);
 	pr_err("\txattr          %u\n", ui->xattr);
-	pr_err("\tbulk_read      %u\n", ui->xattr);
+	pr_err("\tbulk_read      %u\n", ui->bulk_read);
 	pr_err("\tsynced_i_size  %llu\n",
 	       (unsigned long long)ui->synced_i_size);
 	pr_err("\tui_size        %llu\n",
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c277eb..b5fc279 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -579,11 +579,10 @@
 }
 
 static int ubifs_xattr_set(const struct xattr_handler *handler,
-			   struct dentry *dentry, const char *name,
-			   const void *value, size_t size, int flags)
+			   struct dentry *dentry, struct inode *inode,
+			   const char *name, const void *value,
+			   size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
-
 	dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
 		name, inode->i_ino, dentry, size);
 
diff --git a/fs/xattr.c b/fs/xattr.c
index b11945e..4beafc4 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -100,7 +100,7 @@
 	if (issec)
 		inode->i_flags &= ~S_NOSEC;
 	if (inode->i_op->setxattr) {
-		error = inode->i_op->setxattr(dentry, name, value, size, flags);
+		error = inode->i_op->setxattr(dentry, inode, name, value, size, flags);
 		if (!error) {
 			fsnotify_xattr(dentry);
 			security_inode_post_setxattr(dentry, name, value,
@@ -655,6 +655,7 @@
  * operations to the correct xattr_handler.
  */
 #define for_each_xattr_handler(handlers, handler)		\
+	if (handlers)						\
 		for ((handler) = *(handlers)++;			\
 			(handler) != NULL;			\
 			(handler) = *(handlers)++)
@@ -668,7 +669,7 @@
 	const struct xattr_handler *handler;
 
 	if (!*name)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	for_each_xattr_handler(handlers, handler) {
 		const char *n;
@@ -744,7 +745,8 @@
  * Find the handler for the prefix and dispatch its set() operation.
  */
 int
-generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+generic_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
+		 const void *value, size_t size, int flags)
 {
 	const struct xattr_handler *handler;
 
@@ -753,7 +755,7 @@
 	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
 	if (IS_ERR(handler))
 		return PTR_ERR(handler);
-	return handler->set(handler, dentry, name, value, size, flags);
+	return handler->set(handler, dentry, inode, name, value, size, flags);
 }
 
 /*
@@ -768,7 +770,8 @@
 	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
 	if (IS_ERR(handler))
 		return PTR_ERR(handler);
-	return handler->set(handler, dentry, name, NULL, 0, XATTR_REPLACE);
+	return handler->set(handler, dentry, d_inode(dentry), name, NULL,
+			    0, XATTR_REPLACE);
 }
 
 EXPORT_SYMBOL(generic_getxattr);
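
generic_setxattr() above now receives the inode explicitly and, as before, resolves the attribute name's prefix to a handler from the superblock's NULL-terminated handler table; the hunks also make xattr_resolve_name() reject an empty name with -EINVAL and guard the iteration when no handler table is registered. A self-contained userspace model of that prefix dispatch (the handler table, names and return values here are made up for illustration):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct handler {
	const char *prefix;
	int (*set)(const char *suffix, const char *value);
};

static int user_set(const char *suffix, const char *value)
{
	printf("user.%s = %s\n", suffix, value);
	return 0;
}

static const struct handler user_handler = { "user.", user_set };
static const struct handler *handlers[] = { &user_handler, NULL };

static int setxattr_dispatch(const struct handler **tbl,
			     const char *name, const char *value)
{
	const struct handler *h;

	if (!tbl)
		return -EOPNOTSUPP;	/* no handler table registered */
	if (!*name)
		return -EINVAL;		/* empty name, as in the patch */

	for (; (h = *tbl) != NULL; tbl++) {
		size_t n = strlen(h->prefix);

		if (!strncmp(name, h->prefix, n))
			return h->set(name + n, value);
	}
	return -EOPNOTSUPP;
}

int main(void)
{
	printf("%d\n", setxattr_dispatch(handlers, "user.comment", "hi"));
	printf("%d\n", setxattr_dispatch(handlers, "", "hi"));
	printf("%d\n", setxattr_dispatch(NULL, "user.comment", "hi"));
	return 0;
}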
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 686ba6f..339c696 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -93,19 +93,23 @@
 }
 
 void *
-kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
-	     xfs_km_flags_t flags)
+kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
 {
-	void	*new;
+	int	retries = 0;
+	gfp_t	lflags = kmem_flags_convert(flags);
+	void	*ptr;
 
-	new = kmem_alloc(newsize, flags);
-	if (ptr) {
-		if (new)
-			memcpy(new, ptr,
-				((oldsize < newsize) ? oldsize : newsize));
-		kmem_free(ptr);
-	}
-	return new;
+	do {
+		ptr = krealloc(old, newsize, lflags);
+		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+			return ptr;
+		if (!(++retries % 100))
+			xfs_err(NULL,
+	"%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
+				current->comm, current->pid,
+				newsize, __func__, lflags);
+		congestion_wait(BLK_RW_ASYNC, HZ/50);
+	} while (1);
 }
 
 void *
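
The rewritten kmem_realloc() above drops the old allocate-copy-free implementation (and its oldsize argument) in favour of krealloc(), retrying until the allocation succeeds unless the caller passed KM_MAYFAIL or KM_NOSLEEP, warning about a possible deadlock every 100 attempts and waiting out writeback congestion between tries. A userspace sketch of that retry shape, with nanosleep() standing in for congestion_wait() and a "may_fail" flag standing in for the KM_* bits:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void *realloc_retry(void *old, size_t newsize, int may_fail)
{
	const struct timespec delay = { 0, 20 * 1000 * 1000 };	/* ~20ms */
	int retries = 0;
	void *p;

	do {
		p = realloc(old, newsize);
		if (p || may_fail)
			return p;
		if (!(++retries % 100))
			fprintf(stderr,
				"possible allocation deadlock, size %zu\n",
				newsize);
		nanosleep(&delay, NULL);	/* back off, then retry */
	} while (1);
}

int main(void)
{
	char *buf = realloc_retry(NULL, 16, 0);

	buf = realloc_retry(buf, 64, 0);
	snprintf(buf, 64, "resized without an extra copy");
	puts(buf);
	free(buf);
	return 0;
}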
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index d1c66e4..689f746 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -62,7 +62,7 @@
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
-extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
+extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
 static inline void  kmem_free(const void *ptr)
 {
 	kvfree(ptr);
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fa3b948..4e126f4 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -242,37 +242,21 @@
 			return error;
 	}
 
-	/*
-	 * Start our first transaction of the day.
-	 *
-	 * All future transactions during this code must be "chained" off
-	 * this one via the trans_dup() call.  All transactions will contain
-	 * the inode, and the inode will always be marked with trans_ihold().
-	 * Since the inode will be locked in all transactions, we must log
-	 * the inode in every transaction to let it float upward through
-	 * the log.
-	 */
-	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);
+	tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+			 M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
+	tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
 
 	/*
 	 * Root fork attributes can use reserved data blocks for this
 	 * operation if necessary
 	 */
-
-	if (rsvd)
-		args.trans->t_flags |= XFS_TRANS_RESERVE;
-
-	tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
-			 M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
-	tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
-	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
-	error = xfs_trans_reserve(args.trans, &tres, args.total, 0);
-	if (error) {
-		xfs_trans_cancel(args.trans);
+	error = xfs_trans_alloc(mp, &tres, args.total, 0,
+			rsvd ? XFS_TRANS_RESERVE : 0, &args.trans);
+	if (error)
 		return error;
-	}
-	xfs_ilock(dp, XFS_ILOCK_EXCL);
 
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
 	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
 				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
 				       XFS_QMOPT_RES_REGBLKS);
@@ -429,31 +413,15 @@
 		return error;
 
 	/*
-	 * Start our first transaction of the day.
-	 *
-	 * All future transactions during this code must be "chained" off
-	 * this one via the trans_dup() call.  All transactions will contain
-	 * the inode, and the inode will always be marked with trans_ihold().
-	 * Since the inode will be locked in all transactions, we must log
-	 * the inode in every transaction to let it float upward through
-	 * the log.
-	 */
-	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);
-
-	/*
 	 * Root fork attributes can use reserved data blocks for this
 	 * operation if necessary
 	 */
-
-	if (flags & ATTR_ROOT)
-		args.trans->t_flags |= XFS_TRANS_RESERVE;
-
-	error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm,
-				  XFS_ATTRRM_SPACE_RES(mp), 0);
-	if (error) {
-		xfs_trans_cancel(args.trans);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrrm,
+			XFS_ATTRRM_SPACE_RES(mp), 0,
+			(flags & ATTR_ROOT) ? XFS_TRANS_RESERVE : 0,
+			&args.trans);
+	if (error)
 		return error;
-	}
 
 	xfs_ilock(dp, XFS_ILOCK_EXCL);
 	/*
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index ce41d7f..932381c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1121,15 +1121,14 @@
 
 	mp = ip->i_mount;
 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
-	tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
+
 	blks = XFS_ADDAFORK_SPACE_RES(mp);
-	if (rsvd)
-		tp->t_flags |= XFS_TRANS_RESERVE;
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
+			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
+	if (error)
 		return error;
-	}
+
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
 			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
@@ -6026,13 +6025,10 @@
 	xfs_fsblock_t           firstfsb;
 	int                     error;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
-			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
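
These XFS hunks (and the similar ones that follow) collapse the old three-step sequence, xfs_trans_alloc() then xfs_trans_reserve() then xfs_trans_cancel() on reservation failure, into a single xfs_trans_alloc() call that takes the reservation and flags and either returns a fully set-up transaction or an error with nothing left to clean up. A tiny userspace model of that constructor-style API shape (the names and block accounting are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct txn {
	unsigned int blocks_reserved;
};

static long free_blocks = 100;		/* stand-in for free space */

/*
 * Allocate and reserve in one step.  On reservation failure the
 * half-built transaction is freed here, so the caller never has to
 * cancel anything.
 */
static int txn_alloc(unsigned int blocks, struct txn **tpp)
{
	struct txn *tp = calloc(1, sizeof(*tp));

	if (!tp)
		return -ENOMEM;
	if (free_blocks < blocks) {
		free(tp);
		return -ENOSPC;
	}
	free_blocks -= blocks;
	tp->blocks_reserved = blocks;
	*tpp = tp;
	return 0;
}

int main(void)
{
	struct txn *tp;
	int error;

	error = txn_alloc(40, &tp);
	if (error)
		return 1;
	printf("reserved %u blocks\n", tp->blocks_reserved);
	free(tp);

	error = txn_alloc(80, &tp);	/* fails: only 60 blocks left */
	printf("second attempt: %d\n", error);
	return 0;
}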
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 974d62e..e5bb9cc 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -257,15 +257,12 @@
 	 *
 	 * Convert the inode to local format and copy the data in.
 	 */
-	dp->i_df.if_flags &= ~XFS_IFEXTENTS;
-	dp->i_df.if_flags |= XFS_IFINLINE;
-	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
 	ASSERT(dp->i_df.if_bytes == 0);
-	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
+	xfs_init_local_fork(dp, XFS_DATA_FORK, dst, size);
+	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+	dp->i_d.di_size = size;
 
 	logflags |= XFS_ILOG_DDATA;
-	memcpy(dp->i_df.if_u1.if_data, dst, size);
-	dp->i_d.di_size = size;
 	xfs_dir2_sf_check(args);
 out:
 	xfs_trans_log_inode(args->trans, dp, logflags);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 11faf7d..bbcc8c7 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -231,6 +231,48 @@
 	return error;
 }
 
+void
+xfs_init_local_fork(
+	struct xfs_inode	*ip,
+	int			whichfork,
+	const void		*data,
+	int			size)
+{
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	int			mem_size = size, real_size = 0;
+	bool			zero_terminate;
+
+	/*
+	 * If we are using the local fork to store a symlink body we need to
+	 * zero-terminate it so that we can pass it back to the VFS directly.
+	 * Overallocate the in-memory fork by one for that and add a zero
+	 * to terminate it below.
+	 */
+	zero_terminate = S_ISLNK(VFS_I(ip)->i_mode);
+	if (zero_terminate)
+		mem_size++;
+
+	if (size == 0)
+		ifp->if_u1.if_data = NULL;
+	else if (mem_size <= sizeof(ifp->if_u2.if_inline_data))
+		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+	else {
+		real_size = roundup(mem_size, 4);
+		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
+	}
+
+	if (size) {
+		memcpy(ifp->if_u1.if_data, data, size);
+		if (zero_terminate)
+			ifp->if_u1.if_data[size] = '\0';
+	}
+
+	ifp->if_bytes = size;
+	ifp->if_real_bytes = real_size;
+	ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
+	ifp->if_flags |= XFS_IFINLINE;
+}
+
 /*
  * The file is in-lined in the on-disk inode.
  * If it fits into if_inline_data, then copy
@@ -248,8 +290,6 @@
 	int		whichfork,
 	int		size)
 {
-	xfs_ifork_t	*ifp;
-	int		real_size;
 
 	/*
 	 * If the size is unreasonable, then something
@@ -265,22 +305,8 @@
 				     ip->i_mount, dip);
 		return -EFSCORRUPTED;
 	}
-	ifp = XFS_IFORK_PTR(ip, whichfork);
-	real_size = 0;
-	if (size == 0)
-		ifp->if_u1.if_data = NULL;
-	else if (size <= sizeof(ifp->if_u2.if_inline_data))
-		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
-	else {
-		real_size = roundup(size, 4);
-		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
-	}
-	ifp->if_bytes = size;
-	ifp->if_real_bytes = real_size;
-	if (size)
-		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
-	ifp->if_flags &= ~XFS_IFEXTENTS;
-	ifp->if_flags |= XFS_IFINLINE;
+
+	xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
 	return 0;
 }
 
@@ -516,7 +542,6 @@
 		new_max = cur_max + rec_diff;
 		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
 		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
-				XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
 				KM_SLEEP | KM_NOFS);
 		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
 						     ifp->if_broot_bytes);
@@ -660,7 +685,6 @@
 				ifp->if_u1.if_data =
 					kmem_realloc(ifp->if_u1.if_data,
 							real_size,
-							ifp->if_real_bytes,
 							KM_SLEEP | KM_NOFS);
 			}
 		} else {
@@ -1376,8 +1400,7 @@
 		if (rnew_size != ifp->if_real_bytes) {
 			ifp->if_u1.if_extents =
 				kmem_realloc(ifp->if_u1.if_extents,
-						rnew_size,
-						ifp->if_real_bytes, KM_NOFS);
+						rnew_size, KM_NOFS);
 		}
 		if (rnew_size > ifp->if_real_bytes) {
 			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
@@ -1461,9 +1484,8 @@
 	if (new_size == 0) {
 		xfs_iext_destroy(ifp);
 	} else {
-		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
-			kmem_realloc(ifp->if_u1.if_ext_irec,
-				new_size, size, KM_NOFS);
+		ifp->if_u1.if_ext_irec =
+			kmem_realloc(ifp->if_u1.if_ext_irec, new_size, KM_NOFS);
 	}
 }
 
@@ -1497,6 +1519,24 @@
 }
 
 /*
+ * Remove all records from the indirection array.
+ */
+STATIC void
+xfs_iext_irec_remove_all(
+	struct xfs_ifork *ifp)
+{
+	int		nlists;
+	int		i;
+
+	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+	for (i = 0; i < nlists; i++)
+		kmem_free(ifp->if_u1.if_ext_irec[i].er_extbuf);
+	kmem_free(ifp->if_u1.if_ext_irec);
+	ifp->if_flags &= ~XFS_IFEXTIREC;
+}
+
+/*
  * Free incore file extents.
  */
 void
@@ -1504,14 +1544,7 @@
 	xfs_ifork_t	*ifp)		/* inode fork pointer */
 {
 	if (ifp->if_flags & XFS_IFEXTIREC) {
-		int	erp_idx;
-		int	nlists;
-
-		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
-			xfs_iext_irec_remove(ifp, erp_idx);
-		}
-		ifp->if_flags &= ~XFS_IFEXTIREC;
+		xfs_iext_irec_remove_all(ifp);
 	} else if (ifp->if_real_bytes) {
 		kmem_free(ifp->if_u1.if_extents);
 	} else if (ifp->if_bytes) {
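
The new xfs_init_local_fork() above centralises how a local-format fork is populated: payloads that fit in if_inline_data are stored inline, larger ones get a heap buffer rounded up to 4 bytes, and symlink bodies get one extra byte so they can be NUL-terminated and handed straight to the VFS. A userspace model of that storage decision, with an invented struct in place of the real fork:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct local_fork {
	char inline_buf[16];	/* plays the role of if_inline_data */
	char *data;		/* points at inline_buf or a heap buffer */
	size_t bytes;		/* payload size, excluding any NUL */
	size_t real_bytes;	/* heap allocation size, 0 when inline */
};

static int init_local_fork(struct local_fork *f, const void *data,
			   size_t size, int is_symlink)
{
	/* Symlink bodies get one extra byte for the terminating NUL. */
	size_t mem_size = size + (is_symlink ? 1 : 0);

	f->real_bytes = 0;
	if (size == 0) {
		f->data = NULL;
	} else if (mem_size <= sizeof(f->inline_buf)) {
		f->data = f->inline_buf;
	} else {
		f->real_bytes = (mem_size + 3) & ~(size_t)3; /* roundup(,4) */
		f->data = malloc(f->real_bytes);
		if (!f->data)
			return -1;
	}

	if (size) {
		memcpy(f->data, data, size);
		if (is_symlink)
			f->data[size] = '\0';
	}
	f->bytes = size;
	return 0;
}

int main(void)
{
	struct local_fork f;
	const char *target = "/a/fairly/long/symlink/target";

	if (init_local_fork(&f, target, strlen(target), 1))
		return 1;
	printf("stored %s: \"%s\"\n",
	       f.real_bytes ? "on the heap" : "inline", f.data);
	if (f.real_bytes)
		free(f.data);
	return 0;
}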
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 7d3b1ed..f95e072 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -134,6 +134,7 @@
 int		xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
 int		xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
 				  int);
+void		xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
 
 struct xfs_bmbt_rec_host *
 		xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index d54a801..e8f49c0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -212,6 +212,11 @@
 #define	XFS_TRANS_HEADER_MAGIC	0x5452414e	/* TRAN */
 
 /*
+ * The only type valid for th_type in CIL-enabled file system logs:
+ */
+#define XFS_TRANS_CHECKPOINT	40
+
+/*
  * Log item types.
  */
 #define	XFS_LI_EFI		0x1236
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 8a53eaa..12ca867 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -838,12 +838,10 @@
 	struct xfs_trans	*tp;
 	int			error;
 
-	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0,
+			XFS_TRANS_NO_WRITECOUNT, &tp);
+	if (error)
 		return error;
-	}
 
 	xfs_log_sb(tp);
 	if (wait)
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 81ac870..16002b5 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -56,103 +56,6 @@
 extern const struct xfs_buf_ops xfs_rtbuf_ops;
 
 /*
- * Transaction types.  Used to distinguish types of buffers. These never reach
- * the log.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE	1
-#define XFS_TRANS_SETATTR_SIZE		2
-#define XFS_TRANS_INACTIVE		3
-#define XFS_TRANS_CREATE		4
-#define XFS_TRANS_CREATE_TRUNC		5
-#define XFS_TRANS_TRUNCATE_FILE		6
-#define XFS_TRANS_REMOVE		7
-#define XFS_TRANS_LINK			8
-#define XFS_TRANS_RENAME		9
-#define XFS_TRANS_MKDIR			10
-#define XFS_TRANS_RMDIR			11
-#define XFS_TRANS_SYMLINK		12
-#define XFS_TRANS_SET_DMATTRS		13
-#define XFS_TRANS_GROWFS		14
-#define XFS_TRANS_STRAT_WRITE		15
-#define XFS_TRANS_DIOSTRAT		16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define	XFS_TRANS_WRITEID		18
-#define	XFS_TRANS_ADDAFORK		19
-#define	XFS_TRANS_ATTRINVAL		20
-#define	XFS_TRANS_ATRUNCATE		21
-#define	XFS_TRANS_ATTR_SET		22
-#define	XFS_TRANS_ATTR_RM		23
-#define	XFS_TRANS_ATTR_FLAG		24
-#define	XFS_TRANS_CLEAR_AGI_BUCKET	25
-#define XFS_TRANS_SB_CHANGE		26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1		27
-#define XFS_TRANS_DUMMY2		28
-#define XFS_TRANS_QM_QUOTAOFF		29
-#define XFS_TRANS_QM_DQALLOC		30
-#define XFS_TRANS_QM_SETQLIM		31
-#define XFS_TRANS_QM_DQCLUSTER		32
-#define XFS_TRANS_QM_QINOCREATE		33
-#define XFS_TRANS_QM_QUOTAOFF_END	34
-#define XFS_TRANS_FSYNC_TS		35
-#define	XFS_TRANS_GROWFSRT_ALLOC	36
-#define	XFS_TRANS_GROWFSRT_ZERO		37
-#define	XFS_TRANS_GROWFSRT_FREE		38
-#define	XFS_TRANS_SWAPEXT		39
-#define	XFS_TRANS_CHECKPOINT		40
-#define	XFS_TRANS_ICREATE		41
-#define	XFS_TRANS_CREATE_TMPFILE	42
-#define	XFS_TRANS_TYPE_MAX		43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
-	{ XFS_TRANS_SETATTR_NOT_SIZE,	"SETATTR_NOT_SIZE" }, \
-	{ XFS_TRANS_SETATTR_SIZE,	"SETATTR_SIZE" }, \
-	{ XFS_TRANS_INACTIVE,		"INACTIVE" }, \
-	{ XFS_TRANS_CREATE,		"CREATE" }, \
-	{ XFS_TRANS_CREATE_TRUNC,	"CREATE_TRUNC" }, \
-	{ XFS_TRANS_TRUNCATE_FILE,	"TRUNCATE_FILE" }, \
-	{ XFS_TRANS_REMOVE,		"REMOVE" }, \
-	{ XFS_TRANS_LINK,		"LINK" }, \
-	{ XFS_TRANS_RENAME,		"RENAME" }, \
-	{ XFS_TRANS_MKDIR,		"MKDIR" }, \
-	{ XFS_TRANS_RMDIR,		"RMDIR" }, \
-	{ XFS_TRANS_SYMLINK,		"SYMLINK" }, \
-	{ XFS_TRANS_SET_DMATTRS,	"SET_DMATTRS" }, \
-	{ XFS_TRANS_GROWFS,		"GROWFS" }, \
-	{ XFS_TRANS_STRAT_WRITE,	"STRAT_WRITE" }, \
-	{ XFS_TRANS_DIOSTRAT,		"DIOSTRAT" }, \
-	{ XFS_TRANS_WRITEID,		"WRITEID" }, \
-	{ XFS_TRANS_ADDAFORK,		"ADDAFORK" }, \
-	{ XFS_TRANS_ATTRINVAL,		"ATTRINVAL" }, \
-	{ XFS_TRANS_ATRUNCATE,		"ATRUNCATE" }, \
-	{ XFS_TRANS_ATTR_SET,		"ATTR_SET" }, \
-	{ XFS_TRANS_ATTR_RM,		"ATTR_RM" }, \
-	{ XFS_TRANS_ATTR_FLAG,		"ATTR_FLAG" }, \
-	{ XFS_TRANS_CLEAR_AGI_BUCKET,	"CLEAR_AGI_BUCKET" }, \
-	{ XFS_TRANS_SB_CHANGE,		"SBCHANGE" }, \
-	{ XFS_TRANS_DUMMY1,		"DUMMY1" }, \
-	{ XFS_TRANS_DUMMY2,		"DUMMY2" }, \
-	{ XFS_TRANS_QM_QUOTAOFF,	"QM_QUOTAOFF" }, \
-	{ XFS_TRANS_QM_DQALLOC,		"QM_DQALLOC" }, \
-	{ XFS_TRANS_QM_SETQLIM,		"QM_SETQLIM" }, \
-	{ XFS_TRANS_QM_DQCLUSTER,	"QM_DQCLUSTER" }, \
-	{ XFS_TRANS_QM_QINOCREATE,	"QM_QINOCREATE" }, \
-	{ XFS_TRANS_QM_QUOTAOFF_END,	"QM_QOFF_END" }, \
-	{ XFS_TRANS_FSYNC_TS,		"FSYNC_TS" }, \
-	{ XFS_TRANS_GROWFSRT_ALLOC,	"GROWFSRT_ALLOC" }, \
-	{ XFS_TRANS_GROWFSRT_ZERO,	"GROWFSRT_ZERO" }, \
-	{ XFS_TRANS_GROWFSRT_FREE,	"GROWFSRT_FREE" }, \
-	{ XFS_TRANS_SWAPEXT,		"SWAPEXT" }, \
-	{ XFS_TRANS_CHECKPOINT,		"CHECKPOINT" }, \
-	{ XFS_TRANS_ICREATE,		"ICREATE" }, \
-	{ XFS_TRANS_CREATE_TMPFILE,	"CREATE_TMPFILE" }, \
-	{ XLOG_UNMOUNT_REC_TYPE,	"UNMOUNT" }
-
-/*
  * This structure is used to track log items associated with
  * a transaction.  It points to the log item and keeps some
  * flags to track the state of the log item.  It also tracks
@@ -181,8 +84,9 @@
 #define	XFS_TRANS_SYNC		0x08	/* make commit synchronous */
 #define XFS_TRANS_DQ_DIRTY	0x10	/* at least one dquot in trx dirty */
 #define XFS_TRANS_RESERVE	0x20    /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT	0x40	/* Transaction has elevated writer
-					   count in superblock */
+#define XFS_TRANS_NO_WRITECOUNT 0x40	/* do not elevate SB writecount */
+#define XFS_TRANS_NOFS		0x80	/* pass KM_NOFS to kmem_alloc */
+
 /*
  * Field values for xfs_trans_mod_sb.
  */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c535887..4c463b9 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -84,23 +84,71 @@
 }
 
 /*
- * We're now finished for good with this ioend structure.
- * Update the page state via the associated buffer_heads,
- * release holds on the inode and bio, and finally free
- * up memory.  Do not use the ioend after this.
+ * We're now finished for good with this page.  Update the page state via the
+ * associated buffer_heads, paying attention to the start and end offsets that
+ * we need to process on the page.
+ */
+static void
+xfs_finish_page_writeback(
+	struct inode		*inode,
+	struct bio_vec		*bvec,
+	int			error)
+{
+	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
+	struct buffer_head	*head, *bh;
+	unsigned int		off = 0;
+
+	ASSERT(bvec->bv_offset < PAGE_SIZE);
+	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
+	ASSERT(end < PAGE_SIZE);
+	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
+
+	bh = head = page_buffers(bvec->bv_page);
+
+	do {
+		if (off < bvec->bv_offset)
+			goto next_bh;
+		if (off > end)
+			break;
+		bh->b_end_io(bh, !error);
+next_bh:
+		off += bh->b_size;
+	} while ((bh = bh->b_this_page) != head);
+}
+
+/*
+ * We're now finished for good with this ioend structure.  Update the page
+ * state, release holds on bios, and finally free up memory.  Do not use the
+ * ioend after this.
  */
 STATIC void
 xfs_destroy_ioend(
-	xfs_ioend_t		*ioend)
+	struct xfs_ioend	*ioend,
+	int			error)
 {
-	struct buffer_head	*bh, *next;
+	struct inode		*inode = ioend->io_inode;
+	struct bio		*last = ioend->io_bio;
+	struct bio		*bio, *next;
 
-	for (bh = ioend->io_buffer_head; bh; bh = next) {
-		next = bh->b_private;
-		bh->b_end_io(bh, !ioend->io_error);
+	for (bio = &ioend->io_inline_bio; bio; bio = next) {
+		struct bio_vec	*bvec;
+		int		i;
+
+		/*
+		 * For the last bio, bi_private points to the ioend, so we
+		 * need to explicitly end the iteration here.
+		 */
+		if (bio == last)
+			next = NULL;
+		else
+			next = bio->bi_private;
+
+		/* walk each page on bio, ending page IO on them */
+		bio_for_each_segment_all(bvec, bio, i)
+			xfs_finish_page_writeback(inode, bvec, error);
+
+		bio_put(bio);
 	}
-
-	mempool_free(ioend, xfs_ioend_pool);
 }
 
 /*
@@ -120,13 +168,9 @@
 	struct xfs_trans	*tp;
 	int			error;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	ioend->io_append_trans = tp;
 
@@ -174,7 +218,8 @@
 
 STATIC int
 xfs_setfilesize_ioend(
-	struct xfs_ioend	*ioend)
+	struct xfs_ioend	*ioend,
+	int			error)
 {
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	struct xfs_trans	*tp = ioend->io_append_trans;
@@ -188,53 +233,32 @@
 	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
 
 	/* we abort the update if there was an IO error */
-	if (ioend->io_error) {
+	if (error) {
 		xfs_trans_cancel(tp);
-		return ioend->io_error;
+		return error;
 	}
 
 	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 }
 
 /*
- * Schedule IO completion handling on the final put of an ioend.
- *
- * If there is no work to do we might as well call it a day and free the
- * ioend right now.
- */
-STATIC void
-xfs_finish_ioend(
-	struct xfs_ioend	*ioend)
-{
-	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
-
-		if (ioend->io_type == XFS_IO_UNWRITTEN)
-			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
-		else if (ioend->io_append_trans)
-			queue_work(mp->m_data_workqueue, &ioend->io_work);
-		else
-			xfs_destroy_ioend(ioend);
-	}
-}
-
-/*
  * IO write completion.
  */
 STATIC void
 xfs_end_io(
 	struct work_struct *work)
 {
-	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
-	struct xfs_inode *ip = XFS_I(ioend->io_inode);
-	int		error = 0;
+	struct xfs_ioend	*ioend =
+		container_of(work, struct xfs_ioend, io_work);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	int			error = ioend->io_bio->bi_error;
 
 	/*
 	 * Set an error if the mount has shut down and proceed with end I/O
 	 * processing so it can perform whatever cleanups are necessary.
 	 */
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		ioend->io_error = -EIO;
+		error = -EIO;
 
 	/*
 	 * For unwritten extents we need to issue transactions to convert a
@@ -244,55 +268,33 @@
 	 * on error.
 	 */
 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
-		if (ioend->io_error)
+		if (error)
 			goto done;
 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
 						  ioend->io_size);
 	} else if (ioend->io_append_trans) {
-		error = xfs_setfilesize_ioend(ioend);
+		error = xfs_setfilesize_ioend(ioend, error);
 	} else {
 		ASSERT(!xfs_ioend_is_append(ioend));
 	}
 
 done:
-	if (error)
-		ioend->io_error = error;
-	xfs_destroy_ioend(ioend);
+	xfs_destroy_ioend(ioend, error);
 }
 
-/*
- * Allocate and initialise an IO completion structure.
- * We need to track unwritten extent write completion here initially.
- * We'll need to extend this for updating the ondisk inode size later
- * (vs. incore size).
- */
-STATIC xfs_ioend_t *
-xfs_alloc_ioend(
-	struct inode		*inode,
-	unsigned int		type)
+STATIC void
+xfs_end_bio(
+	struct bio		*bio)
 {
-	xfs_ioend_t		*ioend;
+	struct xfs_ioend	*ioend = bio->bi_private;
+	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
 
-	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
-
-	/*
-	 * Set the count to 1 initially, which will prevent an I/O
-	 * completion callback from happening before we have started
-	 * all the I/O from calling the completion routine too early.
-	 */
-	atomic_set(&ioend->io_remaining, 1);
-	ioend->io_error = 0;
-	INIT_LIST_HEAD(&ioend->io_list);
-	ioend->io_type = type;
-	ioend->io_inode = inode;
-	ioend->io_buffer_head = NULL;
-	ioend->io_buffer_tail = NULL;
-	ioend->io_offset = 0;
-	ioend->io_size = 0;
-	ioend->io_append_trans = NULL;
-
-	INIT_WORK(&ioend->io_work, xfs_end_io);
-	return ioend;
+	if (ioend->io_type == XFS_IO_UNWRITTEN)
+		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
+	else if (ioend->io_append_trans)
+		queue_work(mp->m_data_workqueue, &ioend->io_work);
+	else
+		xfs_destroy_ioend(ioend, bio->bi_error);
 }
 
 STATIC int
@@ -364,50 +366,6 @@
 		offset < imap->br_startoff + imap->br_blockcount;
 }
 
-/*
- * BIO completion handler for buffered IO.
- */
-STATIC void
-xfs_end_bio(
-	struct bio		*bio)
-{
-	xfs_ioend_t		*ioend = bio->bi_private;
-
-	if (!ioend->io_error)
-		ioend->io_error = bio->bi_error;
-
-	/* Toss bio and pass work off to an xfsdatad thread */
-	bio->bi_private = NULL;
-	bio->bi_end_io = NULL;
-	bio_put(bio);
-
-	xfs_finish_ioend(ioend);
-}
-
-STATIC void
-xfs_submit_ioend_bio(
-	struct writeback_control *wbc,
-	xfs_ioend_t		*ioend,
-	struct bio		*bio)
-{
-	atomic_inc(&ioend->io_remaining);
-	bio->bi_private = ioend;
-	bio->bi_end_io = xfs_end_bio;
-	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
-}
-
-STATIC struct bio *
-xfs_alloc_ioend_bio(
-	struct buffer_head	*bh)
-{
-	struct bio		*bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-
-	ASSERT(bio->bi_private == NULL);
-	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-	bio->bi_bdev = bh->b_bdev;
-	return bio;
-}
-
 STATIC void
 xfs_start_buffer_writeback(
 	struct buffer_head	*bh)
@@ -452,28 +410,35 @@
 }
 
 /*
- * Submit all of the bios for an ioend. We are only passed a single ioend at a
- * time; the caller is responsible for chaining prior to submission.
+ * Submit the bio for an ioend. We are passed an ioend with a bio attached to
+ * it, and we submit that bio. The ioend may be used for multiple bio
+ * submissions, so we only want to allocate an append transaction for the ioend
+ * once. In the case of multiple bio submission, each bio will take an IO
+ * reference to the ioend to ensure that the ioend completion is only done once
+ * all bios have been submitted and the ioend is really done.
  *
  * If @status is non-zero, it means that we have a situation where some part of
  * the submission process has failed after we have marked pages for writeback
- * and unlocked them. In this situation, we need to fail the ioend chain rather
- * than submit it to IO. This typically only happens on a filesystem shutdown.
+ * and unlocked them. In this situation, we need to fail the bio and ioend
+ * rather than submit it to IO. This typically only happens on a filesystem
+ * shutdown.
  */
 STATIC int
 xfs_submit_ioend(
 	struct writeback_control *wbc,
-	xfs_ioend_t		*ioend,
+	struct xfs_ioend	*ioend,
 	int			status)
 {
-	struct buffer_head	*bh;
-	struct bio		*bio;
-	sector_t		lastblock = 0;
-
 	/* Reserve log space if we might write beyond the on-disk inode size. */
 	if (!status &&
-	     ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+	    ioend->io_type != XFS_IO_UNWRITTEN &&
+	    xfs_ioend_is_append(ioend) &&
+	    !ioend->io_append_trans)
 		status = xfs_setfilesize_trans_alloc(ioend);
+
+	ioend->io_bio->bi_private = ioend;
+	ioend->io_bio->bi_end_io = xfs_end_bio;
+
 	/*
 	 * If we are failing the IO now, just mark the ioend with an
 	 * error and finish it. This will run IO completion immediately
@@ -481,35 +446,75 @@
 	 * time.
 	 */
 	if (status) {
-		ioend->io_error = status;
-		xfs_finish_ioend(ioend);
+		ioend->io_bio->bi_error = status;
+		bio_endio(ioend->io_bio);
 		return status;
 	}
 
-	bio = NULL;
-	for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
-
-		if (!bio) {
-retry:
-			bio = xfs_alloc_ioend_bio(bh);
-		} else if (bh->b_blocknr != lastblock + 1) {
-			xfs_submit_ioend_bio(wbc, ioend, bio);
-			goto retry;
-		}
-
-		if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
-			xfs_submit_ioend_bio(wbc, ioend, bio);
-			goto retry;
-		}
-
-		lastblock = bh->b_blocknr;
-	}
-	if (bio)
-		xfs_submit_ioend_bio(wbc, ioend, bio);
-	xfs_finish_ioend(ioend);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
+		   ioend->io_bio);
 	return 0;
 }
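
The ordering above is deliberate: bi_end_io and bi_private are wired up before the
status check, so an early failure is reported through bio_endio() and takes exactly
the same completion path as real I/O. A minimal userspace sketch of that shape; the
fake_io type and names below are illustrative, not kernel code:

#include <stdio.h>

/* Toy request whose completion always runs through one callback. */
struct fake_io {
	int		error;			/* completion status */
	void		(*end_io)(struct fake_io *);
	const char	*owner;			/* handler context */
};

static void end_io(struct fake_io *io)
{
	printf("completion for %s ran, error=%d\n", io->owner, io->error);
}

/*
 * Register the completion handler *before* checking for early failure, so the
 * error path and the normal submission path converge on the same handler.
 */
static int submit_fake_io(struct fake_io *io, int status)
{
	io->end_io = end_io;
	io->owner = "ioend";

	if (status) {
		io->error = status;
		io->end_io(io);		/* fail through the completion path */
		return status;
	}

	io->error = 0;
	io->end_io(io);			/* stand-in for actual submission */
	return 0;
}

int main(void)
{
	struct fake_io a = { 0 }, b = { 0 };

	submit_fake_io(&a, 0);		/* normal submission */
	submit_fake_io(&b, -5);		/* early failure, same handler runs */
	return 0;
}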
 
+static void
+xfs_init_bio_from_bh(
+	struct bio		*bio,
+	struct buffer_head	*bh)
+{
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_bdev = bh->b_bdev;
+}
+
+static struct xfs_ioend *
+xfs_alloc_ioend(
+	struct inode		*inode,
+	unsigned int		type,
+	xfs_off_t		offset,
+	struct buffer_head	*bh)
+{
+	struct xfs_ioend	*ioend;
+	struct bio		*bio;
+
+	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
+	xfs_init_bio_from_bh(bio, bh);
+
+	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
+	INIT_LIST_HEAD(&ioend->io_list);
+	ioend->io_type = type;
+	ioend->io_inode = inode;
+	ioend->io_size = 0;
+	ioend->io_offset = offset;
+	INIT_WORK(&ioend->io_work, xfs_end_io);
+	ioend->io_append_trans = NULL;
+	ioend->io_bio = bio;
+	return ioend;
+}
+
+/*
+ * Allocate a new bio, and chain the old bio to the new one.
+ *
+ * Note that we have to perform the chaining in this unintuitive order
+ * so that the bi_private linkage is set up in the right direction for the
+ * traversal in xfs_destroy_ioend().
+ */
+static void
+xfs_chain_bio(
+	struct xfs_ioend	*ioend,
+	struct writeback_control *wbc,
+	struct buffer_head	*bh)
+{
+	struct bio *new;
+
+	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+	xfs_init_bio_from_bh(new, bh);
+
+	bio_chain(ioend->io_bio, new);
+	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
+		   ioend->io_bio);
+	ioend->io_bio = new;
+}
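
The chaining order matters because xfs_destroy_ioend() walks forward from the
embedded io_inline_bio through bi_private, and the last bio's bi_private points back
at the ioend. A toy userspace model of that linkage and walk, with malloc-backed
segments standing in for bios; none of this is kernel code:

#include <stdio.h>
#include <stdlib.h>

/*
 * Each segment's priv points at the *next* segment, except the last one, whose
 * priv points back at the owning object.
 */
struct segment {
	int	id;
	void	*priv;
};

struct owner {
	struct segment	first;	/* embedded, like io_inline_bio */
	struct segment	*last;	/* currently open segment, like io_bio */
};

/* "Chain": hand off the current segment and start filling a new one. */
static void chain_segment(struct owner *o, int id)
{
	struct segment *new = malloc(sizeof(*new));

	new->id = id;
	new->priv = NULL;
	o->last->priv = new;	/* old segment now points at its successor */
	o->last = new;
}

/* "Destroy": walk the chain, grabbing next before releasing each segment. */
static void destroy_owner(struct owner *o)
{
	struct segment *seg, *next;

	o->last->priv = o;	/* final segment points back at the owner */

	for (seg = &o->first; seg; seg = next) {
		next = (seg == o->last) ? NULL : seg->priv;
		printf("completing segment %d\n", seg->id);
		if (seg != &o->first)
			free(seg);
	}
}

int main(void)
{
	struct owner o = { .first = { .id = 0 }, .last = &o.first };

	chain_segment(&o, 1);
	chain_segment(&o, 2);
	destroy_owner(&o);
	return 0;
}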
+
 /*
  * Test to see if we've been building up a completion structure for
  * earlier buffers -- if so, we try to append to this ioend if we
@@ -523,27 +528,24 @@
 	struct buffer_head	*bh,
 	xfs_off_t		offset,
 	struct xfs_writepage_ctx *wpc,
+	struct writeback_control *wbc,
 	struct list_head	*iolist)
 {
 	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
 	    bh->b_blocknr != wpc->last_block + 1 ||
 	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
-		struct xfs_ioend	*new;
-
 		if (wpc->ioend)
 			list_add(&wpc->ioend->io_list, iolist);
-
-		new = xfs_alloc_ioend(inode, wpc->io_type);
-		new->io_offset = offset;
-		new->io_buffer_head = bh;
-		new->io_buffer_tail = bh;
-		wpc->ioend = new;
-	} else {
-		wpc->ioend->io_buffer_tail->b_private = bh;
-		wpc->ioend->io_buffer_tail = bh;
+		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
 	}
 
-	bh->b_private = NULL;
+	/*
+	 * If the buffer doesn't fit into the bio we need to allocate a new
+	 * one.  This shouldn't happen more than once for a given buffer.
+	 */
+	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
+		xfs_chain_bio(wpc->ioend, wbc, bh);
+
 	wpc->ioend->io_size += bh->b_size;
 	wpc->last_block = bh->b_blocknr;
 	xfs_start_buffer_writeback(bh);
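
The loop above is the "append, and if the container is full, chain a fresh one and
retry" idiom, which terminates because a buffer always fits in an empty bio. A small
self-contained sketch of the same shape, assuming items never exceed the batch size:

#include <stdio.h>
#include <string.h>

/* Toy fixed-size batch standing in for a bio with limited segment space. */
struct batch {
	char	data[8];
	size_t	used;
};

/* Try to append; returns bytes added (0 when the item does not fit). */
static size_t batch_add(struct batch *b, const char *item, size_t len)
{
	if (b->used + len > sizeof(b->data))
		return 0;
	memcpy(b->data + b->used, item, len);
	b->used += len;
	return len;
}

static void batch_flush(struct batch *b)
{
	printf("flushing %zu bytes\n", b->used);
	b->used = 0;
}

int main(void)
{
	struct batch b = { .used = 0 };
	const char *items[] = { "aaaa", "bbbb", "cccc" };

	for (int i = 0; i < 3; i++) {
		/*
		 * Same shape as the loop in xfs_add_to_ioend(): if the item
		 * doesn't fit, flush (chain) the current batch and retry; this
		 * runs at most once more for a given item.
		 */
		while (batch_add(&b, items[i], strlen(items[i])) !=
		       strlen(items[i]))
			batch_flush(&b);
	}
	batch_flush(&b);
	return 0;
}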
@@ -803,7 +805,7 @@
 			lock_buffer(bh);
 			if (wpc->io_type != XFS_IO_OVERWRITE)
 				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
-			xfs_add_to_ioend(inode, bh, offset, wpc, &submit_list);
+			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
 			count++;
 		}
 
@@ -1391,13 +1393,10 @@
 
 		trace_xfs_end_io_direct_write_append(ip, offset, size);
 
-		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
-		if (error) {
-			xfs_trans_cancel(tp);
-			return error;
-		}
-		error = xfs_setfilesize(ip, tp, offset, size);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
+				&tp);
+		if (!error)
+			error = xfs_setfilesize(ip, tp, offset, size);
 	}
 
 	return error;
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index b442117..814aab7 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -18,7 +18,7 @@
 #ifndef __XFS_AOPS_H__
 #define __XFS_AOPS_H__
 
-extern mempool_t *xfs_ioend_pool;
+extern struct bio_set *xfs_ioend_bioset;
 
 /*
  * Types of I/O for bmap clustering and I/O completion tracking.
@@ -37,22 +37,19 @@
 	{ XFS_IO_OVERWRITE,		"overwrite" }
 
 /*
- * xfs_ioend struct manages large extent writes for XFS.
- * It can manage several multi-page bio's at once.
+ * Structure for buffered I/O completions.
  */
-typedef struct xfs_ioend {
+struct xfs_ioend {
 	struct list_head	io_list;	/* next ioend in chain */
 	unsigned int		io_type;	/* delalloc / unwritten */
-	int			io_error;	/* I/O error code */
-	atomic_t		io_remaining;	/* hold count */
 	struct inode		*io_inode;	/* file being written to */
-	struct buffer_head	*io_buffer_head;/* buffer linked list head */
-	struct buffer_head	*io_buffer_tail;/* buffer linked list tail */
 	size_t			io_size;	/* size of the extent */
 	xfs_off_t		io_offset;	/* offset in the file */
 	struct work_struct	io_work;	/* xfsdatad work queue */
 	struct xfs_trans	*io_append_trans;/* xact. for size update */
-} xfs_ioend_t;
+	struct bio		*io_bio;	/* bio being built */
+	struct bio		io_inline_bio;	/* MUST BE LAST! */
+};
 
 extern const struct address_space_operations xfs_address_space_operations;
 
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index dd48245..e3da5d4 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -112,8 +112,9 @@
  *========================================================================*/
 
 
+/* Return 0 on success, or -errno; other state communicated via *context */
 typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
-			      unsigned char *, int, int, unsigned char *);
+			      unsigned char *, int, int);
 
 typedef struct xfs_attr_list_context {
 	struct xfs_inode		*dp;		/* inode */
@@ -126,7 +127,6 @@
 	int				firstu;		/* first used byte in buffer */
 	int				flags;		/* from VOP call */
 	int				resynch;	/* T/F: resynch with cursor */
-	int				put_value;	/* T/F: need value for listent */
 	put_listent_func_t		put_listent;	/* list output fmt function */
 	int				index;		/* index into output buffer */
 } xfs_attr_list_context_t;
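
Under the new convention a ->put_listent implementation returns 0 or a negative
errno, and reports "buffer full, stop iterating" by setting seen_enough rather than
by returning non-zero. A cut-down userspace sketch of a callback and walk following
that convention; the types and names here are illustrative only:

#include <stdio.h>
#include <string.h>

/* Cut-down stand-in for xfs_attr_list_context. */
struct list_ctx {
	char	buf[32];
	size_t	used;
	int	seen_enough;	/* set by the callback to stop iteration */
};

/*
 * Return 0 on success or a negative errno on hard failure; running out of
 * buffer space is not an error, it just sets ctx->seen_enough.
 */
static int put_listent(struct list_ctx *ctx, const char *name, int namelen)
{
	if (ctx->used + namelen + 1 > sizeof(ctx->buf)) {
		ctx->seen_enough = 1;
		return 0;
	}
	memcpy(ctx->buf + ctx->used, name, namelen);
	ctx->buf[ctx->used + namelen] = '\0';
	ctx->used += namelen + 1;
	return 0;
}

int main(void)
{
	struct list_ctx ctx = { .used = 0, .seen_enough = 0 };
	const char *names[] = { "user.one", "user.two", "user.three",
				"user.four", "user.five" };
	int error = 0;

	for (int i = 0; i < 5 && !ctx.seen_enough; i++) {
		error = put_listent(&ctx, names[i], strlen(names[i]));
		if (error)	/* hard error: abort the walk */
			break;
	}
	printf("stored %zu bytes, seen_enough=%d, error=%d\n",
	       ctx.used, ctx.seen_enough, error);
	return 0;
}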
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 2bb959a..55d2149 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -405,21 +405,11 @@
 		goto out_destroy_fork;
 	xfs_iunlock(dp, lock_mode);
 
-	/*
-	 * Start our first transaction of the day.
-	 *
-	 * All future transactions during this code must be "chained" off
-	 * this one via the trans_dup() call.  All transactions will contain
-	 * the inode, and the inode will always be marked with trans_ihold().
-	 * Since the inode will be locked in all transactions, we must log
-	 * the inode in every transaction to let it float upward through
-	 * the log.
-	 */
 	lock_mode = 0;
-	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
-	error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrinval, 0, 0, 0, &trans);
 	if (error)
-		goto out_cancel;
+		goto out_destroy_fork;
 
 	lock_mode = XFS_ILOCK_EXCL;
 	xfs_ilock(dp, lock_mode);
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 4fa1482..d25f26b 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -106,18 +106,15 @@
 					   sfe->flags,
 					   sfe->nameval,
 					   (int)sfe->namelen,
-					   (int)sfe->valuelen,
-					   &sfe->nameval[sfe->namelen]);
-
+					   (int)sfe->valuelen);
+			if (error)
+				return error;
 			/*
 			 * Either search callback finished early or
 			 * didn't fit it all in the buffer after all.
 			 */
 			if (context->seen_enough)
 				break;
-
-			if (error)
-				return error;
 			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
 		}
 		trace_xfs_attr_list_sf_all(context);
@@ -200,8 +197,7 @@
 					sbp->flags,
 					sbp->name,
 					sbp->namelen,
-					sbp->valuelen,
-					&sbp->name[sbp->namelen]);
+					sbp->valuelen);
 		if (error) {
 			kmem_free(sbuf);
 			return error;
@@ -416,6 +412,9 @@
 	 */
 	retval = 0;
 	for (; i < ichdr.count; entry++, i++) {
+		char *name;
+		int namelen, valuelen;
+
 		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
 			cursor->hashval = be32_to_cpu(entry->hashval);
 			cursor->offset = 0;
@@ -425,56 +424,25 @@
 			continue;		/* skip incomplete entries */
 
 		if (entry->flags & XFS_ATTR_LOCAL) {
-			xfs_attr_leaf_name_local_t *name_loc =
-				xfs_attr3_leaf_name_local(leaf, i);
+			xfs_attr_leaf_name_local_t *name_loc;
 
-			retval = context->put_listent(context,
-						entry->flags,
-						name_loc->nameval,
-						(int)name_loc->namelen,
-						be16_to_cpu(name_loc->valuelen),
-						&name_loc->nameval[name_loc->namelen]);
-			if (retval)
-				return retval;
+			name_loc = xfs_attr3_leaf_name_local(leaf, i);
+			name = name_loc->nameval;
+			namelen = name_loc->namelen;
+			valuelen = be16_to_cpu(name_loc->valuelen);
 		} else {
-			xfs_attr_leaf_name_remote_t *name_rmt =
-				xfs_attr3_leaf_name_remote(leaf, i);
+			xfs_attr_leaf_name_remote_t *name_rmt;
 
-			int valuelen = be32_to_cpu(name_rmt->valuelen);
-
-			if (context->put_value) {
-				xfs_da_args_t args;
-
-				memset((char *)&args, 0, sizeof(args));
-				args.geo = context->dp->i_mount->m_attr_geo;
-				args.dp = context->dp;
-				args.whichfork = XFS_ATTR_FORK;
-				args.valuelen = valuelen;
-				args.rmtvaluelen = valuelen;
-				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
-				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
-				args.rmtblkcnt = xfs_attr3_rmt_blocks(
-							args.dp->i_mount, valuelen);
-				retval = xfs_attr_rmtval_get(&args);
-				if (!retval)
-					retval = context->put_listent(context,
-							entry->flags,
-							name_rmt->name,
-							(int)name_rmt->namelen,
-							valuelen,
-							args.value);
-				kmem_free(args.value);
-			} else {
-				retval = context->put_listent(context,
-						entry->flags,
-						name_rmt->name,
-						(int)name_rmt->namelen,
-						valuelen,
-						NULL);
-			}
-			if (retval)
-				return retval;
+			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+			name = name_rmt->name;
+			namelen = name_rmt->namelen;
+			valuelen = be32_to_cpu(name_rmt->valuelen);
 		}
+
+		retval = context->put_listent(context, entry->flags,
+					      name, namelen, valuelen);
+		if (retval)
+			break;
 		if (context->seen_enough)
 			break;
 		cursor->offset++;
@@ -551,8 +519,7 @@
 	int		flags,
 	unsigned char	*name,
 	int		namelen,
-	int		valuelen,
-	unsigned char	*value)
+	int		valuelen)
 {
 	struct attrlist *alist = (struct attrlist *)context->alist;
 	attrlist_ent_t *aep;
@@ -581,7 +548,7 @@
 		trace_xfs_attr_list_full(context);
 		alist->al_more = 1;
 		context->seen_enough = 1;
-		return 1;
+		return 0;
 	}
 
 	aep = (attrlist_ent_t *)&context->alist[context->firstu];
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 3b63098..586bb64 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -72,18 +72,11 @@
 	struct xfs_mount *mp = ip->i_mount;
 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
-	ssize_t		size = XFS_FSB_TO_B(mp, count_fsb);
 
-	if (IS_DAX(VFS_I(ip)))
-		return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
-				sector, size);
-
-	/*
-	 * let the block layer decide on the fastest method of
-	 * implementing the zeroing.
-	 */
-	return sb_issue_zeroout(mp->m_super, block, count_fsb, GFP_NOFS);
-
+	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+		block << (mp->m_super->s_blocksize_bits - 9),
+		count_fsb << (mp->m_super->s_blocksize_bits - 9),
+		GFP_NOFS, true);
 }
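
The shifts convert filesystem blocks into the 512-byte sectors that
blkdev_issue_zeroout() expects; with 4096-byte blocks (s_blocksize_bits = 12) each
block is 8 sectors. A tiny standalone check of the arithmetic, with example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int	blocksize_bits = 12;	/* 4096-byte blocks */
	uint64_t	start_block = 100;
	uint64_t	nr_blocks = 16;

	uint64_t start_sector = start_block << (blocksize_bits - 9);
	uint64_t nr_sectors = nr_blocks << (blocksize_bits - 9);

	/* 100 blocks * 8 sectors/block = sector 800; 16 blocks = 128 sectors */
	printf("zeroout sectors [%llu, +%llu)\n",
	       (unsigned long long)start_sector,
	       (unsigned long long)nr_sectors);
	return 0;
}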
 
 /*
@@ -900,19 +893,15 @@
 		 * Free them up now by truncating the file to
 		 * its current size.
 		 */
-		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-
 		if (need_iolock) {
-			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
-				xfs_trans_cancel(tp);
+			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
 				return -EAGAIN;
-			}
 		}
 
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
+				&tp);
 		if (error) {
 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
-			xfs_trans_cancel(tp);
 			if (need_iolock)
 				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 			return error;
@@ -1037,9 +1026,9 @@
 		/*
 		 * Allocate and setup the transaction.
 		 */
-		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
-					  resblks, resrtextents);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
+				resrtextents, 0, &tp);
+
 		/*
 		 * Check for running out of space
 		 */
@@ -1048,7 +1037,6 @@
 			 * Free the transaction structure.
 			 */
 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
-			xfs_trans_cancel(tp);
 			break;
 		}
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -1311,18 +1299,10 @@
 		 * transaction to dip into the reserve blocks to ensure
 		 * the freeing of the space succeeds at ENOSPC.
 		 */
-		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
-
-		/*
-		 * check for running out of space
-		 */
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
+				&tp);
 		if (error) {
-			/*
-			 * Free the transaction structure.
-			 */
 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
-			xfs_trans_cancel(tp);
 			break;
 		}
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -1482,19 +1462,16 @@
 	}
 
 	while (!error && !done) {
-		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
 		/*
 		 * We would need to reserve a permanent block for the transaction.
 		 * This comes into play when, after shifting an extent into a
 		 * hole, we find that adjacent extents can be merged, which
 		 * may lead to freeing of a block during the record update.
 		 */
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
-				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
-		if (error) {
-			xfs_trans_cancel(tp);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+		if (error)
 			break;
-		}
 
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
@@ -1747,12 +1724,9 @@
 	if (error)
 		goto out_unlock;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+	if (error)
 		goto out_unlock;
-	}
 
 	/*
 	 * Lock and join the inodes to the transaction so that transaction commit
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 9a2191b..e71cfbd 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1100,22 +1100,18 @@
 	return error;
 }
 
-STATIC void
+static void
 xfs_buf_bio_end_io(
 	struct bio		*bio)
 {
-	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
+	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;
 
 	/*
 	 * don't overwrite existing errors - otherwise we can lose errors on
 	 * buffers that require multiple bios to complete.
 	 */
-	if (bio->bi_error) {
-		spin_lock(&bp->b_lock);
-		if (!bp->b_io_error)
-			bp->b_io_error = bio->bi_error;
-		spin_unlock(&bp->b_lock);
-	}
+	if (bio->bi_error)
+		cmpxchg(&bp->b_io_error, 0, bio->bi_error);
 
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
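
Replacing the b_lock critical section with cmpxchg() keeps only the first non-zero
error when several bio completions race. The same idea in portable C11 atomics, as a
standalone sketch rather than kernel code:

#include <stdio.h>
#include <stdatomic.h>

/* Record only the *first* non-zero completion error, lock-free. */
static _Atomic int io_error;

static void record_error(int err)
{
	int expected = 0;

	if (err)
		atomic_compare_exchange_strong(&io_error, &expected, err);
}

int main(void)
{
	/* Simulate three bio completions reporting different results. */
	record_error(0);	/* success, ignored */
	record_error(-5);	/* first failure wins (-EIO) */
	record_error(-28);	/* later failure (-ENOSPC) does not overwrite */

	printf("first recorded error: %d\n", atomic_load(&io_error));
	return 0;
}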
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 4eb89bd..8bfb974 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -183,6 +183,26 @@
 	unsigned int		b_page_count;	/* size of page array */
 	unsigned int		b_offset;	/* page offset in first page */
 	int			b_error;	/* error code on I/O */
+
+	/*
+	 * async write failure retry count. Initialised to zero on the first
+	 * failure, then when it exceeds the maximum configured without a
+	 * failure; once it exceeds the configured maximum without an
+	 * intervening success, the write is considered to have failed
+	 * permanently and the iodone handler will take appropriate action.
+	 * For retry timeouts, we record the jiffie of the first failure. This
+	 * means that we can change the retry timeout for buffers already under
+	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
+	 *
+	 * last_error is used to ensure that we are getting repeated errors, not
+	 * different errors. e.g. a block device might change ENOSPC to EIO when
+	 * a failure timeout occurs, so we want to re-initialise the error
+	 * retry behaviour appropriately when that happens.
+	 */
+	int			b_retries;
+	unsigned long		b_first_retry_time; /* in jiffies */
+	int			b_last_error;
+
 	const struct xfs_buf_ops	*b_ops;
 
 #ifdef XFS_BUF_LOCK_TRACKING
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 99e91a0..3425799 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1042,35 +1042,22 @@
 	}
 }
 
-/*
- * This is the iodone() function for buffers which have had callbacks
- * attached to them by xfs_buf_attach_iodone().  It should remove each
- * log item from the buffer's list and call the callback of each in turn.
- * When done, the buffer's fsprivate field is set to NULL and the buffer
- * is unlocked with a call to iodone().
- */
-void
-xfs_buf_iodone_callbacks(
+static bool
+xfs_buf_iodone_callback_error(
 	struct xfs_buf		*bp)
 {
 	struct xfs_log_item	*lip = bp->b_fspriv;
 	struct xfs_mount	*mp = lip->li_mountp;
 	static ulong		lasttime;
 	static xfs_buftarg_t	*lasttarg;
-
-	if (likely(!bp->b_error))
-		goto do_callbacks;
+	struct xfs_error_cfg	*cfg;
 
 	/*
 	 * If we've already decided to shutdown the filesystem because of
 	 * I/O errors, there's no point in giving this a retry.
 	 */
-	if (XFS_FORCED_SHUTDOWN(mp)) {
-		xfs_buf_stale(bp);
-		bp->b_flags |= XBF_DONE;
-		trace_xfs_buf_item_iodone(bp, _RET_IP_);
-		goto do_callbacks;
-	}
+	if (XFS_FORCED_SHUTDOWN(mp))
+		goto out_stale;
 
 	if (bp->b_target != lasttarg ||
 	    time_after(jiffies, (lasttime + 5*HZ))) {
@@ -1079,45 +1066,93 @@
 	}
 	lasttarg = bp->b_target;
 
+	/* synchronous writes will have callers process the error */
+	if (!(bp->b_flags & XBF_ASYNC))
+		goto out_stale;
+
+	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+	ASSERT(bp->b_iodone != NULL);
+
 	/*
 	 * If the write was asynchronous then no one will be looking for the
-	 * error.  Clear the error state and write the buffer out again.
-	 *
-	 * XXX: This helps against transient write errors, but we need to find
-	 * a way to shut the filesystem down if the writes keep failing.
-	 *
-	 * In practice we'll shut the filesystem down soon as non-transient
-	 * errors tend to affect the whole device and a failing log write
-	 * will make us give up.  But we really ought to do better here.
+	 * error.  If this is the first failure of this type, clear the error
+	 * state and write the buffer out again. This means we always retry an
+	 * async write failure at least once, but we also need to set the buffer
+	 * up to behave correctly now for repeated failures.
 	 */
-	if (bp->b_flags & XBF_ASYNC) {
-		ASSERT(bp->b_iodone != NULL);
+	if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL)) ||
+	     bp->b_last_error != bp->b_error) {
+		bp->b_flags |= (XBF_WRITE | XBF_ASYNC |
+			        XBF_DONE | XBF_WRITE_FAIL);
+		bp->b_last_error = bp->b_error;
+		bp->b_retries = 0;
+		bp->b_first_retry_time = jiffies;
 
-		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
-
-		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
-
-		if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
-			bp->b_flags |= XBF_WRITE | XBF_ASYNC |
-				       XBF_DONE | XBF_WRITE_FAIL;
-			xfs_buf_submit(bp);
-		} else {
-			xfs_buf_relse(bp);
-		}
-
-		return;
+		xfs_buf_ioerror(bp, 0);
+		xfs_buf_submit(bp);
+		return true;
 	}
 
 	/*
-	 * If the write of the buffer was synchronous, we want to make
-	 * sure to return the error to the caller of xfs_bwrite().
+	 * Repeated failure on an async write. Take action according to the
+	 * error configuration we have been set up to use.
 	 */
+	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
+
+	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
+	    ++bp->b_retries > cfg->max_retries)
+			goto permanent_error;
+	if (cfg->retry_timeout &&
+	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
+			goto permanent_error;
+
+	/* At unmount we may treat errors differently */
+	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
+		goto permanent_error;
+
+	/* still a transient error, higher layers will retry */
+	xfs_buf_ioerror(bp, 0);
+	xfs_buf_relse(bp);
+	return true;
+
+	/*
+	 * Permanent error - we need to trigger a shutdown if we haven't already
+	 * to indicate that inconsistency will result from this action.
+	 */
+permanent_error:
+	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+out_stale:
 	xfs_buf_stale(bp);
 	bp->b_flags |= XBF_DONE;
-
 	trace_xfs_buf_error_relse(bp, _RET_IP_);
+	return false;
+}
 
-do_callbacks:
+/*
+ * This is the iodone() function for buffers which have had callbacks attached
+ * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
+ * callback list, mark the buffer as having no more callbacks and then push the
+ * buffer through IO completion processing.
+ */
+void
+xfs_buf_iodone_callbacks(
+	struct xfs_buf		*bp)
+{
+	/*
+	 * If there is an error, process it. Some errors require us
+	 * to run callbacks after failure processing is done so we
+	 * detect that and take appropriate action.
+	 */
+	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
+		return;
+
+	/*
+	 * Successful IO or permanent error. Either way, we can clear the
+	 * retry state here in preparation for the next error that may occur.
+	 */
+	bp->b_last_error = 0;
+	bp->b_retries = 0;
+
 	xfs_buf_do_callbacks(bp);
 	bp->b_fspriv = NULL;
 	bp->b_iodone = NULL;
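
The retry policy above reads as a small state machine: a different error code
restarts the accounting, while the same error repeating is bounded by a retry count
and by a timeout measured from the first failure. A standalone sketch of that
decision, with illustrative types loosely modelled on xfs_error_cfg and the new
xfs_buf fields, not the kernel code itself:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Error-retry configuration, loosely modelled on struct xfs_error_cfg. */
struct retry_cfg {
	int	max_retries;		/* negative: retry forever */
	double	retry_timeout_secs;	/* 0: no time limit */
};

/* Per-buffer retry state, loosely modelled on the new xfs_buf fields. */
struct retry_state {
	int	retries;
	int	last_error;
	time_t	first_retry_time;
};

static bool should_retry(struct retry_state *st, const struct retry_cfg *cfg,
			 int error, time_t now)
{
	if (st->last_error != error) {		/* new failure mode: reset */
		st->last_error = error;
		st->retries = 0;
		st->first_retry_time = now;
		return true;			/* always retry at least once */
	}

	if (cfg->max_retries >= 0 && ++st->retries > cfg->max_retries)
		return false;			/* permanent error */
	if (cfg->retry_timeout_secs &&
	    difftime(now, st->first_retry_time) > cfg->retry_timeout_secs)
		return false;			/* permanent error */
	return true;
}

int main(void)
{
	struct retry_cfg cfg = { .max_retries = 2, .retry_timeout_secs = 30 };
	struct retry_state st = { .last_error = 0 };
	time_t now = time(NULL);

	for (int i = 0; i < 5; i++)
		printf("attempt %d: retry=%d\n", i,
		       should_retry(&st, &cfg, -5, now));
	return 0;
}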
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 316b2a1..e064665 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -614,11 +614,10 @@
 	trace_xfs_dqread(dqp);
 
 	if (flags & XFS_QMOPT_DQALLOC) {
-		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
-					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
+				XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
 		if (error)
-			goto error1;
+			goto error0;
 	}
 
 	/*
@@ -692,7 +691,7 @@
  * end of the chunk, skip ahead to first id in next allocated chunk
  * using the SEEK_DATA interface.
  */
-int
+static int
 xfs_dq_get_next_id(
 	xfs_mount_t		*mp,
 	uint			type,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 85ce303..47fc632 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -145,12 +145,10 @@
 	struct xfs_trans	*tp;
 	int			error;
 
-	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
-	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
+			0, 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -1553,7 +1551,7 @@
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
+		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
 	} else {
 		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
 		ret = block_page_mkwrite_return(ret);
@@ -1587,7 +1585,7 @@
 		 * changes to xfs_get_blocks_direct() to map unwritten extent
 		 * ioend for conversion on read-only mappings.
 		 */
-		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
+		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
 	} else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1624,8 +1622,7 @@
 	}
 
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
-			      NULL);
+	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (flags & FAULT_FLAG_WRITE)
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index ee3aaa0a..b4d7582 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -198,14 +198,10 @@
 			return error;
 	}
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
-	tp->t_flags |= XFS_TRANS_RESERVE;
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
-				  XFS_GROWFS_SPACE_RES(mp), 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
+			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+	if (error)
 		return error;
-	}
 
 	/*
 	 * Write new AG headers to disk. Non-transactional, but written
@@ -243,8 +239,8 @@
 		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
 		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
 		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
-		agf->agf_flfirst = 0;
-		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
+		agf->agf_flfirst = cpu_to_be32(1);
+		agf->agf_fllast = 0;
 		agf->agf_flcount = 0;
 		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
 		agf->agf_freeblks = cpu_to_be32(tmpsize);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index bf2d607..99ee6eee 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -37,9 +37,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
-STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
-				struct xfs_perag *pag, struct xfs_inode *ip);
-
 /*
  * Allocate and initialise an xfs_inode.
  */
@@ -94,13 +91,6 @@
 	struct inode		*inode = container_of(head, struct inode, i_rcu);
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	kmem_zone_free(xfs_inode_zone, ip);
-}
-
-void
-xfs_inode_free(
-	struct xfs_inode	*ip)
-{
 	switch (VFS_I(ip)->i_mode & S_IFMT) {
 	case S_IFREG:
 	case S_IFDIR:
@@ -118,6 +108,25 @@
 		ip->i_itemp = NULL;
 	}
 
+	kmem_zone_free(xfs_inode_zone, ip);
+}
+
+static void
+__xfs_inode_free(
+	struct xfs_inode	*ip)
+{
+	/* asserts to verify all state is correct here */
+	ASSERT(atomic_read(&ip->i_pincount) == 0);
+	ASSERT(!xfs_isiflocked(ip));
+	XFS_STATS_DEC(ip->i_mount, vn_active);
+
+	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
+}
+
+void
+xfs_inode_free(
+	struct xfs_inode	*ip)
+{
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always
 	 * appears to be reclaimed with an invalid inode number when in the
@@ -129,12 +138,123 @@
 	ip->i_ino = 0;
 	spin_unlock(&ip->i_flags_lock);
 
-	/* asserts to verify all state is correct here */
-	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!xfs_isiflocked(ip));
-	XFS_STATS_DEC(ip->i_mount, vn_active);
+	__xfs_inode_free(ip);
+}
 
-	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs periodic sync default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_reclaim_work_queue(
+	struct xfs_mount        *mp)
+{
+
+	rcu_read_lock();
+	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
+			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+void
+xfs_reclaim_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(to_delayed_work(work),
+					struct xfs_mount, m_reclaim_work);
+
+	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+	xfs_reclaim_work_queue(mp);
+}
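
The requeue delay is derived from the periodic sync interval as
xfs_syncd_centisecs / 6 * 10 milliseconds, i.e. 5 seconds with the default of 3000
centiseconds (30s). A trivial standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int xfs_syncd_centisecs = 3000;	/* default: 30 seconds */
	unsigned int delay_ms = xfs_syncd_centisecs / 6 * 10;

	printf("reclaim pass every %u ms\n", delay_ms);	/* 5000 */
	return 0;
}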
+
+static void
+xfs_perag_set_reclaim_tag(
+	struct xfs_perag	*pag)
+{
+	struct xfs_mount	*mp = pag->pag_mount;
+
+	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	if (pag->pag_ici_reclaimable++)
+		return;
+
+	/* propagate the reclaim tag up into the perag radix tree */
+	spin_lock(&mp->m_perag_lock);
+	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
+			   XFS_ICI_RECLAIM_TAG);
+	spin_unlock(&mp->m_perag_lock);
+
+	/* schedule periodic background inode reclaim */
+	xfs_reclaim_work_queue(mp);
+
+	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
+
+static void
+xfs_perag_clear_reclaim_tag(
+	struct xfs_perag	*pag)
+{
+	struct xfs_mount	*mp = pag->pag_mount;
+
+	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	if (--pag->pag_ici_reclaimable)
+		return;
+
+	/* clear the reclaim tag from the perag radix tree */
+	spin_lock(&mp->m_perag_lock);
+	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
+			     XFS_ICI_RECLAIM_TAG);
+	spin_unlock(&mp->m_perag_lock);
+	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
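
The per-mount radix tree tag is only touched on the 0 -> 1 and 1 -> 0 transitions of
the per-AG reclaimable count, which is what the early returns on the post-increment
and pre-decrement express. A compact userspace model of that counted-tag pattern;
the names are illustrative:

#include <stdio.h>
#include <stdbool.h>

struct perag_model {
	int	reclaimable;	/* reclaimable inodes in this AG */
	bool	mount_tagged;	/* stand-in for the per-mount radix tree tag */
};

static void set_reclaim_tag(struct perag_model *pag)
{
	if (pag->reclaimable++)
		return;			/* tag already propagated */
	pag->mount_tagged = true;	/* 0 -> 1: propagate and kick worker */
	printf("AG tagged for reclaim\n");
}

static void clear_reclaim_tag(struct perag_model *pag)
{
	if (--pag->reclaimable)
		return;			/* other inodes still reclaimable */
	pag->mount_tagged = false;	/* 1 -> 0: nothing left, clear tag */
	printf("AG reclaim tag cleared\n");
}

int main(void)
{
	struct perag_model pag = { 0, false };

	set_reclaim_tag(&pag);		/* tags the AG */
	set_reclaim_tag(&pag);		/* count only */
	clear_reclaim_tag(&pag);	/* count only */
	clear_reclaim_tag(&pag);	/* clears the AG tag */
	return 0;
}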
+
+
+/*
+ * We set the inode flag atomically with the radix tree tag.
+ * Once we get tag lookups on the radix tree, this inode flag
+ * can go away.
+ */
+void
+xfs_inode_set_reclaim_tag(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_perag	*pag;
+
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+	spin_lock(&pag->pag_ici_lock);
+	spin_lock(&ip->i_flags_lock);
+
+	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
+			   XFS_ICI_RECLAIM_TAG);
+	xfs_perag_set_reclaim_tag(pag);
+	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+
+	spin_unlock(&ip->i_flags_lock);
+	spin_unlock(&pag->pag_ici_lock);
+	xfs_perag_put(pag);
+}
+
+STATIC void
+xfs_inode_clear_reclaim_tag(
+	struct xfs_perag	*pag,
+	xfs_ino_t		ino)
+{
+	radix_tree_tag_clear(&pag->pag_ici_root,
+			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
+			     XFS_ICI_RECLAIM_TAG);
+	xfs_perag_clear_reclaim_tag(pag);
 }
 
 /*
@@ -264,7 +384,7 @@
 		 */
 		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 		ip->i_flags |= XFS_INEW;
-		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
+		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
 		inode->i_state = I_NEW;
 
 		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
@@ -723,121 +843,6 @@
 }
 
 /*
- * Queue a new inode reclaim pass if there are reclaimable inodes and there
- * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs periodic sync default of 30s. Perhaps this should have it's own
- * tunable, but that can be done if this method proves to be ineffective or too
- * aggressive.
- */
-static void
-xfs_reclaim_work_queue(
-	struct xfs_mount        *mp)
-{
-
-	rcu_read_lock();
-	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
-		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
-			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
-	}
-	rcu_read_unlock();
-}
-
-/*
- * This is a fast pass over the inode cache to try to get reclaim moving on as
- * many inodes as possible in a short period of time. It kicks itself every few
- * seconds, as well as being kicked by the inode cache shrinker when memory
- * goes low. It scans as quickly as possible avoiding locked inodes or those
- * already being flushed, and once done schedules a future pass.
- */
-void
-xfs_reclaim_worker(
-	struct work_struct *work)
-{
-	struct xfs_mount *mp = container_of(to_delayed_work(work),
-					struct xfs_mount, m_reclaim_work);
-
-	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
-	xfs_reclaim_work_queue(mp);
-}
-
-static void
-__xfs_inode_set_reclaim_tag(
-	struct xfs_perag	*pag,
-	struct xfs_inode	*ip)
-{
-	radix_tree_tag_set(&pag->pag_ici_root,
-			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
-			   XFS_ICI_RECLAIM_TAG);
-
-	if (!pag->pag_ici_reclaimable) {
-		/* propagate the reclaim tag up into the perag radix tree */
-		spin_lock(&ip->i_mount->m_perag_lock);
-		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
-				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
-				XFS_ICI_RECLAIM_TAG);
-		spin_unlock(&ip->i_mount->m_perag_lock);
-
-		/* schedule periodic background inode reclaim */
-		xfs_reclaim_work_queue(ip->i_mount);
-
-		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
-							-1, _RET_IP_);
-	}
-	pag->pag_ici_reclaimable++;
-}
-
-/*
- * We set the inode flag atomically with the radix tree tag.
- * Once we get tag lookups on the radix tree, this inode flag
- * can go away.
- */
-void
-xfs_inode_set_reclaim_tag(
-	xfs_inode_t	*ip)
-{
-	struct xfs_mount *mp = ip->i_mount;
-	struct xfs_perag *pag;
-
-	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-	spin_lock(&pag->pag_ici_lock);
-	spin_lock(&ip->i_flags_lock);
-	__xfs_inode_set_reclaim_tag(pag, ip);
-	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-	spin_unlock(&ip->i_flags_lock);
-	spin_unlock(&pag->pag_ici_lock);
-	xfs_perag_put(pag);
-}
-
-STATIC void
-__xfs_inode_clear_reclaim(
-	xfs_perag_t	*pag,
-	xfs_inode_t	*ip)
-{
-	pag->pag_ici_reclaimable--;
-	if (!pag->pag_ici_reclaimable) {
-		/* clear the reclaim tag from the perag radix tree */
-		spin_lock(&ip->i_mount->m_perag_lock);
-		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
-				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
-				XFS_ICI_RECLAIM_TAG);
-		spin_unlock(&ip->i_mount->m_perag_lock);
-		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
-							-1, _RET_IP_);
-	}
-}
-
-STATIC void
-__xfs_inode_clear_reclaim_tag(
-	xfs_mount_t	*mp,
-	xfs_perag_t	*pag,
-	xfs_inode_t	*ip)
-{
-	radix_tree_tag_clear(&pag->pag_ici_root,
-			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
-	__xfs_inode_clear_reclaim(pag, ip);
-}
-
-/*
  * Grab the inode for reclaim exclusively.
  * Return 0 if we grabbed it, non-zero otherwise.
  */
@@ -929,6 +934,7 @@
 	int			sync_mode)
 {
 	struct xfs_buf		*bp = NULL;
+	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
 	int			error;
 
 restart:
@@ -993,6 +999,22 @@
 
 	xfs_iflock(ip);
 reclaim:
+	/*
+	 * Because we use RCU freeing we need to ensure the inode always appears
+	 * to be reclaimed with an invalid inode number when in the free state.
+	 * We do this as early as possible under the ILOCK and flush lock so
+	 * that xfs_iflush_cluster() can be guaranteed to detect races with us
+	 * here. By doing this, we guarantee that once xfs_iflush_cluster has
+	 * locked both the XFS_ILOCK and the flush lock that it will see either
+	 * a valid, flushable inode that will serialise correctly against the
+	 * locks below, or it will see a clean (and invalid) inode that it can
+	 * skip.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	ip->i_flags = XFS_IRECLAIM;
+	ip->i_ino = 0;
+	spin_unlock(&ip->i_flags_lock);
+
 	xfs_ifunlock(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
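
Clearing i_ino under i_flags_lock before any teardown is what lets
xfs_iflush_cluster() detect the race later by re-checking the inode number under the
same lock. A minimal userspace sketch of the pattern, using a pthread mutex in place
of the spinlock and made-up names:

#include <stdio.h>
#include <pthread.h>

struct fake_inode {
	pthread_mutex_t	flags_lock;
	unsigned long	ino;	/* 0 means "being reclaimed, skip me" */
};

static void reclaim_inode(struct fake_inode *ip)
{
	pthread_mutex_lock(&ip->flags_lock);
	ip->ino = 0;		/* published before any teardown work */
	pthread_mutex_unlock(&ip->flags_lock);
	/* ... actual teardown and freeing would happen after this point ... */
}

static int try_flush_inode(struct fake_inode *ip)
{
	int busy;

	pthread_mutex_lock(&ip->flags_lock);
	busy = (ip->ino == 0);	/* racing with reclaim: skip it */
	pthread_mutex_unlock(&ip->flags_lock);
	return busy ? -1 : 0;
}

int main(void)
{
	struct fake_inode ip = {
		.flags_lock	= PTHREAD_MUTEX_INITIALIZER,
		.ino		= 42,
	};

	printf("flush before reclaim: %d\n", try_flush_inode(&ip));	/* 0 */
	reclaim_inode(&ip);
	printf("flush after reclaim: %d\n", try_flush_inode(&ip));	/* -1 */
	return 0;
}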
 
@@ -1006,9 +1028,9 @@
 	 */
 	spin_lock(&pag->pag_ici_lock);
 	if (!radix_tree_delete(&pag->pag_ici_root,
-				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
+				XFS_INO_TO_AGINO(ip->i_mount, ino)))
 		ASSERT(0);
-	__xfs_inode_clear_reclaim(pag, ip);
+	xfs_perag_clear_reclaim_tag(pag);
 	spin_unlock(&pag->pag_ici_lock);
 
 	/*
@@ -1023,7 +1045,7 @@
 	xfs_qm_dqdetach(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
-	xfs_inode_free(ip);
+	__xfs_inode_free(ip);
 	return error;
 
 out_ifunlock:
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 96f606d..ee6799e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1030,7 +1030,7 @@
 			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
 		}
 
-		code = xfs_trans_roll(&tp, 0);
+		code = xfs_trans_roll(&tp, NULL);
 		if (committed != NULL)
 			*committed = 1;
 
@@ -1161,11 +1161,9 @@
 		rdev = 0;
 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
 		tres = &M_RES(mp)->tr_mkdir;
-		tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
 	} else {
 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
 		tres = &M_RES(mp)->tr_create;
-		tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
 	}
 
 	/*
@@ -1174,20 +1172,19 @@
 	 * the case we'll drop the one we have and get a more
 	 * appropriate transaction later.
 	 */
-	error = xfs_trans_reserve(tp, tres, resblks, 0);
+	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
 	if (error == -ENOSPC) {
 		/* flush outstanding delalloc blocks and retry */
 		xfs_flush_inodes(mp);
-		error = xfs_trans_reserve(tp, tres, resblks, 0);
+		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
 	}
 	if (error == -ENOSPC) {
 		/* No space at all so try a "no-allocation" reservation */
 		resblks = 0;
-		error = xfs_trans_reserve(tp, tres, 0, 0);
+		error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
 	}
 	if (error)
-		goto out_trans_cancel;
-
+		goto out_release_inode;
 
 	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
 		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
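
With allocation and reservation folded into one call, the ENOSPC fallback becomes a
simple cascade: try the full reservation, flush delalloc and retry, then fall back
to a zero-block "no allocation" reservation. A standalone sketch of that shape; the
pool and helper names below are made up for illustration:

#include <stdio.h>
#include <errno.h>

static int pool_blocks = 6;

static int reserve_blocks(int want)
{
	if (want > pool_blocks)
		return -ENOSPC;
	pool_blocks -= want;
	return 0;
}

static void flush_delalloc(void)
{
	pool_blocks += 2;	/* pretend writeback freed some space */
}

static int alloc_with_fallback(int resblks)
{
	int error = reserve_blocks(resblks);

	if (error == -ENOSPC) {
		flush_delalloc();
		error = reserve_blocks(resblks);
	}
	if (error == -ENOSPC)
		error = reserve_blocks(0);	/* no-allocation reservation */
	return error;
}

int main(void)
{
	printf("reserve 4: %d\n", alloc_with_fallback(4));	/* 0 */
	printf("reserve 8: %d\n", alloc_with_fallback(8));	/* 0 via fallback */
	return 0;
}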
@@ -1337,17 +1334,16 @@
 		return error;
 
 	resblks = XFS_IALLOC_SPACE_RES(mp);
-	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE_TMPFILE);
-
 	tres = &M_RES(mp)->tr_create_tmpfile;
-	error = xfs_trans_reserve(tp, tres, resblks, 0);
+
+	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
 	if (error == -ENOSPC) {
 		/* No space at all so try a "no-allocation" reservation */
 		resblks = 0;
-		error = xfs_trans_reserve(tp, tres, 0, 0);
+		error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
 	}
 	if (error)
-		goto out_trans_cancel;
+		goto out_release_inode;
 
 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
 						pdqp, resblks, 1, 0);
@@ -1432,15 +1428,14 @@
 	if (error)
 		goto std_return;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
 	if (error == -ENOSPC) {
 		resblks = 0;
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
 	}
 	if (error)
-		goto error_return;
+		goto std_return;
 
 	xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
@@ -1710,11 +1705,9 @@
 	struct xfs_trans	*tp;
 	int			error;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 	if (error) {
 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
-		xfs_trans_cancel(tp);
 		return error;
 	}
 
@@ -1764,8 +1757,6 @@
 	struct xfs_trans	*tp;
 	int			error;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-
 	/*
 	 * The ifree transaction might need to allocate blocks for record
 	 * insertion to the finobt. We don't want to fail here at ENOSPC, so
@@ -1781,9 +1772,8 @@
 	 * now remains allocated and sits on the unlinked list until the fs is
 	 * repaired.
 	 */
-	tp->t_flags |= XFS_TRANS_RESERVE;
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree,
-				  XFS_IFREE_SPACE_RES(mp), 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+			XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
 	if (error) {
 		if (error == -ENOSPC) {
 			xfs_warn_ratelimited(mp,
@@ -1792,7 +1782,6 @@
 		} else {
 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
 		}
-		xfs_trans_cancel(tp);
 		return error;
 	}
 
@@ -2525,11 +2514,6 @@
 	if (error)
 		goto std_return;
 
-	if (is_dir)
-		tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
-	else
-		tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
-
 	/*
 	 * We try to get the real space reservation first,
 	 * allowing for directory btree deletion(s) implying
@@ -2540,14 +2524,15 @@
 	 * block from the directory.
 	 */
 	resblks = XFS_REMOVE_SPACE_RES(mp);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
 	if (error == -ENOSPC) {
 		resblks = 0;
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
+				&tp);
 	}
 	if (error) {
 		ASSERT(error != -ENOSPC);
-		goto out_trans_cancel;
+		goto std_return;
 	}
 
 	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
@@ -2855,6 +2840,7 @@
 	 * and flag it as linkable.
 	 */
 	drop_nlink(VFS_I(tmpfile));
+	xfs_setup_iops(tmpfile);
 	xfs_finish_inode_setup(tmpfile);
 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
 
@@ -2910,15 +2896,15 @@
 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
 				inodes, &num_inodes);
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
 	if (error == -ENOSPC) {
 		spaceres = 0;
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
+				&tp);
 	}
 	if (error)
-		goto out_trans_cancel;
+		goto out_release_wip;
 
 	/*
 	 * Attach the dquots to the inodes
@@ -3155,6 +3141,7 @@
 	xfs_bmap_cancel(&free_list);
 out_trans_cancel:
 	xfs_trans_cancel(tp);
+out_release_wip:
 	if (wip)
 		IRELE(wip);
 	return error;
@@ -3162,16 +3149,16 @@
 
 STATIC int
 xfs_iflush_cluster(
-	xfs_inode_t	*ip,
-	xfs_buf_t	*bp)
+	struct xfs_inode	*ip,
+	struct xfs_buf		*bp)
 {
-	xfs_mount_t		*mp = ip->i_mount;
+	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_perag	*pag;
 	unsigned long		first_index, mask;
 	unsigned long		inodes_per_cluster;
-	int			ilist_size;
-	xfs_inode_t		**ilist;
-	xfs_inode_t		*iq;
+	int			cilist_size;
+	struct xfs_inode	**cilist;
+	struct xfs_inode	*cip;
 	int			nr_found;
 	int			clcount = 0;
 	int			bufwasdelwri;
@@ -3180,23 +3167,23 @@
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 
 	inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
-	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
-	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
-	if (!ilist)
+	cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
+	cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
+	if (!cilist)
 		goto out_put;
 
 	mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
 	rcu_read_lock();
 	/* really need a gang lookup range call here */
-	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
+	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
 					first_index, inodes_per_cluster);
 	if (nr_found == 0)
 		goto out_free;
 
 	for (i = 0; i < nr_found; i++) {
-		iq = ilist[i];
-		if (iq == ip)
+		cip = cilist[i];
+		if (cip == ip)
 			continue;
 
 		/*
@@ -3205,20 +3192,30 @@
 		 * We need to check under the i_flags_lock for a valid inode
 		 * here. Skip it if it is not valid or the wrong inode.
 		 */
-		spin_lock(&ip->i_flags_lock);
-		if (!ip->i_ino ||
-		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
-			spin_unlock(&ip->i_flags_lock);
+		spin_lock(&cip->i_flags_lock);
+		if (!cip->i_ino ||
+		    __xfs_iflags_test(cip, XFS_ISTALE)) {
+			spin_unlock(&cip->i_flags_lock);
 			continue;
 		}
-		spin_unlock(&ip->i_flags_lock);
+
+		/*
+		 * Once we fall off the end of the cluster, no point checking
+		 * any more inodes in the list because they will also all be
+		 * outside the cluster.
+		 */
+		if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
+			spin_unlock(&cip->i_flags_lock);
+			break;
+		}
+		spin_unlock(&cip->i_flags_lock);
 
 		/*
 		 * Do an un-protected check to see if the inode is dirty and
 		 * is a candidate for flushing.  These checks will be repeated
 		 * later after the appropriate locks are acquired.
 		 */
-		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
+		if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
 			continue;
 
 		/*
@@ -3226,15 +3223,28 @@
 		 * then this inode cannot be flushed and is skipped.
 		 */
 
-		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
+		if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
 			continue;
-		if (!xfs_iflock_nowait(iq)) {
-			xfs_iunlock(iq, XFS_ILOCK_SHARED);
+		if (!xfs_iflock_nowait(cip)) {
+			xfs_iunlock(cip, XFS_ILOCK_SHARED);
 			continue;
 		}
-		if (xfs_ipincount(iq)) {
-			xfs_ifunlock(iq);
-			xfs_iunlock(iq, XFS_ILOCK_SHARED);
+		if (xfs_ipincount(cip)) {
+			xfs_ifunlock(cip);
+			xfs_iunlock(cip, XFS_ILOCK_SHARED);
+			continue;
+		}
+
+
+		/*
+		 * Check the inode number again, just to be certain we are not
+		 * racing with freeing in xfs_reclaim_inode(). See the comments
+		 * in that function for more information as to why the initial
+		 * check is not sufficient.
+		 */
+		if (!cip->i_ino) {
+			xfs_ifunlock(cip);
+			xfs_iunlock(cip, XFS_ILOCK_SHARED);
 			continue;
 		}
 
@@ -3242,18 +3252,18 @@
 		 * arriving here means that this inode can be flushed.  First
 		 * re-check that it's dirty before flushing.
 		 */
-		if (!xfs_inode_clean(iq)) {
+		if (!xfs_inode_clean(cip)) {
 			int	error;
-			error = xfs_iflush_int(iq, bp);
+			error = xfs_iflush_int(cip, bp);
 			if (error) {
-				xfs_iunlock(iq, XFS_ILOCK_SHARED);
+				xfs_iunlock(cip, XFS_ILOCK_SHARED);
 				goto cluster_corrupt_out;
 			}
 			clcount++;
 		} else {
-			xfs_ifunlock(iq);
+			xfs_ifunlock(cip);
 		}
-		xfs_iunlock(iq, XFS_ILOCK_SHARED);
+		xfs_iunlock(cip, XFS_ILOCK_SHARED);
 	}
 
 	if (clcount) {
@@ -3263,7 +3273,7 @@
 
 out_free:
 	rcu_read_unlock();
-	kmem_free(ilist);
+	kmem_free(cilist);
 out_put:
 	xfs_perag_put(pag);
 	return 0;
@@ -3306,8 +3316,8 @@
 	/*
 	 * Unlocks the flush lock
 	 */
-	xfs_iflush_abort(iq, false);
-	kmem_free(ilist);
+	xfs_iflush_abort(cip, false);
+	kmem_free(cilist);
 	xfs_perag_put(pag);
 	return -EFSCORRUPTED;
 }
@@ -3327,7 +3337,7 @@
 	struct xfs_buf		**bpp)
 {
 	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_buf		*bp;
+	struct xfs_buf		*bp = NULL;
 	struct xfs_dinode	*dip;
 	int			error;
 
@@ -3369,14 +3379,22 @@
 	}
 
 	/*
-	 * Get the buffer containing the on-disk inode.
+	 * Get the buffer containing the on-disk inode. We are doing a try-lock
+	 * operation here, so we may get an EAGAIN error. In that case, we
+	 * simply want to return with the inode still dirty.
+	 *
+	 * If we get any other error, we effectively have a corruption situation
+	 * and we cannot flush the inode, so we treat it the same as failing
+	 * xfs_iflush_int().
 	 */
 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
 			       0);
-	if (error || !bp) {
+	if (error == -EAGAIN) {
 		xfs_ifunlock(ip);
 		return error;
 	}
+	if (error)
+		goto corrupt_out;
 
 	/*
 	 * First flush out the inode that xfs_iflush was called with.
@@ -3404,7 +3422,8 @@
 	return 0;
 
 corrupt_out:
-	xfs_buf_relse(bp);
+	if (bp)
+		xfs_buf_relse(bp);
 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 cluster_corrupt_out:
 	error = -EFSCORRUPTED;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 43e1d51..e52d7c7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -440,6 +440,9 @@
 
 
 /* from xfs_iops.c */
+extern void xfs_setup_inode(struct xfs_inode *ip);
+extern void xfs_setup_iops(struct xfs_inode *ip);
+
 /*
  * When setting up a newly allocated inode, we need to call
  * xfs_finish_inode_setup() once the inode is fully instantiated at
@@ -447,7 +450,6 @@
  * before we've completed instantiation. Otherwise we can do it
  * the moment the inode lookup is complete.
  */
-extern void xfs_setup_inode(struct xfs_inode *ip);
 static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
 {
 	xfs_iflags_clear(ip, XFS_INEW);
@@ -458,6 +460,7 @@
 static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
 {
 	xfs_setup_inode(ip);
+	xfs_setup_iops(ip);
 	xfs_finish_inode_setup(ip);
 }
 
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c48b5b1..a1b0761 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -210,7 +210,7 @@
 			 */
 			data_bytes = roundup(ip->i_df.if_bytes, 4);
 			ASSERT(ip->i_df.if_real_bytes == 0 ||
-			       ip->i_df.if_real_bytes == data_bytes);
+			       ip->i_df.if_real_bytes >= data_bytes);
 			ASSERT(ip->i_df.if_u1.if_data != NULL);
 			ASSERT(ip->i_d.di_size > 0);
 			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
@@ -305,7 +305,7 @@
 			 */
 			data_bytes = roundup(ip->i_afp->if_bytes, 4);
 			ASSERT(ip->i_afp->if_real_bytes == 0 ||
-			       ip->i_afp->if_real_bytes == data_bytes);
+			       ip->i_afp->if_real_bytes >= data_bytes);
 			ASSERT(ip->i_afp->if_u1.if_data != NULL);
 			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
 					ip->i_afp->if_u1.if_data,
@@ -479,6 +479,8 @@
 xfs_inode_item_push(
 	struct xfs_log_item	*lip,
 	struct list_head	*buffer_list)
+		__releases(&lip->li_ailp->xa_lock)
+		__acquires(&lip->li_ailp->xa_lock)
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index bcb6c19..dbca737 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -277,7 +277,6 @@
 {
 	struct dentry		*dentry;
 	__u32			olen;
-	void			*link;
 	int			error;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -288,7 +287,7 @@
 		return PTR_ERR(dentry);
 
 	/* Restrict this handle operation to symlinks only. */
-	if (!d_is_symlink(dentry)) {
+	if (!d_inode(dentry)->i_op->readlink) {
 		error = -EINVAL;
 		goto out_dput;
 	}
@@ -298,21 +297,8 @@
 		goto out_dput;
 	}
 
-	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
-	if (!link) {
-		error = -ENOMEM;
-		goto out_dput;
-	}
+	error = d_inode(dentry)->i_op->readlink(dentry, hreq->ohandle, olen);
 
-	error = xfs_readlink(XFS_I(d_inode(dentry)), link);
-	if (error)
-		goto out_kfree;
-	error = readlink_copy(hreq->ohandle, olen, link);
-	if (error)
-		goto out_kfree;
-
- out_kfree:
-	kfree(link);
  out_dput:
 	dput(dentry);
 	return error;
@@ -334,12 +320,10 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+	if (error)
 		return error;
-	}
+
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
@@ -1141,10 +1125,9 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		goto out_unlock;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 	if (error)
-		goto out_cancel;
+		return ERR_PTR(error);
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d81bdc0..5839135 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -132,6 +132,7 @@
 	int		error;
 	int		lockmode;
 	int		bmapi_flags = XFS_BMAPI_PREALLOC;
+	uint		tflags = 0;
 
 	rt = XFS_IS_REALTIME_INODE(ip);
 	extsz = xfs_get_extsz_hint(ip);
@@ -192,11 +193,6 @@
 		return error;
 
 	/*
-	 * Allocate and setup the transaction
-	 */
-	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-
-	/*
 	 * For DAX, we do not allocate unwritten extents, but instead we zero
 	 * the block before we commit the transaction.  Ideally we'd like to do
 	 * this outside the transaction context, but if we commit and then crash
@@ -209,23 +205,17 @@
 	 * the reserve block pool for bmbt block allocation if there is no space
 	 * left but we need to do unwritten extent conversion.
 	 */
-
 	if (IS_DAX(VFS_I(ip))) {
 		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
 		if (ISUNWRITTEN(imap)) {
-			tp->t_flags |= XFS_TRANS_RESERVE;
+			tflags |= XFS_TRANS_RESERVE;
 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
 		}
 	}
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
-				  resblks, resrtextents);
-	/*
-	 * Check for running out of space, note: need lock to return
-	 */
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
+			tflags, &tp);
+	if (error)
 		return error;
-	}
 
 	lockmode = XFS_ILOCK_EXCL;
 	xfs_ilock(ip, lockmode);
@@ -726,15 +716,13 @@
 
 		nimaps = 0;
 		while (nimaps == 0) {
-			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
-			tp->t_flags |= XFS_TRANS_RESERVE;
 			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
-			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
-						  nres, 0);
-			if (error) {
-				xfs_trans_cancel(tp);
+
+			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
+					0, XFS_TRANS_RESERVE, &tp);
+			if (error)
 				return error;
-			}
+
 			xfs_ilock(ip, XFS_ILOCK_EXCL);
 			xfs_trans_ijoin(tp, ip, 0);
 
@@ -878,25 +866,18 @@
 
 	do {
 		/*
-		 * set up a transaction to convert the range of extents
+		 * Set up a transaction to convert the range of extents
 		 * from unwritten to real. Do allocations in a loop until
 		 * we have covered the range passed in.
 		 *
-		 * Note that we open code the transaction allocation here
-		 * to pass KM_NOFS--we can't risk to recursing back into
-		 * the filesystem here as we might be asked to write out
-		 * the same inode that we complete here and might deadlock
-		 * on the iolock.
+		 * Note that we can't risk recursing back into the filesystem
+		 * here, as we might be asked to write out the same inode that
+		 * we complete here and might deadlock on the iolock.
 		 */
-		sb_start_intwrite(mp->m_super);
-		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
-		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
-					  resblks, 0);
-		if (error) {
-			xfs_trans_cancel(tp);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
+				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
+		if (error)
 			return error;
-		}
 
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		xfs_trans_ijoin(tp, ip, 0);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index fb7dc61..c5d4eba 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -181,6 +181,8 @@
 	}
 #endif
 
+	xfs_setup_iops(ip);
+
 	if (tmpfile)
 		d_tmpfile(dentry, inode);
 	else
@@ -368,6 +370,8 @@
 	if (unlikely(error))
 		goto out_cleanup_inode;
 
+	xfs_setup_iops(cip);
+
 	d_instantiate(dentry, inode);
 	xfs_finish_inode_setup(cip);
 	return 0;
@@ -442,6 +446,16 @@
 	return ERR_PTR(error);
 }
 
+STATIC const char *
+xfs_vn_get_link_inline(
+	struct dentry		*dentry,
+	struct inode		*inode,
+	struct delayed_call	*done)
+{
+	ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
+	return XFS_I(inode)->i_df.if_u1.if_data;
+}
+
 STATIC int
 xfs_vn_getattr(
 	struct vfsmount		*mnt,
@@ -599,12 +613,12 @@
 			return error;
 	}
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 	if (error)
-		goto out_trans_cancel;
+		goto out_dqrele;
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
 
 	/*
 	 * Change file ownership.  Must be the owner or privileged.
@@ -633,12 +647,10 @@
 						NULL, capable(CAP_FOWNER) ?
 						XFS_QMOPT_FORCE_RES : 0);
 			if (error)	/* out of quota */
-				goto out_unlock;
+				goto out_cancel;
 		}
 	}
 
-	xfs_trans_ijoin(tp, ip, 0);
-
 	/*
 	 * Change file ownership.  Must be the owner or privileged.
 	 */
@@ -722,10 +734,9 @@
 
 	return 0;
 
-out_unlock:
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out_trans_cancel:
+out_cancel:
 	xfs_trans_cancel(tp);
+out_dqrele:
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
 	return error;
@@ -834,7 +845,7 @@
 	 * We have to do all the page cache truncate work outside the
 	 * transaction context as the "lock" order is page lock->log space
 	 * reservation as defined by extent allocation in the writeback path.
-	 * Hence a truncate can fail with ENOMEM from xfs_trans_reserve(), but
+	 * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
 	 * having already truncated the in-memory version of the file (i.e. made
 	 * user visible changes). There's not much we can do about this, except
 	 * to hope that the caller sees ENOMEM and retries the truncate
@@ -849,10 +860,9 @@
 		return error;
 	truncate_setsize(inode, newsize);
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 	if (error)
-		goto out_trans_cancel;
+		return error;
 
 	lock_flags |= XFS_ILOCK_EXCL;
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -971,12 +981,9 @@
 
 	trace_xfs_update_time(ip);
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	if (flags & S_CTIME)
@@ -1167,6 +1174,18 @@
 	.update_time		= xfs_vn_update_time,
 };
 
+static const struct inode_operations xfs_inline_symlink_inode_operations = {
+	.readlink		= generic_readlink,
+	.get_link		= xfs_vn_get_link_inline,
+	.getattr		= xfs_vn_getattr,
+	.setattr		= xfs_vn_setattr,
+	.setxattr		= generic_setxattr,
+	.getxattr		= generic_getxattr,
+	.removexattr		= generic_removexattr,
+	.listxattr		= xfs_vn_listxattr,
+	.update_time		= xfs_vn_update_time,
+};
+
 STATIC void
 xfs_diflags_to_iflags(
 	struct inode		*inode,
@@ -1193,7 +1212,7 @@
 }
 
 /*
- * Initialize the Linux inode and set up the operation vectors.
+ * Initialize the Linux inode.
  *
  * When reading existing inodes from disk this is called directly from xfs_iget,
  * when creating a new inode it is called from xfs_ialloc after setting up the
@@ -1232,32 +1251,12 @@
 	i_size_write(inode, ip->i_d.di_size);
 	xfs_diflags_to_iflags(inode, ip);
 
-	ip->d_ops = ip->i_mount->m_nondir_inode_ops;
-	lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
-	switch (inode->i_mode & S_IFMT) {
-	case S_IFREG:
-		inode->i_op = &xfs_inode_operations;
-		inode->i_fop = &xfs_file_operations;
-		inode->i_mapping->a_ops = &xfs_address_space_operations;
-		break;
-	case S_IFDIR:
+	if (S_ISDIR(inode->i_mode)) {
 		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
-		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
-			inode->i_op = &xfs_dir_ci_inode_operations;
-		else
-			inode->i_op = &xfs_dir_inode_operations;
-		inode->i_fop = &xfs_dir_file_operations;
 		ip->d_ops = ip->i_mount->m_dir_inode_ops;
-		break;
-	case S_IFLNK:
-		inode->i_op = &xfs_symlink_inode_operations;
-		if (!(ip->i_df.if_flags & XFS_IFINLINE))
-			inode->i_mapping->a_ops = &xfs_address_space_operations;
-		break;
-	default:
-		inode->i_op = &xfs_inode_operations;
-		init_special_inode(inode, inode->i_mode, inode->i_rdev);
-		break;
+	} else {
+		ip->d_ops = ip->i_mount->m_nondir_inode_ops;
+		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
 	}
 
 	/*
@@ -1277,3 +1276,35 @@
 		cache_no_acl(inode);
 	}
 }
+
+void
+xfs_setup_iops(
+	struct xfs_inode	*ip)
+{
+	struct inode		*inode = &ip->i_vnode;
+
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_op = &xfs_inode_operations;
+		inode->i_fop = &xfs_file_operations;
+		inode->i_mapping->a_ops = &xfs_address_space_operations;
+		break;
+	case S_IFDIR:
+		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+			inode->i_op = &xfs_dir_ci_inode_operations;
+		else
+			inode->i_op = &xfs_dir_inode_operations;
+		inode->i_fop = &xfs_dir_file_operations;
+		break;
+	case S_IFLNK:
+		if (ip->i_df.if_flags & XFS_IFINLINE)
+			inode->i_op = &xfs_inline_symlink_inode_operations;
+		else
+			inode->i_op = &xfs_symlink_inode_operations;
+		break;
+	default:
+		inode->i_op = &xfs_inode_operations;
+		init_special_inode(inode, inode->i_mode, inode->i_rdev);
+		break;
+	}
+}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b49ccf5..bde02f1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -435,8 +435,7 @@
 	int		 	cnt,
 	struct xlog_ticket	**ticp,
 	__uint8_t	 	client,
-	bool			permanent,
-	uint		 	t_type)
+	bool			permanent)
 {
 	struct xlog		*log = mp->m_log;
 	struct xlog_ticket	*tic;
@@ -456,7 +455,6 @@
 	if (!tic)
 		return -ENOMEM;
 
-	tic->t_trans_type = t_type;
 	*ticp = tic;
 
 	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
@@ -823,8 +821,7 @@
 	} while (iclog != first_iclog);
 #endif
 	if (! (XLOG_FORCED_SHUTDOWN(log))) {
-		error = xfs_log_reserve(mp, 600, 1, &tic,
-					XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
+		error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
 		if (!error) {
 			/* the data section must be 32 bit size aligned */
 			struct {
@@ -2032,58 +2029,8 @@
 	    REG_TYPE_STR(ICREATE, "inode create")
 	};
 #undef REG_TYPE_STR
-#define TRANS_TYPE_STR(type)	[XFS_TRANS_##type] = #type
-	static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
-	    TRANS_TYPE_STR(SETATTR_NOT_SIZE),
-	    TRANS_TYPE_STR(SETATTR_SIZE),
-	    TRANS_TYPE_STR(INACTIVE),
-	    TRANS_TYPE_STR(CREATE),
-	    TRANS_TYPE_STR(CREATE_TRUNC),
-	    TRANS_TYPE_STR(TRUNCATE_FILE),
-	    TRANS_TYPE_STR(REMOVE),
-	    TRANS_TYPE_STR(LINK),
-	    TRANS_TYPE_STR(RENAME),
-	    TRANS_TYPE_STR(MKDIR),
-	    TRANS_TYPE_STR(RMDIR),
-	    TRANS_TYPE_STR(SYMLINK),
-	    TRANS_TYPE_STR(SET_DMATTRS),
-	    TRANS_TYPE_STR(GROWFS),
-	    TRANS_TYPE_STR(STRAT_WRITE),
-	    TRANS_TYPE_STR(DIOSTRAT),
-	    TRANS_TYPE_STR(WRITEID),
-	    TRANS_TYPE_STR(ADDAFORK),
-	    TRANS_TYPE_STR(ATTRINVAL),
-	    TRANS_TYPE_STR(ATRUNCATE),
-	    TRANS_TYPE_STR(ATTR_SET),
-	    TRANS_TYPE_STR(ATTR_RM),
-	    TRANS_TYPE_STR(ATTR_FLAG),
-	    TRANS_TYPE_STR(CLEAR_AGI_BUCKET),
-	    TRANS_TYPE_STR(SB_CHANGE),
-	    TRANS_TYPE_STR(DUMMY1),
-	    TRANS_TYPE_STR(DUMMY2),
-	    TRANS_TYPE_STR(QM_QUOTAOFF),
-	    TRANS_TYPE_STR(QM_DQALLOC),
-	    TRANS_TYPE_STR(QM_SETQLIM),
-	    TRANS_TYPE_STR(QM_DQCLUSTER),
-	    TRANS_TYPE_STR(QM_QINOCREATE),
-	    TRANS_TYPE_STR(QM_QUOTAOFF_END),
-	    TRANS_TYPE_STR(FSYNC_TS),
-	    TRANS_TYPE_STR(GROWFSRT_ALLOC),
-	    TRANS_TYPE_STR(GROWFSRT_ZERO),
-	    TRANS_TYPE_STR(GROWFSRT_FREE),
-	    TRANS_TYPE_STR(SWAPEXT),
-	    TRANS_TYPE_STR(CHECKPOINT),
-	    TRANS_TYPE_STR(ICREATE),
-	    TRANS_TYPE_STR(CREATE_TMPFILE)
-	};
-#undef TRANS_TYPE_STR
 
 	xfs_warn(mp, "xlog_write: reservation summary:");
-	xfs_warn(mp, "  trans type  = %s (%u)",
-		 ((ticket->t_trans_type <= 0 ||
-		   ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
-		  "bad-trans-type" : trans_type_str[ticket->t_trans_type]),
-		 ticket->t_trans_type);
 	xfs_warn(mp, "  unit res    = %d bytes",
 		 ticket->t_unit_res);
 	xfs_warn(mp, "  current res = %d bytes",
@@ -3378,7 +3325,7 @@
 {
 	int	error;
 
-	trace_xfs_log_force(mp, 0);
+	trace_xfs_log_force(mp, 0, _RET_IP_);
 	error = _xfs_log_force(mp, flags, NULL);
 	if (error)
 		xfs_warn(mp, "%s: error %d returned.", __func__, error);
@@ -3527,7 +3474,7 @@
 {
 	int	error;
 
-	trace_xfs_log_force(mp, lsn);
+	trace_xfs_log_force(mp, lsn, _RET_IP_);
 	error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
 	if (error)
 		xfs_warn(mp, "%s: error %d returned.", __func__, error);
@@ -3709,7 +3656,6 @@
 	tic->t_tid		= prandom_u32();
 	tic->t_clientid		= client;
 	tic->t_flags		= XLOG_TIC_INITED;
-	tic->t_trans_type	= 0;
 	if (permanent)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
 
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index aa533a7..80ba0c0 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -161,8 +161,7 @@
 			  int		   count,
 			  struct xlog_ticket **ticket,
 			  __uint8_t	   clientid,
-			  bool		   permanent,
-			  uint		   t_type);
+			  bool		   permanent);
 int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
 int	  xfs_log_unmount_write(struct xfs_mount *mp);
 void      xfs_log_unmount(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 4e76493..5e54e79 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -51,7 +51,6 @@
 
 	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
 				KM_SLEEP|KM_NOFS);
-	tic->t_trans_type = XFS_TRANS_CHECKPOINT;
 
 	/*
 	 * set the current reservation to zero so we know to steal the basic
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ed88963..765f084 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -175,7 +175,6 @@
 	char		   t_cnt;	 /* current count		 : 1  */
 	char		   t_clientid;	 /* who does this belong to;	 : 1  */
 	char		   t_flags;	 /* properties of reservation	 : 1  */
-	uint		   t_trans_type; /* transaction type             : 4  */
 
         /* reservation array fields */
 	uint		   t_res_num;                    /* num in array : 4 */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 396565f..8359978 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3843,7 +3843,7 @@
 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
 
-	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
+	ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
 	memcpy(&ptr[old_len], dp, len);
 	item->ri_buf[item->ri_cnt-1].i_len += len;
 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
@@ -4205,10 +4205,9 @@
 		}
 	}
 
-	tp = xfs_trans_alloc(mp, 0);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 	if (error)
-		goto abort_error;
+		return error;
 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
@@ -4355,10 +4354,9 @@
 	int		offset;
 	int		error;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
 	if (error)
-		goto out_abort;
+		goto out_error;
 
 	error = xfs_read_agi(mp, tp, agno, &agibp);
 	if (error)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index cfd4210..e39b023 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -89,7 +89,6 @@
 	if (hole < 0) {
 		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
 			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
-			xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
 			KM_SLEEP);
 		hole = xfs_uuid_table_size++;
 	}
@@ -681,6 +680,9 @@
 
 	xfs_set_maxicount(mp);
 
+	/* enable fail_at_unmount as default */
+	mp->m_fail_unmount = 1;
+
 	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
 	if (error)
 		goto out;
@@ -690,10 +692,15 @@
 	if (error)
 		goto out_remove_sysfs;
 
-	error = xfs_uuid_mount(mp);
+	error = xfs_error_sysfs_init(mp);
 	if (error)
 		goto out_del_stats;
 
+
+	error = xfs_uuid_mount(mp);
+	if (error)
+		goto out_remove_error_sysfs;
+
 	/*
 	 * Set the minimum read and write sizes
 	 */
@@ -957,6 +964,7 @@
 	cancel_delayed_work_sync(&mp->m_reclaim_work);
 	xfs_reclaim_inodes(mp, SYNC_WAIT);
  out_log_dealloc:
+	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
 	xfs_log_mount_cancel(mp);
  out_fail_wait:
 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
@@ -968,6 +976,8 @@
 	xfs_da_unmount(mp);
  out_remove_uuid:
 	xfs_uuid_unmount(mp);
+ out_remove_error_sysfs:
+	xfs_error_sysfs_del(mp);
  out_del_stats:
 	xfs_sysfs_del(&mp->m_stats.xs_kobj);
  out_remove_sysfs:
@@ -1006,6 +1016,14 @@
 	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	/*
+	 * We now need to tell the world we are unmounting. This will allow
+	 * us to detect that the filesystem is going away and we should error
+	 * out anything that we have been retrying in the background. This will
+	 * prevent never-ending retries in AIL pushing from hanging the unmount.
+	 */
+	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
+
+	/*
 	 * Flush all pending changes from the AIL.
 	 */
 	xfs_ail_push_all_sync(mp->m_ail);
@@ -1056,6 +1074,7 @@
 #endif
 	xfs_free_perag(mp);
 
+	xfs_error_sysfs_del(mp);
 	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 	xfs_sysfs_del(&mp->m_kobj);
 }
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index eafe257..c1b798c 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -37,6 +37,32 @@
 	XFS_LOWSP_MAX,
 };
 
+/*
+ * Error Configuration
+ *
+ * Error classes define the subsystem the configuration belongs to.
+ * Error numbers define the errors that are configurable.
+ */
+enum {
+	XFS_ERR_METADATA,
+	XFS_ERR_CLASS_MAX,
+};
+enum {
+	XFS_ERR_DEFAULT,
+	XFS_ERR_EIO,
+	XFS_ERR_ENOSPC,
+	XFS_ERR_ENODEV,
+	XFS_ERR_ERRNO_MAX,
+};
+
+#define XFS_ERR_RETRY_FOREVER	-1
+
+struct xfs_error_cfg {
+	struct xfs_kobj	kobj;
+	int		max_retries;
+	unsigned long	retry_timeout;	/* in jiffies, 0 = no timeout */
+};
+
 typedef struct xfs_mount {
 	struct super_block	*m_super;
 	xfs_tid_t		m_tid;		/* next unused tid for fs */
@@ -127,6 +153,9 @@
 	int64_t			m_low_space[XFS_LOWSP_MAX];
 						/* low free space thresholds */
 	struct xfs_kobj		m_kobj;
+	struct xfs_kobj		m_error_kobj;
+	struct xfs_kobj		m_error_meta_kobj;
+	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
 	struct xstats		m_stats;	/* per-fs stats */
 
 	struct workqueue_struct *m_buf_workqueue;
@@ -148,6 +177,7 @@
 	 */
 	__uint32_t		m_generation;
 
+	bool			m_fail_unmount;
 #ifdef DEBUG
 	/*
 	 * DEBUG mode instrumentation to test and/or trigger delayed allocation
@@ -166,6 +196,7 @@
 #define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
 						   must be synchronous except
 						   for space allocations */
+#define XFS_MOUNT_UNMOUNTING	(1ULL << 1)	/* filesystem is unmounting */
 #define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
 #define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
 						   operations, typically for
@@ -364,4 +395,7 @@
 int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
 			xfs_off_t count_fsb);
 
+struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
+		int error_class, int error);
+
 #endif	/* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 51ddaf2..d5b7566 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -308,12 +308,9 @@
 			goto out_drop_iolock;
 	}
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+	if (error)
 		goto out_drop_iolock;
-	}
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index be125e1..a60d9e2 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -783,13 +783,10 @@
 		}
 	}
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
-				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
+			XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	if (need_alloc) {
 		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index f4d0e0a..475a388 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -236,10 +236,8 @@
 
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 	if (error) {
-		xfs_trans_cancel(tp);
 		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 		goto out_put;
 	}
@@ -436,12 +434,9 @@
 	defq = xfs_get_defquota(dqp, q);
 	xfs_dqunlock(dqp);
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
+	if (error)
 		goto out_rele;
-	}
 
 	xfs_dqlock(dqp);
 	xfs_trans_dqjoin(tp, dqp);
@@ -569,13 +564,9 @@
 	int			error;
 	xfs_qoff_logitem_t	*qoffi;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
-
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
 					flags & XFS_ALL_QUOTA_ACCT);
@@ -603,12 +594,9 @@
 
 	*qoffstartp = NULL;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
+	if (error)
 		goto out;
-	}
 
 	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
 	xfs_trans_log_quotaoff_item(tp, qoffi);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index abf4443..3938b37 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -780,15 +780,14 @@
 	 * Allocate space to the file, as necessary.
 	 */
 	while (oblocks < nblocks) {
-		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
 		resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
 		/*
 		 * Reserve space & log for one extent added to the file.
 		 */
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtalloc,
-					  resblks, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtalloc, resblks,
+				0, 0, &tp);
 		if (error)
-			goto out_trans_cancel;
+			return error;
 		/*
 		 * Lock the inode.
 		 */
@@ -823,14 +822,13 @@
 		for (bno = map.br_startoff, fsbno = map.br_startblock;
 		     bno < map.br_startoff + map.br_blockcount;
 		     bno++, fsbno++) {
-			tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
 			/*
 			 * Reserve log for one block zeroing.
 			 */
-			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
-						  0, 0);
+			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtzero,
+					0, 0, 0, &tp);
 			if (error)
-				goto out_trans_cancel;
+				return error;
 			/*
 			 * Lock the bitmap inode.
 			 */
@@ -994,11 +992,10 @@
 		/*
 		 * Start a transaction, get the log reservation.
 		 */
-		tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE);
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtfree,
-					  0, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtfree, 0, 0, 0,
+				&tp);
 		if (error)
-			goto error_cancel;
+			break;
 		/*
 		 * Lock out other callers by grabbing the bitmap inode lock.
 		 */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 187e14b..11ea5d5 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -58,8 +58,7 @@
 #include <linux/parser.h>
 
 static const struct super_operations xfs_super_operations;
-static kmem_zone_t *xfs_ioend_zone;
-mempool_t *xfs_ioend_pool;
+struct bio_set *xfs_ioend_bioset;
 
 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
 #ifdef DEBUG
@@ -350,6 +349,7 @@
 		case Opt_pqnoenforce:
 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
 			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
+			break;
 		case Opt_gquota:
 		case Opt_grpquota:
 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
@@ -928,7 +928,7 @@
 
 /*
  * Now that the generic code is guaranteed not to be accessing
- * the linux inode, we can reclaim the inode.
+ * the linux inode, we can inactivate and reclaim the inode.
  */
 STATIC void
 xfs_fs_destroy_inode(
@@ -938,9 +938,14 @@
 
 	trace_xfs_destroy_inode(ip);
 
-	XFS_STATS_INC(ip->i_mount, vn_reclaim);
+	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+	XFS_STATS_INC(ip->i_mount, vn_rele);
+	XFS_STATS_INC(ip->i_mount, vn_remove);
+
+	xfs_inactive(ip);
 
 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+	XFS_STATS_INC(ip->i_mount, vn_reclaim);
 
 	/*
 	 * We should never get here with one of the reclaim flags already set.
@@ -987,24 +992,6 @@
 		     "xfsino", ip->i_ino);
 }
 
-STATIC void
-xfs_fs_evict_inode(
-	struct inode		*inode)
-{
-	xfs_inode_t		*ip = XFS_I(inode);
-
-	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
-
-	trace_xfs_evict_inode(ip);
-
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
-	XFS_STATS_INC(ip->i_mount, vn_rele);
-	XFS_STATS_INC(ip->i_mount, vn_remove);
-
-	xfs_inactive(ip);
-}
-
 /*
  * We do an unlocked check for XFS_IDONTCACHE here because we are already
  * serialised against cache hits here via the inode->i_lock and igrab() in
@@ -1276,6 +1263,16 @@
 			return -EINVAL;
 		}
 
+		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+		    xfs_sb_has_ro_compat_feature(sbp,
+					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+			xfs_warn(mp,
+"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
+				(sbp->sb_features_ro_compat &
+					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+			return -EINVAL;
+		}
+
 		mp->m_flags &= ~XFS_MOUNT_RDONLY;
 
 		/*
@@ -1558,14 +1555,12 @@
 
 	if (mp->m_flags & XFS_MOUNT_DAX) {
 		xfs_warn(mp,
-	"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
-		if (sb->s_blocksize != PAGE_SIZE) {
+		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+
+		error = bdev_dax_supported(sb, sb->s_blocksize);
+		if (error) {
 			xfs_alert(mp,
-		"Filesystem block size invalid for DAX Turning DAX off.");
-			mp->m_flags &= ~XFS_MOUNT_DAX;
-		} else if (!sb->s_bdev->bd_disk->fops->direct_access) {
-			xfs_alert(mp,
-		"Block device does not support DAX Turning DAX off.");
+			"DAX unsupported by block device. Turning off DAX.");
 			mp->m_flags &= ~XFS_MOUNT_DAX;
 		}
 	}
@@ -1663,7 +1658,6 @@
 static const struct super_operations xfs_super_operations = {
 	.alloc_inode		= xfs_fs_alloc_inode,
 	.destroy_inode		= xfs_fs_destroy_inode,
-	.evict_inode		= xfs_fs_evict_inode,
 	.drop_inode		= xfs_fs_drop_inode,
 	.put_super		= xfs_fs_put_super,
 	.sync_fs		= xfs_fs_sync_fs,
@@ -1688,20 +1682,15 @@
 STATIC int __init
 xfs_init_zones(void)
 {
-
-	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
-	if (!xfs_ioend_zone)
+	xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
+			offsetof(struct xfs_ioend, io_inline_bio));
+	if (!xfs_ioend_bioset)
 		goto out;
 
-	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
-						  xfs_ioend_zone);
-	if (!xfs_ioend_pool)
-		goto out_destroy_ioend_zone;
-
 	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
 						"xfs_log_ticket");
 	if (!xfs_log_ticket_zone)
-		goto out_destroy_ioend_pool;
+		goto out_free_ioend_bioset;
 
 	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
 						"xfs_bmap_free_item");
@@ -1797,10 +1786,8 @@
 	kmem_zone_destroy(xfs_bmap_free_item_zone);
  out_destroy_log_ticket_zone:
 	kmem_zone_destroy(xfs_log_ticket_zone);
- out_destroy_ioend_pool:
-	mempool_destroy(xfs_ioend_pool);
- out_destroy_ioend_zone:
-	kmem_zone_destroy(xfs_ioend_zone);
+ out_free_ioend_bioset:
+	bioset_free(xfs_ioend_bioset);
  out:
 	return -ENOMEM;
 }
@@ -1826,9 +1813,7 @@
 	kmem_zone_destroy(xfs_btree_cur_zone);
 	kmem_zone_destroy(xfs_bmap_free_item_zone);
 	kmem_zone_destroy(xfs_log_ticket_zone);
-	mempool_destroy(xfs_ioend_pool);
-	kmem_zone_destroy(xfs_ioend_zone);
-
+	bioset_free(xfs_ioend_bioset);
 }
 
 STATIC int __init
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index b44284c..08a46c6 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -131,6 +131,8 @@
 
 	trace_xfs_readlink(ip);
 
+	ASSERT(!(ip->i_df.if_flags & XFS_IFINLINE));
+
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
@@ -150,12 +152,7 @@
 	}
 
 
-	if (ip->i_df.if_flags & XFS_IFINLINE) {
-		memcpy(link, ip->i_df.if_u1.if_data, pathlen);
-		link[pathlen] = '\0';
-	} else {
-		error = xfs_readlink_bmap(ip, link);
-	}
+	error = xfs_readlink_bmap(ip, link);
 
  out:
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -221,7 +218,6 @@
 	if (error)
 		return error;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
 	/*
 	 * The symlink will fit into the inode data fork?
 	 * There can't be any attributes so we get the whole variable part.
@@ -231,13 +227,15 @@
 	else
 		fs_blocks = xfs_symlink_blocks(mp, pathlen);
 	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
 	if (error == -ENOSPC && fs_blocks == 0) {
 		resblks = 0;
-		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
+				&tp);
 	}
 	if (error)
-		goto out_trans_cancel;
+		goto out_release_inode;
 
 	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
 		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
@@ -302,19 +300,11 @@
 	 * If the symlink will fit into the inode, write it inline.
 	 */
 	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
-		xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
-		memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
+		xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);
+
 		ip->i_d.di_size = pathlen;
-
-		/*
-		 * The inode was initially created in extent format.
-		 */
-		ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
-		ip->i_df.if_flags |= XFS_IFINLINE;
-
 		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
 		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
-
 	} else {
 		int	offset;
 
@@ -455,12 +445,9 @@
 	 */
 	ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+	if (error)
 		return error;
-	}
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, 0);
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 6ced4f1..4c2c550 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -17,10 +17,11 @@
  */
 
 #include "xfs.h"
-#include "xfs_sysfs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
+#include "xfs_sysfs.h"
 #include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_stats.h"
@@ -362,3 +363,291 @@
 	.sysfs_ops = &xfs_sysfs_ops,
 	.default_attrs = xfs_log_attrs,
 };
+
+/*
+ * Metadata IO error configuration
+ *
+ * The sysfs structure here is:
+ *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
+ *
+ * where <class> allows us to discriminate between data IO and metadata IO,
+ * and any other future type of IO (e.g. special inode or directory error
+ * handling) we care to support.
+ */
+static inline struct xfs_error_cfg *
+to_error_cfg(struct kobject *kobject)
+{
+	struct xfs_kobj *kobj = to_kobj(kobject);
+	return container_of(kobj, struct xfs_error_cfg, kobj);
+}
+
+static inline struct xfs_mount *
+err_to_mp(struct kobject *kobject)
+{
+	struct xfs_kobj *kobj = to_kobj(kobject);
+	return container_of(kobj, struct xfs_mount, m_error_kobj);
+}
+
+static ssize_t
+max_retries_show(
+	struct kobject	*kobject,
+	char		*buf)
+{
+	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", cfg->max_retries);
+}
+
+static ssize_t
+max_retries_store(
+	struct kobject	*kobject,
+	const char	*buf,
+	size_t		count)
+{
+	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+	int		ret;
+	int		val;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	if (val < -1)
+		return -EINVAL;
+
+	cfg->max_retries = val;
+	return count;
+}
+XFS_SYSFS_ATTR_RW(max_retries);
+
+static ssize_t
+retry_timeout_seconds_show(
+	struct kobject	*kobject,
+	char		*buf)
+{
+	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+
+	return snprintf(buf, PAGE_SIZE, "%ld\n",
+			jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC);
+}
+
+static ssize_t
+retry_timeout_seconds_store(
+	struct kobject	*kobject,
+	const char	*buf,
+	size_t		count)
+{
+	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+	int		ret;
+	int		val;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	/* 1 day timeout maximum */
+	if (val < 0 || val > 86400)
+		return -EINVAL;
+
+	cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
+	return count;
+}
+XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
+
+static ssize_t
+fail_at_unmount_show(
+	struct kobject	*kobject,
+	char		*buf)
+{
+	struct xfs_mount	*mp = err_to_mp(kobject);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
+}
+
+static ssize_t
+fail_at_unmount_store(
+	struct kobject	*kobject,
+	const char	*buf,
+	size_t		count)
+{
+	struct xfs_mount	*mp = err_to_mp(kobject);
+	int		ret;
+	int		val;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	if (val < 0 || val > 1)
+		return -EINVAL;
+
+	mp->m_fail_unmount = val;
+	return count;
+}
+XFS_SYSFS_ATTR_RW(fail_at_unmount);
+
+static struct attribute *xfs_error_attrs[] = {
+	ATTR_LIST(max_retries),
+	ATTR_LIST(retry_timeout_seconds),
+	NULL,
+};
+
+
+struct kobj_type xfs_error_cfg_ktype = {
+	.release = xfs_sysfs_release,
+	.sysfs_ops = &xfs_sysfs_ops,
+	.default_attrs = xfs_error_attrs,
+};
+
+struct kobj_type xfs_error_ktype = {
+	.release = xfs_sysfs_release,
+	.sysfs_ops = &xfs_sysfs_ops,
+};
+
+/*
+ * Error initialization tables. These need to be ordered in the same
+ * order as the enums used to index the array. All class init tables need to
+ * define a "default" behaviour as the first entry, all other entries can be
+ * empty.
+ */
+struct xfs_error_init {
+	char		*name;
+	int		max_retries;
+	int		retry_timeout;	/* in seconds */
+};
+
+static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
+	{ .name = "default",
+	  .max_retries = XFS_ERR_RETRY_FOREVER,
+	  .retry_timeout = 0,
+	},
+	{ .name = "EIO",
+	  .max_retries = XFS_ERR_RETRY_FOREVER,
+	  .retry_timeout = 0,
+	},
+	{ .name = "ENOSPC",
+	  .max_retries = XFS_ERR_RETRY_FOREVER,
+	  .retry_timeout = 0,
+	},
+	{ .name = "ENODEV",
+	  .max_retries = 0,
+	},
+};
+
+static int
+xfs_error_sysfs_init_class(
+	struct xfs_mount	*mp,
+	int			class,
+	const char		*parent_name,
+	struct xfs_kobj		*parent_kobj,
+	const struct xfs_error_init init[])
+{
+	struct xfs_error_cfg	*cfg;
+	int			error;
+	int			i;
+
+	ASSERT(class < XFS_ERR_CLASS_MAX);
+
+	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
+				&mp->m_error_kobj, parent_name);
+	if (error)
+		return error;
+
+	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
+		cfg = &mp->m_error_cfg[class][i];
+		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
+					parent_kobj, init[i].name);
+		if (error)
+			goto out_error;
+
+		cfg->max_retries = init[i].max_retries;
+		cfg->retry_timeout = msecs_to_jiffies(
+					init[i].retry_timeout * MSEC_PER_SEC);
+	}
+	return 0;
+
+out_error:
+	/* unwind the entries that succeeded */
+	for (i--; i >= 0; i--) {
+		cfg = &mp->m_error_cfg[class][i];
+		xfs_sysfs_del(&cfg->kobj);
+	}
+	xfs_sysfs_del(parent_kobj);
+	return error;
+}
+
+int
+xfs_error_sysfs_init(
+	struct xfs_mount	*mp)
+{
+	int			error;
+
+	/* .../xfs/<dev>/error/ */
+	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
+				&mp->m_kobj, "error");
+	if (error)
+		return error;
+
+	error = sysfs_create_file(&mp->m_error_kobj.kobject,
+				  ATTR_LIST(fail_at_unmount));
+
+	if (error)
+		goto out_error;
+
+	/* .../xfs/<dev>/error/metadata/ */
+	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
+				"metadata", &mp->m_error_meta_kobj,
+				xfs_error_meta_init);
+	if (error)
+		goto out_error;
+
+	return 0;
+
+out_error:
+	xfs_sysfs_del(&mp->m_error_kobj);
+	return error;
+}
+
+void
+xfs_error_sysfs_del(
+	struct xfs_mount	*mp)
+{
+	struct xfs_error_cfg	*cfg;
+	int			i, j;
+
+	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
+		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
+			cfg = &mp->m_error_cfg[i][j];
+
+			xfs_sysfs_del(&cfg->kobj);
+		}
+	}
+	xfs_sysfs_del(&mp->m_error_meta_kobj);
+	xfs_sysfs_del(&mp->m_error_kobj);
+}
+
+struct xfs_error_cfg *
+xfs_error_get_cfg(
+	struct xfs_mount	*mp,
+	int			error_class,
+	int			error)
+{
+	struct xfs_error_cfg	*cfg;
+
+	switch (error) {
+	case EIO:
+		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
+		break;
+	case ENOSPC:
+		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
+		break;
+	case ENODEV:
+		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
+		break;
+	default:
+		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
+		break;
+	}
+
+	return cfg;
+}
diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
index be692e5..d046371 100644
--- a/fs/xfs/xfs_sysfs.h
+++ b/fs/xfs/xfs_sysfs.h
@@ -58,4 +58,7 @@
 	wait_for_completion(&kobj->complete);
 }
 
+int	xfs_error_sysfs_init(struct xfs_mount *mp);
+void	xfs_error_sysfs_del(struct xfs_mount *mp);
+
 #endif	/* __XFS_SYSFS_H__ */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index c8d5842..ea94ee0 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -364,7 +364,6 @@
 DEFINE_BUF_EVENT(xfs_buf_get_uncached);
 DEFINE_BUF_EVENT(xfs_bdstrat_shut);
 DEFINE_BUF_EVENT(xfs_buf_item_relse);
-DEFINE_BUF_EVENT(xfs_buf_item_iodone);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
 DEFINE_BUF_EVENT(xfs_buf_error_relse);
 DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
@@ -944,7 +943,6 @@
 	TP_ARGS(log, tic),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(unsigned, trans_type)
 		__field(char, ocnt)
 		__field(char, cnt)
 		__field(int, curr_res)
@@ -962,7 +960,6 @@
 	),
 	TP_fast_assign(
 		__entry->dev = log->l_mp->m_super->s_dev;
-		__entry->trans_type = tic->t_trans_type;
 		__entry->ocnt = tic->t_ocnt;
 		__entry->cnt = tic->t_cnt;
 		__entry->curr_res = tic->t_curr_res;
@@ -980,14 +977,13 @@
 		__entry->curr_block = log->l_curr_block;
 		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
 	),
-	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
+	TP_printk("dev %d:%d t_ocnt %u t_cnt %u t_curr_res %u "
 		  "t_unit_res %u t_flags %s reserveq %s "
 		  "writeq %s grant_reserve_cycle %d "
 		  "grant_reserve_bytes %d grant_write_cycle %d "
 		  "grant_write_bytes %d curr_cycle %d curr_block %d "
 		  "tail_cycle %d tail_block %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
 		  __entry->ocnt,
 		  __entry->cnt,
 		  __entry->curr_res,
@@ -1053,19 +1049,21 @@
 )
 
 TRACE_EVENT(xfs_log_force,
-	TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn),
-	TP_ARGS(mp, lsn),
+	TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn, unsigned long caller_ip),
+	TP_ARGS(mp, lsn, caller_ip),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_lsn_t, lsn)
+		__field(unsigned long, caller_ip)
 	),
 	TP_fast_assign(
 		__entry->dev = mp->m_super->s_dev;
 		__entry->lsn = lsn;
+		__entry->caller_ip = caller_ip;
 	),
-	TP_printk("dev %d:%d lsn 0x%llx",
+	TP_printk("dev %d:%d lsn 0x%llx caller %ps",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->lsn)
+		  __entry->lsn, (void *)__entry->caller_ip)
 )
 
 #define DEFINE_LOG_ITEM_EVENT(name) \
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 20c5366..5f3d33d 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -47,47 +47,6 @@
 }
 
 /*
- * This routine is called to allocate a transaction structure.
- * The type parameter indicates the type of the transaction.  These
- * are enumerated in xfs_trans.h.
- *
- * Dynamically allocate the transaction structure from the transaction
- * zone, initialize it, and return it to the caller.
- */
-xfs_trans_t *
-xfs_trans_alloc(
-	xfs_mount_t	*mp,
-	uint		type)
-{
-	xfs_trans_t     *tp;
-
-	sb_start_intwrite(mp->m_super);
-	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
-	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
-	return tp;
-}
-
-xfs_trans_t *
-_xfs_trans_alloc(
-	xfs_mount_t	*mp,
-	uint		type,
-	xfs_km_flags_t	memflags)
-{
-	xfs_trans_t	*tp;
-
-	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
-	atomic_inc(&mp->m_active_trans);
-
-	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
-	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
-	tp->t_type = type;
-	tp->t_mountp = mp;
-	INIT_LIST_HEAD(&tp->t_items);
-	INIT_LIST_HEAD(&tp->t_busy);
-	return tp;
-}
-
-/*
  * Free the transaction structure.  If there is more clean up
  * to do when the structure is freed, add it here.
  */
@@ -99,7 +58,7 @@
 	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
 
 	atomic_dec(&tp->t_mountp->m_active_trans);
-	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
+	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
 		sb_end_intwrite(tp->t_mountp->m_super);
 	xfs_trans_free_dqinfo(tp);
 	kmem_zone_free(xfs_trans_zone, tp);
@@ -125,7 +84,6 @@
 	 * Initialize the new transaction structure.
 	 */
 	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
-	ntp->t_type = tp->t_type;
 	ntp->t_mountp = tp->t_mountp;
 	INIT_LIST_HEAD(&ntp->t_items);
 	INIT_LIST_HEAD(&ntp->t_busy);
@@ -135,9 +93,9 @@
 
 	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 		       (tp->t_flags & XFS_TRANS_RESERVE) |
-		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
+		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
 	/* We gave our writer reference to the new transaction */
-	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
+	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
 	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 	tp->t_blk_res = tp->t_blk_res_used;
@@ -165,7 +123,7 @@
  * This does not do quota reservations. That typically is done by the
  * caller afterwards.
  */
-int
+static int
 xfs_trans_reserve(
 	struct xfs_trans	*tp,
 	struct xfs_trans_res	*resp,
@@ -219,7 +177,7 @@
 						resp->tr_logres,
 						resp->tr_logcount,
 						&tp->t_ticket, XFS_TRANSACTION,
-						permanent, tp->t_type);
+						permanent);
 		}
 
 		if (error)
@@ -268,6 +226,42 @@
 	return error;
 }
 
+int
+xfs_trans_alloc(
+	struct xfs_mount	*mp,
+	struct xfs_trans_res	*resp,
+	uint			blocks,
+	uint			rtextents,
+	uint			flags,
+	struct xfs_trans	**tpp)
+{
+	struct xfs_trans	*tp;
+	int			error;
+
+	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
+		sb_start_intwrite(mp->m_super);
+
+	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+	atomic_inc(&mp->m_active_trans);
+
+	tp = kmem_zone_zalloc(xfs_trans_zone,
+		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
+	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
+	tp->t_flags = flags;
+	tp->t_mountp = mp;
+	INIT_LIST_HEAD(&tp->t_items);
+	INIT_LIST_HEAD(&tp->t_busy);
+
+	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
+	if (error) {
+		xfs_trans_cancel(tp);
+		return error;
+	}
+
+	*tpp = tp;
+	return 0;
+}
+
 /*
  * Record the indicated change to the given field for application
  * to the file system's superblock when the transaction commits.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index e7c49cf..9a462e8 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -90,7 +90,6 @@
  */
 typedef struct xfs_trans {
 	unsigned int		t_magic;	/* magic number */
-	unsigned int		t_type;		/* transaction type */
 	unsigned int		t_log_res;	/* amt of log space resvd */
 	unsigned int		t_log_count;	/* count for perm log res */
 	unsigned int		t_blk_res;	/* # of blocks resvd */
@@ -148,10 +147,9 @@
 /*
  * XFS transaction mechanism exported interfaces.
  */
-xfs_trans_t	*xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
-int		xfs_trans_reserve(struct xfs_trans *, struct xfs_trans_res *,
-				  uint, uint);
+int		xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
+			uint blocks, uint rtextents, uint flags,
+			struct xfs_trans **tpp);
 void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
 
 struct xfs_buf	*xfs_trans_get_buf_map(struct xfs_trans *tp,
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index d111f69..ea62245 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -74,11 +74,12 @@
 }
 
 static int
-xfs_xattr_set(const struct xattr_handler *handler, struct dentry *dentry,
-		const char *name, const void *value, size_t size, int flags)
+xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
+		struct inode *inode, const char *name, const void *value,
+		size_t size, int flags)
 {
 	int			xflags = handler->flags;
-	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
+	struct xfs_inode	*ip = XFS_I(inode);
 	int			error;
 
 	/* Convert Linux syscall to XFS internal ATTR flags */
@@ -92,7 +93,7 @@
 	error = xfs_attr_set(ip, (unsigned char *)name,
 				(void *)value, size, xflags);
 	if (!error)
-		xfs_forget_acl(d_inode(dentry), name, xflags);
+		xfs_forget_acl(inode, name, xflags);
 
 	return error;
 }
@@ -146,7 +147,7 @@
 	arraytop = context->count + prefix_len + namelen + 1;
 	if (arraytop > context->firstu) {
 		context->count = -1;	/* insufficient space */
-		return 1;
+		return 0;
 	}
 	offset = (char *)context->alist + context->count;
 	strncpy(offset, prefix, prefix_len);
@@ -166,8 +167,7 @@
 	int		flags,
 	unsigned char	*name,
 	int		namelen,
-	int		valuelen,
-	unsigned char	*value)
+	int		valuelen)
 {
 	char *prefix;
 	int prefix_len;
@@ -221,11 +221,15 @@
 }
 
 ssize_t
-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
+xfs_vn_listxattr(
+	struct dentry	*dentry,
+	char		*data,
+	size_t		size)
 {
 	struct xfs_attr_list_context context;
 	struct attrlist_cursor_kern cursor = { 0 };
-	struct inode		*inode = d_inode(dentry);
+	struct inode	*inode = d_inode(dentry);
+	int		error;
 
 	/*
 	 * First read the regular on-disk attributes.
@@ -239,7 +243,9 @@
 	context.firstu = context.bufsize;
 	context.put_listent = xfs_xattr_put_listent;
 
-	xfs_attr_list_int(&context);
+	error = xfs_attr_list_int(&context);
+	if (error)
+		return error;
 	if (context.count < 0)
 		return -ERANGE;
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 4d40e9b..788c6c3 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -61,12 +61,12 @@
 bool acpi_bay_match(acpi_handle handle);
 bool acpi_dock_match(acpi_handle handle);
 
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs);
+bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
 union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
-			int rev, int func, union acpi_object *argv4);
+			u64 rev, u64 func, union acpi_object *argv4);
 
 static inline union acpi_object *
-acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
+acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
 			union acpi_object *argv4, acpi_object_type type)
 {
 	union acpi_object *obj;
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 70a41f7..5731ccb 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -51,7 +51,8 @@
  */
 extern bool acpi_video_handles_brightness_key_presses(void);
 extern int acpi_video_get_levels(struct acpi_device *device,
-				 struct acpi_video_device_brightness **dev_br);
+				 struct acpi_video_device_brightness **dev_br,
+				 int *pmax_level);
 #else
 static inline int acpi_video_register(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
@@ -72,7 +73,8 @@
 	return false;
 }
 static inline int acpi_video_get_levels(struct acpi_device *device,
-			struct acpi_video_device_brightness **dev_br)
+			struct acpi_video_device_brightness **dev_br,
+			int *pmax_level)
 {
 	return -ENODEV;
 }
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5d8ffa3..c1cde35 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
 
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count;
+	return READ_ONCE(current_thread_info()->preempt_count);
 }
 
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
 {
 	return &current_thread_info()->preempt_count;
 }
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
new file mode 100644
index 0000000..25afb31
--- /dev/null
+++ b/include/drm/bridge/analogix_dp.h
@@ -0,0 +1,41 @@
+/*
+ * Analogix DP (Display Port) Core interface driver.
+ *
+ * Copyright (C) 2015 Rockchip Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ANALOGIX_DP_H_
+#define _ANALOGIX_DP_H_
+
+#include <drm/drm_crtc.h>
+
+enum analogix_dp_devtype {
+	EXYNOS_DP,
+	RK3288_DP,
+};
+
+struct analogix_dp_plat_data {
+	enum analogix_dp_devtype dev_type;
+	struct drm_panel *panel;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	int (*power_on)(struct analogix_dp_plat_data *);
+	int (*power_off)(struct analogix_dp_plat_data *);
+	int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
+		      struct drm_connector *);
+	int (*get_modes)(struct analogix_dp_plat_data *);
+};
+
+int analogix_dp_resume(struct device *dev);
+int analogix_dp_suspend(struct device *dev);
+
+int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+		     struct analogix_dp_plat_data *plat_data);
+void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
+
+#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3c8422c..84f1a8e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -90,7 +90,7 @@
 struct dma_buf_attachment;
 
 /*
- * 4 debug categories are defined:
+ * The following categories are defined:
  *
  * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
  *	 This is the category used by the DRM_DEBUG() macro.
@@ -580,12 +580,21 @@
 	void (*debugfs_cleanup)(struct drm_minor *minor);
 
 	/**
-	 * Driver-specific constructor for drm_gem_objects, to set up
-	 * obj->driver_private.
+	 * @gem_free_object: destructor for drm_gem_objects
 	 *
-	 * Returns 0 on success.
+	 * This is deprecated and should not be used by new drivers. Use
+	 * @gem_free_object_unlocked instead.
 	 */
 	void (*gem_free_object) (struct drm_gem_object *obj);
+
+	/**
+	 * @gem_free_object_unlocked: destructor for drm_gem_objects
+	 *
+	 * This is for drivers which are not encumbered with dev->struct_mutex
+	 * legacy locking schemes. Use this hook instead of @gem_free_object.
+	 */
+	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
 
@@ -769,6 +778,7 @@
 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
 	/*@} */
 
+	struct mutex filelist_mutex;
 	struct list_head filelist;
 
 	/** \name Memory management */
@@ -805,14 +815,6 @@
 	int irq;
 
 	/*
-	 * At load time, disabling the vblank interrupt won't be allowed since
-	 * old clients may not call the modeset ioctl and therefore misbehave.
-	 * Once the modeset ioctl *has* been called though, we can safely
-	 * disable them when unused.
-	 */
-	bool vblank_disable_allowed;
-
-	/*
 	 * If true, vblank interrupt will be disabled immediately when the
 	 * refcount drops to zero, as opposed to via the vblank disable
 	 * timer.
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 193ef19..b2d9126 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -37,7 +37,7 @@
 				uint32_t type);
 
 struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_clear(struct drm_device *dev);
+void drm_legacy_agp_clear(struct drm_device *dev);
 int drm_agp_acquire(struct drm_device *dev);
 int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
@@ -93,7 +93,7 @@
 	return NULL;
 }
 
-static inline void drm_agp_clear(struct drm_device *dev)
+static inline void drm_legacy_agp_clear(struct drm_device *dev)
 {
 }
 
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d3eaa5d..92c84e9 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -137,7 +137,7 @@
 
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
-int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
 
 #define for_each_connector_in_state(state, connector, connector_state, __i) \
 	for ((__i) = 0;							\
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9054598c..d473dcc 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -40,8 +40,10 @@
 			    struct drm_atomic_state *state);
 int drm_atomic_helper_commit(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async);
+			     bool nonblock);
 
+void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+					struct drm_atomic_state *state);
 bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
 					   struct drm_atomic_state *old_state,
 					   struct drm_crtc *crtc);
@@ -108,6 +110,8 @@
 				uint32_t flags);
 int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
 				     int mode);
+struct drm_encoder *
+drm_atomic_helper_best_encoder(struct drm_connector *connector);
 
 /* default implementations for state handling */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -115,8 +119,7 @@
 					      struct drm_crtc_state *state);
 struct drm_crtc_state *
 drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
-					    struct drm_crtc_state *state);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 					  struct drm_crtc_state *state);
 
@@ -125,8 +128,7 @@
 					       struct drm_plane_state *state);
 struct drm_plane_state *
 drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
-					     struct drm_plane_state *state);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 					  struct drm_plane_state *state);
 
@@ -142,8 +144,7 @@
 drm_atomic_helper_duplicate_state(struct drm_device *dev,
 				  struct drm_modeset_acquire_ctx *ctx);
 void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
-					    struct drm_connector_state *state);
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					  struct drm_connector_state *state);
 void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index e0170bf..d1559cd 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -45,20 +45,12 @@
 struct device_node;
 struct fence;
 
-#define DRM_MODE_OBJECT_CRTC 0xcccccccc
-#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
-#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
-#define DRM_MODE_OBJECT_MODE 0xdededede
-#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
-#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
-#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
-#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
-#define DRM_MODE_OBJECT_ANY 0
-
 struct drm_mode_object {
 	uint32_t id;
 	uint32_t type;
 	struct drm_object_properties *properties;
+	struct kref refcount;
+	void (*free_cb)(struct kref *kref);
 };
 
 #define DRM_OBJECT_MAX_PROPERTY 24
@@ -126,6 +118,14 @@
 #define DRM_COLOR_FORMAT_RGB444		(1<<0)
 #define DRM_COLOR_FORMAT_YCRCB444	(1<<1)
 #define DRM_COLOR_FORMAT_YCRCB422	(1<<2)
+
+#define DRM_BUS_FLAG_DE_LOW		(1<<0)
+#define DRM_BUS_FLAG_DE_HIGH		(1<<1)
+/* drive data on pos. edge */
+#define DRM_BUS_FLAG_PIXDATA_POSEDGE	(1<<2)
+/* drive data on neg. edge */
+#define DRM_BUS_FLAG_PIXDATA_NEGEDGE	(1<<3)
+
 /*
  * Describes a given display (e.g. CRT or flat panel) and its limitations.
  */
@@ -147,6 +147,7 @@
 
 	const u32 *bus_formats;
 	unsigned int num_bus_formats;
+	u32 bus_flags;
 
 	/* Mask of supported hdmi deep color modes */
 	u8 edid_hdmi_dc_modes;
@@ -233,8 +234,8 @@
 	 * should be deferred.  In cases like this, the driver would like to
 	 * hold a ref to the fb even though it has already been removed from
 	 * userspace perspective.
+	 * The refcount is stored inside the mode object.
 	 */
-	struct kref refcount;
 	/*
 	 * Place on the dev->mode_config.fb_list, access protected by
 	 * dev->mode_config.fb_lock.
@@ -258,7 +259,6 @@
 struct drm_property_blob {
 	struct drm_mode_object base;
 	struct drm_device *dev;
-	struct kref refcount;
 	struct list_head head_global;
 	struct list_head head_file;
 	size_t length;
@@ -1895,7 +1895,7 @@
 	 * drm_atomic_helper_commit(), or one of the exported sub-functions of
 	 * it.
 	 *
-	 * Asynchronous commits (as indicated with the async parameter) must
+	 * Nonblocking commits (as indicated with the nonblock parameter) must
 	 * do any preparatory work which might result in an unsuccessful commit
 	 * in the context of this callback. The only exceptions are hardware
 	 * errors resulting in -EIO. But even in that case the driver must
@@ -1908,7 +1908,7 @@
 	 * The driver must wait for any pending rendering to the new
 	 * framebuffers to complete before executing the flip. It should also
 	 * wait for any pending rendering from other drivers if the underlying
-	 * buffer is a shared dma-buf. Asynchronous commits must not wait for
+	 * buffer is a shared dma-buf. Nonblocking commits must not wait for
 	 * rendering in the context of this callback.
 	 *
 	 * An application can request to be notified when the atomic commit has
@@ -1939,7 +1939,7 @@
 	 *
 	 * 0 on success or one of the below negative error codes:
 	 *
-	 *  - -EBUSY, if an asynchronous updated is requested and there is
+	 *  - -EBUSY, if a nonblocking update is requested and there is
 	 *    an earlier update pending. Drivers are allowed to support a queue
 	 *    of outstanding updates, but currently no driver supports that.
 	 *    Note that drivers must wait for preceding updates to complete if a
@@ -1969,7 +1969,7 @@
 	 */
 	int (*atomic_commit)(struct drm_device *dev,
 			     struct drm_atomic_state *state,
-			     bool async);
+			     bool nonblock);
 
 	/**
 	 * @atomic_state_alloc:
@@ -2259,8 +2259,9 @@
 	return connector->connector_id;
 }
 
-/* helper to unplug all connectors from sysfs for device */
-extern void drm_connector_unplug_all(struct drm_device *dev);
+/* helpers to {un}register all connectors from sysfs for device */
+extern int drm_connector_register_all(struct drm_device *dev);
+extern void drm_connector_unregister_all(struct drm_device *dev);
 
 extern int drm_bridge_add(struct drm_bridge *bridge);
 extern void drm_bridge_remove(struct drm_bridge *bridge);
@@ -2386,8 +2387,6 @@
 				const struct drm_framebuffer_funcs *funcs);
 extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
 						      uint32_t id);
-extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
-extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
 extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
 extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
 extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
@@ -2445,6 +2444,8 @@
 					 int gamma_size);
 extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
 		uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
 
 /* IOCTLs */
 extern int drm_mode_getresources(struct drm_device *dev,
@@ -2510,6 +2511,8 @@
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
 				 bool *edid_corrupt);
 extern bool drm_edid_is_valid(struct edid *edid);
+extern void drm_edid_get_monitor_name(struct edid *edid, char *name,
+				      int buflen);
 
 extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 							 char topology[8]);
@@ -2577,7 +2580,15 @@
 	return mo ? obj_to_encoder(mo) : NULL;
 }
 
-static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * and takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
 		uint32_t id)
 {
 	struct drm_mode_object *mo;
@@ -2600,14 +2611,73 @@
 static inline uint32_t drm_color_lut_extract(uint32_t user_input,
 					     uint32_t bit_precision)
 {
-	uint32_t val = user_input + (1 << (16 - bit_precision - 1));
+	uint32_t val = user_input;
 	uint32_t max = 0xffff >> (16 - bit_precision);
 
-	val >>= 16 - bit_precision;
+	/* Round only if we're not using full precision. */
+	if (bit_precision < 16) {
+		val += 1UL << (16 - bit_precision - 1);
+		val >>= 16 - bit_precision;
+	}
 
 	return clamp_val(val, 0, max);
 }
 
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
+ */
+static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	drm_mode_object_reference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees it if it drops to zero.
+ */
+static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	drm_mode_object_unreference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_read_refcount - read the framebuffer reference count.
+ * @fb: framebuffer
+ *
+ * This function returns the framebuffer's reference count.
+ */
+static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+{
+	return atomic_read(&fb->base.refcount.refcount);
+}
+
+/**
+ * drm_connector_reference - incr the connector refcnt
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+	drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees it if it drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+	drm_mode_object_unreference(&connector->base);
+}
+
 /* Plane list iterator for legacy (overlay only) planes. */
 #define drm_for_each_legacy_plane(plane, dev) \
 	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
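
Illustrative sketch only, not part of this series: per the new kernel-doc above, drm_connector_lookup() now takes a reference, so every successful lookup should be balanced with drm_connector_unreference(). A hypothetical caller (all example_ names are assumptions):

#include <drm/drm_crtc.h>

/* look up a connector by object id, switch it on via its legacy dpms hook
 * (assuming the driver provides one), then drop the lookup reference */
static int example_connector_dpms_on(struct drm_device *dev, uint32_t connector_id)
{
	struct drm_connector *connector;
	int ret;

	connector = drm_connector_lookup(dev, connector_id);
	if (!connector)
		return -ENOENT;

	ret = connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

	/* balance the reference taken by drm_connector_lookup() */
	drm_connector_unreference(connector);
	return ret;
}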
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 623b4e9..c0d4df6 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -73,4 +73,21 @@
 	u8 topology_id[8];
 } __packed;
 
+struct displayid_detailed_timings_1 {
+	u8 pixel_clock[3];
+	u8 flags;
+	u8 hactive[2];
+	u8 hblank[2];
+	u8 hsync[2];
+	u8 hsw[2];
+	u8 vactive[2];
+	u8 vblank[2];
+	u8 vsync[2];
+	u8 vsw[2];
+} __packed;
+
+struct displayid_detailed_timing_block {
+	struct displayid_block base;
+	struct displayid_detailed_timings_1 timings[0];
+};
 #endif
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
new file mode 100644
index 0000000..e8a9dfd
--- /dev/null
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_DP_DUAL_MODE_HELPER_H
+#define DRM_DP_DUAL_MODE_HELPER_H
+
+#include <linux/types.h>
+
+/*
+ * Optional for type 1 DVI adaptors
+ * Mandatory for type 1 HDMI and type 2 adaptors
+ */
+#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */
+#define  DP_DUAL_MODE_HDMI_ID_LEN 16
+/*
+ * Optional for type 1 adaptors
+ * Mandatory for type 2 adaptors
+ */
+#define DP_DUAL_MODE_ADAPTOR_ID 0x10
+#define  DP_DUAL_MODE_REV_MASK 0x07
+#define  DP_DUAL_MODE_REV_TYPE2 0x00
+#define  DP_DUAL_MODE_TYPE_MASK 0xf0
+#define  DP_DUAL_MODE_TYPE_TYPE2 0xa0
+#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
+#define  DP_DUAL_IEEE_OUI_LEN 3
+#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
+#define  DP_DUAL_DEVICE_ID_LEN 6
+#define DP_DUAL_MODE_HARDWARE_REV 0x1a
+#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b
+#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c
+#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d
+#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e
+#define DP_DUAL_MODE_TMDS_OEN 0x20
+#define  DP_DUAL_MODE_TMDS_DISABLE 0x01
+#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21
+#define  DP_DUAL_MODE_CEC_ENABLE 0x01
+#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
+
+struct i2c_adapter;
+
+ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
+			      u8 offset, void *buffer, size_t size);
+ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
+			       u8 offset, const void *buffer, size_t size);
+
+/**
+ * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
+ * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
+ * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ */
+enum drm_dp_dual_mode_type {
+	DRM_DP_DUAL_MODE_NONE,
+	DRM_DP_DUAL_MODE_UNKNOWN,
+	DRM_DP_DUAL_MODE_TYPE1_DVI,
+	DRM_DP_DUAL_MODE_TYPE1_HDMI,
+	DRM_DP_DUAL_MODE_TYPE2_DVI,
+	DRM_DP_DUAL_MODE_TYPE2_HDMI,
+};
+
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+				    struct i2c_adapter *adapter);
+int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+				     struct i2c_adapter *adapter, bool *enabled);
+int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+				     struct i2c_adapter *adapter, bool enable);
+const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
+
+#endif
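
Illustrative sketch only, not part of this series: a typical consumer of the new DP dual mode ("DP++") helpers would probe the adaptor on the encoder's DDC bus and make sure its TMDS output buffers are enabled before driving HDMI/DVI. The ddc argument and example_ name are assumptions:

#include <linux/printk.h>
#include <drm/drm_dp_dual_mode_helper.h>

static int example_enable_dp_dual_mode(struct i2c_adapter *ddc)
{
	enum drm_dp_dual_mode_type type;
	int max_tmds_clock;

	type = drm_dp_dual_mode_detect(ddc);
	if (type == DRM_DP_DUAL_MODE_NONE)
		return 0;	/* no adaptor found, nothing to configure */

	max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, ddc);
	pr_debug("DP dual mode adaptor: %s, max TMDS clock %d kHz\n",
		 drm_dp_get_dual_mode_type_name(type), max_tmds_clock);

	/* type 2 adaptors may power up with the TMDS output disabled */
	return drm_dp_dual_mode_set_tmds_output(type, ddc, true);
}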
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1252108..9d03f16 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -73,6 +73,7 @@
 # define DP_ENHANCED_FRAME_CAP		    (1 << 7)
 
 #define DP_MAX_DOWNSPREAD                   0x003
+# define DP_MAX_DOWNSPREAD_0_5		    (1 << 0)
 # define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
 
 #define DP_NORP                             0x004
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index dec6221..919933d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -328,7 +328,15 @@
 int drm_av_sync_delay(struct drm_connector *connector,
 		      const struct drm_display_mode *mode);
 struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
 int drm_load_edid_firmware(struct drm_connector *connector);
+#else
+static inline int drm_load_edid_firmware(struct drm_connector *connector)
+{
+	return 0;
+}
+#endif
 
 int
 drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index be62bd3..fd0dde9 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -4,11 +4,18 @@
 struct drm_fbdev_cma;
 struct drm_gem_cma_object;
 
+struct drm_fb_helper_surface_size;
+struct drm_framebuffer_funcs;
+struct drm_fb_helper_funcs;
 struct drm_framebuffer;
+struct drm_fb_helper;
 struct drm_device;
 struct drm_file;
 struct drm_mode_fb_cmd2;
 
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs);
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	unsigned int preferred_bpp, unsigned int num_crtc,
 	unsigned int max_conn_count);
@@ -16,7 +23,17 @@
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes,
+	const struct drm_framebuffer_funcs *funcs);
 
+void drm_fb_cma_destroy(struct drm_framebuffer *fb);
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *file_priv, unsigned int *handle);
+
+struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
+	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+	const struct drm_framebuffer_funcs *funcs);
 struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
 	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
 
@@ -24,6 +41,8 @@
 	unsigned int plane);
 
 #ifdef CONFIG_DEBUG_FS
+struct seq_file;
+
 int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
 #endif
 
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 062723b..5b4aa35 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -172,6 +172,10 @@
  * @funcs: driver callbacks for fb helper
  * @fbdev: emulated fbdev device info struct
  * @pseudo_palette: fake palette of 16 colors
+ * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
+ *              the screen buffer
+ * @dirty_lock: spinlock protecting @dirty_clip
+ * @dirty_work: worker used to flush the framebuffer
  *
  * This is the main structure used by the fbdev helpers. Drivers supporting
  * fbdev emulation should embed this into their overall driver structure.
@@ -189,6 +193,9 @@
 	const struct drm_fb_helper_funcs *funcs;
 	struct fb_info *fbdev;
 	u32 pseudo_palette[17];
+	struct drm_clip_rect dirty_clip;
+	spinlock_t dirty_lock;
+	struct work_struct dirty_work;
 
 	/**
 	 * @kernel_fb_list:
@@ -245,6 +252,9 @@
 
 void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
 
+void drm_fb_helper_deferred_io(struct fb_info *info,
+			       struct list_head *pagelist);
+
 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
 			       size_t count, loff_t *ppos);
 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
@@ -368,6 +378,11 @@
 {
 }
 
+static inline void drm_fb_helper_deferred_io(struct fb_info *info,
+					     struct list_head *pagelist)
+{
+}
+
 static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
 					     char __user *buf, size_t count,
 					     loff_t *ppos)
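
Illustrative sketch only, not part of this series: a driver wanting fbdev mmap/write flushing through the new deferred-I/O path would plug drm_fb_helper_deferred_io() into a struct fb_deferred_io and enable it on its fb_info; the 50 ms delay and example_ names are assumptions:

#include <linux/fb.h>
#include <drm/drm_fb_helper.h>

/* touched pages are accumulated into fb_helper->dirty_clip and flushed from
 * fb_helper->dirty_work after the delay below */
static struct fb_deferred_io example_defio = {
	.delay		= HZ / 20,
	.deferred_io	= drm_fb_helper_deferred_io,
};

static void example_enable_defio(struct fb_info *info)
{
	info->fbdefio = &example_defio;
	fb_deferred_io_init(info);
}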
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 0b3e11a..fca1cd1 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -200,47 +200,29 @@
 }
 
 /**
- * drm_gem_object_unreference - release a GEM BO reference
+ * __drm_gem_object_unreference - raw function to release a GEM BO reference
  * @obj: GEM buffer object
  *
- * This releases a reference to @obj. Callers must hold the dev->struct_mutex
- * lock when calling this function, even when the driver doesn't use
- * dev->struct_mutex for anything.
+ * This function is meant to be used by drivers which are not encumbered with
+ * dev->struct_mutex legacy locking and which are using the
+ * gem_free_object_unlocked callback. It avoids all the locking checks and
+ * locking overhead of drm_gem_object_unreference() and
+ * drm_gem_object_unreference_unlocked().
  *
- * For drivers not encumbered with legacy locking use
- * drm_gem_object_unreference_unlocked() instead.
+ * Drivers should never call this directly in their code. Instead they should
+ * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
+ * *obj) wrapper function, and use that. Shared code should never call this, to
+ * avoid accidentally breaking drivers which still depend upon dev->struct_mutex
+ * locking.
  */
 static inline void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+__drm_gem_object_unreference(struct drm_gem_object *obj)
 {
-	if (obj != NULL) {
-		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
-		kref_put(&obj->refcount, drm_gem_object_free);
-	}
+	kref_put(&obj->refcount, drm_gem_object_free);
 }
 
-/**
- * drm_gem_object_unreference_unlocked - release a GEM BO reference
- * @obj: GEM buffer object
- *
- * This releases a reference to @obj. Callers must not hold the
- * dev->struct_mutex lock when calling this function.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
-	struct drm_device *dev;
-
-	if (!obj)
-		return;
-
-	dev = obj->dev;
-	if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
-		mutex_unlock(&dev->struct_mutex);
-	else
-		might_lock(&dev->struct_mutex);
-}
+void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
 
 int drm_gem_handle_create(struct drm_file *file_priv,
 			  struct drm_gem_object *obj,
@@ -256,9 +238,7 @@
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);
 
-struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
-					     struct drm_file *filp,
-					     u32 handle);
+struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
 int drm_gem_dumb_destroy(struct drm_file *file,
 			 struct drm_device *dev,
 			 uint32_t handle);
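
Illustrative sketch only, not part of this series: following the recommendation in the kernel-doc above, a driver using gem_free_object_unlocked would add a trivial wrapper rather than call __drm_gem_object_unreference() from shared code; the example_ names are assumptions:

#include <drm/drm_gem.h>

/* hypothetical driver-private buffer object embedding a GEM object */
struct example_gem_object {
	struct drm_gem_object base;
};

/* driver-local wrapper, safe only because this driver never relies on
 * dev->struct_mutex for GEM object lifetime */
static inline void
example_gem_object_unreference(struct example_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}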
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 3e69803..a5ef2c7 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -154,8 +154,10 @@
 int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
 		      unsigned int size, enum drm_map_type type,
 		      enum drm_map_flags flags, struct drm_local_map **map_p);
-int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
 int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+			      struct drm_master *master);
 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
 
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index e42495a..70d4e22 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -54,6 +54,25 @@
 			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 
+static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
+{
+	if (size != 0 && nmemb > SIZE_MAX / size)
+		return NULL;
+
+	if (size * nmemb <= PAGE_SIZE)
+		return kmalloc(nmemb * size, gfp);
+
+	if (gfp & __GFP_RECLAIMABLE) {
+		void *ptr = kmalloc(nmemb * size,
+				    gfp | __GFP_NOWARN | __GFP_NORETRY);
+		if (ptr)
+			return ptr;
+	}
+
+	return __vmalloc(size * nmemb,
+			 gfp | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
 static __inline void drm_free_large(void *ptr)
 {
 	kvfree(ptr);
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index b61c2d4..d4619dc 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -672,7 +672,7 @@
 	 * fixed panel can also manually add specific modes using
 	 * drm_mode_probed_add(). Drivers which manually add modes should also
 	 * make sure that the @display_info, @width_mm and @height_mm fields of the
-	 * struct #drm_connector are filled in.
+	 * struct &drm_connector are filled in.
 	 *
 	 * Virtual drivers that just want some standard VESA mode with a given
 	 * resolution can call drm_add_modes_noedid(), and mark the preferred
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 13ff44b..220d1e2b 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -75,6 +75,14 @@
 			   struct display_timing *timings);
 };
 
+/**
+ * struct drm_panel - DRM panel object
+ * @drm: DRM device owning the panel
+ * @connector: DRM connector that the panel is attached to
+ * @dev: parent device of the panel
+ * @funcs: operations that can be performed on the panel
+ * @list: panel entry in registry
+ */
 struct drm_panel {
 	struct drm_device *drm;
 	struct drm_connector *connector;
@@ -85,6 +93,17 @@
 	struct list_head list;
 };
 
+/**
+ * drm_panel_unprepare - power off a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will completely power off a panel (assert the panel's
+ * reset, turn off power supplies, ...). After this function has completed, it
+ * is usually no longer possible to communicate with the panel until another
+ * call to drm_panel_prepare().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 static inline int drm_panel_unprepare(struct drm_panel *panel)
 {
 	if (panel && panel->funcs && panel->funcs->unprepare)
@@ -93,6 +112,16 @@
 	return panel ? -ENOSYS : -EINVAL;
 }
 
+/**
+ * drm_panel_disable - disable a panel
+ * @panel: DRM panel
+ *
+ * This will typically turn off the panel's backlight or disable the display
+ * drivers. For smart panels it should still be possible to communicate with
+ * the integrated circuitry via any command bus after this call.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 static inline int drm_panel_disable(struct drm_panel *panel)
 {
 	if (panel && panel->funcs && panel->funcs->disable)
@@ -101,6 +130,16 @@
 	return panel ? -ENOSYS : -EINVAL;
 }
 
+/**
+ * drm_panel_prepare - power on a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will enable power and deassert any reset signals to
+ * the panel. After this has completed it is possible to communicate with any
+ * integrated circuitry via a command bus.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 static inline int drm_panel_prepare(struct drm_panel *panel)
 {
 	if (panel && panel->funcs && panel->funcs->prepare)
@@ -109,6 +148,16 @@
 	return panel ? -ENOSYS : -EINVAL;
 }
 
+/**
+ * drm_panel_enable - enable a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will cause the panel display drivers to be turned on
+ * and the backlight to be enabled. Content will be visible on screen after
+ * this call completes.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
 static inline int drm_panel_enable(struct drm_panel *panel)
 {
 	if (panel && panel->funcs && panel->funcs->enable)
@@ -117,6 +166,16 @@
 	return panel ? -ENOSYS : -EINVAL;
 }
 
+/**
+ * drm_panel_get_modes - probe the available display modes of a panel
+ * @panel: DRM panel
+ *
+ * The modes probed from the panel are automatically added to the connector
+ * that the panel is attached to.
+ *
+ * Return: The number of modes available from the panel on success or a
+ * negative error code on failure.
+ */
 static inline int drm_panel_get_modes(struct drm_panel *panel)
 {
 	if (panel && panel->funcs && panel->funcs->get_modes)
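
Illustrative sketch only, not part of this series: the kernel-doc added above implies the usual pairing of these calls. A hypothetical encoder/bridge power sequence:

#include <drm/drm_panel.h>

static int example_panel_power_on(struct drm_panel *panel)
{
	int ret;

	/* rails up, reset deasserted; the command bus is usable afterwards */
	ret = drm_panel_prepare(panel);
	if (ret < 0)
		return ret;

	/* display drivers and backlight on; content becomes visible */
	return drm_panel_enable(panel);
}

static void example_panel_power_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight/display drivers off */
	drm_panel_unprepare(panel);	/* power off, assert reset */
}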
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 2f63dd5..06ea8e07 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -176,19 +176,6 @@
 }
 
 /**
- * drm_vma_node_has_offset() - Check whether node is added to offset manager
- * @node: Node to be checked
- *
- * RETURNS:
- * true iff the node was previously allocated an offset and added to
- * an vma offset manager.
- */
-static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
-{
-	return drm_mm_node_allocated(&node->vm_node);
-}
-
-/**
  * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
  * @node: Linked offset node
  *
@@ -220,7 +207,7 @@
 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
 				      struct address_space *file_mapping)
 {
-	if (drm_vma_node_has_offset(node))
+	if (drm_mm_node_allocated(&node->vm_node))
 		unmap_mapping_range(file_mapping,
 				    drm_vma_node_offset_addr(node),
 				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 055a08d..c801d90 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -314,7 +314,7 @@
  * Returns -EBUSY if no_wait is true and the buffer is busy.
  * Returns -ERESTARTSYS if interrupted by a signal.
  */
-extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+extern int ttm_bo_wait(struct ttm_buffer_object *bo,
 		       bool interruptible, bool no_wait);
 /**
  * ttm_bo_validate
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3d4bf08..513f7f9 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -434,6 +434,18 @@
 	 */
 	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+
+	/**
+	 * Optional driver callback for when BO is removed from the LRU.
+	 * Called with LRU lock held immediately before the removal.
+	 */
+	void (*lru_removal)(struct ttm_buffer_object *bo);
+
+	/**
+	 * Return the list_head after which a BO should be inserted in the LRU.
+	 */
+	struct list_head *(*lru_tail)(struct ttm_buffer_object *bo);
+	struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo);
 };
 
 /**
@@ -502,7 +514,6 @@
  * @vma_manager: Address space manager
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
- * @val_seq: Current validation sequence.
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
@@ -528,7 +539,6 @@
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
-	uint32_t val_seq;
 
 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
@@ -753,14 +763,16 @@
 extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
 extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 
+struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo);
+struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo);
+
 /**
  * __ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_ticket: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @ticket->stamp is older.
+ * @ticket: ticket used to acquire the ww_mutex.
  *
  * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
@@ -776,8 +788,7 @@
  * be returned if @use_ticket is set to true.
  */
 static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
-				   bool interruptible,
-				   bool no_wait, bool use_ticket,
+				   bool interruptible, bool no_wait,
 				   struct ww_acquire_ctx *ticket)
 {
 	int ret = 0;
@@ -806,8 +817,7 @@
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_ticket: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @ticket->stamp is older.
+ * @ticket: ticket used to acquire the ww_mutex.
  *
  * Locks a buffer object for validation. (Or prevents other processes from
  * locking it for validation) and removes it from lru lists, while taking
@@ -846,15 +856,14 @@
  * be returned if @use_ticket is set to true.
  */
 static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
-				 bool interruptible,
-				 bool no_wait, bool use_ticket,
+				 bool interruptible, bool no_wait,
 				 struct ww_acquire_ctx *ticket)
 {
 	int ret;
 
 	WARN_ON(!atomic_read(&bo->kref.refcount));
 
-	ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
+	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
 	if (likely(ret == 0))
 		ttm_bo_del_sub_from_lru(bo);
 
@@ -1030,8 +1039,7 @@
 
 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
 
-#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
-#define TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 #include <linux/agp_backend.h>
 
 /**
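
Illustrative sketch only, not part of this series: with the use_ticket argument gone, callers simply pass their ww_acquire_ctx (or NULL when reserving a single buffer). A hypothetical reserve/wait/unreserve sequence against the new prototypes:

#include <drm/ttm/ttm_bo_driver.h>

static int example_reserve_and_wait(struct ttm_buffer_object *bo)
{
	int ret;

	/* interruptible, blocking, no ww ticket needed for a single BO */
	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	/* ttm_bo_wait() likewise lost its "lazy" argument in this series */
	ret = ttm_bo_wait(bo, true, false);

	ttm_bo_unreserve(bo);
	return ret;
}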
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 7956ba1..6094bf7 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -176,7 +176,8 @@
 #define CLK_APMIXED_LVDSPLL		13
 #define CLK_APMIXED_MSDCPLL2		14
 #define CLK_APMIXED_REF2USB_TX		15
-#define CLK_APMIXED_NR_CLK		16
+#define CLK_APMIXED_HDMI_REF		16
+#define CLK_APMIXED_NR_CLK		17
 
 /* INFRA_SYS */
 
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
index 85aaf66..729ab9f 100644
--- a/include/dt-bindings/thermal/tegra124-soctherm.h
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -9,5 +9,6 @@
 #define TEGRA124_SOCTHERM_SENSOR_MEM 1
 #define TEGRA124_SOCTHERM_SENSOR_GPU 2
 #define TEGRA124_SOCTHERM_SENSOR_PLLX 3
+#define TEGRA124_SOCTHERM_SENSOR_NUM 4
 
 #endif
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b651aed..dda39d8 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -24,9 +24,6 @@
 #include <linux/workqueue.h>
 
 struct arch_timer_kvm {
-	/* Is the timer enabled */
-	bool			enabled;
-
 	/* Virtual offset */
 	cycle_t			cntvoff;
 };
@@ -53,15 +50,15 @@
 	/* Timer IRQ */
 	struct kvm_irq_level		irq;
 
-	/* VGIC mapping */
-	struct irq_phys_map		*map;
-
 	/* Active IRQ state caching */
 	bool				active_cleared_last;
+
+	/* Is the timer enabled */
+	bool			enabled;
 };
 
 int kvm_timer_hyp_init(void);
-void kvm_timer_enable(struct kvm *kvm);
+int kvm_timer_enable(struct kvm_vcpu *vcpu);
 void kvm_timer_init(struct kvm *kvm);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			 const struct kvm_irq_level *irq);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index be6037a..da0a5248 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -19,6 +19,10 @@
 #ifndef __ASM_ARM_KVM_VGIC_H
 #define __ASM_ARM_KVM_VGIC_H
 
+#ifdef CONFIG_KVM_NEW_VGIC
+#include <kvm/vgic/vgic.h>
+#else
+
 #include <linux/kernel.h>
 #include <linux/kvm.h>
 #include <linux/irqreturn.h>
@@ -158,7 +162,6 @@
 struct irq_phys_map {
 	u32			virt_irq;
 	u32			phys_irq;
-	u32			irq;
 };
 
 struct irq_phys_map_entry {
@@ -305,9 +308,6 @@
 	unsigned long   *active_shared;
 	unsigned long   *pend_act_shared;
 
-	/* Number of list registers on this CPU */
-	int		nr_lr;
-
 	/* CPU vif control registers for world switch */
 	union {
 		struct vgic_v2_cpu_if	vgic_v2;
@@ -342,17 +342,18 @@
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 			bool level);
 int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
-			       struct irq_phys_map *map, bool level);
+			       unsigned int virt_irq, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
-					   int virt_irq, int irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
 #define vgic_ready(k)		((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
+				 ((i) < (k)->arch.vgic.nr_irqs))
 
 int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
 		  const struct vgic_ops **ops,
@@ -370,4 +371,5 @@
 }
 #endif
 
+#endif	/* old VGIC include */
 #endif
diff --git a/include/kvm/vgic/vgic.h b/include/kvm/vgic/vgic.h
new file mode 100644
index 0000000..3fbd175
--- /dev/null
+++ b/include/kvm/vgic/vgic.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARM_KVM_VGIC_VGIC_H
+#define __ASM_ARM_KVM_VGIC_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <kvm/iodev.h>
+
+#define VGIC_V3_MAX_CPUS	255
+#define VGIC_V2_MAX_CPUS	8
+#define VGIC_NR_IRQS_LEGACY     256
+#define VGIC_NR_SGIS		16
+#define VGIC_NR_PPIS		16
+#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_MAX_PRIVATE	(VGIC_NR_PRIVATE_IRQS - 1)
+#define VGIC_MAX_SPI		1019
+#define VGIC_MAX_RESERVED	1023
+#define VGIC_MIN_LPI		8192
+
+enum vgic_type {
+	VGIC_V2,		/* Good ol' GICv2 */
+	VGIC_V3,		/* New fancy GICv3 */
+};
+
+/* same for all guests, as it depends only on the _host's_ GIC model */
+struct vgic_global {
+	/* type of the host GIC */
+	enum vgic_type		type;
+
+	/* Physical address of vgic virtual cpu interface */
+	phys_addr_t		vcpu_base;
+
+	/* virtual control interface mapping */
+	void __iomem		*vctrl_base;
+
+	/* Number of implemented list registers */
+	int			nr_lr;
+
+	/* Maintenance IRQ number */
+	unsigned int		maint_irq;
+
+	/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
+	int			max_gic_vcpus;
+
+	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
+	bool			can_emulate_gicv2;
+};
+
+extern struct vgic_global kvm_vgic_global_state;
+
+#define VGIC_V2_MAX_LRS		(1 << 6)
+#define VGIC_V3_MAX_LRS		16
+#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - lr)
+
+enum vgic_irq_config {
+	VGIC_CONFIG_EDGE = 0,
+	VGIC_CONFIG_LEVEL
+};
+
+struct vgic_irq {
+	spinlock_t irq_lock;		/* Protects the content of the struct */
+	struct list_head ap_list;
+
+	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
+					 * SPIs and LPIs: The VCPU whose ap_list
+					 * this is queued on.
+					 */
+
+	struct kvm_vcpu *target_vcpu;	/* The VCPU that this interrupt should
+					 * be sent to, as a result of the
+					 * targets reg (v2) or the
+					 * affinity reg (v3).
+					 */
+
+	u32 intid;			/* Guest visible INTID */
+	bool pending;
+	bool line_level;		/* Level only */
+	bool soft_pending;		/* Level only */
+	bool active;			/* not used for LPIs */
+	bool enabled;
+	bool hw;			/* Tied to HW IRQ */
+	u32 hwintid;			/* HW INTID number */
+	union {
+		u8 targets;			/* GICv2 target VCPUs mask */
+		u32 mpidr;			/* GICv3 target VCPU */
+	};
+	u8 source;			/* GICv2 SGIs only */
+	u8 priority;
+	enum vgic_irq_config config;	/* Level or edge */
+};
+
+struct vgic_register_region;
+
+struct vgic_io_device {
+	gpa_t base_addr;
+	struct kvm_vcpu *redist_vcpu;
+	const struct vgic_register_region *regions;
+	int nr_regions;
+	struct kvm_io_device dev;
+};
+
+struct vgic_dist {
+	bool			in_kernel;
+	bool			ready;
+	bool			initialized;
+
+	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
+	u32			vgic_model;
+
+	int			nr_spis;
+
+	/* TODO: Consider moving to global state */
+	/* Virtual control interface mapping */
+	void __iomem		*vctrl_base;
+
+	/* base addresses in guest physical address space: */
+	gpa_t			vgic_dist_base;		/* distributor */
+	union {
+		/* either a GICv2 CPU interface */
+		gpa_t			vgic_cpu_base;
+		/* or a number of GICv3 redistributor regions */
+		gpa_t			vgic_redist_base;
+	};
+
+	/* distributor enabled */
+	bool			enabled;
+
+	struct vgic_irq		*spis;
+
+	struct vgic_io_device	dist_iodev;
+	struct vgic_io_device	*redist_iodevs;
+};
+
+struct vgic_v2_cpu_if {
+	u32		vgic_hcr;
+	u32		vgic_vmcr;
+	u32		vgic_misr;	/* Saved only */
+	u64		vgic_eisr;	/* Saved only */
+	u64		vgic_elrsr;	/* Saved only */
+	u32		vgic_apr;
+	u32		vgic_lr[VGIC_V2_MAX_LRS];
+};
+
+struct vgic_v3_cpu_if {
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+	u32		vgic_hcr;
+	u32		vgic_vmcr;
+	u32		vgic_sre;	/* Restored only, change ignored */
+	u32		vgic_misr;	/* Saved only */
+	u32		vgic_eisr;	/* Saved only */
+	u32		vgic_elrsr;	/* Saved only */
+	u32		vgic_ap0r[4];
+	u32		vgic_ap1r[4];
+	u64		vgic_lr[VGIC_V3_MAX_LRS];
+#endif
+};
+
+struct vgic_cpu {
+	/* CPU vif control registers for world switch */
+	union {
+		struct vgic_v2_cpu_if	vgic_v2;
+		struct vgic_v3_cpu_if	vgic_v3;
+	};
+
+	unsigned int used_lrs;
+	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
+
+	spinlock_t ap_list_lock;	/* Protects the ap_list */
+
+	/*
+	 * List of IRQs that this VCPU should consider because they are either
+	 * Active or Pending (hence the name; AP list), or because they recently
+	 * were one of the two and need to be migrated off this list to another
+	 * VCPU.
+	 */
+	struct list_head ap_list_head;
+
+	u64 live_lrs;
+};
+
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+void kvm_vgic_early_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm, u32 type);
+void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
+int kvm_vgic_map_resources(struct kvm *kvm);
+int kvm_vgic_hyp_init(void);
+
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+			bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+			       bool level);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
+#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
+#define vgic_initialized(k)	((k)->arch.vgic.initialized)
+#define vgic_ready(k)		((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
+			((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
+
+bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+#else
+static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+}
+#endif
+
+/**
+ * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
+ *
+ * The host's GIC naturally limits the maximum number of VCPUs a guest
+ * can use.
+ */
+static inline int kvm_vgic_get_max_vcpus(void)
+{
+	return kvm_vgic_global_state.max_gic_vcpus;
+}
+
+#endif /* __ASM_ARM_KVM_VGIC_VGIC_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f310ec0..99346be 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -243,6 +243,7 @@
 	ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
 	ATA_CMD_FPDMA_READ	= 0x60,
 	ATA_CMD_FPDMA_WRITE	= 0x61,
+	ATA_CMD_NCQ_NON_DATA	= 0x63,
 	ATA_CMD_FPDMA_SEND	= 0x64,
 	ATA_CMD_FPDMA_RECV	= 0x65,
 	ATA_CMD_PIO_READ	= 0x20,
@@ -301,19 +302,43 @@
 	ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
 	ATA_CMD_REQ_SENSE_DATA  = 0x0B,
 	ATA_CMD_SANITIZE_DEVICE = 0xB4,
+	ATA_CMD_ZAC_MGMT_IN	= 0x4A,
+	ATA_CMD_ZAC_MGMT_OUT	= 0x9F,
 
 	/* marked obsolete in the ATA/ATAPI-7 spec */
 	ATA_CMD_RESTORE		= 0x10,
 
+	/* Subcmds for ATA_CMD_FPDMA_RECV */
+	ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01,
+	ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN    = 0x02,
+
 	/* Subcmds for ATA_CMD_FPDMA_SEND */
 	ATA_SUBCMD_FPDMA_SEND_DSM            = 0x00,
 	ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02,
 
+	/* Subcmds for ATA_CMD_NCQ_NON_DATA */
+	ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE  = 0x00,
+	ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05,
+	ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT     = 0x06,
+	ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07,
+
+	/* Subcmds for ATA_CMD_ZAC_MGMT_IN */
+	ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00,
+
+	/* Subcmds for ATA_CMD_ZAC_MGMT_OUT */
+	ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01,
+	ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02,
+	ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03,
+	ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04,
+
 	/* READ_LOG_EXT pages */
+	ATA_LOG_DIRECTORY	= 0x0,
 	ATA_LOG_SATA_NCQ	= 0x10,
+	ATA_LOG_NCQ_NON_DATA	  = 0x12,
 	ATA_LOG_NCQ_SEND_RECV	  = 0x13,
 	ATA_LOG_SATA_ID_DEV_DATA  = 0x30,
 	ATA_LOG_SATA_SETTINGS	  = 0x08,
+	ATA_LOG_ZONED_INFORMATION = 0x09,
 	ATA_LOG_DEVSLP_OFFSET	  = 0x30,
 	ATA_LOG_DEVSLP_SIZE	  = 0x08,
 	ATA_LOG_DEVSLP_MDAT	  = 0x00,
@@ -328,8 +353,25 @@
 	ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET	= 0x04,
 	ATA_LOG_NCQ_SEND_RECV_DSM_TRIM		= (1 << 0),
 	ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET	= 0x08,
+	ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED  = (1 << 0),
 	ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET	= 0x0C,
-	ATA_LOG_NCQ_SEND_RECV_SIZE		= 0x10,
+	ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED  = (1 << 0),
+	ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET	= 0x10,
+	ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0),
+	ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1),
+	ATA_LOG_NCQ_SEND_RECV_SIZE		= 0x14,
+
+	/* NCQ Non-Data log */
+	ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET	= 0x00,
+	ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET	= 0x00,
+	ATA_LOG_NCQ_NON_DATA_ABORT_NCQ		= (1 << 0),
+	ATA_LOG_NCQ_NON_DATA_ABORT_ALL		= (1 << 1),
+	ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING	= (1 << 2),
+	ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3),
+	ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED	= (1 << 4),
+	ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET	= 0x1C,
+	ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT	= (1 << 0),
+	ATA_LOG_NCQ_NON_DATA_SIZE		= 0x40,
 
 	/* READ/WRITE LONG (obsolete) */
 	ATA_CMD_READ_LONG	= 0x22,
@@ -386,6 +428,8 @@
 	SATA_SSP		= 0x06,	/* Software Settings Preservation */
 	SATA_DEVSLP		= 0x09,	/* Device Sleep */
 
+	SETFEATURE_SENSE_DATA	= 0xC3, /* Sense Data Reporting feature */
+
 	/* feature values for SET_MAX */
 	ATA_SET_MAX_ADDR	= 0x00,
 	ATA_SET_MAX_PASSWD	= 0x01,
@@ -529,6 +573,8 @@
 #define ata_id_cdb_intr(id)	(((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)	((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)	((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
+#define ata_id_has_ncq_autosense(id) \
+				((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -717,6 +763,20 @@
 	return false;
 }
 
+static inline bool ata_id_has_sense_reporting(const u16 *id)
+{
+	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+		return false;
+	return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+}
+
+static inline bool ata_id_sense_reporting_enabled(const u16 *id)
+{
+	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+		return false;
+	return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+}
+
 /**
  *	ata_id_major_version	-	get ATA level of drive
  *	@id: Identify data
@@ -821,6 +881,11 @@
 	return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6);
 }
 
+static inline bool ata_id_has_ncq_non_data(const u16 *id)
+{
+	return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
+}
+
 static inline bool ata_id_has_trim(const u16 *id)
 {
 	if (ata_id_major_version(id) >= 7 &&
@@ -872,6 +937,11 @@
 	return id[ATA_ID_ROT_SPEED] == 0x01;
 }
 
+static inline u8 ata_id_zoned_cap(const u16 *id)
+{
+	return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3);
+}
+
 static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio)
 {
 	/* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 846513c..a5ac2ca 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -587,7 +587,6 @@
 
 struct bcma_sflash {
 	bool present;
-	u32 window;
 	u32 blocksize;
 	u16 numblocks;
 	u32 size;
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 576e463..314b3ca 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -65,6 +65,7 @@
 	unsigned long limit;
 	unsigned long mm_flags;
 	loff_t written;
+	loff_t pos;
 };
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1fd8fdf..3d9cf32 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -768,6 +768,17 @@
 }
 #endif
 
+#ifdef CONFIG_PRINTK
+#define vfs_msg(sb, level, fmt, ...)				\
+	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
+#else
+#define vfs_msg(sb, level, fmt, ...)				\
+do {								\
+	no_printk(fmt, ##__VA_ARGS__);				\
+	__vfs_msg(sb, "", " ");					\
+} while (0)
+#endif
+
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern blk_qc_t generic_make_request(struct bio *bio);
@@ -1660,7 +1671,7 @@
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
-			pfn_t *);
+			pfn_t *, long);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1680,6 +1691,8 @@
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
 extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
+extern int bdev_dax_supported(struct super_block *, int);
+extern bool bdev_dax_capable(struct block_device *);
 #else /* CONFIG_BLOCK */
 
 struct block_device;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8ee27b8..8269caf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -111,6 +111,31 @@
 	BPF_WRITE = 2
 };
 
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+	NOT_INIT = 0,		 /* nothing was written into register */
+	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
+	PTR_TO_CTX,		 /* reg points to bpf_context */
+	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
+	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
+	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+	FRAME_PTR,		 /* reg == frame_pointer */
+	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
+	CONST_IMM,		 /* constant integer value */
+
+	/* PTR_TO_PACKET represents:
+	 * skb->data
+	 * skb->data + imm
+	 * skb->data + (u16) var
+	 * skb->data + (u16) var + imm
+	 * if (range > 0) then [ptr, ptr + range - off) is safe to access
+	 * if (id > 0) means that some 'var' was added
+	 * if (off > 0) means that 'imm' was added
+	 */
+	PTR_TO_PACKET,
+	PTR_TO_PACKET_END,	 /* skb->data + headlen */
+};
+
 struct bpf_prog;
 
 struct bpf_verifier_ops {
@@ -120,7 +145,8 @@
 	/* return true if 'size' wide access at offset 'off' within bpf_context
 	 * with 'type' (read or write) is allowed
 	 */
-	bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+				enum bpf_reg_type *reg_type);
 
 	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
 				  int src_reg, int ctx_off,
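
Illustrative sketch only, not part of this series: PTR_TO_PACKET/PTR_TO_PACKET_END let the verifier track the range proven by an explicit bounds check, so a program can dereference skb->data directly instead of going through bpf_skb_load_bytes(). A restricted-C sketch of that pattern (compiled with clang for the bpf target, not kernel code; names are assumptions):

#include <linux/bpf.h>
#include <linux/if_ether.h>

int example_prog(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	/* after this check "eth" carries a verified range against data_end */
	if (data + sizeof(*eth) > data_end)
		return 0;

	return eth->h_proto;	/* safe direct load, no helper call */
}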
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index b827e06..146507d 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -51,11 +51,11 @@
 	return ceph_frag_make(newbits,
 			 ceph_frag_value(f) | (i << (24 - newbits)));
 }
-static inline int ceph_frag_is_leftmost(__u32 f)
+static inline bool ceph_frag_is_leftmost(__u32 f)
 {
 	return ceph_frag_value(f) == 0;
 }
-static inline int ceph_frag_is_rightmost(__u32 f)
+static inline bool ceph_frag_is_rightmost(__u32 f)
 {
 	return ceph_frag_value(f) == ceph_frag_mask(f);
 }
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 37f28bf..dfce616 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -153,8 +153,9 @@
 
 /* watch-notify operations */
 enum {
-  WATCH_NOTIFY				= 1, /* notifying watcher */
-  WATCH_NOTIFY_COMPLETE			= 2, /* notifier notified when done */
+	CEPH_WATCH_EVENT_NOTIFY		  = 1, /* notifying watcher */
+	CEPH_WATCH_EVENT_NOTIFY_COMPLETE  = 2, /* notifier notified when done */
+	CEPH_WATCH_EVENT_DISCONNECT       = 3, /* we were disconnected */
 };
 
 
@@ -207,6 +208,8 @@
 	struct ceph_fsid fsid;
 } __attribute__ ((packed));
 
+#define CEPH_FS_CLUSTER_ID_NONE  -1
+
 /*
  * mdsmap flags
  */
@@ -344,6 +347,18 @@
 #define CEPH_XATTR_REPLACE (1 << 1)
 #define CEPH_XATTR_REMOVE  (1 << 31)
 
+/*
+ * readdir request flags;
+ */
+#define CEPH_READDIR_REPLY_BITFLAGS	(1<<0)
+
+/*
+ * readdir reply flags.
+ */
+#define CEPH_READDIR_FRAG_END		(1<<0)
+#define CEPH_READDIR_FRAG_COMPLETE	(1<<8)
+#define CEPH_READDIR_HASH_ORDER		(1<<9)
+
 union ceph_mds_request_args {
 	struct {
 		__le32 mask;                 /* CEPH_CAP_* */
@@ -361,6 +376,7 @@
 		__le32 frag;                 /* which dir fragment */
 		__le32 max_entries;          /* how many dentries to grab */
 		__le32 max_bytes;
+		__le16 flags;
 	} __attribute__ ((packed)) readdir;
 	struct {
 		__le32 mode;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index a6ef9cc..19e9932 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -47,7 +47,7 @@
 /*
  * bounds check input.
  */
-static inline int ceph_has_room(void **p, void *end, size_t n)
+static inline bool ceph_has_room(void **p, void *end, size_t n)
 {
 	return end >= *p && n <= end - *p;
 }
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index db92a8d..690985d 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -180,6 +180,63 @@
 		(off >> PAGE_SHIFT);
 }
 
+/*
+ * These are not meant to be generic - an integer key is assumed.
+ */
+#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld)		\
+static void insert_##name(struct rb_root *root, type *t)		\
+{									\
+	struct rb_node **n = &root->rb_node;				\
+	struct rb_node *parent = NULL;					\
+									\
+	BUG_ON(!RB_EMPTY_NODE(&t->nodefld));				\
+									\
+	while (*n) {							\
+		type *cur = rb_entry(*n, type, nodefld);		\
+									\
+		parent = *n;						\
+		if (t->keyfld < cur->keyfld)				\
+			n = &(*n)->rb_left;				\
+		else if (t->keyfld > cur->keyfld)			\
+			n = &(*n)->rb_right;				\
+		else							\
+			BUG();						\
+	}								\
+									\
+	rb_link_node(&t->nodefld, parent, n);				\
+	rb_insert_color(&t->nodefld, root);				\
+}									\
+static void erase_##name(struct rb_root *root, type *t)			\
+{									\
+	BUG_ON(RB_EMPTY_NODE(&t->nodefld));				\
+	rb_erase(&t->nodefld, root);					\
+	RB_CLEAR_NODE(&t->nodefld);					\
+}
+
+#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)		\
+static type *lookup_##name(struct rb_root *root,			\
+			   typeof(((type *)0)->keyfld) key)		\
+{									\
+	struct rb_node *n = root->rb_node;				\
+									\
+	while (n) {							\
+		type *cur = rb_entry(n, type, nodefld);			\
+									\
+		if (key < cur->keyfld)					\
+			n = n->rb_left;					\
+		else if (key > cur->keyfld)				\
+			n = n->rb_right;				\
+		else							\
+			return cur;					\
+	}								\
+									\
+	return NULL;							\
+}
+
+#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld)			\
+DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld)			\
+DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
+
 extern struct kmem_cache *ceph_inode_cachep;
 extern struct kmem_cache *ceph_cap_cachep;
 extern struct kmem_cache *ceph_cap_flush_cachep;
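
Illustrative sketch only, not part of this series: DEFINE_RB_FUNCS(name, type, keyfld, nodefld) expands into static insert_<name>(), erase_<name>() and lookup_<name>() helpers keyed on an integer field. A hypothetical request tree keyed by tid; the example_ names are assumptions:

#include <linux/rbtree.h>
#include <linux/ceph/libceph.h>

struct example_request {
	u64 tid;
	struct rb_node node;
};

/* generates insert_request(), erase_request() and lookup_request() */
DEFINE_RB_FUNCS(request, struct example_request, tid, node)

static void example_track(struct rb_root *root, struct example_request *req)
{
	RB_CLEAR_NODE(&req->node);	/* insert_request() BUG()s on a linked node */
	insert_request(root, req);
}

static struct example_request *example_find(struct rb_root *root, u64 tid)
{
	return lookup_request(root, tid);
}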
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index e230e7e..e2a92df 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -39,20 +39,31 @@
 	ceph_monc_request_func_t do_request;
 };
 
+typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *);
+
 /*
  * ceph_mon_generic_request is being used for the statfs and
  * mon_get_version requests which are being done a bit differently
  * because we need to get data back to the caller
  */
 struct ceph_mon_generic_request {
+	struct ceph_mon_client *monc;
 	struct kref kref;
 	u64 tid;
 	struct rb_node node;
 	int result;
-	void *buf;
+
 	struct completion completion;
+	ceph_monc_callback_t complete_cb;
+	u64 private_data;          /* r_tid/linger_id */
+
 	struct ceph_msg *request;  /* original request */
 	struct ceph_msg *reply;    /* and reply */
+
+	union {
+		struct ceph_statfs *st;
+		u64 newest;
+	} u;
 };
 
 struct ceph_mon_client {
@@ -77,7 +88,6 @@
 
 	/* pending generic requests */
 	struct rb_root generic_request_tree;
-	int num_generic_requests;
 	u64 last_tid;
 
 	/* subs, indexed with CEPH_SUB_* */
@@ -86,6 +96,7 @@
 		bool want;
 		u32 have; /* epoch */
 	} subs[3];
+	int fs_cluster_id; /* "mdsmap.<id>" sub */
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_file;
@@ -116,16 +127,18 @@
 bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
 			bool continuous);
 void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch);
+void ceph_monc_renew_subs(struct ceph_mon_client *monc);
 
-extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
 extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
 				 unsigned long timeout);
 
 extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
 			       struct ceph_statfs *buf);
 
-extern int ceph_monc_do_get_version(struct ceph_mon_client *monc,
-				    const char *what, u64 *newest);
+int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+			  u64 *newest);
+int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
+				ceph_monc_callback_t cb, u64 private_data);
 
 extern int ceph_monc_open_session(struct ceph_mon_client *monc);
 
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index cbf4609..1b3b6e1 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -20,10 +20,11 @@
 /*
  * completion callback for async writepages
  */
-typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
-				     struct ceph_msg *);
+typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
 typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
 
+#define CEPH_HOMELESS_OSD	-1
+
 /* a given osd we're communicating with */
 struct ceph_osd {
 	atomic_t o_ref;
@@ -32,16 +33,15 @@
 	int o_incarnation;
 	struct rb_node o_node;
 	struct ceph_connection o_con;
-	struct list_head o_requests;
-	struct list_head o_linger_requests;
+	struct rb_root o_requests;
+	struct rb_root o_linger_requests;
 	struct list_head o_osd_lru;
 	struct ceph_auth_handshake o_auth;
 	unsigned long lru_ttl;
-	int o_marked_for_keepalive;
 	struct list_head o_keepalive_item;
+	struct mutex lock;
 };
 
-
 #define CEPH_OSD_SLAB_OPS	2
 #define CEPH_OSD_MAX_OPS	16
 
@@ -104,76 +104,95 @@
 			struct ceph_osd_data response_data;
 			__u8 class_len;
 			__u8 method_len;
-			__u8 argc;
+			u32 indata_len;
 		} cls;
 		struct {
 			u64 cookie;
-			u64 ver;
-			u32 prot_ver;
-			u32 timeout;
-			__u8 flag;
+			__u8 op;           /* CEPH_OSD_WATCH_OP_ */
+			u32 gen;
 		} watch;
 		struct {
+			struct ceph_osd_data request_data;
+		} notify_ack;
+		struct {
+			u64 cookie;
+			struct ceph_osd_data request_data;
+			struct ceph_osd_data response_data;
+		} notify;
+		struct {
 			u64 expected_object_size;
 			u64 expected_write_size;
 		} alloc_hint;
 	};
 };
 
+struct ceph_osd_request_target {
+	struct ceph_object_id base_oid;
+	struct ceph_object_locator base_oloc;
+	struct ceph_object_id target_oid;
+	struct ceph_object_locator target_oloc;
+
+	struct ceph_pg pgid;
+	u32 pg_num;
+	u32 pg_num_mask;
+	struct ceph_osds acting;
+	struct ceph_osds up;
+	int size;
+	int min_size;
+	bool sort_bitwise;
+
+	unsigned int flags;                /* CEPH_OSD_FLAG_* */
+	bool paused;
+
+	int osd;
+};
+
 /* an in-flight request */
 struct ceph_osd_request {
 	u64             r_tid;              /* unique for this client */
 	struct rb_node  r_node;
-	struct list_head r_req_lru_item;
-	struct list_head r_osd_item;
-	struct list_head r_linger_item;
-	struct list_head r_linger_osd_item;
+	struct rb_node  r_mc_node;          /* map check */
 	struct ceph_osd *r_osd;
-	struct ceph_pg   r_pgid;
-	int              r_pg_osds[CEPH_PG_MAX_SIZE];
-	int              r_num_pg_osds;
+
+	struct ceph_osd_request_target r_t;
+#define r_base_oid	r_t.base_oid
+#define r_base_oloc	r_t.base_oloc
+#define r_flags		r_t.flags
 
 	struct ceph_msg  *r_request, *r_reply;
-	int               r_flags;     /* any additional flags for the osd */
 	u32               r_sent;      /* >0 if r_request is sending/sent */
 
 	/* request osd ops array  */
 	unsigned int		r_num_ops;
 
-	/* these are updated on each send */
-	__le32           *r_request_osdmap_epoch;
-	__le32           *r_request_flags;
-	__le64           *r_request_pool;
-	void             *r_request_pgid;
-	__le32           *r_request_attempts;
-	bool              r_paused;
-	struct ceph_eversion *r_request_reassert_version;
-
 	int               r_result;
-	int               r_got_reply;
-	int		  r_linger;
+	bool              r_got_reply;
 
 	struct ceph_osd_client *r_osdc;
 	struct kref       r_kref;
 	bool              r_mempool;
-	struct completion r_completion, r_safe_completion;
+	struct completion r_completion;
+	struct completion r_safe_completion;  /* fsync waiter */
 	ceph_osdc_callback_t r_callback;
 	ceph_osdc_unsafe_callback_t r_unsafe_callback;
-	struct ceph_eversion r_reassert_version;
 	struct list_head  r_unsafe_item;
 
 	struct inode *r_inode;         	      /* for use by callbacks */
 	void *r_priv;			      /* ditto */
 
-	struct ceph_object_locator r_base_oloc;
-	struct ceph_object_id r_base_oid;
-	struct ceph_object_locator r_target_oloc;
-	struct ceph_object_id r_target_oid;
+	/* set by submitter */
+	u64 r_snapid;                         /* for reads, CEPH_NOSNAP o/w */
+	struct ceph_snap_context *r_snapc;    /* for writes */
+	struct timespec r_mtime;              /* ditto */
+	u64 r_data_offset;                    /* ditto */
+	bool r_linger;                        /* don't resend on failure */
 
-	u64               r_snapid;
-	unsigned long     r_stamp;            /* send OR check time */
-
-	struct ceph_snap_context *r_snapc;    /* snap context for writes */
+	/* internal */
+	unsigned long r_stamp;                /* jiffies, send or check time */
+	int r_attempts;
+	struct ceph_eversion r_replay_version; /* aka reassert_version */
+	u32 r_last_force_resend;
+	u32 r_map_dne_bound;
 
 	struct ceph_osd_req_op r_ops[];
 };
@@ -182,44 +201,70 @@
 	struct ceph_object_locator oloc;
 };
 
-struct ceph_osd_event {
-	u64 cookie;
-	int one_shot;
-	struct ceph_osd_client *osdc;
-	void (*cb)(u64, u64, u8, void *);
-	void *data;
-	struct rb_node node;
-	struct list_head osd_node;
-	struct kref kref;
-};
+typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie,
+				 u64 notifier_id, void *data, size_t data_len);
+typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err);
 
-struct ceph_osd_event_work {
-	struct work_struct work;
-	struct ceph_osd_event *event;
-        u64 ver;
-        u64 notify_id;
-        u8 opcode;
+struct ceph_osd_linger_request {
+	struct ceph_osd_client *osdc;
+	u64 linger_id;
+	bool committed;
+	bool is_watch;                  /* watch or notify */
+
+	struct ceph_osd *osd;
+	struct ceph_osd_request *reg_req;
+	struct ceph_osd_request *ping_req;
+	unsigned long ping_sent;
+	unsigned long watch_valid_thru;
+	struct list_head pending_lworks;
+
+	struct ceph_osd_request_target t;
+	u32 last_force_resend;
+	u32 map_dne_bound;
+
+	struct timespec mtime;
+
+	struct kref kref;
+	struct mutex lock;
+	struct rb_node node;            /* osd */
+	struct rb_node osdc_node;       /* osdc */
+	struct rb_node mc_node;         /* map check */
+	struct list_head scan_item;
+
+	struct completion reg_commit_wait;
+	struct completion notify_finish_wait;
+	int reg_commit_error;
+	int notify_finish_error;
+	int last_error;
+
+	u32 register_gen;
+	u64 notify_id;
+
+	rados_watchcb2_t wcb;
+	rados_watcherrcb_t errcb;
+	void *data;
+
+	struct page ***preply_pages;
+	size_t *preply_len;
 };
 
 struct ceph_osd_client {
 	struct ceph_client     *client;
 
 	struct ceph_osdmap     *osdmap;       /* current map */
-	struct rw_semaphore    map_sem;
-	struct completion      map_waiters;
-	u64                    last_requested_map;
+	struct rw_semaphore    lock;
 
-	struct mutex           request_mutex;
 	struct rb_root         osds;          /* osds */
 	struct list_head       osd_lru;       /* idle osds */
-	u64                    timeout_tid;   /* tid of timeout triggering rq */
-	u64                    last_tid;      /* tid of last request */
-	struct rb_root         requests;      /* pending requests */
-	struct list_head       req_lru;	      /* in-flight lru */
-	struct list_head       req_unsent;    /* unsent/need-resend queue */
-	struct list_head       req_notarget;  /* map to no osd */
-	struct list_head       req_linger;    /* lingering requests */
-	int                    num_requests;
+	spinlock_t             osd_lru_lock;
+	struct ceph_osd        homeless_osd;
+	atomic64_t             last_tid;      /* tid of last request */
+	u64                    last_linger_id;
+	struct rb_root         linger_requests; /* lingering requests */
+	struct rb_root         map_checks;
+	struct rb_root         linger_map_checks;
+	atomic_t               num_requests;
+	atomic_t               num_homeless;
 	struct delayed_work    timeout_work;
 	struct delayed_work    osds_timeout_work;
 #ifdef CONFIG_DEBUG_FS
@@ -231,13 +276,14 @@
 	struct ceph_msgpool	msgpool_op;
 	struct ceph_msgpool	msgpool_op_reply;
 
-	spinlock_t		event_lock;
-	struct rb_root		event_tree;
-	u64			event_count;
-
 	struct workqueue_struct	*notify_wq;
 };
 
+static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
+{
+	return osdc->osdmap->flags & flag;
+}
+
 extern int ceph_osdc_setup(void);
 extern void ceph_osdc_cleanup(void);
 
@@ -271,9 +317,6 @@
 extern struct ceph_osd_data *osd_req_op_extent_osd_data(
 					struct ceph_osd_request *osd_req,
 					unsigned int which);
-extern struct ceph_osd_data *osd_req_op_cls_response_data(
-					struct ceph_osd_request *osd_req,
-					unsigned int which);
 
 extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
 					unsigned int which,
@@ -309,9 +352,6 @@
 extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
 				 u16 opcode, const char *name, const void *value,
 				 size_t size, u8 cmp_op, u8 cmp_mode);
-extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
-					unsigned int which, u16 opcode,
-					u64 cookie, u64 version, int flag);
 extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
 				       unsigned int which,
 				       u64 expected_object_size,
@@ -322,11 +362,7 @@
 					       unsigned int num_ops,
 					       bool use_mempool,
 					       gfp_t gfp_flags);
-
-extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
-				    struct ceph_snap_context *snapc,
-				    u64 snap_id,
-				    struct timespec *mtime);
+int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
 
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
 				      struct ceph_file_layout *layout,
@@ -338,9 +374,6 @@
 				      u32 truncate_seq, u64 truncate_size,
 				      bool use_mempool);
 
-extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
-					 struct ceph_osd_request *req);
-
 extern void ceph_osdc_get_request(struct ceph_osd_request *req);
 extern void ceph_osdc_put_request(struct ceph_osd_request *req);
 
@@ -353,6 +386,7 @@
 extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
 
 extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc);
 
 extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 			       struct ceph_vino vino,
@@ -371,11 +405,33 @@
 				struct timespec *mtime,
 				struct page **pages, int nr_pages);
 
-/* watch/notify events */
-extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
-				  void (*event_cb)(u64, u64, u8, void *),
-				  void *data, struct ceph_osd_event **pevent);
-extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
-extern void ceph_osdc_put_event(struct ceph_osd_event *event);
+/* watch/notify */
+struct ceph_osd_linger_request *
+ceph_osdc_watch(struct ceph_osd_client *osdc,
+		struct ceph_object_id *oid,
+		struct ceph_object_locator *oloc,
+		rados_watchcb2_t wcb,
+		rados_watcherrcb_t errcb,
+		void *data);
+int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
+		      struct ceph_osd_linger_request *lreq);
+
+int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
+			 struct ceph_object_id *oid,
+			 struct ceph_object_locator *oloc,
+			 u64 notify_id,
+			 u64 cookie,
+			 void *payload,
+			 size_t payload_len);
+int ceph_osdc_notify(struct ceph_osd_client *osdc,
+		     struct ceph_object_id *oid,
+		     struct ceph_object_locator *oloc,
+		     void *payload,
+		     size_t payload_len,
+		     u32 timeout,
+		     struct page ***preply_pages,
+		     size_t *preply_len);
+int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
+			  struct ceph_osd_linger_request *lreq);
 #endif
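The reworked watch/notify interface replaces the old event objects with linger requests. A rough sketch of registering and tearing down a watch, assuming ceph_osdc_watch() returns an ERR_PTR() on failure; the handlers and the oid/oloc setup are illustrative only:

static void my_watchcb(void *arg, u64 notify_id, u64 cookie,
		       u64 notifier_id, void *data, size_t data_len)
{
	pr_info("notify %llu on cookie %llu, %zu payload bytes\n",
		notify_id, cookie, data_len);
	/* a real handler would ceph_osdc_notify_ack() the notify here */
}

static void my_errcb(void *arg, u64 cookie, int err)
{
	pr_warn("watch error on cookie %llu: %d\n", cookie, err);
}

	/* register; oid/oloc identify the watched object */
	lreq = ceph_osdc_watch(osdc, &oid, &oloc, my_watchcb, my_errcb, NULL);
	if (IS_ERR(lreq))
		return PTR_ERR(lreq);
	/* ... */
	ceph_osdc_unwatch(osdc, lreq);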
 
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e55c08b..9ccf4db 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -24,21 +24,29 @@
 	uint32_t seed;
 };
 
-#define CEPH_POOL_FLAG_HASHPSPOOL  1
+int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
+
+#define CEPH_POOL_FLAG_HASHPSPOOL	(1ULL << 0) /* hash pg seed and pool id
+						       together */
+#define CEPH_POOL_FLAG_FULL		(1ULL << 1) /* pool is full */
 
 struct ceph_pg_pool_info {
 	struct rb_node node;
 	s64 id;
-	u8 type;
+	u8 type; /* CEPH_POOL_TYPE_* */
 	u8 size;
+	u8 min_size;
 	u8 crush_ruleset;
 	u8 object_hash;
+	u32 last_force_request_resend;
 	u32 pg_num, pgp_num;
 	int pg_num_mask, pgp_num_mask;
 	s64 read_tier;
 	s64 write_tier; /* wins for read+write ops */
-	u64 flags;
+	u64 flags; /* CEPH_POOL_FLAG_* */
 	char *name;
+
+	bool was_full;  /* for handle_one_map() */
 };
 
 static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
@@ -57,6 +65,22 @@
 	s64 pool;
 };
 
+static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
+{
+	oloc->pool = -1;
+}
+
+static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
+{
+	return oloc->pool == -1;
+}
+
+static inline void ceph_oloc_copy(struct ceph_object_locator *dest,
+				  const struct ceph_object_locator *src)
+{
+	dest->pool = src->pool;
+}
+
 /*
  * Maximum supported by kernel client object name length
  *
@@ -64,11 +88,47 @@
  */
 #define CEPH_MAX_OID_NAME_LEN 100
 
+/*
+ * 51-char inline_name is long enough for all cephfs and all but one
+ * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
+ * arbitrarily long (~PAGE_SIZE).  It's done once during rbd map; all
+ * other rbd requests fit into inline_name.
+ *
+ * Makes ceph_object_id 64 bytes on 64-bit.
+ */
+#define CEPH_OID_INLINE_LEN 52
+
+/*
+ * Both inline and external buffers have space for a NUL-terminator,
+ * which is carried around.  It's not required though - RADOS object
+ * names don't have to be NUL-terminated and may contain NULs.
+ */
 struct ceph_object_id {
-	char name[CEPH_MAX_OID_NAME_LEN];
+	char *name;
+	char inline_name[CEPH_OID_INLINE_LEN];
 	int name_len;
 };
 
+static inline void ceph_oid_init(struct ceph_object_id *oid)
+{
+	oid->name = oid->inline_name;
+	oid->name_len = 0;
+}
+
+static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
+{
+	return oid->name == oid->inline_name && !oid->name_len;
+}
+
+void ceph_oid_copy(struct ceph_object_id *dest,
+		   const struct ceph_object_id *src);
+__printf(2, 3)
+void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
+__printf(3, 4)
+int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
+		     const char *fmt, ...);
+void ceph_oid_destroy(struct ceph_object_id *oid);
+
 struct ceph_pg_mapping {
 	struct rb_node node;
 	struct ceph_pg pgid;
@@ -87,7 +147,6 @@
 struct ceph_osdmap {
 	struct ceph_fsid fsid;
 	u32 epoch;
-	u32 mkfs_epoch;
 	struct ceph_timespec created, modified;
 
 	u32 flags;         /* CEPH_OSDMAP_* */
@@ -113,52 +172,23 @@
 	int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
 };
 
-static inline void ceph_oid_set_name(struct ceph_object_id *oid,
-				     const char *name)
-{
-	int len;
-
-	len = strlen(name);
-	if (len > sizeof(oid->name)) {
-		WARN(1, "ceph_oid_set_name '%s' len %d vs %zu, truncating\n",
-		     name, len, sizeof(oid->name));
-		len = sizeof(oid->name);
-	}
-
-	memcpy(oid->name, name, len);
-	oid->name_len = len;
-}
-
-static inline void ceph_oid_copy(struct ceph_object_id *dest,
-				 struct ceph_object_id *src)
-{
-	BUG_ON(src->name_len > sizeof(dest->name));
-	memcpy(dest->name, src->name, src->name_len);
-	dest->name_len = src->name_len;
-}
-
-static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
 {
 	return osd >= 0 && osd < map->max_osd &&
 	       (map->osd_state[osd] & CEPH_OSD_EXISTS);
 }
 
-static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
 {
 	return ceph_osd_exists(map, osd) &&
 	       (map->osd_state[osd] & CEPH_OSD_UP);
 }
 
-static inline int ceph_osd_is_down(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
 {
 	return !ceph_osd_is_up(map, osd);
 }
 
-static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
-{
-	return map && (map->flags & flag);
-}
-
 extern char *ceph_osdmap_state_str(char *str, int len, int state);
 extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
 
@@ -192,28 +222,59 @@
 	return 0;
 }
 
+struct ceph_osdmap *ceph_osdmap_alloc(void);
 extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
-					    struct ceph_osdmap *map,
-					    struct ceph_messenger *msgr);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+					     struct ceph_osdmap *map);
 extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
 
+struct ceph_osds {
+	int osds[CEPH_PG_MAX_SIZE];
+	int size;
+	int primary; /* id, NOT index */
+};
+
+static inline void ceph_osds_init(struct ceph_osds *set)
+{
+	set->size = 0;
+	set->primary = -1;
+}
+
+void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);
+
+bool ceph_is_new_interval(const struct ceph_osds *old_acting,
+			  const struct ceph_osds *new_acting,
+			  const struct ceph_osds *old_up,
+			  const struct ceph_osds *new_up,
+			  int old_size,
+			  int new_size,
+			  int old_min_size,
+			  int new_min_size,
+			  u32 old_pg_num,
+			  u32 new_pg_num,
+			  bool old_sort_bitwise,
+			  bool new_sort_bitwise,
+			  const struct ceph_pg *pgid);
+bool ceph_osds_changed(const struct ceph_osds *old_acting,
+		       const struct ceph_osds *new_acting,
+		       bool any_change);
+
 /* calculate mapping of a file extent to an object */
 extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 					 u64 off, u64 len,
 					 u64 *bno, u64 *oxoff, u64 *oxlen);
 
-/* calculate mapping of object to a placement group */
-extern int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
-			       struct ceph_object_locator *oloc,
-			       struct ceph_object_id *oid,
-			       struct ceph_pg *pg_out);
+int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
+			      struct ceph_object_id *oid,
+			      struct ceph_object_locator *oloc,
+			      struct ceph_pg *raw_pgid);
 
-extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
-			       struct ceph_pg pgid,
-			       int *osds, int *primary);
-extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
-				struct ceph_pg pgid);
+void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
+			       const struct ceph_pg *raw_pgid,
+			       struct ceph_osds *up,
+			       struct ceph_osds *acting);
+int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
+			      const struct ceph_pg *raw_pgid);
 
 extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
 						    u64 id);
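ceph_object_id now keeps short names in its inline buffer and only allocates for the rare long ones (e.g. the rbd image-name case described in the comment above). A minimal init/print/destroy sketch; image_name is a hypothetical caller-provided string:

	struct ceph_object_id oid;
	int ret;

	ceph_oid_init(&oid);		/* name points at inline_name */
	ret = ceph_oid_aprintf(&oid, GFP_NOIO, "rbd_id.%s", image_name);
	if (ret)
		return ret;		/* heap allocation failed */

	/* ... use oid ... */

	ceph_oid_destroy(&oid);		/* frees only if heap-allocated */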
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 2f822dc..5c0da61 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -114,8 +114,8 @@
  * compound epoch+version, used by storage layer to serialize mutations
  */
 struct ceph_eversion {
-	__le32 epoch;
 	__le64 version;
+	__le32 epoch;
 } __attribute__ ((packed));
 
 /*
@@ -153,6 +153,11 @@
 #define CEPH_OSDMAP_NOIN     (1<<8)  /* block osd auto mark-in */
 #define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
 #define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
+#define CEPH_OSDMAP_NOSCRUB  (1<<11) /* block periodic scrub */
+#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block periodic deep-scrub */
+#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
+#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
+#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
 
 /*
  * The error code to return when an OSD can't handle a write
@@ -389,6 +394,13 @@
 	CEPH_OSD_FLAG_SKIPRWLOCKS =   0x10000,  /* skip rw locks */
 	CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */
 	CEPH_OSD_FLAG_FLUSH =         0x40000,  /* this is part of flush */
+	CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000,  /* map snap direct to clone id */
+	CEPH_OSD_FLAG_ENFORCE_SNAPC   = 0x100000,  /* use snapc provided even if
+						      pool uses pool snaps */
+	CEPH_OSD_FLAG_REDIRECTED   = 0x200000,  /* op has been redirected */
+	CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000,  /* redirect bit is authoritative */
+	CEPH_OSD_FLAG_FULL_TRY =    0x800000,  /* try op despite full flag */
+	CEPH_OSD_FLAG_FULL_FORCE = 0x1000000,  /* force op despite full flag */
 };
 
 enum {
@@ -415,7 +427,17 @@
 	CEPH_OSD_CMPXATTR_MODE_U64    = 2
 };
 
-#define RADOS_NOTIFY_VER	1
+enum {
+	CEPH_OSD_WATCH_OP_UNWATCH = 0,
+	CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
+	/* note: use only ODD ids to prevent pre-giant code from
+	   interpreting the op as UNWATCH */
+	CEPH_OSD_WATCH_OP_WATCH = 3,
+	CEPH_OSD_WATCH_OP_RECONNECT = 5,
+	CEPH_OSD_WATCH_OP_PING = 7,
+};
+
+const char *ceph_osd_watch_op_name(int o);
 
 /*
  * an individual object operation.  each may be accompanied by some data
@@ -450,10 +472,14 @@
 	        } __attribute__ ((packed)) snap;
 		struct {
 			__le64 cookie;
-			__le64 ver;
-			__u8 flag;	/* 0 = unwatch, 1 = watch */
+			__le64 ver;     /* no longer used */
+			__u8 op;	/* CEPH_OSD_WATCH_OP_* */
+			__le32 gen;     /* registration generation */
 		} __attribute__ ((packed)) watch;
 		struct {
+			__le64 cookie;
+		} __attribute__ ((packed)) notify;
+		struct {
 			__le64 offset, length;
 			__le64 src_offset;
 		} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0c72204..fb39d5a 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,7 +25,7 @@
 #define CLK_SET_PARENT_GATE	BIT(1) /* must be gated across re-parent */
 #define CLK_SET_RATE_PARENT	BIT(2) /* propagate rate change up one level */
 #define CLK_IGNORE_UNUSED	BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT		BIT(4) /* Deprecated: Don't use */
+				/* unused */
 #define CLK_IS_BASIC		BIT(5) /* Basic clk, can't do a to_clk_foo() */
 #define CLK_GET_RATE_NOCACHE	BIT(6) /* do not use the cached clk rate */
 #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
diff --git a/include/linux/console.h b/include/linux/console.h
index 137ac1a..98c8615 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -191,6 +191,8 @@
 
 #ifdef CONFIG_VGA_CONSOLE
 extern bool vgacon_text_force(void);
+#else
+static inline bool vgacon_text_force(void) { return false; }
 #endif
 
 #endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 786ad32..07b83d3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -152,6 +152,8 @@
 extern int cpuidle_play_dead(void);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
 #else
 static inline void disable_cpuidle(void) { }
 static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 	struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
 #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 982a6c4..43d5f0b 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -3,45 +3,62 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/radix-tree.h>
 #include <asm/pgtable.h>
 
+/* We use lowest available exceptional entry bit for locking */
+#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
 		  get_block_t, dio_iodone_t, int flags);
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
-		dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
-		dax_iodone_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+				   pgoff_t index, bool wake_all);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+		unsigned int offset, unsigned int length);
 #else
 static inline struct page *read_dax_sector(struct block_device *bdev,
 		sector_t n)
 {
 	return ERR_PTR(-ENXIO);
 }
+/* Shouldn't ever be called when dax is disabled. */
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+					    pgoff_t index)
+{
+	BUG();
+}
+static inline int __dax_zero_page_range(struct block_device *bdev,
+		sector_t sector, unsigned int offset, unsigned int length)
+{
+	return -ENXIO;
+}
 #endif
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-				unsigned int flags, get_block_t, dax_iodone_t);
+				unsigned int flags, get_block_t);
 int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-				unsigned int flags, get_block_t, dax_iodone_t);
+				unsigned int flags, get_block_t);
 #else
 static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-				pmd_t *pmd, unsigned int flags, get_block_t gb,
-				dax_iodone_t di)
+				pmd_t *pmd, unsigned int flags, get_block_t gb)
 {
 	return VM_FAULT_FALLBACK;
 }
 #define __dax_pmd_fault dax_pmd_fault
 #endif
 int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod)		dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod)	__dax_fault(vma, vmf, gb, iod)
+#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)
+#define __dax_mkwrite(vma, vmf, gb)	__dax_fault(vma, vmf, gb)
 
 static inline bool vma_is_dax(struct vm_area_struct *vma)
 {
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f8506e8..484c879 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -10,6 +10,7 @@
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
 #include <linux/lockref.h>
+#include <linux/stringhash.h>
 
 struct path;
 struct vfsmount;
@@ -52,9 +53,6 @@
 };
 
 #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define hashlen_hash(hashlen) ((u32) (hashlen))
-#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
-#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
 
 struct dentry_stat_t {
 	long nr_dentry;
@@ -65,29 +63,6 @@
 };
 extern struct dentry_stat_t dentry_stat;
 
-/* Name hashing routines. Initial hash value */
-/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash()		0
-
-/* partial hash update function. Assume roughly 4 bits per character */
-static inline unsigned long
-partial_name_hash(unsigned long c, unsigned long prevhash)
-{
-	return (prevhash + (c << 4) + (c >> 4)) * 11;
-}
-
-/*
- * Finally: cut down the number of bits to a int value (and try to avoid
- * losing bits)
- */
-static inline unsigned long end_name_hash(unsigned long hash)
-{
-	return (unsigned int) hash;
-}
-
-/* Compute the hash for a name string. */
-extern unsigned int full_name_hash(const unsigned char *, unsigned int);
-
 /*
  * Try to keep struct dentry aligned on 64 byte cachelines (this will
  * give reasonable cacheline footprint with larger lines without the
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 5871f29..277ab9a 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,13 +15,12 @@
 
 #include <linux/errno.h>
 
-struct pts_fs_info;
-
 #ifdef CONFIG_UNIX98_PTYS
 
-/* Look up a pts fs info and get a ref to it */
-struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
-void devpts_put_ref(struct pts_fs_info *);
+struct pts_fs_info;
+
+struct pts_fs_info *devpts_acquire(struct file *);
+void devpts_release(struct pts_fs_info *);
 
 int devpts_new_index(struct pts_fs_info *);
 void devpts_kill_index(struct pts_fs_info *, int);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3fe90d4..4551c6f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -112,19 +112,24 @@
  * @file: file pointer used for sharing buffers across, and for refcounting.
  * @attachments: list of dma_buf_attachment that denotes all devices attached.
  * @ops: dma_buf_ops associated with this buffer object.
+ * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
+ * @vmapping_counter: used internally to refcnt the vmaps
+ * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
  * @priv: exporter specific private data for this buffer object.
  * @resv: reservation object linked to this dma-buf
+ * @poll: for userspace poll support
+ * @cb_excl: for userspace poll support
+ * @cb_shared: for userspace poll support
  */
 struct dma_buf {
 	size_t size;
 	struct file *file;
 	struct list_head attachments;
 	const struct dma_buf_ops *ops;
-	/* mutex to serialize list manipulation, attach/detach and vmap/unmap */
 	struct mutex lock;
 	unsigned vmapping_counter;
 	void *vmap_ptr;
@@ -188,9 +193,11 @@
 
 /**
  * helper macro for exporters; zeros and fills in most common values
+ *
+ * @name: export-info name
  */
-#define DEFINE_DMA_BUF_EXPORT_INFO(a)	\
-	struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
+	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
 					 .owner = THIS_MODULE }
 
 /**
diff --git a/include/linux/err.h b/include/linux/err.h
index 56762ab..1e35588 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -18,7 +18,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
 
 static inline void * __must_check ERR_PTR(long error)
 {
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 89627b9..7ce9fb1 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -28,5 +28,6 @@
 #define EBADTYPE	527	/* Type not supported by server */
 #define EJUKEBOX	528	/* Request initiated, but will not complete before timeout */
 #define EIOCBQUEUED	529	/* iocb queued, will get completion event */
+#define ERECALLCONFLICT	530	/* conflict with recalled state */
 
 #endif
diff --git a/include/linux/export.h b/include/linux/export.h
index 96e45ea..2f9ccbe 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -38,7 +38,7 @@
 
 #ifdef CONFIG_MODULES
 
-#ifndef __GENKSYMS__
+#if defined(__KERNEL__) && !defined(__GENKSYMS__)
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
@@ -53,7 +53,7 @@
 #endif
 
 /* For every exported symbol, place a struct in the __ksymtab section */
-#define __EXPORT_SYMBOL(sym, sec)				\
+#define ___EXPORT_SYMBOL(sym, sec)				\
 	extern typeof(sym) sym;					\
 	__CRC_SYMBOL(sym, sec)					\
 	static const char __kstrtab_##sym[]			\
@@ -65,6 +65,35 @@
 	__attribute__((section("___ksymtab" sec "+" #sym), unused))	\
 	= { (unsigned long)&sym, __kstrtab_##sym }
 
+#if defined(__KSYM_DEPS__)
+
+/*
+ * For fine-grained build dependencies, we want to tell the build system
+ * about each possible exported symbol even if it isn't actually exported.
+ * We use a string pattern that is unlikely to be valid code, which the
+ * build system filters out of the preprocessor output (see ksym_dep_filter
+ * in scripts/Kbuild.include).
+ */
+#define __EXPORT_SYMBOL(sym, sec)	=== __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, sec)				\
+	__cond_export_sym(sym, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, sec, conf)			\
+	___cond_export_sym(sym, sec, conf)
+#define ___cond_export_sym(sym, sec, enabled)			\
+	__cond_export_sym_##enabled(sym, sec)
+#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
+#define __cond_export_sym_0(sym, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL ___EXPORT_SYMBOL
+#endif
+
 #define EXPORT_SYMBOL(sym)					\
 	__EXPORT_SYMBOL(sym, "")
 
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dfe8835..a964d07 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -673,6 +673,7 @@
 }
 
 /* drivers/video/fb_defio.c */
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
 extern void fb_deferred_io_open(struct fb_info *info,
 				struct inode *inode,
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2b17698..2056e9f 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,6 +49,8 @@
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
+ * @child_list: list of children fences
+ * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b8b3c72..dd28814 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -74,7 +74,6 @@
 			struct buffer_head *bh_result, int create);
 typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 			ssize_t bytes, void *private);
-typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
 
 #define MAY_EXEC		0x00000001
 #define MAY_WRITE		0x00000002
@@ -1730,7 +1729,8 @@
 			struct inode *, struct dentry *, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
-	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
+	int (*setxattr) (struct dentry *, struct inode *,
+			 const char *, const void *, size_t, int);
 	ssize_t (*getxattr) (struct dentry *, struct inode *,
 			     const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
@@ -2352,14 +2352,6 @@
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
-#ifdef CONFIG_FS_DAX
-extern bool blkdev_dax_capable(struct block_device *bdev);
-#else
-static inline bool blkdev_dax_capable(struct block_device *bdev)
-{
-	return false;
-}
-#endif
 
 extern struct super_block *blockdev_superblock;
 
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 604e152..13ba552 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -241,7 +241,7 @@
 
 	/* check the consistency between the backing cache and the FS-Cache
 	 * cookie */
-	bool (*check_consistency)(struct fscache_operation *op);
+	int (*check_consistency)(struct fscache_operation *op);
 
 	/* store the updated auxiliary data on an object */
 	void (*update_object)(struct fscache_object *object);
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 0023088..3f9778c 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -39,6 +39,10 @@
 #define FSL_IFC_VERSION_MASK	0x0F0F0000
 #define FSL_IFC_VERSION_1_0_0	0x01000000
 #define FSL_IFC_VERSION_1_1_0	0x01010000
+#define FSL_IFC_VERSION_2_0_0	0x02000000
+
+#define PGOFFSET_64K	(64*1024)
+#define PGOFFSET_4K	(4*1024)
 
 /*
  * CSPR - Chip Select Property Register
@@ -723,20 +727,26 @@
 	__be32 nand_evter_en;
 	u32 res17[0x2];
 	__be32 nand_evter_intr_en;
-	u32 res18[0x2];
+	__be32 nand_vol_addr_stat;
+	u32 res18;
 	__be32 nand_erattr0;
 	__be32 nand_erattr1;
 	u32 res19[0x10];
 	__be32 nand_fsr;
-	u32 res20;
-	__be32 nand_eccstat[4];
-	u32 res21[0x20];
+	u32 res20[0x3];
+	__be32 nand_eccstat[6];
+	u32 res21[0x1c];
 	__be32 nanndcr;
 	u32 res22[0x2];
 	__be32 nand_autoboot_trgr;
 	u32 res23;
 	__be32 nand_mdr;
-	u32 res24[0x5C];
+	u32 res24[0x1C];
+	__be32 nand_dll_lowcfg0;
+	__be32 nand_dll_lowcfg1;
+	u32 res25;
+	__be32 nand_dll_lowstat;
+	u32 res26[0x3c];
 };
 
 /*
@@ -771,13 +781,12 @@
 	__be32 gpcm_erattr1;
 	__be32 gpcm_erattr2;
 	__be32 gpcm_stat;
-	u32 res4[0x1F3];
 };
 
 /*
  * IFC Controller Registers
  */
-struct fsl_ifc_regs {
+struct fsl_ifc_global {
 	__be32 ifc_rev;
 	u32 res1[0x2];
 	struct {
@@ -803,21 +812,26 @@
 	} ftim_cs[FSL_IFC_BANK_COUNT];
 	u32 res9[0x30];
 	__be32 rb_stat;
-	u32 res10[0x2];
+	__be32 rb_map;
+	__be32 wb_map;
 	__be32 ifc_gcr;
-	u32 res11[0x2];
+	u32 res10[0x2];
 	__be32 cm_evter_stat;
-	u32 res12[0x2];
+	u32 res11[0x2];
 	__be32 cm_evter_en;
-	u32 res13[0x2];
+	u32 res12[0x2];
 	__be32 cm_evter_intr_en;
-	u32 res14[0x2];
+	u32 res13[0x2];
 	__be32 cm_erattr0;
 	__be32 cm_erattr1;
-	u32 res15[0x2];
+	u32 res14[0x2];
 	__be32 ifc_ccr;
 	__be32 ifc_csr;
-	u32 res16[0x2EB];
+	__be32 ddr_ccr_low;
+};
+
+
+struct fsl_ifc_runtime {
 	struct fsl_ifc_nand ifc_nand;
 	struct fsl_ifc_nor ifc_nor;
 	struct fsl_ifc_gpcm ifc_gpcm;
@@ -831,7 +845,8 @@
 struct fsl_ifc_ctrl {
 	/* device info */
 	struct device			*dev;
-	struct fsl_ifc_regs __iomem	*regs;
+	struct fsl_ifc_global __iomem	*gregs;
+	struct fsl_ifc_runtime __iomem	*rregs;
 	int				irq;
 	int				nand_irq;
 	spinlock_t			lock;
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 79c52fa..ad6fa21 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -3,92 +3,94 @@
 /* Fast hashing routine for ints,  longs and pointers.
    (C) 2002 Nadia Yvette Chambers, IBM */
 
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-
 #include <asm/types.h>
 #include <linux/compiler.h>
 
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
-
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and
+ * fs/inode.c.  It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
 #if BITS_PER_LONG == 32
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
 #define hash_long(val, bits) hash_32(val, bits)
 #elif BITS_PER_LONG == 64
 #define hash_long(val, bits) hash_64(val, bits)
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
 #else
 #error Wordsize not 32 or 64
 #endif
 
 /*
- * The above primes are actively bad for hashing, since they are
- * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
- * real problems. Besides, the "prime" part is pointless for the
- * multiplicative hash.
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits.  Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
  *
  * Although a random odd number will do, it turns out that the golden
  * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
- * properties.
+ * properties.  (See Knuth vol 3, section 6.4, exercise 9.)
  *
- * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
- * (See Knuth vol 3, section 6.4, exercise 9.)
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
  */
 #define GOLDEN_RATIO_32 0x61C88647
 #define GOLDEN_RATIO_64 0x61C8864680B583EBull
 
-static __always_inline u64 hash_64(u64 val, unsigned int bits)
-{
-	u64 hash = val;
-
-#if BITS_PER_LONG == 64
-	hash = hash * GOLDEN_RATIO_64;
-#else
-	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
-	u64 n = hash;
-	n <<= 18;
-	hash -= n;
-	n <<= 33;
-	hash -= n;
-	n <<= 3;
-	hash += n;
-	n <<= 3;
-	hash -= n;
-	n <<= 4;
-	hash += n;
-	n <<= 2;
-	hash += n;
+#ifdef CONFIG_HAVE_ARCH_HASH
+/* This header may use the GOLDEN_RATIO_xx constants */
+#include <asm/hash.h>
 #endif
 
-	/* High bits are more random, so use them. */
-	return hash >> (64 - bits);
-}
-
-static inline u32 hash_32(u32 val, unsigned int bits)
+/*
+ * The _generic versions exist only so lib/test_hash.c can compare
+ * the arch-optimized versions with the generic.
+ *
+ * Note that if you change these, any <asm/hash.h> that aren't updated
+ * to match need to have their HAVE_ARCH_* define values updated so the
+ * self-test will not false-positive.
+ */
+#ifndef HAVE_ARCH__HASH_32
+#define __hash_32 __hash_32_generic
+#endif
+static inline u32 __hash_32_generic(u32 val)
 {
-	/* On some cpus multiply is faster, on others gcc will do shifts */
-	u32 hash = val * GOLDEN_RATIO_PRIME_32;
-
-	/* High bits are more random, so use them. */
-	return hash >> (32 - bits);
+	return val * GOLDEN_RATIO_32;
 }
 
-static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+#ifndef HAVE_ARCH_HASH_32
+#define hash_32 hash_32_generic
+#endif
+static inline u32 hash_32_generic(u32 val, unsigned int bits)
+{
+	/* High bits are more random, so use them. */
+	return __hash_32(val) >> (32 - bits);
+}
+
+#ifndef HAVE_ARCH_HASH_64
+#define hash_64 hash_64_generic
+#endif
+static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+	/* 64x64-bit multiply is efficient on all 64-bit processors */
+	return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+	/* Hash 64 bits using only 32x32-bit multiply. */
+	return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
+}
+
+static inline u32 hash_ptr(const void *ptr, unsigned int bits)
 {
 	return hash_long((unsigned long)ptr, bits);
 }
 
+/* This really should be called fold32_ptr; it does no hashing to speak of. */
 static inline u32 hash32_ptr(const void *ptr)
 {
 	unsigned long val = (unsigned long)ptr;
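A small worked example of the multiplicative scheme above, hashing into a 256-bucket table (the table size and helper names are illustrative):

#include <linux/hash.h>

#define MY_HASH_BITS	8	/* 2^8 = 256 buckets, example only */

static inline u32 bucket_for_key(u64 key)
{
	/* 64-bit: (key * GOLDEN_RATIO_64) >> (64 - 8); 32-bit: folds halves */
	return hash_64(key, MY_HASH_BITS);
}

static inline u32 bucket_for_ptr(const void *p)
{
	return hash_ptr(p, MY_HASH_BITS);	/* hash_long() on the pointer */
}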
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 7c27fa1..feb04ea 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -52,6 +52,12 @@
 
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+			     struct inet_diag_msg *r, int ext,
+			     struct user_namespace *user_ns);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* _INET_DIAG_H_ */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 92f7177..f27bb2c 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -19,8 +19,21 @@
 /* iova structure */
 struct iova {
 	struct rb_node	node;
-	unsigned long	pfn_hi; /* IOMMU dish out addr hi */
-	unsigned long	pfn_lo; /* IOMMU dish out addr lo */
+	unsigned long	pfn_hi; /* Highest allocated pfn */
+	unsigned long	pfn_lo; /* Lowest allocated pfn */
+};
+
+struct iova_magazine;
+struct iova_cpu_rcache;
+
+#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
+#define MAX_GLOBAL_MAGS 32	/* magazines per bin */
+
+struct iova_rcache {
+	spinlock_t lock;
+	unsigned long depot_size;
+	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+	struct iova_cpu_rcache __percpu *cpu_rcaches;
 };
 
 /* holds all the iova translations for a domain */
@@ -31,6 +44,7 @@
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
+	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
 };
 
 static inline unsigned long iova_size(struct iova *iova)
@@ -78,6 +92,10 @@
 struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned);
+void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
+		    unsigned long size);
+unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+			      unsigned long limit_pfn);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
@@ -87,5 +105,6 @@
 void put_iova_domain(struct iova_domain *iovad);
 struct iova *split_and_remove_iova(struct iova_domain *iovad,
 	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 
 #endif
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9e6fdd3..dc493e0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -273,6 +273,12 @@
 #define ICH_LR_ACTIVE_BIT		(1ULL << 63)
 #define ICH_LR_PHYS_ID_SHIFT		32
 #define ICH_LR_PHYS_ID_MASK		(0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT		48
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
+#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
 
 #define ICH_MISR_EOI			(1 << 0)
 #define ICH_MISR_U			(1 << 1)
@@ -299,12 +305,12 @@
 #define ICC_SGI1R_AFFINITY_1_SHIFT	16
 #define ICC_SGI1R_AFFINITY_1_MASK	(0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
 #define ICC_SGI1R_SGI_ID_SHIFT		24
-#define ICC_SGI1R_SGI_ID_MASK		(0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_SGI_ID_MASK		(0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
 #define ICC_SGI1R_AFFINITY_2_SHIFT	32
-#define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
 #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT	40
 #define ICC_SGI1R_AFFINITY_3_SHIFT	48
-#define ICC_SGI1R_AFFINITY_3_MASK	(0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_MASK	(0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
 
 #include <asm/arch_gicv3.h>
 
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9c94026..fd05185 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -33,6 +33,7 @@
 
 #define GIC_DIST_CTRL			0x000
 #define GIC_DIST_CTR			0x004
+#define GIC_DIST_IIDR			0x008
 #define GIC_DIST_IGROUP			0x080
 #define GIC_DIST_ENABLE_SET		0x100
 #define GIC_DIST_ENABLE_CLEAR		0x180
@@ -76,6 +77,7 @@
 #define GICH_LR_VIRTUALID		(0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT	(10)
 #define GICH_LR_PHYSID_CPUID		(0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PRIORITY_SHIFT		23
 #define GICH_LR_STATE			(3 << 28)
 #define GICH_LR_PENDING_BIT		(1 << 28)
 #define GICH_LR_ACTIVE_BIT		(1 << 29)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index fd1083c..efb232c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -403,11 +403,19 @@
 
 /* Flags in jbd_inode->i_flags */
 #define __JI_COMMIT_RUNNING 0
-/* Commit of the inode data in progress. We use this flag to protect us from
+#define __JI_WRITE_DATA 1
+#define __JI_WAIT_DATA 2
+
+/*
+ * Commit of the inode data in progress. We use this flag to protect us from
  * concurrent deletion of inode. We cannot use reference to inode for this
  * since we cannot afford doing last iput() on behalf of kjournald
  */
 #define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)
+/* Write allocated dirty buffers in this inode before commit */
+#define JI_WRITE_DATA (1 << __JI_WRITE_DATA)
+/* Wait for outstanding data writes for this inode before commit */
+#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
 
 /**
  * struct jbd_inode is the structure linking inodes in ordered mode
@@ -781,9 +789,6 @@
  * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
  *	number that will fit in j_blocksize
  * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_history: Buffer storing the transactions statistics history
- * @j_history_max: Maximum number of transactions in the statistics history
- * @j_history_cur: Current number of transactions in the statistics history
  * @j_history_lock: Protect the transactions statistics history
  * @j_proc_entry: procfs entry for the jbd statistics directory
  * @j_stats: Overall statistics
@@ -1270,7 +1275,8 @@
 extern int	   jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
 extern int	   jbd2_journal_force_commit(journal_t *);
 extern int	   jbd2_journal_force_commit_nested(journal_t *);
-extern int	   jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+extern int	   jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
+extern int	   jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
 extern int	   jbd2_journal_begin_ordered_truncate(journal_t *journal,
 				struct jbd2_inode *inode, loff_t new_size);
 extern void	   jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
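jbd2_journal_file_inode() is split into the two entry points above, matching the new JI_WRITE_DATA/JI_WAIT_DATA flags. A hedged sketch of how a caller might choose between them; the allocation-based policy and the ei->jinode field are illustrative, and the real policy is filesystem-specific:

	/*
	 * Illustrative policy: data landing in newly allocated blocks must
	 * be written out before the transaction commits; overwrites of
	 * existing blocks only need to be waited on.
	 */
	if (newly_allocated)
		err = jbd2_journal_inode_add_write(handle, ei->jinode);
	else
		err = jbd2_journal_inode_add_wait(handle, ei->jinode);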
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2cc643c..e8acb2b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -230,8 +230,6 @@
 int kexec_should_crash(struct task_struct *);
 void crash_save_cpu(struct pt_regs *regs, int cpu);
 void crash_save_vmcoreinfo(void);
-void crash_map_reserved_pages(void);
-void crash_unmap_reserved_pages(void);
 void arch_crash_save_vmcoreinfo(void);
 __printf(1, 2)
 void vmcoreinfo_append_str(const char *fmt, ...);
@@ -317,6 +315,8 @@
 					Elf_Shdr *sechdrs, unsigned int relsec);
 int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 					unsigned int relsec);
+void arch_kexec_protect_crashkres(void);
+void arch_kexec_unprotect_crashkres(void);
 
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b1fa8f1..1c9c973 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -412,6 +412,8 @@
 #endif
 	long tlbs_dirty;
 	struct list_head devices;
+	struct dentry *debugfs_dentry;
+	struct kvm_stat_data **debugfs_stat_data;
 };
 
 #define kvm_err(fmt, ...) \
@@ -991,6 +993,11 @@
 	KVM_STAT_VCPU,
 };
 
+struct kvm_stat_data {
+	int offset;
+	struct kvm *kvm;
+};
+
 struct kvm_stats_debugfs_item {
 	const char *name;
 	int offset;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2c4ebef..d15c19e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -180,6 +180,8 @@
 	ATA_DFLAG_DA		= (1 << 26), /* device supports Device Attention */
 	ATA_DFLAG_DEVSLP	= (1 << 27), /* device supports Device Sleep */
 	ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
+	ATA_DFLAG_D_SENSE	= (1 << 29), /* Descriptor sense requested */
+	ATA_DFLAG_ZAC		= (1 << 30), /* ZAC device */
 
 	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
 	ATA_DEV_ATA		= 1,	/* ATA device */
@@ -191,7 +193,8 @@
 	ATA_DEV_SEMB		= 7,	/* SEMB */
 	ATA_DEV_SEMB_UNSUP	= 8,	/* SEMB (unsupported) */
 	ATA_DEV_ZAC		= 9,	/* ZAC device */
-	ATA_DEV_NONE		= 10,	/* no device */
+	ATA_DEV_ZAC_UNSUP	= 10,	/* ZAC device (unsupported) */
+	ATA_DEV_NONE		= 11,	/* no device */
 
 	/* struct ata_link flags */
 	ATA_LFLAG_NO_HRST	= (1 << 1), /* avoid hardreset */
@@ -727,6 +730,13 @@
 
 	/* NCQ send and receive log subcommand support */
 	u8			ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE];
+	u8			ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE];
+
+	/* ZAC zone configuration */
+	u32			zac_zoned_cap;
+	u32			zac_zones_optimal_open;
+	u32			zac_zones_optimal_nonseq;
+	u32			zac_zones_max_open;
 
 	/* error history */
 	int			spdn_cnt;
@@ -1523,7 +1533,8 @@
 static inline unsigned int ata_class_disabled(unsigned int class)
 {
 	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP ||
-		class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP;
+		class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP ||
+		class == ATA_DEV_ZAC_UNSUP;
 }
 
 static inline unsigned int ata_class_absent(unsigned int class)
@@ -1641,6 +1652,26 @@
 		 ATA_LOG_NCQ_SEND_RECV_DSM_TRIM);
 }
 
+static inline bool ata_fpdma_read_log_supported(struct ata_device *dev)
+{
+	return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
+		(dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] &
+		 ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED);
+}
+
+static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev)
+{
+	return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
+		(dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] &
+		ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED);
+}
+
+static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev)
+{
+	return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] &
+		ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT);
+}
+
 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
 {
 	qc->tf.ctl |= ATA_NIEN;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 833867b..0c3c30c 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -27,7 +27,7 @@
 	/* need to set a limit somewhere, but yes, this is likely overkill */
 	ND_IOCTL_MAX_BUFLEN = SZ_4M,
 	ND_CMD_MAX_ELEM = 5,
-	ND_CMD_MAX_ENVELOPE = 16,
+	ND_CMD_MAX_ENVELOPE = 256,
 	ND_MAX_MAPPINGS = 32,
 
 	/* region flag indicating to direct-map persistent memory by default */
@@ -68,7 +68,7 @@
 
 struct nvdimm_bus_descriptor {
 	const struct attribute_group **attr_groups;
-	unsigned long dsm_mask;
+	unsigned long cmd_mask;
 	char *provider_name;
 	ndctl_fn ndctl;
 	int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
@@ -130,10 +130,11 @@
 struct nd_blk_region *to_nd_blk_region(struct device *dev);
 struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
 const char *nvdimm_name(struct nvdimm *nvdimm);
+unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
 void *nvdimm_provider_data(struct nvdimm *nvdimm);
 struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
 		const struct attribute_group **groups, unsigned long flags,
-		unsigned long *dsm_mask);
+		unsigned long cmd_mask);
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
 const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
 u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 20d8a5d..5145620 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -182,7 +182,7 @@
 #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
 
 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
 #else
 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
 {
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index a677c2b..64184d2 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -50,9 +50,11 @@
 					EC_MSG_TX_TRAILER_BYTES,
 	EC_MSG_RX_PROTO_BYTES	= 3,
 
-	/* Max length of messages */
-	EC_MSG_BYTES		= EC_PROTO2_MAX_PARAM_SIZE +
+	/* Max length of messages for proto 2 */
+	EC_PROTO2_MSG_BYTES		= EC_PROTO2_MAX_PARAM_SIZE +
 					EC_MSG_TX_PROTO_BYTES,
+
+	EC_MAX_MSG_BYTES		= 64 * 1024,
 };
 
 /*
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8f9fc3d..8e95cd8 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -134,6 +134,7 @@
 #define TWL6040_HFDACENA		(1 << 0)
 #define TWL6040_HFPGAENA		(1 << 1)
 #define TWL6040_HFDRVENA		(1 << 4)
+#define TWL6040_HFSWENA			(1 << 6)
 
 /* VIBCTLL/R (0x18/0x1A) fields */
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 80dec87..d46a0e7 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -466,6 +466,7 @@
 enum {
 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
+	MLX4_INTERFACE_STATE_SHUTDOWN	= 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 035abdf..73a4847 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1240,8 +1240,6 @@
 	u8                      rsvd[8];
 };
 
-#define MLX5_CMD_OP_MAX 0x920
-
 enum {
 	VPORT_STATE_DOWN		= 0x0,
 	VPORT_STATE_UP			= 0x1,
@@ -1369,6 +1367,12 @@
 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
 	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
 
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
 	MLX5_GET(flow_table_eswitch_cap, \
 		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 9a05cd7..e955a28 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -205,7 +205,8 @@
 	MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
 	MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
 	MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
-	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c
+	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
+	MLX5_CMD_OP_MAX
 };
 
 struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x1b];
+	u8         reserved_at_5[0x19];
+	u8         nic_vport_node_guid_modify[0x1];
+	u8         nic_vport_port_guid_modify[0x1];
 
 	u8         reserved_at_20[0x7e0];
 };
@@ -4583,7 +4586,10 @@
 };
 
 struct mlx5_ifc_modify_nic_vport_field_select_bits {
-	u8         reserved_at_0[0x19];
+	u8         reserved_at_0[0x16];
+	u8         node_guid[0x1];
+	u8         port_guid[0x1];
+	u8         reserved_at_18[0x1];
 	u8         mtu[0x1];
 	u8         change_event[0x1];
 	u8         promisc[0x1];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 6422102..266320f 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -460,10 +460,9 @@
 };
 
 struct mlx5_qp_path {
-	u8			fl;
+	u8			fl_free_ar;
 	u8			rsvd3;
-	u8			free_ar;
-	u8			pkey_index;
+	__be16			pkey_index;
 	u8			rsvd0;
 	u8			grh_mlid;
 	__be16			rlid;
@@ -560,6 +559,7 @@
 	__be32			optparam;
 	u8			rsvd0[4];
 	struct mlx5_qp_context	ctx;
+	u8			rsvd2[16];
 };
 
 struct mlx5_modify_qp_mbox_out {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 301da4a..6c16c19 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -50,6 +50,8 @@
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
 					   u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+				    u32 vport, u64 node_guid);
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
 					u16 *qkey_viol_cntr);
 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b530c99..5df5feb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -303,6 +303,12 @@
 					 * is set (which is also implied by
 					 * VM_FAULT_ERROR).
 					 */
+	void *entry;			/* ->fault handler can alternatively
+					 * return locked DAX entry. In that
+					 * case handler should return
+					 * VM_FAULT_DAX_LOCKED and fill in
+					 * entry here.
+					 */
 	/* for ->map_pages() only */
 	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
 					 * max_pgoff inclusive */
@@ -1076,6 +1082,7 @@
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 #define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
+#define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
@@ -2011,9 +2018,9 @@
 #endif
 
 /* These take the mm semaphore themselves */
-extern unsigned long vm_brk(unsigned long, unsigned long);
+extern int __must_check vm_brk(unsigned long, unsigned long);
 extern int vm_munmap(unsigned long, size_t);
-extern unsigned long vm_mmap(struct file *, unsigned long,
+extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d553855..ca3e517 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,7 +514,9 @@
 #ifdef CONFIG_HUGETLB_PAGE
 	atomic_long_t hugetlb_usage;
 #endif
+#ifdef CONFIG_MMU
 	struct work_struct async_put_work;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 85800b4..45cde8c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -329,6 +329,7 @@
 	unsigned int		can_retune:1;	/* re-tuning can be used */
 	unsigned int		doing_retune:1;	/* re-tuning in progress */
 	unsigned int		retune_now:1;	/* do re-tuning at next req */
+	unsigned int		retune_paused:1; /* re-tuning is temporarily disabled */
 
 	int			rescan_disable;	/* disable card detection */
 	int			rescan_entered;	/* used with nonremovable devices */
@@ -526,4 +527,7 @@
 		host->retune_now = 1;
 }
 
+void mmc_retune_pause(struct mmc_host *host);
+void mmc_retune_unpause(struct mmc_host *host);
+
 #endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index c8be32e..ad3c348 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -103,24 +103,6 @@
 
 #define FSMC_BUSY_WAIT_TIMEOUT	(1 * HZ)
 
-/*
- * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
- * and it has to be read consecutively and immediately after the 512
- * byte data block for hardware to generate the error bit offsets
- * Managing the ecc bytes in the following way is easier. This way is
- * similar to oobfree structure maintained already in u-boot nand driver
- */
-#define MAX_ECCPLACE_ENTRIES	32
-
-struct fsmc_nand_eccplace {
-	uint8_t offset;
-	uint8_t length;
-};
-
-struct fsmc_eccplace {
-	struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
-};
-
 struct fsmc_nand_timings {
 	uint8_t tclr;
 	uint8_t tar;
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 5e0eb7c..3aa56e3 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -122,18 +122,13 @@
 #endif
 
 #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
-# ifdef map_bankwidth
-#  undef map_bankwidth
-#  define map_bankwidth(map) ((map)->bankwidth)
-#  undef map_bankwidth_is_large
-#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-#  undef map_words
-#  define map_words(map) map_calc_words(map)
-# else
-#  define map_bankwidth(map) 32
-#  define map_bankwidth_is_large(map) (1)
-#  define map_words(map) map_calc_words(map)
-# endif
+/* always use indirect access for 256-bit to preserve kernel stack */
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
 #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
 #undef MAX_MAP_BANKWIDTH
 #define MAX_MAP_BANKWIDTH 32
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index ef9fea4..29a1706 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -96,16 +96,35 @@
 
 #define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
 #define MTD_MAX_ECCPOS_ENTRIES_LARGE	640
-/*
- * Internal ECC layout control structure. For historical reasons, there is a
- * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
- * for export to user-space via the ECCGETLAYOUT ioctl.
- * nand_ecclayout should be expandable in the future simply by the above macros.
+/**
+ * struct mtd_oob_region - oob region definition
+ * @offset: region offset
+ * @length: region length
+ *
+ * This structure describes a region of the OOB area, and is used
+ * to retrieve ECC or free bytes sections.
+ * Each section is defined by an offset within the OOB area and a
+ * length.
  */
-struct nand_ecclayout {
-	__u32 eccbytes;
-	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
-	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
+struct mtd_oob_region {
+	u32 offset;
+	u32 length;
+};
+
+/**
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
+ * @ecc: function returning an ECC region in the OOB area.
+ *	 Should return -ERANGE if %section exceeds the total number of
+ *	 ECC sections.
+ * @free: function returning a free region in the OOB area.
+ *	  Should return -ERANGE if %section exceeds the total number of
+ *	  free sections.
+ */
+struct mtd_ooblayout_ops {
+	int (*ecc)(struct mtd_info *mtd, int section,
+		   struct mtd_oob_region *oobecc);
+	int (*free)(struct mtd_info *mtd, int section,
+		    struct mtd_oob_region *oobfree);
 };
 
 struct module;	/* only needed for owner field in mtd_info */
@@ -166,8 +185,8 @@
 	const char *name;
 	int index;
 
-	/* ECC layout structure pointer - read only! */
-	struct nand_ecclayout *ecclayout;
+	/* OOB layout description */
+	const struct mtd_ooblayout_ops *ooblayout;
 
 	/* the ecc step size. */
 	unsigned int ecc_step_size;
@@ -253,6 +272,30 @@
 	int usecount;
 };
 
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+		      struct mtd_oob_region *oobecc);
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+				 int *section,
+				 struct mtd_oob_region *oobregion);
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+			       const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+			       u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+		       struct mtd_oob_region *oobfree);
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+				const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+				u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
+
+static inline void mtd_set_ooblayout(struct mtd_info *mtd,
+				     const struct mtd_ooblayout_ops *ooblayout)
+{
+	mtd->ooblayout = ooblayout;
+}
+
 static inline void mtd_set_of_node(struct mtd_info *mtd,
 				   struct device_node *np)
 {
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 56574ba..fbe8e16 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -116,9 +116,14 @@
 	NAND_ECC_HW,
 	NAND_ECC_HW_SYNDROME,
 	NAND_ECC_HW_OOB_FIRST,
-	NAND_ECC_SOFT_BCH,
 } nand_ecc_modes_t;
 
+enum nand_ecc_algo {
+	NAND_ECC_UNKNOWN,
+	NAND_ECC_HAMMING,
+	NAND_ECC_BCH,
+};
+
 /*
  * Constants for Hardware ECC
  */
@@ -458,6 +463,7 @@
 /**
  * struct nand_ecc_ctrl - Control structure for ECC
  * @mode:	ECC mode
+ * @algo:	ECC algorithm
  * @steps:	number of ECC steps per page
  * @size:	data bytes per ECC step
  * @bytes:	ECC bytes per step
@@ -466,7 +472,6 @@
  * @prepad:	padding information for syndrome based ECC generators
  * @postpad:	padding information for syndrome based ECC generators
  * @options:	ECC specific options (see NAND_ECC_XXX flags defined above)
- * @layout:	ECC layout control struct pointer
  * @priv:	pointer to private ECC control data
  * @hwctl:	function to control hardware ECC generator. Must only
  *		be provided if an hardware ECC is available
@@ -508,6 +513,7 @@
  */
 struct nand_ecc_ctrl {
 	nand_ecc_modes_t mode;
+	enum nand_ecc_algo algo;
 	int steps;
 	int size;
 	int bytes;
@@ -516,7 +522,6 @@
 	int prepad;
 	int postpad;
 	unsigned int options;
-	struct nand_ecclayout	*layout;
 	void *priv;
 	void (*hwctl)(struct mtd_info *mtd, int mode);
 	int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
@@ -740,6 +745,9 @@
 	void *priv;
 };
 
+extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
+extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
+
 static inline void nand_set_flash_node(struct nand_chip *chip,
 				       struct device_node *np)
 {
@@ -1070,4 +1078,18 @@
 				void *ecc, int ecclen,
 				void *extraoob, int extraooblen,
 				int threshold);
+
+/* Default write_oob implementation */
+int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+
+/* Default write_oob syndrome implementation */
+int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page);
+
+/* Default read_oob implementation */
+int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+
+/* Default read_oob syndrome implementation */
+int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			   int page);
 #endif /* __LINUX_MTD_NAND_H */
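
A hedged migration sketch for drivers that previously requested the removed
NAND_ECC_SOFT_BCH mode: select the software ECC mode and name the algorithm
via the new algo field. The helper name and the size/strength values are
illustrative only.

#include <linux/mtd/nand.h>

static void example_setup_soft_bch(struct nand_chip *chip)
{
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_BCH;
	chip->ecc.size = 512;	/* data bytes per ECC step (driver choice) */
	chip->ecc.strength = 8;	/* correctable bits per step (driver choice) */
}
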
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4596503..0aaa98b 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -80,7 +80,6 @@
  * @page_buf:		[INTERN] page main data buffer
  * @oob_buf:		[INTERN] page oob data buffer
  * @subpagesize:	[INTERN] holds the subpagesize
- * @ecclayout:		[REPLACEABLE] the default ecc placement scheme
  * @bbm:		[REPLACEABLE] pointer to Bad Block Management
  * @priv:		[OPTIONAL] pointer to private chip date
  */
@@ -134,7 +133,6 @@
 #endif
 
 	int			subpagesize;
-	struct nand_ecclayout	*ecclayout;
 
 	void			*bbm;
 
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index 25f4d2a..65e91d0 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -14,7 +14,7 @@
 
 struct sharpsl_nand_platform_data {
 	struct nand_bbt_descr	*badblock_pattern;
-	struct nand_ecclayout	*ecc_layout;
+	const struct mtd_ooblayout_ops *ecc_layout;
 	struct mtd_partition	*partitions;
 	unsigned int		nr_partitions;
 };
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 3c36113..7f041bd 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -21,6 +21,7 @@
  * Sometimes these are the same as CFI IDs, but sometimes they aren't.
  */
 #define SNOR_MFR_ATMEL		CFI_MFR_ATMEL
+#define SNOR_MFR_GIGADEVICE	0xc8
 #define SNOR_MFR_INTEL		CFI_MFR_INTEL
 #define SNOR_MFR_MICRON		CFI_MFR_ST /* ST Micro <--> Micron */
 #define SNOR_MFR_MACRONIX	CFI_MFR_MACRONIX
diff --git a/include/linux/namei.h b/include/linux/namei.h
index ec5ec28..d3d0398 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,8 @@
 #define LOOKUP_ROOT		0x2000
 #define LOOKUP_EMPTY		0x4000
 
+extern int path_pts(struct path *path);
+
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
 
 static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5489ab7..aee2761 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/ndctl.h>
 #include <linux/device.h>
+#include <linux/badblocks.h>
 
 enum nvdimm_event {
 	NVDIMM_REVALIDATE_POISON,
@@ -55,13 +56,19 @@
 }
 
 /**
- * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * struct nd_namespace_io - device representation of a persistent memory range
  * @dev: namespace device created by the nd region driver
  * @res: struct resource conversion of a NFIT SPA table
+ * @size: cached resource_size(@res) for fast path size checks
+ * @addr: virtual address to access the namespace range
+ * @bb: badblocks list for the namespace range
  */
 struct nd_namespace_io {
 	struct nd_namespace_common common;
 	struct resource res;
+	resource_size_t size;
+	void __pmem *addr;
+	struct badblocks bb;
 };
 
 /**
@@ -82,6 +89,7 @@
  * @uuid: namespace name supplied in the dimm label
  * @id: ida allocated id
  * @lbasize: blk namespaces have a native sector size when btt not present
+ * @size: sum of all the resource ranges allocated to this namespace
  * @num_resources: number of dpa extents to claim
  * @res: discontiguous dpa extents for given dimm
  */
@@ -91,6 +99,7 @@
 	u8 *uuid;
 	int id;
 	unsigned long lbasize;
+	resource_size_t size;
 	int num_resources;
 	struct resource **res;
 };
diff --git a/include/linux/net.h b/include/linux/net.h
index 9aa49a0..25aa03b 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -251,7 +251,8 @@
 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
 	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
 	    net_ratelimit())						\
-		__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);	\
+		__dynamic_pr_debug(&descriptor, pr_fmt(fmt),		\
+		                   ##__VA_ARGS__);			\
 } while (0)
 #elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)				\
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 0114334..bfed6b3 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -50,12 +50,27 @@
 
 typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
 
-struct nfs_stateid4 {
-	__be32 seqid;
-	char other[NFS4_STATEID_OTHER_SIZE];
-} __attribute__ ((packed));
+struct nfs4_stateid_struct {
+	union {
+		char data[NFS4_STATEID_SIZE];
+		struct {
+			__be32 seqid;
+			char other[NFS4_STATEID_OTHER_SIZE];
+		} __attribute__ ((packed));
+	};
 
-typedef struct nfs_stateid4 nfs4_stateid;
+	enum {
+		NFS4_INVALID_STATEID_TYPE = 0,
+		NFS4_SPECIAL_STATEID_TYPE,
+		NFS4_OPEN_STATEID_TYPE,
+		NFS4_LOCK_STATEID_TYPE,
+		NFS4_DELEGATION_STATEID_TYPE,
+		NFS4_LAYOUT_STATEID_TYPE,
+		NFS4_PNFS_DS_STATEID_TYPE,
+	} type;
+};
+
+typedef struct nfs4_stateid_struct nfs4_stateid;
 
 enum nfs_opnum4 {
 	OP_ACCESS = 3,
@@ -504,6 +519,7 @@
 	NFSPROC4_CLNT_DEALLOCATE,
 	NFSPROC4_CLNT_LAYOUTSTATS,
 	NFSPROC4_CLNT_CLONE,
+	NFSPROC4_CLNT_COPY,
 };
 
 /* nfs41 types */
@@ -621,7 +637,9 @@
 	PNFS_UPDATE_LAYOUT_IO_TEST_FAIL,
 	PNFS_UPDATE_LAYOUT_FOUND_CACHED,
 	PNFS_UPDATE_LAYOUT_RETURN,
+	PNFS_UPDATE_LAYOUT_RETRY,
 	PNFS_UPDATE_LAYOUT_BLOCKED,
+	PNFS_UPDATE_LAYOUT_INVALID_OPEN,
 	PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
 };
 
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7fcc13c..14a762d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -246,5 +246,6 @@
 #define NFS_CAP_DEALLOCATE	(1U << 21)
 #define NFS_CAP_LAYOUTSTATS	(1U << 22)
 #define NFS_CAP_CLONE		(1U << 23)
+#define NFS_CAP_COPY		(1U << 24)
 
 #endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ee8491d..c304a11 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,7 +233,6 @@
 	struct inode *inode;
 	struct nfs_open_context *ctx;
 	nfs4_stateid stateid;
-	unsigned long timestamp;
 	struct nfs4_layoutdriver_data layout;
 };
 
@@ -251,7 +250,6 @@
 	struct nfs4_layoutget_res res;
 	struct rpc_cred *cred;
 	gfp_t gfp_flags;
-	long timeout;
 };
 
 struct nfs4_getdeviceinfo_args {
@@ -1343,6 +1341,32 @@
 	const struct nfs_server		*falloc_server;
 };
 
+struct nfs42_copy_args {
+	struct nfs4_sequence_args	seq_args;
+
+	struct nfs_fh			*src_fh;
+	nfs4_stateid			src_stateid;
+	u64				src_pos;
+
+	struct nfs_fh			*dst_fh;
+	nfs4_stateid			dst_stateid;
+	u64				dst_pos;
+
+	u64				count;
+};
+
+struct nfs42_write_res {
+	u64			count;
+	struct nfs_writeverf	verifier;
+};
+
+struct nfs42_copy_res {
+	struct nfs4_sequence_res	seq_res;
+	struct nfs42_write_res		write_res;
+	bool				consecutive;
+	bool				synchronous;
+};
+
 struct nfs42_seek_args {
 	struct nfs4_sequence_args	seq_args;
 
@@ -1431,7 +1455,7 @@
 };
 
 struct nfs_commit_info {
-	spinlock_t			*lock;	/* inode->i_lock */
+	struct inode			*inode;	/* Needed for inode->i_lock */
 	struct nfs_mds_commit_info	*mds;
 	struct pnfs_ds_commit_info	*ds;
 	struct nfs_direct_req		*dreq;	/* O_DIRECT request */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index e9fcf90..5988dd5 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -13,12 +13,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU Lesser General Public License for more details.
  *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Written by Koji Sato <koji@osrg.net>
- *            Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Koji Sato and Ryusuke Konishi.
  */
 /*
  *  linux/include/linux/ext2_fs.h
@@ -132,10 +127,14 @@
 #define NILFS_MOUNT_ERRORS_RO		0x0020  /* Remount fs ro on errors */
 #define NILFS_MOUNT_ERRORS_PANIC	0x0040  /* Panic on errors */
 #define NILFS_MOUNT_BARRIER		0x1000  /* Use block barriers */
-#define NILFS_MOUNT_STRICT_ORDER	0x2000  /* Apply strict in-order
-						   semantics also for data */
-#define NILFS_MOUNT_NORECOVERY		0x4000  /* Disable write access during
-						   mount-time recovery */
+#define NILFS_MOUNT_STRICT_ORDER	0x2000  /*
+						 * Apply strict in-order
+						 * semantics also for data
+						 */
+#define NILFS_MOUNT_NORECOVERY		0x4000  /*
+						 * Disable write access during
+						 * mount-time recovery
+						 */
 #define NILFS_MOUNT_DISCARD		0x8000  /* Issue DISCARD requests */
 
 
@@ -147,16 +146,20 @@
 	__le16	s_minor_rev_level;	/* minor revision level */
 	__le16	s_magic;		/* Magic signature */
 
-	__le16  s_bytes;		/* Bytes count of CRC calculation
-					   for this structure. s_reserved
-					   is excluded. */
+	__le16  s_bytes;		/*
+					 * Bytes count of CRC calculation
+					 * for this structure. s_reserved
+					 * is excluded.
+					 */
 	__le16  s_flags;		/* flags */
 	__le32  s_crc_seed;		/* Seed value of CRC calculation */
 /*10*/	__le32	s_sum;			/* Check sum of super block */
 
-	__le32	s_log_block_size;	/* Block size represented as follows
-					   blocksize =
-					       1 << (s_log_block_size + 10) */
+	__le32	s_log_block_size;	/*
+					 * Block size represented as follows
+					 * blocksize =
+					 *     1 << (s_log_block_size + 10)
+					 */
 	__le64  s_nsegments;		/* Number of segments in filesystem */
 /*20*/	__le64  s_dev_size;		/* block device size in bytes */
 	__le64	s_first_data_block;	/* 1st seg disk block number */
@@ -168,8 +171,10 @@
 	__le64  s_last_seq;             /* seq. number of seg written last */
 /*50*/	__le64	s_free_blocks_count;	/* Free blocks count */
 
-	__le64	s_ctime;		/* Creation time (execution time of
-					   newfs) */
+	__le64	s_ctime;		/*
+					 * Creation time (execution time of
+					 * newfs)
+					 */
 /*60*/	__le64	s_mtime;		/* Mount time */
 	__le64	s_wtime;		/* Write time */
 /*70*/	__le16	s_mnt_count;		/* Mount count */
@@ -193,8 +198,10 @@
 /*A8*/	char	s_volume_name[80];	/* volume name */
 
 /*F8*/	__le32  s_c_interval;           /* Commit interval of segment */
-	__le32  s_c_block_max;          /* Threshold of data amount for
-					   the segment construction */
+	__le32  s_c_block_max;          /*
+					 * Threshold of data amount for
+					 * the segment construction
+					 */
 /*100*/	__le64  s_feature_compat;	/* Compatible feature set */
 	__le64  s_feature_compat_ro;	/* Read-only compatible feature set */
 	__le64  s_feature_incompat;	/* Incompatible feature set */
@@ -247,12 +254,18 @@
 
 #define NILFS_SB_OFFSET_BYTES	1024	/* byte offset of nilfs superblock */
 
-#define NILFS_SEG_MIN_BLOCKS	16	/* Minimum number of blocks in
-					   a full segment */
-#define NILFS_PSEG_MIN_BLOCKS	2	/* Minimum number of blocks in
-					   a partial segment */
-#define NILFS_MIN_NRSVSEGS	8	/* Minimum number of reserved
-					   segments */
+#define NILFS_SEG_MIN_BLOCKS	16	/*
+					 * Minimum number of blocks in
+					 * a full segment
+					 */
+#define NILFS_PSEG_MIN_BLOCKS	2	/*
+					 * Minimum number of blocks in
+					 * a partial segment
+					 */
+#define NILFS_MIN_NRSVSEGS	8	/*
+					 * Minimum number of reserved
+					 * segments
+					 */
 
 /*
  * We call DAT, cpfile, and sufile root metadata files.  Inodes of
@@ -327,9 +340,9 @@
 					~NILFS_DIR_ROUND)
 #define NILFS_MAX_REC_LEN		((1<<16)-1)
 
-static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
+static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
 {
-	unsigned len = le16_to_cpu(dlen);
+	unsigned int len = le16_to_cpu(dlen);
 
 #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == NILFS_MAX_REC_LEN)
@@ -338,7 +351,7 @@
 	return len;
 }
 
-static inline __le16 nilfs_rec_len_to_disk(unsigned len)
+static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
 {
 #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
@@ -518,9 +531,11 @@
 	__le64 cp_inodes_count;
 	__le64 cp_blocks_count;
 
-	/* Do not change the byte offset of ifile inode.
-	   To keep the compatibility of the disk format,
-	   additional fields should be added behind cp_ifile_inode. */
+	/*
+	 * Do not change the byte offset of ifile inode.
+	 * To keep the compatibility of the disk format,
+	 * additional fields should be added behind cp_ifile_inode.
+	 */
 	struct nilfs_inode cp_ifile_inode;
 };
 
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
deleted file mode 100644
index e266caa..0000000
--- a/include/linux/of_mtd.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * OF helpers for mtd.
- *
- * This file is released under the GPLv2
- */
-
-#ifndef __LINUX_OF_MTD_H
-#define __LINUX_OF_MTD_H
-
-#ifdef CONFIG_OF_MTD
-
-#include <linux/of.h>
-int of_get_nand_ecc_mode(struct device_node *np);
-int of_get_nand_ecc_step_size(struct device_node *np);
-int of_get_nand_ecc_strength(struct device_node *np);
-int of_get_nand_bus_width(struct device_node *np);
-bool of_get_nand_on_flash_bbt(struct device_node *np);
-
-#else /* CONFIG_OF_MTD */
-
-static inline int of_get_nand_ecc_mode(struct device_node *np)
-{
-	return -ENOSYS;
-}
-
-static inline int of_get_nand_ecc_step_size(struct device_node *np)
-{
-	return -ENOSYS;
-}
-
-static inline int of_get_nand_ecc_strength(struct device_node *np)
-{
-	return -ENOSYS;
-}
-
-static inline int of_get_nand_bus_width(struct device_node *np)
-{
-	return -ENOSYS;
-}
-
-static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
-{
-	return false;
-}
-
-#endif /* CONFIG_OF_MTD */
-
-#endif /* __LINUX_OF_MTD_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index d833eb4..9e9d79e 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -7,161 +7,53 @@
  *  option) any later version.
  */
 
-/* Maximum Number of Chip Selects */
-#define GPMC_CS_NUM		8
+#include <linux/platform_data/gpmc-omap.h>
 
 #define GPMC_CONFIG_WP		0x00000005
 
-#define GPMC_IRQ_FIFOEVENTENABLE	0x01
-#define GPMC_IRQ_COUNT_EVENT		0x02
+/* IRQ numbers in GPMC IRQ domain for legacy boot use */
+#define GPMC_IRQ_FIFOEVENTENABLE	0
+#define GPMC_IRQ_COUNT_EVENT		1
 
-#define GPMC_BURST_4			4	/* 4 word burst */
-#define GPMC_BURST_8			8	/* 8 word burst */
-#define GPMC_BURST_16			16	/* 16 word burst */
-#define GPMC_DEVWIDTH_8BIT		1	/* 8-bit device width */
-#define GPMC_DEVWIDTH_16BIT		2	/* 16-bit device width */
-#define GPMC_MUX_AAD			1	/* Addr-Addr-Data multiplex */
-#define GPMC_MUX_AD			2	/* Addr-Data multiplex */
-
-/* bool type time settings */
-struct gpmc_bool_timings {
-	bool cycle2cyclediffcsen;
-	bool cycle2cyclesamecsen;
-	bool we_extra_delay;
-	bool oe_extra_delay;
-	bool adv_extra_delay;
-	bool cs_extra_delay;
-	bool time_para_granularity;
-};
-
-/*
- * Note that all values in this struct are in nanoseconds except sync_clk
- * (which is in picoseconds), while the register values are in gpmc_fck cycles.
+/**
+ * struct gpmc_nand_ops - Interface between NAND and GPMC
+ * @nand_writebuffer_empty: get the NAND write buffer empty status.
  */
-struct gpmc_timings {
-	/* Minimum clock period for synchronous mode (in picoseconds) */
-	u32 sync_clk;
-
-	/* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
-	u32 cs_on;		/* Assertion time */
-	u32 cs_rd_off;		/* Read deassertion time */
-	u32 cs_wr_off;		/* Write deassertion time */
-
-	/* ADV signal timings corresponding to GPMC_CONFIG3 */
-	u32 adv_on;		/* Assertion time */
-	u32 adv_rd_off;		/* Read deassertion time */
-	u32 adv_wr_off;		/* Write deassertion time */
-	u32 adv_aad_mux_on;	/* ADV assertion time for AAD */
-	u32 adv_aad_mux_rd_off;	/* ADV read deassertion time for AAD */
-	u32 adv_aad_mux_wr_off;	/* ADV write deassertion time for AAD */
-
-	/* WE signals timings corresponding to GPMC_CONFIG4 */
-	u32 we_on;		/* WE assertion time */
-	u32 we_off;		/* WE deassertion time */
-
-	/* OE signals timings corresponding to GPMC_CONFIG4 */
-	u32 oe_on;		/* OE assertion time */
-	u32 oe_off;		/* OE deassertion time */
-	u32 oe_aad_mux_on;	/* OE assertion time for AAD */
-	u32 oe_aad_mux_off;	/* OE deassertion time for AAD */
-
-	/* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
-	u32 page_burst_access;	/* Multiple access word delay */
-	u32 access;		/* Start-cycle to first data valid delay */
-	u32 rd_cycle;		/* Total read cycle time */
-	u32 wr_cycle;		/* Total write cycle time */
-
-	u32 bus_turnaround;
-	u32 cycle2cycle_delay;
-
-	u32 wait_monitoring;
-	u32 clk_activation;
-
-	/* The following are only on OMAP3430 */
-	u32 wr_access;		/* WRACCESSTIME */
-	u32 wr_data_mux_bus;	/* WRDATAONADMUXBUS */
-
-	struct gpmc_bool_timings bool_timings;
+struct gpmc_nand_ops {
+	bool (*nand_writebuffer_empty)(void);
 };
 
-/* Device timings in picoseconds */
-struct gpmc_device_timings {
-	u32 t_ceasu;	/* address setup to CS valid */
-	u32 t_avdasu;	/* address setup to ADV valid */
-	/* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
-	 * of tusb using these timings even for sync whilst
-	 * ideally for adv_rd/(wr)_off it should have considered
-	 * t_avdh instead. This indirectly necessitates r/w
-	 * variations of t_avdp as it is possible to have one
-	 * sync & other async
-	 */
-	u32 t_avdp_r;	/* ADV low time (what about t_cer ?) */
-	u32 t_avdp_w;
-	u32 t_aavdh;	/* address hold time */
-	u32 t_oeasu;	/* address setup to OE valid */
-	u32 t_aa;	/* access time from ADV assertion */
-	u32 t_iaa;	/* initial access time */
-	u32 t_oe;	/* access time from OE assertion */
-	u32 t_ce;	/* access time from CS asertion */
-	u32 t_rd_cycle;	/* read cycle time */
-	u32 t_cez_r;	/* read CS deassertion to high Z */
-	u32 t_cez_w;	/* write CS deassertion to high Z */
-	u32 t_oez;	/* OE deassertion to high Z */
-	u32 t_weasu;	/* address setup to WE valid */
-	u32 t_wpl;	/* write assertion time */
-	u32 t_wph;	/* write deassertion time */
-	u32 t_wr_cycle;	/* write cycle time */
+struct gpmc_nand_regs;
 
-	u32 clk;
-	u32 t_bacc;	/* burst access valid clock to output delay */
-	u32 t_ces;	/* CS setup time to clk */
-	u32 t_avds;	/* ADV setup time to clk */
-	u32 t_avdh;	/* ADV hold time from clk */
-	u32 t_ach;	/* address hold time from clk */
-	u32 t_rdyo;	/* clk to ready valid */
+#if IS_ENABLED(CONFIG_OMAP_GPMC)
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
+					     int cs);
+#else
+static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
+							    int cs)
+{
+	return NULL;
+}
+#endif /* CONFIG_OMAP_GPMC */
 
-	u32 t_ce_rdyz;	/* XXX: description ?, or use t_cez instead */
-	u32 t_ce_avd;	/* CS on to ADV on delay */
+/*--------------------------------*/
 
-	/* XXX: check the possibility of combining
-	 * cyc_aavhd_oe & cyc_aavdh_we
-	 */
-	u8 cyc_aavdh_oe;/* read address hold time in cycles */
-	u8 cyc_aavdh_we;/* write address hold time in cycles */
-	u8 cyc_oe;	/* access time from OE assertion in cycles */
-	u8 cyc_wpl;	/* write deassertion time in cycles */
-	u32 cyc_iaa;	/* initial access time in cycles */
-
-	/* extra delays */
-	bool ce_xdelay;
-	bool avd_xdelay;
-	bool oe_xdelay;
-	bool we_xdelay;
-};
-
-struct gpmc_settings {
-	bool burst_wrap;	/* enables wrap bursting */
-	bool burst_read;	/* enables read page/burst mode */
-	bool burst_write;	/* enables write page/burst mode */
-	bool device_nand;	/* device is NAND */
-	bool sync_read;		/* enables synchronous reads */
-	bool sync_write;	/* enables synchronous writes */
-	bool wait_on_read;	/* monitor wait on reads */
-	bool wait_on_write;	/* monitor wait on writes */
-	u32 burst_len;		/* page/burst length */
-	u32 device_width;	/* device bus width (8 or 16 bit) */
-	u32 mux_add_data;	/* multiplex address & data */
-	u32 wait_pin;		/* wait-pin to be used */
-};
+/* deprecated APIs */
+#if IS_ENABLED(CONFIG_OMAP_GPMC)
+void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
+#else
+static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
+{
+}
+#endif /* CONFIG_OMAP_GPMC */
+/*--------------------------------*/
 
 extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
 			     struct gpmc_settings *gpmc_s,
 			     struct gpmc_device_timings *dev_t);
 
-struct gpmc_nand_regs;
 struct device_node;
 
-extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
 extern int gpmc_get_client_irq(unsigned irq_config);
 
 extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index d3f533f..8346952 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,24 +50,21 @@
 	OOM_SCAN_SELECT,	/* always select this thread first */
 };
 
-/* Thread is the potential origin of an oom condition; kill first on oom */
-#define OOM_FLAG_ORIGIN		((__force oom_flags_t)0x1)
-
 extern struct mutex oom_lock;
 
 static inline void set_current_oom_origin(void)
 {
-	current->signal->oom_flags |= OOM_FLAG_ORIGIN;
+	current->signal->oom_flag_origin = true;
 }
 
 static inline void clear_current_oom_origin(void)
 {
-	current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
+	current->signal->oom_flag_origin = false;
 }
 
 static inline bool oom_task_origin(const struct task_struct *p)
 {
-	return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
+	return p->signal->oom_flag_origin;
 }
 
 extern void mark_oom_victim(struct task_struct *tsk);
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa..fec4027 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@
 
 static inline bool page_is_young(struct page *page)
 {
-	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-	return test_and_clear_bit(PAGE_EXT_YOUNG,
-				  &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 44f3383..1a827ce 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -61,6 +61,14 @@
 	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
+struct perf_callchain_entry_ctx {
+	struct perf_callchain_entry *entry;
+	u32			    max_stack;
+	u32			    nr;
+	short			    contexts;
+	bool			    contexts_maxed;
+};
+
 struct perf_raw_record {
 	u32				size;
 	void				*data;
@@ -1061,20 +1069,36 @@
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark);
+		   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
 
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-	if (entry->nr < sysctl_perf_event_max_stack) {
+	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
+		struct perf_callchain_entry *entry = ctx->entry;
 		entry->ip[entry->nr++] = ip;
+		++ctx->contexts;
+		return 0;
+	} else {
+		ctx->contexts_maxed = true;
+		return -1; /* no more room, stop walking the stack */
+	}
+}
+
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
+{
+	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
+		struct perf_callchain_entry *entry = ctx->entry;
+		entry->ip[entry->nr++] = ip;
+		++ctx->nr;
 		return 0;
 	} else {
 		return -1; /* no more room, stop walking the stack */
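
As a rough sketch of the new per-architecture contract (context markers are
now counted separately from regular frames), the body below only records the
kernel context marker and the faulting IP; the real unwind loop is
architecture specific and omitted here.

#include <linux/perf_event.h>
#include <asm/ptrace.h>

/* Hypothetical arch implementation: push the context marker through
 * perf_callchain_store_context(), then ordinary frames through
 * perf_callchain_store(), stopping as soon as either reports -1.
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx,
			   struct pt_regs *regs)
{
	if (perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL))
		return;

	if (perf_callchain_store(ctx, instruction_pointer(regs)))
		return;

	/* ... walk the stack frames here, calling perf_callchain_store()
	 * for each return address until it returns -1 ...
	 */
}
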
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index dc9a13e..be830b1 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -26,7 +26,7 @@
  *
  * An example in pseudo code for a setup() callback:
  *
- * void get_mac_addr(struct mvmem_device *nvmem, void *context)
+ * void get_mac_addr(struct nvmem_device *nvmem, void *context)
  * {
  *	u8 *mac_addr = ethernet_pdata->mac_addr;
  *	off_t offset = context;
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
new file mode 100644
index 0000000..67ccdb0
--- /dev/null
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -0,0 +1,172 @@
+/*
+ * OMAP GPMC Platform data
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com
+ *	Roger Quadros <rogerq@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _GPMC_OMAP_H_
+#define _GPMC_OMAP_H_
+
+/* Maximum Number of Chip Selects */
+#define GPMC_CS_NUM		8
+
+/* bool type time settings */
+struct gpmc_bool_timings {
+	bool cycle2cyclediffcsen;
+	bool cycle2cyclesamecsen;
+	bool we_extra_delay;
+	bool oe_extra_delay;
+	bool adv_extra_delay;
+	bool cs_extra_delay;
+	bool time_para_granularity;
+};
+
+/*
+ * Note that all values in this struct are in nanoseconds except sync_clk
+ * (which is in picoseconds), while the register values are in gpmc_fck cycles.
+ */
+struct gpmc_timings {
+	/* Minimum clock period for synchronous mode (in picoseconds) */
+	u32 sync_clk;
+
+	/* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
+	u32 cs_on;		/* Assertion time */
+	u32 cs_rd_off;		/* Read deassertion time */
+	u32 cs_wr_off;		/* Write deassertion time */
+
+	/* ADV signal timings corresponding to GPMC_CONFIG3 */
+	u32 adv_on;		/* Assertion time */
+	u32 adv_rd_off;		/* Read deassertion time */
+	u32 adv_wr_off;		/* Write deassertion time */
+	u32 adv_aad_mux_on;	/* ADV assertion time for AAD */
+	u32 adv_aad_mux_rd_off;	/* ADV read deassertion time for AAD */
+	u32 adv_aad_mux_wr_off;	/* ADV write deassertion time for AAD */
+
+	/* WE signals timings corresponding to GPMC_CONFIG4 */
+	u32 we_on;		/* WE assertion time */
+	u32 we_off;		/* WE deassertion time */
+
+	/* OE signals timings corresponding to GPMC_CONFIG4 */
+	u32 oe_on;		/* OE assertion time */
+	u32 oe_off;		/* OE deassertion time */
+	u32 oe_aad_mux_on;	/* OE assertion time for AAD */
+	u32 oe_aad_mux_off;	/* OE deassertion time for AAD */
+
+	/* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
+	u32 page_burst_access;	/* Multiple access word delay */
+	u32 access;		/* Start-cycle to first data valid delay */
+	u32 rd_cycle;		/* Total read cycle time */
+	u32 wr_cycle;		/* Total write cycle time */
+
+	u32 bus_turnaround;
+	u32 cycle2cycle_delay;
+
+	u32 wait_monitoring;
+	u32 clk_activation;
+
+	/* The following are only on OMAP3430 */
+	u32 wr_access;		/* WRACCESSTIME */
+	u32 wr_data_mux_bus;	/* WRDATAONADMUXBUS */
+
+	struct gpmc_bool_timings bool_timings;
+};
+
+/* Device timings in picoseconds */
+struct gpmc_device_timings {
+	u32 t_ceasu;	/* address setup to CS valid */
+	u32 t_avdasu;	/* address setup to ADV valid */
+	/* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
+	 * of tusb using these timings even for sync whilst
+	 * ideally for adv_rd/(wr)_off it should have considered
+	 * t_avdh instead. This indirectly necessitates r/w
+	 * variations of t_avdp as it is possible to have one
+	 * sync & other async
+	 */
+	u32 t_avdp_r;	/* ADV low time (what about t_cer ?) */
+	u32 t_avdp_w;
+	u32 t_aavdh;	/* address hold time */
+	u32 t_oeasu;	/* address setup to OE valid */
+	u32 t_aa;	/* access time from ADV assertion */
+	u32 t_iaa;	/* initial access time */
+	u32 t_oe;	/* access time from OE assertion */
+	u32 t_ce;	/* access time from CS assertion */
+	u32 t_rd_cycle;	/* read cycle time */
+	u32 t_cez_r;	/* read CS deassertion to high Z */
+	u32 t_cez_w;	/* write CS deassertion to high Z */
+	u32 t_oez;	/* OE deassertion to high Z */
+	u32 t_weasu;	/* address setup to WE valid */
+	u32 t_wpl;	/* write assertion time */
+	u32 t_wph;	/* write deassertion time */
+	u32 t_wr_cycle;	/* write cycle time */
+
+	u32 clk;
+	u32 t_bacc;	/* burst access valid clock to output delay */
+	u32 t_ces;	/* CS setup time to clk */
+	u32 t_avds;	/* ADV setup time to clk */
+	u32 t_avdh;	/* ADV hold time from clk */
+	u32 t_ach;	/* address hold time from clk */
+	u32 t_rdyo;	/* clk to ready valid */
+
+	u32 t_ce_rdyz;	/* XXX: description ?, or use t_cez instead */
+	u32 t_ce_avd;	/* CS on to ADV on delay */
+
+	/* XXX: check the possibility of combining
+	 * cyc_aavdh_oe & cyc_aavdh_we
+	 */
+	u8 cyc_aavdh_oe;/* read address hold time in cycles */
+	u8 cyc_aavdh_we;/* write address hold time in cycles */
+	u8 cyc_oe;	/* access time from OE assertion in cycles */
+	u8 cyc_wpl;	/* write deassertion time in cycles */
+	u32 cyc_iaa;	/* initial access time in cycles */
+
+	/* extra delays */
+	bool ce_xdelay;
+	bool avd_xdelay;
+	bool oe_xdelay;
+	bool we_xdelay;
+};
+
+#define GPMC_BURST_4			4	/* 4 word burst */
+#define GPMC_BURST_8			8	/* 8 word burst */
+#define GPMC_BURST_16			16	/* 16 word burst */
+#define GPMC_DEVWIDTH_8BIT		1	/* 8-bit device width */
+#define GPMC_DEVWIDTH_16BIT		2	/* 16-bit device width */
+#define GPMC_MUX_AAD			1	/* Addr-Addr-Data multiplex */
+#define GPMC_MUX_AD			2	/* Addr-Data multiplex */
+
+struct gpmc_settings {
+	bool burst_wrap;	/* enables wrap bursting */
+	bool burst_read;	/* enables read page/burst mode */
+	bool burst_write;	/* enables write page/burst mode */
+	bool device_nand;	/* device is NAND */
+	bool sync_read;		/* enables synchronous reads */
+	bool sync_write;	/* enables synchronous writes */
+	bool wait_on_read;	/* monitor wait on reads */
+	bool wait_on_write;	/* monitor wait on writes */
+	u32 burst_len;		/* page/burst length */
+	u32 device_width;	/* device bus width (8 or 16 bit) */
+	u32 mux_add_data;	/* multiplex address & data */
+	u32 wait_pin;		/* wait-pin to be used */
+};
+
+/* Data for each chip select */
+struct gpmc_omap_cs_data {
+	bool valid;			/* data is valid */
+	bool is_nand;			/* device within this CS is NAND */
+	struct gpmc_settings *settings;
+	struct gpmc_device_timings *device_timings;
+	struct gpmc_timings *gpmc_timings;
+	struct platform_device *pdev;	/* device within this CS region */
+	unsigned int pdata_size;
+};
+
+struct gpmc_omap_platform_data {
+	struct gpmc_omap_cs_data cs[GPMC_CS_NUM];
+};
+
+#endif /* _GPMC_OMAP_H_ */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 090bbab..17d57a1 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -45,7 +45,6 @@
 };
 
 struct gpmc_nand_regs {
-	void __iomem	*gpmc_status;
 	void __iomem	*gpmc_nand_command;
 	void __iomem	*gpmc_nand_address;
 	void __iomem	*gpmc_nand_data;
@@ -64,21 +63,24 @@
 	void __iomem	*gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER];
 	void __iomem	*gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
 	void __iomem	*gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
+	/* Deprecated. Do not use */
+	void __iomem	*gpmc_status;
 };
 
 struct omap_nand_platform_data {
 	int			cs;
 	struct mtd_partition	*parts;
 	int			nr_parts;
-	bool			dev_ready;
 	bool			flash_bbt;
 	enum nand_io		xfer_type;
 	int			devsize;
 	enum omap_ecc           ecc_opt;
-	struct gpmc_nand_regs	reg;
 
-	/* for passing the partitions */
-	struct device_node	*of_node;
 	struct device_node	*elm_of_node;
+
+	/* deprecated */
+	struct gpmc_nand_regs	reg;
+	struct device_node	*of_node;
+	bool			dev_ready;
 };
 #endif
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index b78d27c..17018f3 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -5,59 +5,7 @@
 #include <linux/mutex.h>
 #include <linux/of.h>
 
-struct pwm_device;
 struct seq_file;
-
-#if IS_ENABLED(CONFIG_PWM)
-/*
- * pwm_request - request a PWM device
- */
-struct pwm_device *pwm_request(int pwm_id, const char *label);
-
-/*
- * pwm_free - free a PWM device
- */
-void pwm_free(struct pwm_device *pwm);
-
-/*
- * pwm_config - change a PWM device configuration
- */
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
-
-/*
- * pwm_enable - start a PWM output toggling
- */
-int pwm_enable(struct pwm_device *pwm);
-
-/*
- * pwm_disable - stop a PWM output toggling
- */
-void pwm_disable(struct pwm_device *pwm);
-#else
-static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void pwm_free(struct pwm_device *pwm)
-{
-}
-
-static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
-{
-	return -EINVAL;
-}
-
-static inline int pwm_enable(struct pwm_device *pwm)
-{
-	return -EINVAL;
-}
-
-static inline void pwm_disable(struct pwm_device *pwm)
-{
-}
-#endif
-
 struct pwm_chip;
 
 /**
@@ -94,8 +42,21 @@
 
 enum {
 	PWMF_REQUESTED = 1 << 0,
-	PWMF_ENABLED = 1 << 1,
-	PWMF_EXPORTED = 1 << 2,
+	PWMF_EXPORTED = 1 << 1,
+};
+
+/**
+ * struct pwm_state - state of a PWM channel
+ * @period: PWM period (in nanoseconds)
+ * @duty_cycle: PWM duty cycle (in nanoseconds)
+ * @polarity: PWM polarity
+ * @enabled: PWM enabled status
+ */
+struct pwm_state {
+	unsigned int period;
+	unsigned int duty_cycle;
+	enum pwm_polarity polarity;
+	bool enabled;
 };
 
 /**
@@ -106,11 +67,8 @@
  * @pwm: global index of the PWM device
  * @chip: PWM chip providing this PWM device
  * @chip_data: chip-private data associated with the PWM device
- * @lock: used to serialize accesses to the PWM device where necessary
- * @period: period of the PWM signal (in nanoseconds)
- * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
- * @polarity: polarity of the PWM signal
  * @args: PWM arguments
+ * @state: current PWM channel state
  */
 struct pwm_device {
 	const char *label;
@@ -119,50 +77,68 @@
 	unsigned int pwm;
 	struct pwm_chip *chip;
 	void *chip_data;
-	struct mutex lock;
-
-	unsigned int period;
-	unsigned int duty_cycle;
-	enum pwm_polarity polarity;
 
 	struct pwm_args args;
+	struct pwm_state state;
 };
 
+/**
+ * pwm_get_state() - retrieve the current PWM state
+ * @pwm: PWM device
+ * @state: state to fill with the current PWM state
+ */
+static inline void pwm_get_state(const struct pwm_device *pwm,
+				 struct pwm_state *state)
+{
+	*state = pwm->state;
+}
+
 static inline bool pwm_is_enabled(const struct pwm_device *pwm)
 {
-	return test_bit(PWMF_ENABLED, &pwm->flags);
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
+	return state.enabled;
 }
 
 static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
 {
 	if (pwm)
-		pwm->period = period;
+		pwm->state.period = period;
 }
 
 static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
 {
-	return pwm ? pwm->period : 0;
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
+	return state.period;
 }
 
 static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
 {
 	if (pwm)
-		pwm->duty_cycle = duty;
+		pwm->state.duty_cycle = duty;
 }
 
 static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
 {
-	return pwm ? pwm->duty_cycle : 0;
-}
+	struct pwm_state state;
 
-/*
- * pwm_set_polarity - configure the polarity of a PWM signal
- */
-int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+	pwm_get_state(pwm, &state);
+
+	return state.duty_cycle;
+}
 
 static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
 {
-	return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
+	return state.polarity;
 }
 
 static inline void pwm_get_args(const struct pwm_device *pwm,
@@ -171,12 +147,6 @@
 	*args = pwm->args;
 }
 
-static inline void pwm_apply_args(struct pwm_device *pwm)
-{
-	pwm_set_period(pwm, pwm->args.period);
-	pwm_set_polarity(pwm, pwm->args.polarity);
-}
-
 /**
  * struct pwm_ops - PWM controller operations
  * @request: optional hook for requesting a PWM
@@ -185,6 +155,13 @@
  * @set_polarity: configure the polarity of this PWM
  * @enable: enable PWM output toggling
  * @disable: disable PWM output toggling
+ * @apply: atomically apply a new PWM config. The state argument
+ *	   should be adjusted to the real hardware config (if the
+ *	   hardware can only approximate the requested period or
+ *	   duty_cycle values, state should reflect what was applied)
+ * @get_state: get the current PWM state. This function is only
+ *	       called once per PWM device when the PWM chip is
+ *	       registered.
  * @dbg_show: optional routine to show contents in debugfs
  * @owner: helps prevent removal of modules exporting active PWMs
  */
@@ -197,6 +174,10 @@
 			    enum pwm_polarity polarity);
 	int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
 	void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
+	int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
+		     struct pwm_state *state);
+	void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+			  struct pwm_state *state);
 #ifdef CONFIG_DEBUG_FS
 	void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
 #endif
@@ -232,6 +213,115 @@
 };
 
 #if IS_ENABLED(CONFIG_PWM)
+/* PWM user APIs */
+struct pwm_device *pwm_request(int pwm_id, const char *label);
+void pwm_free(struct pwm_device *pwm);
+int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+int pwm_adjust_config(struct pwm_device *pwm);
+
+/**
+ * pwm_config() - change a PWM device configuration
+ * @pwm: PWM device
+ * @duty_ns: "on" time (in nanoseconds)
+ * @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
+			     int period_ns)
+{
+	struct pwm_state state;
+
+	if (!pwm)
+		return -EINVAL;
+
+	pwm_get_state(pwm, &state);
+	if (state.duty_cycle == duty_ns && state.period == period_ns)
+		return 0;
+
+	state.duty_cycle = duty_ns;
+	state.period = period_ns;
+	return pwm_apply_state(pwm, &state);
+}
+
+/**
+ * pwm_set_polarity() - configure the polarity of a PWM signal
+ * @pwm: PWM device
+ * @polarity: new polarity of the PWM signal
+ *
+ * Note that the polarity cannot be configured while the PWM device is
+ * enabled.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_set_polarity(struct pwm_device *pwm,
+				   enum pwm_polarity polarity)
+{
+	struct pwm_state state;
+
+	if (!pwm)
+		return -EINVAL;
+
+	pwm_get_state(pwm, &state);
+	if (state.polarity == polarity)
+		return 0;
+
+	/*
+	 * Changing the polarity of a running PWM without adjusting the
+	 * dutycycle/period value is a bit risky (can introduce glitches).
+	 * Return -EBUSY in this case.
+	 * Note that this is allowed when using pwm_apply_state() because
+	 * the user specifies all the parameters.
+	 */
+	if (state.enabled)
+		return -EBUSY;
+
+	state.polarity = polarity;
+	return pwm_apply_state(pwm, &state);
+}
+
+/**
+ * pwm_enable() - start a PWM output toggling
+ * @pwm: PWM device
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_enable(struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	if (!pwm)
+		return -EINVAL;
+
+	pwm_get_state(pwm, &state);
+	if (state.enabled)
+		return 0;
+
+	state.enabled = true;
+	return pwm_apply_state(pwm, &state);
+}
+
+/**
+ * pwm_disable() - stop a PWM output toggling
+ * @pwm: PWM device
+ */
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	if (!pwm)
+		return;
+
+	pwm_get_state(pwm, &state);
+	if (!state.enabled)
+		return;
+
+	state.enabled = false;
+	pwm_apply_state(pwm, &state);
+}
+
+/* PWM provider APIs */
 int pwm_set_chip_data(struct pwm_device *pwm, void *data);
 void *pwm_get_chip_data(struct pwm_device *pwm);
 
@@ -257,6 +347,47 @@
 
 bool pwm_can_sleep(struct pwm_device *pwm);
 #else
+static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void pwm_free(struct pwm_device *pwm)
+{
+}
+
+static inline int pwm_apply_state(struct pwm_device *pwm,
+				  const struct pwm_state *state)
+{
+	return -ENOTSUPP;
+}
+
+static inline int pwm_adjust_config(struct pwm_device *pwm)
+{
+	return -ENOTSUPP;
+}
+
+static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
+			     int period_ns)
+{
+	return -EINVAL;
+}
+
+static inline int pwm_set_polarity(struct pwm_device *pwm,
+				   enum pwm_polarity polarity)
+{
+	return -ENOTSUPP;
+}
+
+static inline int pwm_enable(struct pwm_device *pwm)
+{
+	return -EINVAL;
+}
+
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+}
+
 static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
 {
 	return -EINVAL;
@@ -328,6 +459,34 @@
 }
 #endif
 
+static inline void pwm_apply_args(struct pwm_device *pwm)
+{
+	/*
+	 * PWM users calling pwm_apply_args() expect to have a fresh config
+	 * where the polarity and period are set according to pwm_args info.
+	 * The problem is, polarity can only be changed when the PWM is
+	 * disabled.
+	 *
+	 * PWM drivers supporting hardware readout may declare the PWM device
+	 * as enabled and thus prevent polarity setting. This differs from the
+	 * existing behavior, where all PWM devices are declared as disabled
+	 * at startup (even if they are actually enabled), which allowed the
+	 * polarity to be set freely.
+	 *
+	 * Instead of setting ->enabled to false, we call pwm_disable()
+	 * before pwm_set_polarity() to ensure that everything is configured
+	 * as expected, and the PWM is really disabled when the user requests
+	 * it.
+	 *
+	 * Note that PWM users requiring a smooth handover between the
+	 * bootloader and the kernel (like critical regulators controlled by
+	 * PWM devices) will have to switch to the atomic API and avoid calling
+	 * pwm_apply_args().
+	 */
+	pwm_disable(pwm);
+	pwm_set_polarity(pwm, pwm->args.polarity);
+}
+
 struct pwm_lookup {
 	struct list_head list;
 	const char *provider;
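
A brief consumer-side sketch of the atomic API added above; the function name
and the 1 kHz / 50% duty figures are purely illustrative assumptions.

#include <linux/pwm.h>

/* Program period, duty cycle, polarity and enable state in a single
 * pwm_apply_state() call instead of pwm_config() + pwm_enable().
 */
static int example_pwm_setup(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */

	state.period = 1000000;		/* 1 ms period, i.e. 1 kHz */
	state.duty_cycle = 500000;	/* 50% duty cycle */
	state.polarity = PWM_POLARITY_NORMAL;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}

Because the whole configuration reaches the driver at once, the ->apply()
callback can translate it to hardware in one step rather than through the
intermediate states the legacy pwm_config()/pwm_set_polarity()/pwm_enable()
sequence goes through.
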
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 6ae8cb4..6c876a6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -49,6 +49,7 @@
 	bool drop_ttl0;
 	u8 vport_id;
 	u16 mtu;
+	bool clear_stats;
 };
 
 struct qed_stop_rxq_params {
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 5a0b64c..b0f305e 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -49,12 +49,27 @@
 extern struct lock_class_key reservation_seqcount_class;
 extern const char reservation_seqcount_string[];
 
+/**
+ * struct reservation_object_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: for growing shared fence table
+ * @shared: shared fence table
+ */
 struct reservation_object_list {
 	struct rcu_head rcu;
 	u32 shared_count, shared_max;
 	struct fence __rcu *shared[];
 };
 
+/**
+ * struct reservation_object - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @seq: sequence count for managing RCU read-side synchronization
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ * @staged: staged copy of shared fences for RCU updates
+ */
 struct reservation_object {
 	struct ww_mutex lock;
 	seqcount_t seq;
@@ -68,6 +83,10 @@
 #define reservation_object_assert_held(obj) \
 	lockdep_assert_held(&(obj)->lock.base)
 
+/**
+ * reservation_object_init - initialize a reservation object
+ * @obj: the reservation object
+ */
 static inline void
 reservation_object_init(struct reservation_object *obj)
 {
@@ -79,6 +98,10 @@
 	obj->staged = NULL;
 }
 
+/**
+ * reservation_object_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
 static inline void
 reservation_object_fini(struct reservation_object *obj)
 {
@@ -106,6 +129,14 @@
 	ww_mutex_destroy(&obj->lock);
 }
 
+/**
+ * reservation_object_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list.  Does NOT take references to
+ * the fences.  The obj->lock must be held.
+ */
 static inline struct reservation_object_list *
 reservation_object_get_list(struct reservation_object *obj)
 {
@@ -113,6 +144,17 @@
 					 reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference.  The obj->lock must be held.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
 static inline struct fence *
 reservation_object_get_excl(struct reservation_object *obj)
 {
@@ -120,6 +162,35 @@
 					 reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_get_excl_rcu - get the reservation object's
+ * exclusive fence, without holding the lock.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct fence *
+reservation_object_get_excl_rcu(struct reservation_object *obj)
+{
+	struct fence *fence;
+	unsigned seq;
+retry:
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+	fence = rcu_dereference(obj->fence_excl);
+	if (read_seqcount_retry(&obj->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+	fence = fence_get(fence);
+	rcu_read_unlock();
+	return fence;
+}
+
 int reservation_object_reserve_shared(struct reservation_object *obj);
 void reservation_object_add_shared_fence(struct reservation_object *obj,
 					 struct fence *fence);
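
reservation_object_get_excl_rcu() above lets a reader grab the exclusive fence
without taking the ww_mutex. A short sketch of how a caller might wait on it,
assuming "obj" is a reservation_object the caller already holds a reference to:

	struct fence *excl;
	long ret = 0;

	/* no obj->lock needed; the seqcount retry loop gives a consistent
	 * snapshot and the helper takes a fence reference for us */
	excl = reservation_object_get_excl_rcu(obj);
	if (excl) {
		ret = fence_wait(excl, true);	/* interruptible wait */
		fence_put(excl);
	}
	return ret;
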
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index d1c12d1..d37fbb3 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -156,6 +156,7 @@
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
 
 # define down_write_nest_lock(sem, nest_lock)			\
@@ -176,6 +177,7 @@
 # define down_read_nested(sem, subclass)		down_read(sem)
 # define down_write_nest_lock(sem, nest_lock)	down_write(sem)
 # define down_write_nested(sem, subclass)	down_write(sem)
+# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
 # define down_read_non_owner(sem)		down_read(sem)
 # define up_read_non_owner(sem)			up_read(sem)
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c036de..6e42ada 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -794,7 +794,11 @@
 	struct tty_audit_buf *tty_audit_buf;
 #endif
 
-	oom_flags_t oom_flags;
+	/*
+	 * Thread is the potential origin of an oom condition; kill first on
+	 * oom
+	 */
+	bool oom_flag_origin;
 	short oom_score_adj;		/* OOM kill score adjustment */
 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
 					 * Only settable by CAP_SYS_RESOURCE. */
@@ -1535,6 +1539,7 @@
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
+	unsigned sched_remote_wakeup:1;
 	unsigned :0; /* force alignment to the next boundary */
 
 	/* unserialized, strictly 'current' */
@@ -2740,10 +2745,12 @@
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-/* same as above but performs the slow path from the async kontext. Can
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
  * be called from the atomic context as well
  */
 extern void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index dacb5e7..de1f643 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -765,6 +765,8 @@
 	__u8	sctpi_s_disable_fragments;
 	__u8	sctpi_s_v4mapped;
 	__u8	sctpi_s_frag_interleave;
+	__u32	sctpi_s_type;
+	__u32	__reserved3;
 };
 
 struct sctp_infox {
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e058210..7973a82 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,7 @@
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 {
-	return lockless_dereference(s->sequence);
+	return lockless_dereference(s)->sequence;
 }
 
 /**
@@ -331,7 +331,7 @@
  *	unsigned seq, idx;
  *
  *	do {
- *		seq = lockless_dereference(latch->seq);
+ *		seq = lockless_dereference(latch)->seq;
  *
  *		idx = seq & 0x01;
  *		entry = data_query(latch->data[idx], ...);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 639be26..b63f63e 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -400,7 +400,9 @@
 #else
 #define rt_sigmask(sig)	sigmask(sig)
 #endif
-#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
+
+#define siginmask(sig, mask) \
+	((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
 
 #define SIG_KERNEL_ONLY_MASK (\
 	rt_sigmask(SIGKILL)   |  rt_sigmask(SIGSTOP))
@@ -421,14 +423,10 @@
         rt_sigmask(SIGCONT)   |  rt_sigmask(SIGCHLD)   | \
 	rt_sigmask(SIGWINCH)  |  rt_sigmask(SIGURG)    )
 
-#define sig_kernel_only(sig) \
-	(((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
-#define sig_kernel_coredump(sig) \
-	(((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
-#define sig_kernel_ignore(sig) \
-	(((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
-#define sig_kernel_stop(sig) \
-	(((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
+#define sig_kernel_only(sig)		siginmask(sig, SIG_KERNEL_ONLY_MASK)
+#define sig_kernel_coredump(sig)	siginmask(sig, SIG_KERNEL_COREDUMP_MASK)
+#define sig_kernel_ignore(sig)		siginmask(sig, SIG_KERNEL_IGNORE_MASK)
+#define sig_kernel_stop(sig)		siginmask(sig, SIG_KERNEL_STOP_MASK)
 
 #define sig_user_defined(t, signr) \
 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 665cd0c..d1faa01 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,22 +111,6 @@
 }
 #endif
 
-
-/**
- * virt_to_obj - returns address of the beginning of object.
- * @s: object's kmem_cache
- * @slab_page: address of slab page
- * @x: address within object memory range
- *
- * Returns address of the beginning of object
- */
-static inline void *virt_to_obj(struct kmem_cache *s,
-				const void *slab_page,
-				const void *x)
-{
-	return (void *)x - ((x - slab_page) % s->size);
-}
-
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 857a9a1..1f03483 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -372,6 +372,7 @@
  * @unprepare_message: undo any work done by prepare_message().
  * @spi_flash_read: to support spi-controller hardwares that provide
  *                  accelerated interface to read from flash devices.
+ * @flash_read_supported: spi device supports flash read
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
@@ -529,6 +530,7 @@
 				 struct spi_message *message);
 	int (*spi_flash_read)(struct  spi_device *spi,
 			      struct spi_flash_read_message *msg);
+	bool (*flash_read_supported)(struct spi_device *spi);
 
 	/*
 	 * These hooks are for drivers that use a generic implementation
@@ -1158,7 +1160,9 @@
 /* SPI core interface for flash read support */
 static inline bool spi_flash_read_supported(struct spi_device *spi)
 {
-	return spi->master->spi_flash_read ? true : false;
+	return spi->master->spi_flash_read &&
+	       (!spi->master->flash_read_supported ||
+	       spi->master->flash_read_supported(spi));
 }
 
 int spi_flash_read(struct spi_device *spi,
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
new file mode 100644
index 0000000..451771d
--- /dev/null
+++ b/include/linux/stringhash.h
@@ -0,0 +1,76 @@
+#ifndef __LINUX_STRINGHASH_H
+#define __LINUX_STRINGHASH_H
+
+#include <linux/compiler.h>	/* For __pure */
+#include <linux/types.h>	/* For u32, u64 */
+
+/*
+ * Routines for hashing strings of bytes to a 32-bit hash value.
+ *
+ * These hash functions are NOT GUARANTEED STABLE between kernel
+ * versions, architectures, or even repeated boots of the same kernel.
+ * (E.g. they may depend on boot-time hardware detection or be
+ * deliberately randomized.)
+ *
+ * They are also not intended to be secure against collisions caused by
+ * malicious inputs; much slower hash functions are required for that.
+ *
+ * They are optimized for pathname components, meaning short strings.
+ * Even if a majority of files have longer names, the dynamic profile of
+ * pathname components skews short due to short directory names.
+ * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
+ */
+
+/*
+ * Version 1: one byte at a time.  Example of use:
+ *
+ * unsigned long hash = init_name_hash();
+ * while (*p)
+ *	hash = partial_name_hash(tolower(*p++), hash);
+ * hash = end_name_hash(hash);
+ *
+ * Although this is designed for bytes, fs/hfsplus/unicode.c
+ * abuses it to hash 16-bit values.
+ */
+
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash()		0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+	return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+	return (unsigned int)hash;
+}
+
+/*
+ * Version 2: One word (32 or 64 bits) at a time.
+ * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
+ * exists, which describes major Linux platforms like x86 and ARM), then
+ * this computes a different, much faster hash function.
+ *
+ * If not set, this falls back to a wrapper around the preceding.
+ */
+extern unsigned int __pure full_name_hash(const char *, unsigned int);
+
+/*
+ * A hash_len is a u64 with the hash of a string in the low
+ * half and the length in the high half.
+ */
+#define hashlen_hash(hashlen) ((u32)(hashlen))
+#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
+
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+extern u64 __pure hashlen_string(const char *name);
+
+#endif	/* __LINUX_STRINGHASH_H */
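
A short sketch of how the word-at-a-time interface and the hash_len packing
above fit together; the 256-bucket table size is an arbitrary example value:

	const char *name = "example";
	u64 hl = hashlen_string(name);		/* hash and length in one u64 */
	u32 hash = hashlen_hash(hl);
	u32 len = hashlen_len(hl);
	unsigned int bucket = hash >> (32 - 8);	/* high bits index 2^8 buckets */

	pr_debug("%s: len=%u hash=%08x bucket=%u\n", name, len, hash, bucket);
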
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 6a241a2..8997915 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -127,7 +127,7 @@
 	void			(*destroy)(struct rpc_auth *);
 
 	struct rpc_cred *	(*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
-	struct rpc_cred *	(*crcreate)(struct rpc_auth*, struct auth_cred *, int);
+	struct rpc_cred *	(*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
 	int			(*list_pseudoflavors)(rpc_authflavor_t *, int);
 	rpc_authflavor_t	(*info2flavor)(struct rpcsec_gss_info *);
 	int			(*flavor2info)(rpc_authflavor_t,
@@ -167,6 +167,7 @@
 
 struct rpc_cred *	rpc_lookup_cred(void);
 struct rpc_cred *	rpc_lookup_cred_nonblock(void);
+struct rpc_cred *	rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t);
 struct rpc_cred *	rpc_lookup_machine_cred(const char *service_name);
 int			rpcauth_register(const struct rpc_authops *);
 int			rpcauth_unregister(const struct rpc_authops *);
@@ -178,7 +179,7 @@
 int			rpcauth_get_gssinfo(rpc_authflavor_t,
 				struct rpcsec_gss_info *);
 int			rpcauth_list_flavors(rpc_authflavor_t *, int);
-struct rpc_cred *	rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
+struct rpc_cred *	rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
 void			rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
 struct rpc_cred *	rpcauth_lookupcred(struct rpc_auth *, int);
 struct rpc_cred *	rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
@@ -201,9 +202,28 @@
 static inline
 struct rpc_cred *	get_rpccred(struct rpc_cred *cred)
 {
-	atomic_inc(&cred->cr_count);
+	if (cred != NULL)
+		atomic_inc(&cred->cr_count);
 	return cred;
 }
 
+/**
+ * get_rpccred_rcu - get a reference to a cred using an rcu-protected pointer
+ * @cred: cred of which to take a reference
+ *
+ * In some cases, we may have a pointer to a credential to which we
+ * want to take a reference, but don't already have one. Because these
+ * objects are freed using RCU, we can access the cr_count while it's
+ * on its way to destruction and only take a reference if it's not already
+ * zero.
+ */
+static inline struct rpc_cred *
+get_rpccred_rcu(struct rpc_cred *cred)
+{
+	if (atomic_inc_not_zero(&cred->cr_count))
+		return cred;
+	return NULL;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_AUTH_H */
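
A sketch of the lookup pattern get_rpccred_rcu() is meant for, assuming a
hypothetical structure "ctx" whose ->cred pointer is RCU-protected:

	struct rpc_cred *cred;

	rcu_read_lock();
	cred = rcu_dereference(ctx->cred);
	if (cred)
		cred = get_rpccred_rcu(cred);	/* NULL if already being freed */
	rcu_read_unlock();
	if (!cred)
		return -ENOENT;
	/* ... use cred ... */
	put_rpccred(cred);
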
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 9a7ddba..19c659d 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -176,6 +176,7 @@
 int		rpc_protocol(struct rpc_clnt *);
 struct net *	rpc_net_ns(struct rpc_clnt *);
 size_t		rpc_max_payload(struct rpc_clnt *);
+size_t		rpc_max_bc_payload(struct rpc_clnt *);
 unsigned long	rpc_get_timeout(struct rpc_clnt *clnt);
 void		rpc_force_rebind(struct rpc_clnt *);
 size_t		rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 8073713..59cbf16 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -158,9 +158,9 @@
 
 /*
  * Note that RFC 1833 does not put any size restrictions on the
- * netid string, but all currently defined netid's fit in 4 bytes.
+ * netid string, but all currently defined netids fit in 5 bytes.
  */
-#define RPCBIND_MAXNETIDLEN	(4u)
+#define RPCBIND_MAXNETIDLEN	(5u)
 
 /*
  * Universal addresses are introduced in RFC 1833 and further spelled
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 3081339..d6917b8 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -199,7 +199,7 @@
 				    struct xdr_buf *rcvbuf);
 
 /* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
 extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
 				     struct rpcrdma_msg *,
 				     enum rpcrdma_errcode, __be32 *);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c00f53a..91d5a5d 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -16,6 +16,7 @@
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/gss_api.h>
 #include <linux/hash.h>
+#include <linux/stringhash.h>
 #include <linux/cred.h>
 
 struct svc_cred {
@@ -165,41 +166,18 @@
 extern int unix_gid_cache_create(struct net *net);
 extern void unix_gid_cache_destroy(struct net *net);
 
-static inline unsigned long hash_str(char *name, int bits)
+/*
+ * The <stringhash.h> functions are good enough that we don't need to
+ * use hash_32() on them; just extracting the high bits is enough.
+ */
+static inline unsigned long hash_str(char const *name, int bits)
 {
-	unsigned long hash = 0;
-	unsigned long l = 0;
-	int len = 0;
-	unsigned char c;
-	do {
-		if (unlikely(!(c = *name++))) {
-			c = (char)len; len = -1;
-		}
-		l = (l << 8) | c;
-		len++;
-		if ((len & (BITS_PER_LONG/8-1))==0)
-			hash = hash_long(hash^l, BITS_PER_LONG);
-	} while (len);
-	return hash >> (BITS_PER_LONG - bits);
+	return hashlen_hash(hashlen_string(name)) >> (32 - bits);
 }
 
-static inline unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char const *buf, int length, int bits)
 {
-	unsigned long hash = 0;
-	unsigned long l = 0;
-	int len = 0;
-	unsigned char c;
-	do {
-		if (len == length) {
-			c = (char)len; len = -1;
-		} else
-			c = *buf++;
-		l = (l << 8) | c;
-		len++;
-		if ((len & (BITS_PER_LONG/8-1))==0)
-			hash = hash_long(hash^l, BITS_PER_LONG);
-	} while (len);
-	return hash >> (BITS_PER_LONG - bits);
+	return full_name_hash(buf, length) >> (32 - bits);
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fb0d212..5aa3834 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -142,6 +142,7 @@
 	int		(*bc_setup)(struct rpc_xprt *xprt,
 				    unsigned int min_reqs);
 	int		(*bc_up)(struct svc_serv *serv, struct net *net);
+	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
 	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
 	void		(*bc_destroy)(struct rpc_xprt *xprt,
 				      unsigned int max_reqs);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 767190b..39267dc 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -52,7 +52,9 @@
 #define RPCRDMA_DEF_SLOT_TABLE	(128U)
 #define RPCRDMA_MAX_SLOT_TABLE	(256U)
 
-#define RPCRDMA_DEF_INLINE  (1024)	/* default inline max */
+#define RPCRDMA_MIN_INLINE  (1024)	/* min inline thresh */
+#define RPCRDMA_DEF_INLINE  (1024)	/* default inline thresh */
+#define RPCRDMA_MAX_INLINE  (3068)	/* max inline thresh */
 
 /* Memory registration strategies, by number.
  * This is part of a kernel / user space API. Do not remove. */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1b8a5a7..e45abe7 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -340,6 +340,7 @@
 	int (*get_temp)(void *, int *);
 	int (*get_trend)(void *, long *);
 	int (*set_emul_temp)(void *, int);
+	int (*set_trip_temp)(void *, int, int);
 };
 
 /**
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 37dbacf..816b754 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,9 @@
 	struct timespec64 ts64;
 
 	if (!tv)
+		return do_sys_settimeofday64(NULL, tz);
+
+	if (!timespec_valid(tv))
 		return -EINVAL;
 
 	ts64 = timespec_to_timespec64(*tv);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 61aa61d..20ac746 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -145,6 +145,8 @@
 
 #define setup_timer(timer, fn, data)					\
 	__setup_timer((timer), (fn), (data), 0)
+#define setup_deferrable_timer(timer, fn, data)				\
+	__setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
 #define setup_timer_on_stack(timer, fn, data)				\
 	__setup_timer_on_stack((timer), (fn), (data), 0)
 #define setup_deferrable_timer_on_stack(timer, fn, data)		\
diff --git a/include/linux/types.h b/include/linux/types.h
index 70dd3df..baf7183 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -156,7 +156,6 @@
 
 typedef unsigned __bitwise__ gfp_t;
 typedef unsigned __bitwise__ fmode_t;
-typedef unsigned __bitwise__ oom_flags_t;
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 typedef u64 phys_addr_t;
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 957adb7..3d9d786 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -9,6 +9,7 @@
 #include <linux/rbtree.h>
 
 struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
+struct notifier_block;		/* in notifier.h */
 
 /* bits in flags of vmalloc's vm_struct below */
 #define VM_IOREMAP		0x00000001	/* ioremap() and friends */
@@ -188,4 +189,7 @@
 #define VMALLOC_TOTAL 0UL
 #endif
 
+int register_vmap_purge_notifier(struct notifier_block *nb);
+int unregister_vmap_purge_notifier(struct notifier_block *nb);
+
 #endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 1cc4c57..94079ba 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -33,8 +33,8 @@
 		   struct inode *inode, const char *name, void *buffer,
 		   size_t size);
 	int (*set)(const struct xattr_handler *, struct dentry *dentry,
-		   const char *name, const void *buffer, size_t size,
-		   int flags);
+		   struct inode *inode, const char *name, const void *buffer,
+		   size_t size, int flags);
 };
 
 const char *xattr_full_name(const struct xattr_handler *, const char *);
@@ -54,7 +54,8 @@
 
 ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
+int generic_setxattr(struct dentry *dentry, struct inode *inode,
+		     const char *name, const void *value, size_t size, int flags);
 int generic_removexattr(struct dentry *dentry, const char *name);
 ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
 			   char **xattr_value, size_t size, gfp_t flags);
diff --git a/include/net/compat.h b/include/net/compat.h
index 48103cf..13de0cc 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -42,6 +42,7 @@
 
 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
 		      struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
 				   unsigned int);
 asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
diff --git a/include/net/gre.h b/include/net/gre.h
index 5dce30a..7a54a31 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -26,7 +26,7 @@
 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 				       u8 name_assign_type);
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-		     bool *csum_err, __be16 proto);
+		     bool *csum_err, __be16 proto, int nhs);
 
 static inline int gre_calc_hlen(__be16 o_flags)
 {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d325c81..43a5a0e 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -63,6 +63,8 @@
 			    u8 *protocol, struct flowi6 *fl6);
 };
 
+#ifdef CONFIG_INET
+
 extern const struct ip6_tnl_encap_ops __rcu *
 		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
 
@@ -138,7 +140,6 @@
 int ip6_tnl_get_iflink(const struct net_device *dev);
 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
-#ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 				  struct net_device *dev)
 {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index af4c10e..cd6018a 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1232,7 +1232,7 @@
 const char *ip_vs_state_name(__u16 proto, int state);
 
 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 9c5638a..0dbce55 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -28,8 +28,8 @@
 						struct nf_hook_ops *ops);
 };
 
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0922354..f7c291f 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -167,6 +167,7 @@
 
 struct nft_set;
 struct nft_set_iter {
+	u8		genmask;
 	unsigned int	count;
 	unsigned int	skip;
 	int		err;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 38aa498..36d7235 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -5,11 +5,13 @@
 
 struct proc_dir_entry;
 struct nf_logger;
+struct nf_queue_handler;
 
 struct netns_nf {
 #if defined CONFIG_PROC_FS
 	struct proc_dir_entry *proc_netfilter;
 #endif
+	const struct nf_queue_handler __rcu *queue_handler;
 	const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 0f7efa8..3722dda 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,16 +392,20 @@
 	};
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+				     const struct tcf_proto *tp, u32 flags)
 {
+	const struct Qdisc *sch = tp->q;
+	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
 	if (!(dev->features & NETIF_F_HW_TC))
 		return false;
-
 	if (flags & TCA_CLS_FLAGS_SKIP_HW)
 		return false;
-
 	if (!dev->netdev_ops->ndo_setup_tc)
 		return false;
+	if (cops && cops->tcf_cl_offload)
+		return cops->tcf_cl_offload(tp->classid);
 
 	return true;
 }
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a1fd76c..62d5531 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -168,6 +168,7 @@
 
 	/* Filter manipulation */
 	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+	bool			(*tcf_cl_offload)(u32 classid);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -691,9 +692,11 @@
 	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
 	if (!sch->gso_skb) {
 		sch->gso_skb = sch->dequeue(sch);
-		if (sch->gso_skb)
+		if (sch->gso_skb) {
 			/* it's still part of the queue */
+			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
 			sch->q.qlen++;
+		}
 	}
 
 	return sch->gso_skb;
@@ -706,6 +709,7 @@
 
 	if (skb) {
 		sch->gso_skb = NULL;
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
 		skb = sch->dequeue(sch);
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index dc9a09a..c55facd 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -36,7 +36,7 @@
 	int	(*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
 	int	(*decode)(struct sk_buff *, void *, u16 len);
 	int	(*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
-	int	(*alloc)(struct tcf_meta_info *, void *);
+	int	(*alloc)(struct tcf_meta_info *, void *, gfp_t);
 	void	(*release)(struct tcf_meta_info *);
 	int	(*validate)(void *val, int len);
 	struct module	*owner;
@@ -48,8 +48,8 @@
 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
 			const void *dval);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
 int ife_validate_meta_u32(void *val, int len);
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 37dd534c..c8a773f 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -239,12 +239,15 @@
 
 #define IB_MGMT_CLASSPORTINFO_ATTR_ID	cpu_to_be16(0x0001)
 
+#define IB_CLASS_PORT_INFO_RESP_TIME_MASK	0x1F
+#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5
+
 struct ib_class_port_info {
 	u8			base_version;
 	u8			class_version;
 	__be16			capability_mask;
-	u8			reserved[3];
-	u8			resp_time_value;
+	  /* 27 bits for cap_mask2, 5 bits for resp_time */
+	__be32			cap_mask2_resp_time;
 	u8			redirect_gid[16];
 	__be32			redirect_tcslfl;
 	__be16			redirect_lid;
@@ -259,6 +262,59 @@
 	__be32			trap_qkey;
 };
 
+/**
+ * ib_get_cpi_resp_time - Returns the resp_time value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct ib_class_port_info mad.
+ */
+static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
+{
+	return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
+		    IB_CLASS_PORT_INFO_RESP_TIME_MASK);
+}
+
+/**
+ * ib_set_cpi_resp_time - Sets the response time in an
+ * ib_class_port_info mad.
+ * @cpi: A struct ib_class_port_info.
+ * @rtime: The response time to set.
+ */
+static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
+					u8 rtime)
+{
+	cpi->cap_mask2_resp_time =
+		(cpi->cap_mask2_resp_time &
+		 cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
+		cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
+}
+
+/**
+ * ib_get_cpi_capmask2 - Returns the capmask2 value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct ib_class_port_info mad.
+ */
+static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
+{
+	return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
+		IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
+/**
+ * ib_set_cpi_capmask2 - Sets the capmask2 in an
+ * ib_class_port_info mad.
+ * @cpi: A struct ib_class_port_info.
+ * @capmask2: The capmask2 to set.
+ */
+static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
+				       u32 capmask2)
+{
+	cpi->cap_mask2_resp_time =
+		(cpi->cap_mask2_resp_time &
+		 cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
+		cpu_to_be32(capmask2 <<
+			    IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
 struct ib_mad_notice_attr {
 	u8 generic_type;
 	u8 prod_type_msb;
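
The accessors above pack a 27-bit cap_mask2 and a 5-bit resp_time into the
single big-endian cap_mask2_resp_time word. A small round-trip sketch with
made-up values:

	struct ib_class_port_info cpi = {};

	ib_set_cpi_resp_time(&cpi, 18);		/* 18 is an arbitrary example */
	ib_set_cpi_capmask2(&cpi, 1 << 12);	/* e.g. a cap_mask2 feature bit */
	WARN_ON(ib_get_cpi_resp_time(&cpi) != 18);
	WARN_ON(!(ib_get_cpi_capmask2(&cpi) & (1 << 12)));
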
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 0f3daae..b13419c 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -103,6 +103,9 @@
 	IB_OPCODE_ATOMIC_ACKNOWLEDGE                = 0x12,
 	IB_OPCODE_COMPARE_SWAP                      = 0x13,
 	IB_OPCODE_FETCH_ADD                         = 0x14,
+	/* opcode 0x15 is reserved */
+	IB_OPCODE_SEND_LAST_WITH_INVALIDATE         = 0x16,
+	IB_OPCODE_SEND_ONLY_WITH_INVALIDATE         = 0x17,
 
 	/* real constants follow -- see comment about above IB_OPCODE()
 	   macro for more details */
@@ -129,6 +132,8 @@
 	IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
 	IB_OPCODE(RC, COMPARE_SWAP),
 	IB_OPCODE(RC, FETCH_ADD),
+	IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE),
+	IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE),
 
 	/* UC */
 	IB_OPCODE(UC, SEND_FIRST),
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index cdc1c81..3840416 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -94,6 +94,8 @@
 	IB_SA_BEST = 3
 };
 
+#define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT	BIT(12)
+
 /*
  * Structures for SA records are named "struct ib_sa_xxx_rec."  No
  * attempt is made to pack structures to match the physical layout of
@@ -439,4 +441,14 @@
 			      void *context,
 			      struct ib_sa_query **sa_query);
 
+/* Support for querying the SA ClassPortInfo */
+int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
+				   struct ib_device *device, u8 port_num,
+				   int timeout_ms, gfp_t gfp_mask,
+				   void (*callback)(int status,
+						    struct ib_class_port_info *resp,
+						    void *context),
+				   void *context,
+				   struct ib_sa_query **sa_query);
+
 #endif /* IB_SA_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fc0320c..7e440d4 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -217,10 +217,10 @@
 	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
 	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
-	IB_DEVICE_ON_DEMAND_PAGING		= (1 << 31),
+	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
 	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
-	IB_DEVICE_VIRTUAL_FUNCTION		= ((u64)1 << 33),
-	IB_DEVICE_RAW_SCATTER_FCS		= ((u64)1 << 34),
+	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
+	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
 };
 
 enum ib_signature_prot_cap {
@@ -403,56 +403,55 @@
 	IB_SPEED_EDR	= 32
 };
 
-struct ib_protocol_stats {
-	/* TBD... */
+/**
+ * struct rdma_hw_stats
+ * @timestamp - Used by the core code to track when the last update was
+ * @lifespan - Used by the core code to determine how old the counters
+ *   should be before being updated again.  Stored in jiffies, defaults
+ *   to 10 milliseconds, drivers can override the default by specifying
+ *   their own value during their allocation routine.
+ * @names - Array of pointers to static names used for the counters in
+ *   the sysfs directory.
+ * @num_counters - How many hardware counters there are.  If names is
+ *   shorter than this number, a kernel oops will result.  Driver authors
+ *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
+ *   in their code to prevent this.
+ * @value - Array of u64 counters that are accessed by the sysfs code and
+ *   filled in by the driver's get_hw_stats routine
+ */
+struct rdma_hw_stats {
+	unsigned long	timestamp;
+	unsigned long	lifespan;
+	const char * const *names;
+	int		num_counters;
+	u64		value[];
 };
 
-struct iw_protocol_stats {
-	u64	ipInReceives;
-	u64	ipInHdrErrors;
-	u64	ipInTooBigErrors;
-	u64	ipInNoRoutes;
-	u64	ipInAddrErrors;
-	u64	ipInUnknownProtos;
-	u64	ipInTruncatedPkts;
-	u64	ipInDiscards;
-	u64	ipInDelivers;
-	u64	ipOutForwDatagrams;
-	u64	ipOutRequests;
-	u64	ipOutDiscards;
-	u64	ipOutNoRoutes;
-	u64	ipReasmTimeout;
-	u64	ipReasmReqds;
-	u64	ipReasmOKs;
-	u64	ipReasmFails;
-	u64	ipFragOKs;
-	u64	ipFragFails;
-	u64	ipFragCreates;
-	u64	ipInMcastPkts;
-	u64	ipOutMcastPkts;
-	u64	ipInBcastPkts;
-	u64	ipOutBcastPkts;
+#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
+/**
+ * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
+ *   for drivers.
+ * @names - Array of static const char *
+ * @num_counters - How many elements in array
+ * @lifespan - How many milliseconds between updates
+ */
+static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+		const char * const *names, int num_counters,
+		unsigned long lifespan)
+{
+	struct rdma_hw_stats *stats;
 
-	u64	tcpRtoAlgorithm;
-	u64	tcpRtoMin;
-	u64	tcpRtoMax;
-	u64	tcpMaxConn;
-	u64	tcpActiveOpens;
-	u64	tcpPassiveOpens;
-	u64	tcpAttemptFails;
-	u64	tcpEstabResets;
-	u64	tcpCurrEstab;
-	u64	tcpInSegs;
-	u64	tcpOutSegs;
-	u64	tcpRetransSegs;
-	u64	tcpInErrs;
-	u64	tcpOutRsts;
-};
+	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
+			GFP_KERNEL);
+	if (!stats)
+		return NULL;
+	stats->names = names;
+	stats->num_counters = num_counters;
+	stats->lifespan = msecs_to_jiffies(lifespan);
 
-union rdma_protocol_stats {
-	struct ib_protocol_stats	ib;
-	struct iw_protocol_stats	iw;
-};
+	return stats;
+}
+
 
 /* Define bits for the various functionality this port needs to be supported by
  * the core.
@@ -1707,8 +1706,29 @@
 
 	struct iw_cm_verbs	     *iwcm;
 
-	int		           (*get_protocol_stats)(struct ib_device *device,
-							 union rdma_protocol_stats *stats);
+	/**
+	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
+	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
+	 *   core when the device is removed.  A lifespan of -1 in the return
+	 *   struct tells the core to set a default lifespan.
+	 */
+	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
+						     u8 port_num);
+	/**
+	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
+	 * @index - The index in the value array we wish to have updated, or
+	 *   num_counters if we want all stats updated
+	 * Return codes -
+	 *   < 0 - Error, no counters updated
+	 *   index - Updated the single counter pointed to by index
+	 *   num_counters - Updated all counters (will reset the timestamp
+	 *     and prevent further calls for lifespan milliseconds)
+	 * Drivers are allowed to update all counters in lieu of just the
+	 *   one given in index at their option
+	 */
+	int		           (*get_hw_stats)(struct ib_device *device,
+						   struct rdma_hw_stats *stats,
+						   u8 port, int index);
 	int		           (*query_device)(struct ib_device *device,
 						   struct ib_device_attr *device_attr,
 						   struct ib_udata *udata);
@@ -1926,6 +1946,8 @@
 	u8                           node_type;
 	u8                           phys_port_cnt;
 	struct ib_device_attr        attrs;
+	struct attribute_group	     *hw_stats_ag;
+	struct rdma_hw_stats         *hw_stats;
 
 	/**
 	 * The following mandatory functions are used only at device
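
A sketch of the driver side that the alloc_hw_stats/get_hw_stats documentation
above describes; the counter names and the foo_read_counter() helper are
hypothetical:

	enum { FOO_NUM_COUNTERS = 2 };

	static const char * const foo_counter_names[] = {
		"rx_packets",
		"tx_packets",
	};

	static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *ibdev,
							u8 port_num)
	{
		BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < FOO_NUM_COUNTERS);
		return rdma_alloc_hw_stats_struct(foo_counter_names,
						  FOO_NUM_COUNTERS,
						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
	}

	static int foo_get_hw_stats(struct ib_device *ibdev,
				    struct rdma_hw_stats *stats,
				    u8 port, int index)
	{
		int i;

		for (i = 0; i < stats->num_counters; i++)
			stats->value[i] = foo_read_counter(ibdev, port, i);
		return stats->num_counters;	/* all counters were refreshed */
	}
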
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index d57ceee..16274e2 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,15 +149,15 @@
 	int qpn_res_end;
 	int nports;
 	int npkeys;
-	u8 qos_shift;
 	char cq_name[RVT_CQN_MAX];
 	int node;
-	int max_rdma_atomic;
 	int psn_mask;
 	int psn_shift;
 	int psn_modify_mask;
 	u32 core_cap_flags;
 	u32 max_mad_size;
+	u8 qos_shift;
+	u8 max_rdma_atomic;
 };
 
 /* Protection domain */
@@ -426,6 +426,15 @@
 }
 
 /*
+ * Return the max atomic suitable for determining
+ * the size of the ack ring buffer in a QP.
+ */
+static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
+{
+	return rdi->dparms.max_rdma_atomic + 1;
+}
+
+/*
  * Return the indexed PKEY from the port PKEY table.
  */
 static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0e1ff2a..6d23b87 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -211,8 +211,6 @@
 	unsigned size;
 };
 
-#define RVT_MAX_RDMA_ATOMIC	16
-
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.
@@ -282,8 +280,7 @@
 	atomic_t refcount ____cacheline_aligned_in_smp;
 	wait_queue_head_t wait;
 
-	struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
-		____cacheline_aligned_in_smp;
+	struct rvt_ack_entry *s_ack_queue;
 	struct rvt_sge_state s_rdma_read_sge;
 
 	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 11571b2..20bf7ea 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -63,6 +63,7 @@
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
 int scsi_set_sense_information(u8 *buf, int buf_len, u64 info);
+int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd);
 extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
 				       int desc_type);
 
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index dbb8c64..98d366b 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -16,6 +16,7 @@
 extern int scsi_block_when_processing_errors(struct scsi_device *);
 extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
 					 struct scsi_sense_hdr *sshdr);
+extern int scsi_check_sense(struct scsi_cmnd *);
 
 static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
 {
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index c3371fa..4ac24f5 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -74,6 +74,7 @@
 	ISCSI_IWARP_TCP				= 3,
 	ISCSI_IWARP_SCTP			= 4,
 	ISCSI_INFINIBAND			= 5,
+	ISCSI_CXGBIT				= 6,
 };
 
 /* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
@@ -890,4 +891,30 @@
 }
 
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+
+static inline void iscsit_thread_check_cpumask(
+	struct iscsi_conn *conn,
+	struct task_struct *p,
+	int mode)
+{
+	/*
+	 * mode == 1 signals iscsi_target_tx_thread() usage.
+	 * mode == 0 signals iscsi_target_rx_thread() usage.
+	 */
+	if (mode == 1) {
+		if (!conn->conn_tx_reset_cpumask)
+			return;
+		conn->conn_tx_reset_cpumask = 0;
+	} else {
+		if (!conn->conn_rx_reset_cpumask)
+			return;
+		conn->conn_rx_reset_cpumask = 0;
+	}
+	/*
+	 * Update the CPU mask for this single kthread so that
+	 * both TX and RX kthreads are scheduled to run on the
+	 * same CPU.
+	 */
+	set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
 #endif /* ISCSI_TARGET_CORE_H */
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 90e37fa..40ac7cd 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -6,6 +6,7 @@
 #define ISCSIT_TRANSPORT_NAME	16
 	char name[ISCSIT_TRANSPORT_NAME];
 	int transport_type;
+	bool rdma_shutdown;
 	int priv_size;
 	struct module *owner;
 	struct list_head t_node;
@@ -22,6 +23,13 @@
 	int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
 	int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
 	void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+	int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
+			       struct iscsi_datain_req *, const void *, u32);
+	void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
+	void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
+	int (*iscsit_validate_params)(struct iscsi_conn *);
+	void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
+				   struct iscsi_r2t *);
 	enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
 };
 
@@ -77,6 +85,18 @@
 extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
 				struct iscsi_logout_rsp *);
 extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
+extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
+extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
+extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
+extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+				    struct iscsi_datain *,
+				    struct iscsi_data_rsp *, bool);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+				     bool);
+extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
 /*
  * From iscsi_target_device.c
  */
@@ -102,3 +122,24 @@
 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
 			       unsigned char *, __be32);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
+					      struct iscsi_conn *, u8);
+
+/*
+ * From iscsi_target_nego.c
+ */
+extern int iscsi_target_check_login_request(struct iscsi_conn *,
+					    struct iscsi_login *);
+
+/*
+ * From iscsi_target_login.c
+ */
+extern __printf(2, 3) int iscsi_change_param_sprintf(
+	struct iscsi_conn *, const char *, ...);
+
+/*
+ * From iscsi_target_parameters.c
+ */
+extern struct iscsi_param *iscsi_find_param_from_key(
+	char *, struct iscsi_param_list *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 3e0dd86..b316b44 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -536,7 +536,6 @@
 	char			initiatorname[TRANSPORT_IQN_LEN];
 	/* Used to signal demo mode created ACL, disabled by default */
 	bool			dynamic_node_acl;
-	bool			acl_stop:1;
 	u32			queue_depth;
 	u32			acl_index;
 	enum target_prot_type	saved_prot_type;
@@ -603,7 +602,6 @@
 	struct list_head	sess_cmd_list;
 	struct list_head	sess_wait_list;
 	spinlock_t		sess_cmd_lock;
-	struct kref		sess_kref;
 	void			*sess_cmd_map;
 	struct percpu_ida	sess_tag_pool;
 };
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 78d88f0..de44462 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -50,10 +50,6 @@
 	 */
 	int (*check_stop_free)(struct se_cmd *);
 	void (*release_cmd)(struct se_cmd *);
-	/*
-	 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
-	 */
-	int (*shutdown_session)(struct se_session *);
 	void (*close_session)(struct se_session *);
 	u32 (*sess_get_index)(struct se_session *);
 	/*
@@ -123,8 +119,6 @@
 		struct se_node_acl *, struct se_session *, void *);
 void	transport_register_session(struct se_portal_group *,
 		struct se_node_acl *, struct se_session *, void *);
-int	target_get_session(struct se_session *);
-void	target_put_session(struct se_session *);
 ssize_t	target_show_dynamic_sessions(struct se_portal_group *, char *);
 void	transport_free_session(struct se_session *);
 void	target_put_nacl(struct se_node_acl *);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 526fb3d..f28292d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -108,7 +108,7 @@
 		__entry->coalesced	= coalesced;
 	),
 
-	TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
+	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
 		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
 		  (__entry->e & (1<<11)) ? "logical" : "physical",
@@ -129,7 +129,7 @@
 		__entry->e		= e;
 	),
 
-	TP_printk("dst %x vec=%u (%s|%s|%s%s)",
+	TP_printk("dst %x vec %u (%s|%s|%s%s)",
 		  (u8)(__entry->e >> 56), (u8)__entry->e,
 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
 		  (__entry->e & (1<<11)) ? "logical" : "physical",
@@ -151,7 +151,7 @@
 		__entry->data		= data;
 	),
 
-	TP_printk("dst %u vec %x (%s|%s|%s%s)",
+	TP_printk("dst %u vec %u (%s|%s|%s%s)",
 		  (u8)(__entry->address >> 12), (u8)__entry->data,
 		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
 		  (__entry->address & (1<<2)) ? "logical" : "physical",
diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
index 8b0fbd9..75fff86 100644
--- a/include/trace/events/libata.h
+++ b/include/trace/events/libata.h
@@ -39,6 +39,7 @@
 		 ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \
 		 ata_opcode_name(ATA_CMD_FPDMA_READ),		\
 		 ata_opcode_name(ATA_CMD_FPDMA_WRITE),		\
+		 ata_opcode_name(ATA_CMD_NCQ_NON_DATA),		\
 		 ata_opcode_name(ATA_CMD_FPDMA_SEND),		\
 		 ata_opcode_name(ATA_CMD_FPDMA_RECV),		\
 		 ata_opcode_name(ATA_CMD_PIO_READ),		\
@@ -97,6 +98,8 @@
 		 ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE),	\
 		 ata_opcode_name(ATA_CMD_REQ_SENSE_DATA),	\
 		 ata_opcode_name(ATA_CMD_SANITIZE_DEVICE),	\
+		 ata_opcode_name(ATA_CMD_ZAC_MGMT_IN),		\
+		 ata_opcode_name(ATA_CMD_ZAC_MGMT_OUT),		\
 		 ata_opcode_name(ATA_CMD_RESTORE),		\
 		 ata_opcode_name(ATA_CMD_READ_LONG),		\
 		 ata_opcode_name(ATA_CMD_READ_LONG_ONCE),	\
@@ -139,6 +142,10 @@
 const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
 #define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
 
+const char *libata_trace_parse_subcmd(struct trace_seq *, unsigned char,
+				      unsigned char, unsigned char);
+#define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h)
+
 TRACE_EVENT(ata_qc_issue,
 
 	TP_PROTO(struct ata_queued_cmd *qc),
@@ -185,11 +192,12 @@
 		__entry->hob_nsect	= qc->tf.hob_nsect;
 	),
 
-	TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \
+	TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s%s " \
 		  " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
 		  __entry->ata_port, __entry->ata_dev, __entry->tag,
 		  show_protocol_name(__entry->proto),
 		  show_opcode_name(__entry->cmd),
+		  __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect),
 		  __entry->cmd, __entry->feature, __entry->nsect,
 		  __entry->lbal, __entry->lbam, __entry->lbah,
 		  __entry->hob_feature, __entry->hob_nsect,
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index c51afb7..a26415b 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -127,8 +127,11 @@
 __SYSCALL(__NR_symlinkat, sys_symlinkat)
 #define __NR_linkat 37
 __SYSCALL(__NR_linkat, sys_linkat)
+#ifdef __ARCH_WANT_RENAMEAT
+/* renameat is superseded with flags by renameat2 */
 #define __NR_renameat 38
 __SYSCALL(__NR_renameat, sys_renameat)
+#endif /* __ARCH_WANT_RENAMEAT */
 
 /* fs/namespace.c */
 #define __NR_umount2 39
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 453a76a..cdecf87 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -34,6 +34,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_AMDGPU_GEM_CREATE		0x00
 #define DRM_AMDGPU_GEM_MMAP		0x01
 #define DRM_AMDGPU_CTX			0x02
@@ -642,4 +646,8 @@
 #define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
 #define AMDGPU_FAMILY_CZ			135 /* Carrizo, Stoney */
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
index 6de7f01..72e326f 100644
--- a/include/uapi/drm/armada_drm.h
+++ b/include/uapi/drm/armada_drm.h
@@ -11,6 +11,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_ARMADA_GEM_CREATE		0x00
 #define DRM_ARMADA_GEM_MMAP		0x02
 #define DRM_ARMADA_GEM_PWRITE		0x03
@@ -44,4 +48,8 @@
 #define DRM_IOCTL_ARMADA_GEM_PWRITE \
 	ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index a0ebfe7..452675f 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -36,7 +36,13 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#if defined(__KERNEL__) || defined(__linux__)
+#if defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#elif defined(__linux__)
 
 #include <linux/types.h>
 #include <asm/ioctl.h>
@@ -59,6 +65,10 @@
 
 #endif
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
 #define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
@@ -181,7 +191,7 @@
 	_DRM_SHM = 2,		  /**< shared, cached */
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
-	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
+	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
 };
 
 /**
@@ -373,7 +383,11 @@
  */
 struct drm_buf_map {
 	int count;		/**< Length of the buffer list */
+#ifdef __cplusplus
+	void __user *virt;
+#else
 	void __user *virtual;		/**< Mmap'd area in user-virtual */
+#endif
 	struct drm_buf_pub __user *list;	/**< Buffer information */
 };
 
@@ -431,7 +445,7 @@
  * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
  */
 typedef enum {
-	DRM_DRAWABLE_CLIPRECTS,
+	DRM_DRAWABLE_CLIPRECTS
 } drm_drawable_info_type_t;
 
 struct drm_update_draw {
@@ -681,7 +695,15 @@
 	__s32 fd;
 };
 
-#include <drm/drm_mode.h>
+#if defined(__cplusplus)
+}
+#endif
+
+#include "drm_mode.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
 
 #define DRM_IOCTL_BASE			'd'
 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
@@ -876,4 +898,8 @@
 typedef struct drm_set_version drm_set_version_t;
 #endif
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 4d8da69..a5890bf 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -26,6 +26,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
 				 ((__u32)(c) << 16) | ((__u32)(d) << 24))
 
@@ -229,4 +233,8 @@
  */
 #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE	fourcc_mod_code(SAMSUNG, 1)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* DRM_FOURCC_H */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index c021743..49a7265 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -29,6 +29,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_DISPLAY_INFO_LEN	32
 #define DRM_CONNECTOR_NAME_LEN	32
 #define DRM_DISPLAY_MODE_LEN	32
@@ -202,6 +206,7 @@
 #define DRM_MODE_ENCODER_VIRTUAL 5
 #define DRM_MODE_ENCODER_DSI	6
 #define DRM_MODE_ENCODER_DPMST	7
+#define DRM_MODE_ENCODER_DPI	8
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -241,6 +246,7 @@
 #define DRM_MODE_CONNECTOR_eDP		14
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
 #define DRM_MODE_CONNECTOR_DSI		16
+#define DRM_MODE_CONNECTOR_DPI		17
 
 struct drm_mode_get_connector {
 
@@ -320,6 +326,16 @@
 	__u32 connector_id;
 };
 
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_ANY 0
+
 struct drm_mode_obj_get_properties {
 	__u64 props_ptr;
 	__u64 prop_values_ptr;
@@ -611,4 +627,8 @@
 	__u32 blob_id;
 };
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h
index 1d1a858..a951ced 100644
--- a/include/uapi/drm/drm_sarea.h
+++ b/include/uapi/drm/drm_sarea.h
@@ -34,6 +34,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
 #define SAREA_MAX                       0x2000U
@@ -83,4 +87,8 @@
 typedef struct drm_sarea drm_sarea_t;
 #endif
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif				/* _DRM_SAREA_H_ */
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index f95e1c4..2584c1c 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -19,6 +19,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints:
  *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
@@ -222,4 +226,8 @@
 #define DRM_IOCTL_ETNAVIV_GEM_USERPTR  DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
 #define DRM_IOCTL_ETNAVIV_GEM_WAIT     DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* __ETNAVIV_DRM_H__ */
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 3947c2e..cb3e9f9 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -17,6 +17,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /**
  * User-desired buffer creation information structure.
  *
@@ -362,4 +366,8 @@
 	__u32			buf_id[EXYNOS_DRM_OPS_MAX];
 };
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* _UAPI_EXYNOS_DRM_H_ */
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
index bdb0287..6e6cf86 100644
--- a/include/uapi/drm/i810_drm.h
+++ b/include/uapi/drm/i810_drm.h
@@ -3,6 +3,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* WARNING: These defines must be the same as what the Xserver uses.
  * if you change them, you must change the defines in the Xserver.
  */
@@ -280,4 +284,8 @@
 	unsigned int last_render;	/* Last Render Request */
 } drm_i810_mc_t;
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif				/* _I810_DRM_H_ */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a5524cc..c17d63d 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -29,6 +29,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  */
@@ -1170,4 +1174,8 @@
 	__u64 value;
 };
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* _UAPI_I915_DRM_H_ */
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
index fca8170..8c43375 100644
--- a/include/uapi/drm/mga_drm.h
+++ b/include/uapi/drm/mga_drm.h
@@ -37,6 +37,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the Xserver file (mga_sarea.h)
  */
@@ -416,4 +420,8 @@
 	void __user *value;
 } drm_mga_getparam_t;
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 254d3e9..bf19d2c 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -20,6 +20,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints:
  *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
@@ -217,4 +221,8 @@
 #define DRM_IOCTL_MSM_GEM_SUBMIT       DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE       DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* __MSM_DRM_H__ */
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 500d82a..259588a 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -27,7 +27,11 @@
 
 #define DRM_NOUVEAU_EVENT_NVIF                                       0x80000000
 
-#include <drm/drm.h>
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
 
 #define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
@@ -141,4 +145,8 @@
 #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
 #define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 38a3bd8..407cb55 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -22,6 +22,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  */
@@ -114,4 +118,8 @@
 #define DRM_IOCTL_OMAP_GEM_CPU_FINI	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
 #define DRM_IOCTL_OMAP_GEM_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* __OMAP_DRM_H__ */
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
index 4d1e326..7eef422 100644
--- a/include/uapi/drm/qxl_drm.h
+++ b/include/uapi/drm/qxl_drm.h
@@ -26,6 +26,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  *
@@ -84,7 +88,6 @@
 	__u32                pad;
 };
 
-/* XXX: call it drm_qxl_commands? */
 struct drm_qxl_execbuffer {
 	__u32		flags;		/* for future use */
 	__u32		commands_num;
@@ -148,4 +151,8 @@
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
 		struct drm_qxl_alloc_surf)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
index 7a44c65..690e9c6 100644
--- a/include/uapi/drm/r128_drm.h
+++ b/include/uapi/drm/r128_drm.h
@@ -35,6 +35,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (r128_sarea.h)
  */
@@ -325,4 +329,8 @@
 	void __user *value;
 } drm_r128_getparam_t;
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index ccb9bcd..490a59c 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -35,6 +35,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)
  */
@@ -1067,4 +1071,8 @@
 
 #define CIK_TILE_MODE_DEPTH_STENCIL_1D		5
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
index 5741474..0f6edde 100644
--- a/include/uapi/drm/savage_drm.h
+++ b/include/uapi/drm/savage_drm.h
@@ -28,6 +28,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #ifndef __SAVAGE_SAREA_DEFINES__
 #define __SAVAGE_SAREA_DEFINES__
 
@@ -209,4 +213,8 @@
 	} clear1;		/* SAVAGE_CMD_CLEAR data */
 };
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/sis_drm.h b/include/uapi/drm/sis_drm.h
index 374858c..3e3f7e9 100644
--- a/include/uapi/drm/sis_drm.h
+++ b/include/uapi/drm/sis_drm.h
@@ -27,6 +27,12 @@
 #ifndef __SIS_DRM_H__
 #define __SIS_DRM_H__
 
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* SiS specific ioctls */
 #define NOT_USED_0_3
 #define DRM_SIS_FB_ALLOC	0x04
@@ -64,4 +70,8 @@
 	unsigned long offset, size;
 } drm_sis_fb_t;
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif				/* __SIS_DRM_H__ */
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 27d0b05..d954f8c 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -25,6 +25,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
 #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
 
@@ -198,4 +202,8 @@
 #define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
 #define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index eeb37e3..af12e8a 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -26,6 +26,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_VC4_SUBMIT_CL                         0x00
 #define DRM_VC4_WAIT_SEQNO                        0x01
 #define DRM_VC4_WAIT_BO                           0x02
@@ -276,4 +280,8 @@
 	__u32 pad[16];
 };
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* _UAPI_VC4_DRM_H_ */
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
index fa21ed18..a1e125d 100644
--- a/include/uapi/drm/via_drm.h
+++ b/include/uapi/drm/via_drm.h
@@ -26,6 +26,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* WARNING: These defines must be the same as what the Xserver uses.
  * if you change them, you must change the defines in the Xserver.
  */
@@ -271,4 +275,8 @@
 	drm_via_blitsync_t sync;
 } drm_via_dmablit_t;
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif				/* _VIA_DRM_H_ */
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c74f1f9..91a31ff 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -26,6 +26,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  *
@@ -163,4 +167,8 @@
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
 	struct drm_virtgpu_get_caps)
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 5b68b4d..d325a41 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -30,6 +30,10 @@
 
 #include "drm.h"
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #define DRM_VMW_MAX_SURFACE_FACES 6
 #define DRM_VMW_MAX_MIP_LEVELS 24
 
@@ -1087,4 +1091,9 @@
 	enum drm_vmw_extended_context req;
 	struct drm_vmw_context_arg rep;
 };
+
+#if defined(__cplusplus)
+}
+#endif
+
 #endif
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index e21fe04..3b00f7c 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -222,7 +222,6 @@
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
-#define BLKDAXGET _IO(0x12,129)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index ca1054d..72a04a0 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -1,5 +1,5 @@
 #ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
 
 enum gtp_genl_cmds {
 	GTP_CMD_NEWPDP,
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 7cc28ab..309915f 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, Intel Corporation.
+ * Copyright (c) 2014-2016, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU Lesser General Public License,
@@ -20,11 +20,45 @@
 	__u8 data[128];
 } __packed;
 
+#define ND_SMART_HEALTH_VALID	(1 << 0)
+#define ND_SMART_TEMP_VALID 	(1 << 1)
+#define ND_SMART_SPARES_VALID	(1 << 2)
+#define ND_SMART_ALARM_VALID	(1 << 3)
+#define ND_SMART_USED_VALID	(1 << 4)
+#define ND_SMART_SHUTDOWN_VALID	(1 << 5)
+#define ND_SMART_VENDOR_VALID	(1 << 6)
+#define ND_SMART_TEMP_TRIP	(1 << 0)
+#define ND_SMART_SPARE_TRIP	(1 << 1)
+#define ND_SMART_NON_CRITICAL_HEALTH	(1 << 0)
+#define ND_SMART_CRITICAL_HEALTH	(1 << 1)
+#define ND_SMART_FATAL_HEALTH		(1 << 2)
+
+struct nd_smart_payload {
+	__u32 flags;
+	__u8 reserved0[4];
+	__u8 health;
+	__u16 temperature;
+	__u8 spares;
+	__u8 alarm_flags;
+	__u8 life_used;
+	__u8 shutdown_state;
+	__u8 reserved1;
+	__u32 vendor_size;
+	__u8 vendor_data[108];
+} __packed;
+
 struct nd_cmd_smart_threshold {
 	__u32 status;
 	__u8 data[8];
 } __packed;
 
+struct nd_smart_threshold_payload {
+	__u16 alarm_control;
+	__u16 temperature;
+	__u8 spares;
+	__u8 reserved[3];
+} __packed;
+
 struct nd_cmd_dimm_flags {
 	__u32 status;
 	__u32 flags;
@@ -125,6 +159,7 @@
 	ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7,
 	ND_CMD_VENDOR_EFFECT_LOG = 8,
 	ND_CMD_VENDOR = 9,
+	ND_CMD_CALL = 10,
 };
 
 enum {
@@ -158,6 +193,7 @@
 		[ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
 		[ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
 		[ND_CMD_VENDOR] = "vendor",
+		[ND_CMD_CALL] = "cmd_call",
 	};
 
 	if (cmd < ARRAY_SIZE(names) && names[cmd])
@@ -206,6 +242,7 @@
 #define ND_DEVICE_NAMESPACE_IO 4    /* legacy persistent memory */
 #define ND_DEVICE_NAMESPACE_PMEM 5  /* PMEM namespace (may alias with BLK) */
 #define ND_DEVICE_NAMESPACE_BLK 6   /* BLK namespace (may alias with PMEM) */
+#define ND_DEVICE_DAX_PMEM 7        /* Device DAX interface to pmem */
 
 enum nd_driver_flags {
 	ND_DRIVER_DIMM            = 1 << ND_DEVICE_DIMM,
@@ -214,6 +251,7 @@
 	ND_DRIVER_NAMESPACE_IO    = 1 << ND_DEVICE_NAMESPACE_IO,
 	ND_DRIVER_NAMESPACE_PMEM  = 1 << ND_DEVICE_NAMESPACE_PMEM,
 	ND_DRIVER_NAMESPACE_BLK   = 1 << ND_DEVICE_NAMESPACE_BLK,
+	ND_DRIVER_DAX_PMEM	  = 1 << ND_DEVICE_DAX_PMEM,
 };
 
 enum {
@@ -224,4 +262,44 @@
 	ARS_STATUS_MASK = 0x0000FFFF,
 	ARS_EXT_STATUS_SHIFT = 16,
 };
+
+/*
+ * struct nd_cmd_pkg
+ *
+ * is a wrapper to a quasi pass thru interface for invoking firmware
+ * associated with nvdimms.
+ *
+ * INPUT PARAMETERS
+ *
+ * nd_family corresponds to the firmware (e.g. DSM) interface.
+ *
+ * nd_command are the function index advertised by the firmware.
+ *
+ * nd_size_in is the size of the input parameters being passed to firmware
+ *
+ * OUTPUT PARAMETERS
+ *
+ * nd_fw_size is the size of the data firmware wants to return for
+ * the call.  If nd_fw_size is greater than size of nd_size_out, only
+ * the first nd_size_out bytes are returned.
+ */
+
+struct nd_cmd_pkg {
+	__u64   nd_family;		/* family of commands */
+	__u64   nd_command;
+	__u32   nd_size_in;		/* INPUT: size of input args */
+	__u32   nd_size_out;		/* INPUT: size of payload */
+	__u32   nd_reserved2[9];	/* reserved must be zero */
+	__u32   nd_fw_size;		/* OUTPUT: size fw wants to return */
+	unsigned char nd_payload[];	/* Contents of call      */
+};
+
+/* These NVDIMM families represent pre-standardization command sets */
+#define NVDIMM_FAMILY_INTEL 0
+#define NVDIMM_FAMILY_HPE1 1
+#define NVDIMM_FAMILY_HPE2 2
+
+#define ND_IOCTL_CALL			_IOWR(ND_IOCTL, ND_CMD_CALL,\
+					struct nd_cmd_pkg)
+
 #endif /* __NDCTL_H__ */
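
The new ND_CMD_CALL path gives userspace a vendor pass-through: the caller packs the firmware family, function number and payload sizes into struct nd_cmd_pkg and the driver forwards it to the DIMM's firmware interface. A hedged sketch of the calling convention; the device node, function index and sizes are placeholders rather than values defined by this patch:

#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ndctl.h>

int main(void)
{
	size_t in = 4, out = 128;
	struct nd_cmd_pkg *pkg = calloc(1, sizeof(*pkg) + in + out);
	int fd = open("/dev/nmem0", O_RDWR);
	int rc;

	pkg->nd_family   = NVDIMM_FAMILY_INTEL;	/* which DSM dialect to use */
	pkg->nd_command  = 1;			/* assumed function index */
	pkg->nd_size_in  = in;			/* input bytes at start of nd_payload[] */
	pkg->nd_size_out = out;			/* room reserved for output */

	rc = (fd < 0) ? -1 : ioctl(fd, ND_IOCTL_CALL, pkg);
	/* on success, pkg->nd_fw_size reports how much output firmware produced */
	free(pkg);
	return rc ? 1 : 0;
}
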
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 1d973d2..cd26d7a0 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -33,6 +33,7 @@
 header-y += xt_NFQUEUE.h
 header-y += xt_RATEEST.h
 header-y += xt_SECMARK.h
+header-y += xt_SYNPROXY.h
 header-y += xt_TCPMSS.h
 header-y += xt_TCPOPTSTRIP.h
 header-y += xt_TEE.h
diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h
index 2d59fba..ca67e61 100644
--- a/include/uapi/linux/netfilter/xt_SYNPROXY.h
+++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h
@@ -1,6 +1,8 @@
 #ifndef _XT_SYNPROXY_H
 #define _XT_SYNPROXY_H
 
+#include <linux/types.h>
+
 #define XT_SYNPROXY_OPT_MSS		0x01
 #define XT_SYNPROXY_OPT_WSCALE		0x02
 #define XT_SYNPROXY_OPT_SACK_PERM	0x04
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index c4b2a3f..50ff21f 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -61,5 +61,6 @@
 #define NVME_IOCTL_IO_CMD	_IOWR('N', 0x43, struct nvme_passthru_cmd)
 #define NVME_IOCTL_RESET	_IO('N', 0x44)
 #define NVME_IOCTL_SUBSYS_RESET	_IO('N', 0x45)
+#define NVME_IOCTL_RESCAN	_IO('N', 0x46)
 
 #endif /* _UAPI_LINUX_NVME_IOCTL_H */
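
NVME_IOCTL_RESCAN lets userspace ask the driver to re-enumerate namespaces on a controller without a reset. A hedged sketch; the controller character device name is the conventional one and is assumed here:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDWR);

	return (fd < 0 || ioctl(fd, NVME_IOCTL_RESCAN)) ? 1 : 0;
}
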
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 43fc8d2..36ce552 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -862,6 +862,7 @@
 };
 
 #define PERF_MAX_STACK_DEPTH		127
+#define PERF_MAX_CONTEXTS_PER_STACK	  8
 
 enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 763bb69..0ec1da2 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -228,7 +228,7 @@
  * complete set of ECC information. The ioctl truncates the larger internal
  * structure to retain binary compatibility with the static declaration of the
  * ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
- * the user struct, not the MAX size of the internal struct nand_ecclayout.
+ * the user struct, not the MAX size of the internal OOB layout representation.
  */
 struct nand_ecclayout_user {
 	__u32 eccbytes;
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index a533cec..98bebf8 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -66,7 +66,7 @@
  * The major version changes when data structures change in an incompatible
  * way. The driver must be the same for initialization to succeed.
  */
-#define HFI1_USER_SWMAJOR 5
+#define HFI1_USER_SWMAJOR 6
 
 /*
  * Minor version differences are always compatible
@@ -75,7 +75,12 @@
  * may not be implemented; the user code must deal with this if it
  * cares, or it must abort after initialization reports the difference.
  */
-#define HFI1_USER_SWMINOR 0
+#define HFI1_USER_SWMINOR 1
+
+/*
+ * We will encode the major/minor inside a single 32bit version number.
+ */
+#define HFI1_SWMAJOR_SHIFT 16
 
 /*
  * Set of HW and driver capability/feature bits.
@@ -107,19 +112,6 @@
 #define HFI1_RCVHDR_ENTSIZE_16   (1UL << 1)
 #define HFI1_RCVDHR_ENTSIZE_32   (1UL << 2)
 
-/*
- * If the unit is specified via open, HFI choice is fixed.  If port is
- * specified, it's also fixed.  Otherwise we try to spread contexts
- * across ports and HFIs, using different algorithms.  WITHIN is
- * the old default, prior to this mechanism.
- */
-#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
-			  * ports; this is the default */
-#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
-			  * active ports within), then next HFI */
-#define HFI1_ALG_COUNT  2 /* number of algorithm choices */
-
-
 /* User commands. */
 #define HFI1_CMD_ASSIGN_CTXT     1	/* allocate HFI and context */
 #define HFI1_CMD_CTXT_INFO       2	/* find out what resources we got */
@@ -127,7 +119,6 @@
 #define HFI1_CMD_TID_UPDATE      4	/* update expected TID entries */
 #define HFI1_CMD_TID_FREE        5	/* free expected TID entries */
 #define HFI1_CMD_CREDIT_UPD      6	/* force an update of PIO credit */
-#define HFI1_CMD_SDMA_STATUS_UPD 7      /* force update of SDMA status ring */
 
 #define HFI1_CMD_RECV_CTRL       8	/* control receipt of packets */
 #define HFI1_CMD_POLL_TYPE       9	/* set the kind of polling we want */
@@ -135,13 +126,46 @@
 #define HFI1_CMD_SET_PKEY        11     /* set context's pkey */
 #define HFI1_CMD_CTXT_RESET      12     /* reset context's HW send context */
 #define HFI1_CMD_TID_INVAL_READ  13     /* read TID cache invalidations */
-/* separate EPROM commands from normal PSM commands */
-#define HFI1_CMD_EP_INFO         64      /* read EPROM device ID */
-#define HFI1_CMD_EP_ERASE_CHIP   65      /* erase whole EPROM */
-/* range 66-74 no longer used */
-#define HFI1_CMD_EP_ERASE_RANGE  75      /* erase EPROM range */
-#define HFI1_CMD_EP_READ_RANGE   76      /* read EPROM range */
-#define HFI1_CMD_EP_WRITE_RANGE  77      /* write EPROM range */
+#define HFI1_CMD_GET_VERS	 14	/* get the version of the user cdev */
+
+/*
+ * User IOCTLs can not go above 128 if they do then see common.h and change the
+ * base for the snoop ioctl
+ */
+#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */
+
+/*
+ * Make the ioctls occupy the last 0xf0-0xff portion of the IB range
+ */
+#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0)
+
+struct hfi1_cmd;
+#define HFI1_IOCTL_ASSIGN_CTXT \
+	_IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info)
+#define HFI1_IOCTL_CTXT_INFO \
+	_IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info)
+#define HFI1_IOCTL_USER_INFO \
+	_IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info)
+#define HFI1_IOCTL_TID_UPDATE \
+	_IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info)
+#define HFI1_IOCTL_TID_FREE \
+	_IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info)
+#define HFI1_IOCTL_CREDIT_UPD \
+	_IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD))
+#define HFI1_IOCTL_RECV_CTRL \
+	_IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int)
+#define HFI1_IOCTL_POLL_TYPE \
+	_IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int)
+#define HFI1_IOCTL_ACK_EVENT \
+	_IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long)
+#define HFI1_IOCTL_SET_PKEY \
+	_IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16)
+#define HFI1_IOCTL_CTXT_RESET \
+	_IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET))
+#define HFI1_IOCTL_TID_INVAL_READ \
+	_IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info)
+#define HFI1_IOCTL_GET_VERS \
+	_IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int)
 
 #define _HFI1_EVENT_FROZEN_BIT         0
 #define _HFI1_EVENT_LINKDOWN_BIT       1
@@ -199,9 +223,7 @@
 	 * Should be set to HFI1_USER_SWVERSION.
 	 */
 	__u32 userversion;
-	__u16 pad;
-	/* HFI selection algorithm, if unit has not selected */
-	__u16 hfi1_alg;
+	__u32 pad;
 	/*
 	 * If two or more processes wish to share a context, each process
 	 * must set the subcontext_cnt and subcontext_id to the same
@@ -243,12 +265,6 @@
 	__u32 length;
 };
 
-struct hfi1_cmd {
-	__u32 type;        /* command type */
-	__u32 len;         /* length of struct pointed to by add */
-	__u64 addr;        /* pointer to user structure */
-};
-
 enum hfi1_sdma_comp_state {
 	FREE = 0,
 	QUEUED,
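
The hfi1 uapi change above retires the write()-based command list and the EPROM commands in favour of real ioctls built from IB_IOCTL_MAGIC, and bumps the user ABI major version to match. A hedged sketch of how a user library might probe the new interface; the device node name and the exact probing flow are assumptions for illustration:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <rdma/hfi/hfi1_user.h>

int hfi1_probe_version(void)
{
	int fd = open("/dev/hfi1_0", O_RDWR);
	int vers;

	if (fd < 0 || ioctl(fd, HFI1_IOCTL_GET_VERS, &vers) < 0)
		return -1;
	/* major lives in the upper half of the word, minor in the lower */
	return (vers >> HFI1_SWMAJOR_SHIFT) == HFI1_USER_SWMAJOR ? vers : -1;
}
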
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e373d1..02fe839 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -135,10 +135,12 @@
  * Local service operations:
  *   RESOLVE - The client requests the local service to resolve a path.
  *   SET_TIMEOUT - The local service requests the client to set the timeout.
+ *   IP_RESOLVE - The client requests the local service to resolve an IP to GID.
  */
 enum {
 	RDMA_NL_LS_OP_RESOLVE = 0,
 	RDMA_NL_LS_OP_SET_TIMEOUT,
+	RDMA_NL_LS_OP_IP_RESOLVE,
 	RDMA_NL_LS_NUM_OPS
 };
 
@@ -176,6 +178,10 @@
 	__u8 path_use;
 };
 
+struct rdma_ls_ip_resolve_header {
+	__u32 ifindex;
+};
+
 /* Local service attribute type */
 #define RDMA_NLA_F_MANDATORY	(1 << 13)
 #define RDMA_NLA_TYPE_MASK	(~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
@@ -193,6 +199,8 @@
  *   TCLASS          u8
  *   PKEY            u16                        cpu
  *   QOS_CLASS       u16                        cpu
+ *   IPV4            u32                        BE
+ *   IPV6            u8[16]                     BE
  */
 enum {
 	LS_NLA_TYPE_UNSPEC = 0,
@@ -204,6 +212,8 @@
 	LS_NLA_TYPE_TCLASS,
 	LS_NLA_TYPE_PKEY,
 	LS_NLA_TYPE_QOS_CLASS,
+	LS_NLA_TYPE_IPV4,
+	LS_NLA_TYPE_IPV6,
 	LS_NLA_TYPE_MAX
 };
 
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index a7f2770..691984c 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += asequencer.h
+header-y += asoc.h
 header-y += asound.h
 header-y += asound_fm.h
 header-y += compress_offload.h
@@ -10,3 +11,5 @@
 header-y += hdspm.h
 header-y += sb16_csp.h
 header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index c4cc1e4..e4701a3 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -116,6 +116,14 @@
 #define SND_SOC_TPLG_STREAM_PLAYBACK	0
 #define SND_SOC_TPLG_STREAM_CAPTURE	1
 
+/* vendor tuple types */
+#define SND_SOC_TPLG_TUPLE_TYPE_UUID	0
+#define SND_SOC_TPLG_TUPLE_TYPE_STRING	1
+#define SND_SOC_TPLG_TUPLE_TYPE_BOOL	2
+#define SND_SOC_TPLG_TUPLE_TYPE_BYTE	3
+#define SND_SOC_TPLG_TUPLE_TYPE_WORD	4
+#define SND_SOC_TPLG_TUPLE_TYPE_SHORT	5
+
 /*
  * Block Header.
  * This header precedes all object and object arrays below.
@@ -132,6 +140,35 @@
 	__le32 count;		/* number of elements in block */
 } __attribute__((packed));
 
+/* vendor tuple for uuid */
+struct snd_soc_tplg_vendor_uuid_elem {
+	__le32 token;
+	char uuid[16];
+} __attribute__((packed));
+
+/* vendor tuple for a bool/byte/short/word value */
+struct snd_soc_tplg_vendor_value_elem {
+	__le32 token;
+	__le32 value;
+} __attribute__((packed));
+
+/* vendor tuple for string */
+struct snd_soc_tplg_vendor_string_elem {
+	__le32 token;
+	char string[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+} __attribute__((packed));
+
+struct snd_soc_tplg_vendor_array {
+	__le32 size;	/* size in bytes of the array, including all elements */
+	__le32 type;	/* SND_SOC_TPLG_TUPLE_TYPE_ */
+	__le32 num_elems;	/* number of elements in array */
+	union {
+		struct snd_soc_tplg_vendor_uuid_elem uuid[0];
+		struct snd_soc_tplg_vendor_value_elem value[0];
+		struct snd_soc_tplg_vendor_string_elem string[0];
+	};
+} __attribute__((packed));
+
 /*
  * Private data.
  * All topology objects may have private data that can be used by the driver or
@@ -139,7 +176,10 @@
  */
 struct snd_soc_tplg_private {
 	__le32 size;	/* in bytes of private data */
-	char data[0];
+	union {
+		char data[0];
+		struct snd_soc_tplg_vendor_array array[0];
+	};
 } __attribute__((packed));
 
 /*
@@ -383,7 +423,7 @@
 	__le32 size;		/* in bytes of this structure */
 	char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
 	char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
-	__le32 pcm_id;		/* unique ID - used to match */
+	__le32 pcm_id;		/* unique ID - used to match with DAI link */
 	__le32 dai_id;		/* unique ID - used to match */
 	__le32 playback;	/* supports playback mode */
 	__le32 capture;		/* supports capture mode */
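
The topology ABI additions let vendors attach typed token/value tuples instead of opaque byte blobs: snd_soc_tplg_private can now carry one or more snd_soc_tplg_vendor_array blocks. A hedged sketch of how a topology-building tool might lay out a single array of word tuples in a caller-provided buffer; the token values are made up, and real code would also convert the fields to little endian:

#include <sound/asoc.h>

static size_t fill_vendor_words(void *buf, const unsigned int *tokens,
				const unsigned int *values, int n)
{
	struct snd_soc_tplg_private *priv = buf;	/* buf assumed large enough */
	struct snd_soc_tplg_vendor_array *arr = priv->array;
	int i;

	arr->type      = SND_SOC_TPLG_TUPLE_TYPE_WORD;
	arr->num_elems = n;
	for (i = 0; i < n; i++) {
		arr->value[i].token = tokens[i];
		arr->value[i].value = values[i];
	}
	arr->size  = sizeof(*arr) + n * sizeof(arr->value[0]);
	priv->size = arr->size;

	return sizeof(*priv) + priv->size;
}
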
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index c1c1ca1..0098a52 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -179,9 +179,9 @@
 #define TRIGCON_TRIGMODE_W1BUF		(1 << 10)
 #define TRIGCON_SWTRIGCMD_W0BUF		(1 << 6)
 #define TRIGCON_TRIGMODE_W0BUF		(1 << 5)
-#define TRIGCON_HWTRIGMASK_I80_RGB	(1 << 4)
-#define TRIGCON_HWTRIGEN_I80_RGB	(1 << 3)
-#define TRIGCON_HWTRIG_INV_I80_RGB	(1 << 2)
+#define TRIGCON_HWTRIGMASK		(1 << 4)
+#define TRIGCON_HWTRIGEN		(1 << 3)
+#define TRIGCON_HWTRIG_INV		(1 << 2)
 #define TRIGCON_SWTRIGCMD		(1 << 1)
 #define TRIGCON_SWTRIGEN		(1 << 0)
 
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ad66589..3a2a794 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -16,6 +16,7 @@
 #include <linux/videodev2.h>
 #include <linux/bitmap.h>
 #include <linux/fb.h>
+#include <linux/of.h>
 #include <media/v4l2-mediabus.h>
 #include <video/videomode.h>
 
@@ -345,6 +346,7 @@
 	int dc;
 	int dp;
 	int dma[2];
+	struct device_node *of_node;
 };
 
 #endif /* __DRM_IPU_H__ */
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index ddcc8ca..19aa65a 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -115,6 +115,14 @@
 	MIPI_DCS_READ_MEMORY_CONTINUE	= 0x3E,
 	MIPI_DCS_SET_TEAR_SCANLINE	= 0x44,
 	MIPI_DCS_GET_SCANLINE		= 0x45,
+	MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51,		/* MIPI DCS 1.3 */
+	MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 0x52,		/* MIPI DCS 1.3 */
+	MIPI_DCS_WRITE_CONTROL_DISPLAY  = 0x53,		/* MIPI DCS 1.3 */
+	MIPI_DCS_GET_CONTROL_DISPLAY	= 0x54,		/* MIPI DCS 1.3 */
+	MIPI_DCS_WRITE_POWER_SAVE	= 0x55,		/* MIPI DCS 1.3 */
+	MIPI_DCS_GET_POWER_SAVE		= 0x56,		/* MIPI DCS 1.3 */
+	MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E,	/* MIPI DCS 1.3 */
+	MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F,	/* MIPI DCS 1.3 */
 	MIPI_DCS_READ_DDB_START		= 0xA1,
 	MIPI_DCS_READ_DDB_CONTINUE	= 0xA8,
 };
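
The extra MIPI DCS 1.3 opcodes cover backlight and CABC control. A hedged sketch of a panel driver using one of them through the existing mipi_dsi_dcs_write() helper; whether brightness takes one or two bytes depends on the panel, and one byte is assumed here:

#include <linux/types.h>
#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int panel_set_brightness(struct mipi_dsi_device *dsi, u8 level)
{
	u8 payload[1] = { level };
	ssize_t ret;

	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				 payload, sizeof(payload));
	return ret < 0 ? ret : 0;
}
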
diff --git a/init/Kconfig b/init/Kconfig
index a9c4aefd..f755a60 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1306,6 +1306,17 @@
 
 endif
 
+choice
+	prompt "Compiler optimization level"
+	default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+
+config CC_OPTIMIZE_FOR_PERFORMANCE
+	bool "Optimize for performance"
+	help
+	  This is the default optimization level for the kernel, building
+	  with the "-O2" compiler flag for best performance and most
+	  helpful compile-time warnings.
+
 config CC_OPTIMIZE_FOR_SIZE
 	bool "Optimize for size"
 	help
@@ -1314,6 +1325,8 @@
 
 	  If unsure, say N.
 
+endchoice
+
 config SYSCTL
 	bool
 
@@ -2049,6 +2062,22 @@
 
 endchoice
 
+config TRIM_UNUSED_KSYMS
+	bool "Trim unused exported kernel symbols"
+	depends on MODULES && !UNUSED_SYMBOLS
+	help
+	  The kernel and some modules make many symbols available for
+	  other modules to use via EXPORT_SYMBOL() and variants. Depending
+	  on the set of modules being selected in your kernel configuration,
+	  many of those exported symbols might never be used.
+
+	  This option allows for unused exported symbols to be dropped from
+	  the build. In turn, this provides the compiler more opportunities
+	  (especially when using LTO) for optimizing the code and reducing
+	  binary size.  This might have some security advantages as well.
+
+	  If unsure say N.
+
 endif # MODULES
 
 config MODULES_TREE_LOOKUP
diff --git a/init/main.c b/init/main.c
index bc0f9e0..4c17fda 100644
--- a/init/main.c
+++ b/init/main.c
@@ -607,6 +607,7 @@
 		initrd_start = 0;
 	}
 #endif
+	page_ext_init();
 	debug_objects_mem_init();
 	kmemleak_init();
 	setup_per_cpu_pageset();
@@ -1003,8 +1004,6 @@
 	sched_init_smp();
 
 	page_alloc_init_late();
-	/* Initialize page ext after all struct pages are initializaed */
-	page_ext_init();
 
 	do_basic_setup();
 
diff --git a/ipc/shm.c b/ipc/shm.c
index 331fc1b..1328251 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1200,7 +1200,11 @@
 	if (err)
 		goto out_fput;
 
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem)) {
+		err = -EINTR;
+		goto out_fput;
+	}
+
 	if (addr && !(shmflg & SHM_REMAP)) {
 		err = -EINVAL;
 		if (addr + size < addr)
@@ -1271,7 +1275,8 @@
 	if (addr & ~PAGE_MASK)
 		return retval;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/*
 	 * This function tries to be smart and unmap shm segments that
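
Both the shmat() and shmdt() paths now take mmap_sem with down_write_killable(), so a task hit by a fatal signal stops waiting for the lock and the syscall returns -EINTR instead of piling up unkillably. The same conversion pattern recurs in several other files in this series; a minimal sketch of it:

#include <linux/mm_types.h>
#include <linux/rwsem.h>

static int frob_address_space(struct mm_struct *mm)
{
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;		/* killed while sleeping on the lock */

	/* ... modify the mappings ... */

	up_write(&mm->mmap_sem);
	return 0;
}
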
diff --git a/kernel/Makefile b/kernel/Makefile
index f0c40bf..e2ec54e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -91,9 +91,7 @@
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
-obj-$(CONFIG_BINFMT_ELF) += elfcore.o
-obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
-obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
+obj-$(CONFIG_ELFCORE) += elfcore.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_TRACE_CLOCK) += trace/
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index c8ee352..080a2df 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -136,7 +136,8 @@
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;
 
-	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 668e079..eec9f90 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -126,31 +126,6 @@
  * are set to NOT_INIT to indicate that they are no longer readable.
  */
 
-/* types of values stored in eBPF registers */
-enum bpf_reg_type {
-	NOT_INIT = 0,		 /* nothing was written into register */
-	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
-	PTR_TO_CTX,		 /* reg points to bpf_context */
-	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
-	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
-	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
-	FRAME_PTR,		 /* reg == frame_pointer */
-	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
-	CONST_IMM,		 /* constant integer value */
-
-	/* PTR_TO_PACKET represents:
-	 * skb->data
-	 * skb->data + imm
-	 * skb->data + (u16) var
-	 * skb->data + (u16) var + imm
-	 * if (range > 0) then [ptr, ptr + range - off) is safe to access
-	 * if (id > 0) means that some 'var' was added
-	 * if (off > 0) menas that 'imm' was added
-	 */
-	PTR_TO_PACKET,
-	PTR_TO_PACKET_END,	 /* skb->data + headlen */
-};
-
 struct reg_state {
 	enum bpf_reg_type type;
 	union {
@@ -695,10 +670,10 @@
 
 /* check access to 'struct bpf_context' fields */
 static int check_ctx_access(struct verifier_env *env, int off, int size,
-			    enum bpf_access_type t)
+			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
 	if (env->prog->aux->ops->is_valid_access &&
-	    env->prog->aux->ops->is_valid_access(off, size, t)) {
+	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
 		/* remember the offset of last byte accessed in ctx */
 		if (env->prog->aux->max_ctx_offset < off + size)
 			env->prog->aux->max_ctx_offset = off + size;
@@ -798,21 +773,19 @@
 			mark_reg_unknown_value(state->regs, value_regno);
 
 	} else if (reg->type == PTR_TO_CTX) {
+		enum bpf_reg_type reg_type = UNKNOWN_VALUE;
+
 		if (t == BPF_WRITE && value_regno >= 0 &&
 		    is_pointer_value(env, value_regno)) {
 			verbose("R%d leaks addr into ctx\n", value_regno);
 			return -EACCES;
 		}
-		err = check_ctx_access(env, off, size, t);
+		err = check_ctx_access(env, off, size, t, &reg_type);
 		if (!err && t == BPF_READ && value_regno >= 0) {
 			mark_reg_unknown_value(state->regs, value_regno);
-			if (off == offsetof(struct __sk_buff, data) &&
-			    env->allow_ptr_leaks)
+			if (env->allow_ptr_leaks)
 				/* note that reg.[id|off|range] == 0 */
-				state->regs[value_regno].type = PTR_TO_PACKET;
-			else if (off == offsetof(struct __sk_buff, data_end) &&
-				 env->allow_ptr_leaks)
-				state->regs[value_regno].type = PTR_TO_PACKET_END;
+				state->regs[value_regno].type = reg_type;
 		}
 
 	} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
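
Rather than the verifier special-casing the __sk_buff data/data_end offsets, the program-type callback now reports which register type a context load yields through a new out-parameter. A hedged sketch of a callback written against the new signature; the offsets handled are illustrative, not a real program type:

#include <linux/bpf.h>
#include <linux/stddef.h>

static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct __sk_buff) || size != sizeof(__u32))
		return false;

	if (off == offsetof(struct __sk_buff, data))
		*reg_type = PTR_TO_PACKET;
	else if (off == offsetof(struct __sk_buff, data_end))
		*reg_type = PTR_TO_PACKET_END;

	return true;
}
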
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9325e7..179ef46 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -19,11 +19,13 @@
 };
 
 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
 
 static inline size_t perf_callchain_entry__sizeof(void)
 {
 	return (sizeof(struct perf_callchain_entry) +
-		sizeof(__u64) * sysctl_perf_event_max_stack);
+		sizeof(__u64) * (sysctl_perf_event_max_stack +
+				 sysctl_perf_event_max_contexts_per_stack));
 }
 
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
@@ -32,12 +34,12 @@
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 				struct pt_regs *regs)
 {
 }
@@ -176,14 +178,15 @@
 	if (!kernel && !user)
 		return NULL;
 
-	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
+	struct perf_callchain_entry_ctx ctx;
 	int rctx;
 
 	entry = get_callchain_entry(&rctx);
@@ -193,12 +196,16 @@
 	if (!entry)
 		goto exit_put;
 
-	entry->nr = init_nr;
+	ctx.entry     = entry;
+	ctx.max_stack = max_stack;
+	ctx.nr	      = entry->nr = init_nr;
+	ctx.contexts       = 0;
+	ctx.contexts_maxed = false;
 
 	if (kernel && !user_mode(regs)) {
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(&ctx, regs);
 	}
 
 	if (user) {
@@ -214,8 +221,8 @@
 				goto exit_put;
 
 			if (add_mark)
-				perf_callchain_store(entry, PERF_CONTEXT_USER);
-			perf_callchain_user(entry, regs);
+				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+			perf_callchain_user(&ctx, regs);
 		}
 	}
 
@@ -225,10 +232,15 @@
 	return entry;
 }
 
+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int new_value = sysctl_perf_event_max_stack, ret;
+	int *value = table->data;
+	int new_value = *value, ret;
 	struct ctl_table new_table = *table;
 
 	new_table.data = &new_value;
@@ -240,7 +252,7 @@
 	if (atomic_read(&nr_callchain_events))
 		ret = -EBUSY;
 	else
-		sysctl_perf_event_max_stack = new_value;
+		*value = new_value;
 
 	mutex_unlock(&callchain_mutex);
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c01f733..b7a525a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1130,7 +1130,9 @@
 	struct vm_area_struct *vma;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	if (mm->uprobes_state.xol_area) {
 		ret = -EALREADY;
 		goto fail;
@@ -1469,7 +1471,8 @@
 	if (current->flags & PF_EXITING)
 		return;
 
-	if (!__create_xol_area(current->utask->dup_xol_addr))
+	if (!__create_xol_area(current->utask->dup_xol_addr) &&
+			!fatal_signal_pending(current))
 		uprobe_warn(current, "dup xol area");
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 75b34fe..9e6e135 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -918,17 +918,28 @@
 		task_pid_type(p, wo->wo_type) == wo->wo_pid;
 }
 
-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+static int
+eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
 {
 	if (!eligible_pid(wo, p))
 		return 0;
-	/* Wait for all children (clone and not) if __WALL is set;
-	 * otherwise, wait for clone children *only* if __WCLONE is
-	 * set; otherwise, wait for non-clone children *only*.  (Note:
-	 * A "clone" child here is one that reports to its parent
-	 * using a signal other than SIGCHLD.) */
-	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
-	    && !(wo->wo_flags & __WALL))
+
+	/*
+	 * Wait for all children (clone and not) if __WALL is set or
+	 * if it is traced by us.
+	 */
+	if (ptrace || (wo->wo_flags & __WALL))
+		return 1;
+
+	/*
+	 * Otherwise, wait for clone children *only* if __WCLONE is set;
+	 * otherwise, wait for non-clone children *only*.
+	 *
+	 * Note: a "clone" child here is one that reports to its parent
+	 * using a signal other than SIGCHLD, or a non-leader thread which
+	 * we can only see if it is traced by us.
+	 */
+	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
 		return 0;
 
 	return 1;
@@ -1300,7 +1311,7 @@
 	if (unlikely(exit_state == EXIT_DEAD))
 		return 0;
 
-	ret = eligible_child(wo, p);
+	ret = eligible_child(wo, ptrace, p);
 	if (!ret)
 		return ret;
 
@@ -1524,7 +1535,8 @@
 	enum pid_type type;
 	long ret;
 
-	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
+	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
+			__WNOTHREAD|__WCLONE|__WALL))
 		return -EINVAL;
 	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
 		return -EINVAL;
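
Two user-visible wait fixes sit in this hunk: eligible_child() now accepts any child the caller is ptracing regardless of __WCLONE/__WALL, and waitid() no longer rejects the __WNOTHREAD/__WCLONE/__WALL modifiers with EINVAL. A hedged userspace sketch of the newly permitted flag combination; the no-children handling is illustrative only:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>

int main(void)
{
	siginfo_t info;

	/* WEXITED | __WALL used to fail with EINVAL before this change */
	if (waitid(P_ALL, 0, &info, WEXITED | __WALL) && errno == ECHILD)
		puts("no children to wait for");
	return 0;
}
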
diff --git a/kernel/fork.c b/kernel/fork.c
index 103d78f..5c2c355 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -340,13 +340,14 @@
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 }
 
-static struct task_struct *dup_task_struct(struct task_struct *orig)
+static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
-	int node = tsk_fork_get_node(orig);
 	int err;
 
+	if (node == NUMA_NO_NODE)
+		node = tsk_fork_get_node(orig);
 	tsk = alloc_task_struct_node(node);
 	if (!tsk)
 		return NULL;
@@ -413,7 +414,10 @@
 	unsigned long charge;
 
 	uprobe_start_dup_mmap();
-	down_write(&oldmm->mmap_sem);
+	if (down_write_killable(&oldmm->mmap_sem)) {
+		retval = -EINTR;
+		goto fail_uprobe_end;
+	}
 	flush_cache_dup_mm(oldmm);
 	uprobe_dup_mmap(oldmm, mm);
 	/*
@@ -525,6 +529,7 @@
 	up_write(&mm->mmap_sem);
 	flush_tlb_mm(oldmm);
 	up_write(&oldmm->mmap_sem);
+fail_uprobe_end:
 	uprobe_end_dup_mmap();
 	return retval;
 fail_nomem_anon_vma_fork:
@@ -731,6 +736,7 @@
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
 static void mmput_async_fn(struct work_struct *work)
 {
 	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
@@ -744,6 +750,7 @@
 		schedule_work(&mm->async_put_work);
 	}
 }
+#endif
 
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
@@ -1276,7 +1283,8 @@
 					int __user *child_tidptr,
 					struct pid *pid,
 					int trace,
-					unsigned long tls)
+					unsigned long tls,
+					int node)
 {
 	int retval;
 	struct task_struct *p;
@@ -1328,7 +1336,7 @@
 		goto fork_out;
 
 	retval = -ENOMEM;
-	p = dup_task_struct(current);
+	p = dup_task_struct(current, node);
 	if (!p)
 		goto fork_out;
 
@@ -1706,7 +1714,8 @@
 struct task_struct *fork_idle(int cpu)
 {
 	struct task_struct *task;
-	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
+	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
+			    cpu_to_node(cpu));
 	if (!IS_ERR(task)) {
 		init_idle_pids(task->pids);
 		init_idle(task, cpu);
@@ -1751,7 +1760,7 @@
 	}
 
 	p = copy_process(clone_flags, stack_start, stack_size,
-			 child_tidptr, NULL, trace, tls);
+			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index c92e448..1276aab 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -37,6 +37,7 @@
 
 config GCOV_PROFILE_ALL
 	bool "Profile entire Kernel"
+	depends on !COMPILE_TEST
 	depends on GCOV_KERNEL
 	depends on ARCH_HAS_GCOV_PROFILE_ALL
 	default n
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c427422..89b49f6 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -125,7 +125,7 @@
 
 	domain = data->domain;
 	if (WARN_ON(domain == NULL))
-		return;
+		return -EINVAL;
 
 	if (!irq_domain_is_ipi(domain)) {
 		pr_warn("Trying to destroy a non IPI domain!\n");
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ee70aef..4384672 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -103,6 +103,65 @@
 	return ret;
 }
 
+static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
+		struct kexec_segment __user *segments, unsigned long flags)
+{
+	struct kimage **dest_image, *image;
+	unsigned long i;
+	int ret;
+
+	if (flags & KEXEC_ON_CRASH) {
+		dest_image = &kexec_crash_image;
+		if (kexec_crash_image)
+			arch_kexec_unprotect_crashkres();
+	} else {
+		dest_image = &kexec_image;
+	}
+
+	if (nr_segments == 0) {
+		/* Uninstall image */
+		kimage_free(xchg(dest_image, NULL));
+		return 0;
+	}
+	if (flags & KEXEC_ON_CRASH) {
+		/*
+		 * Loading another kernel to switch to if this one
+		 * crashes.  Free any current crash dump kernel before
+		 * we corrupt it.
+		 */
+		kimage_free(xchg(&kexec_crash_image, NULL));
+	}
+
+	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
+	if (ret)
+		return ret;
+
+	if (flags & KEXEC_PRESERVE_CONTEXT)
+		image->preserve_context = 1;
+
+	ret = machine_kexec_prepare(image);
+	if (ret)
+		goto out;
+
+	for (i = 0; i < nr_segments; i++) {
+		ret = kimage_load_segment(image, &image->segment[i]);
+		if (ret)
+			goto out;
+	}
+
+	kimage_terminate(image);
+
+	/* Install the new kernel and uninstall the old */
+	image = xchg(dest_image, image);
+
+out:
+	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
+		arch_kexec_protect_crashkres();
+
+	kimage_free(image);
+	return ret;
+}
+
 /*
  * Exec Kernel system call: for obvious reasons only root may call it.
  *
@@ -127,7 +186,6 @@
 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 		struct kexec_segment __user *, segments, unsigned long, flags)
 {
-	struct kimage **dest_image, *image;
 	int result;
 
 	/* We only trust the superuser with rebooting the system. */
@@ -152,9 +210,6 @@
 	if (nr_segments > KEXEC_SEGMENT_MAX)
 		return -EINVAL;
 
-	image = NULL;
-	result = 0;
-
 	/* Because we write directly to the reserved memory
 	 * region when loading crash kernels we need a mutex here to
 	 * prevent multiple crash  kernels from attempting to load
@@ -166,53 +221,9 @@
 	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 
-	dest_image = &kexec_image;
-	if (flags & KEXEC_ON_CRASH)
-		dest_image = &kexec_crash_image;
-	if (nr_segments > 0) {
-		unsigned long i;
+	result = do_kexec_load(entry, nr_segments, segments, flags);
 
-		if (flags & KEXEC_ON_CRASH) {
-			/*
-			 * Loading another kernel to switch to if this one
-			 * crashes.  Free any current crash dump kernel before
-			 * we corrupt it.
-			 */
-
-			kimage_free(xchg(&kexec_crash_image, NULL));
-			result = kimage_alloc_init(&image, entry, nr_segments,
-						   segments, flags);
-			crash_map_reserved_pages();
-		} else {
-			/* Loading another kernel to reboot into. */
-
-			result = kimage_alloc_init(&image, entry, nr_segments,
-						   segments, flags);
-		}
-		if (result)
-			goto out;
-
-		if (flags & KEXEC_PRESERVE_CONTEXT)
-			image->preserve_context = 1;
-		result = machine_kexec_prepare(image);
-		if (result)
-			goto out;
-
-		for (i = 0; i < nr_segments; i++) {
-			result = kimage_load_segment(image, &image->segment[i]);
-			if (result)
-				goto out;
-		}
-		kimage_terminate(image);
-		if (flags & KEXEC_ON_CRASH)
-			crash_unmap_reserved_pages();
-	}
-	/* Install the new kernel, and  Uninstall the old */
-	image = xchg(dest_image, image);
-
-out:
 	mutex_unlock(&kexec_mutex);
-	kimage_free(image);
 
 	return result;
 }
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d5d4082..56b3ed0 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -954,7 +954,6 @@
 	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
 	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
 
-	crash_map_reserved_pages();
 	crash_free_reserved_phys_range(end, crashk_res.end);
 
 	if ((start == end) && (crashk_res.parent != NULL))
@@ -968,7 +967,6 @@
 	crashk_res.end = end - 1;
 
 	insert_resource(&iomem_resource, ram_res);
-	crash_unmap_reserved_pages();
 
 unlock:
 	mutex_unlock(&kexec_mutex);
@@ -1553,13 +1551,14 @@
 }
 
 /*
- * Add and remove page tables for crashkernel memory
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
  *
  * Provide an empty default implementation here -- architecture
  * code may override this
  */
-void __weak crash_map_reserved_pages(void)
+void __weak arch_kexec_protect_crashkres(void)
 {}
 
-void __weak crash_unmap_reserved_pages(void)
+void __weak arch_kexec_unprotect_crashkres(void)
 {}
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index c72d2ff..503bc2d 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -274,8 +274,11 @@
 		return -EBUSY;
 
 	dest_image = &kexec_image;
-	if (flags & KEXEC_FILE_ON_CRASH)
+	if (flags & KEXEC_FILE_ON_CRASH) {
 		dest_image = &kexec_crash_image;
+		if (kexec_crash_image)
+			arch_kexec_unprotect_crashkres();
+	}
 
 	if (flags & KEXEC_FILE_UNLOAD)
 		goto exchange;
@@ -324,6 +327,9 @@
 exchange:
 	image = xchg(dest_image, image);
 out:
+	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
+		arch_kexec_protect_crashkres();
+
 	mutex_unlock(&kexec_mutex);
 	kimage_free(image);
 	return ret;
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index f231e0b..bec0b64 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -37,6 +37,7 @@
 	free_percpu(brw->fast_read_ctr);
 	brw->fast_read_ctr = NULL; /* catch use after free bugs */
 }
+EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
 /*
  * This is the fast-path for down_read/up_read. If it succeeds we rely
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c817216..2e853ad 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -173,6 +173,22 @@
 
 EXPORT_SYMBOL(down_write_nested);
 
+int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
+
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+		rwsem_release(&sem->dep_map, 1, _RET_IP_);
+		return -EINTR;
+	}
+
+	rwsem_set_owner(sem);
+	return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable_nested);
+
 void up_read_non_owner(struct rw_semaphore *sem)
 {
 	__up_read(sem);
diff --git a/kernel/pid.c b/kernel/pid.c
index 4d73a83..f66162f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -311,7 +311,7 @@
 	pid->level = ns->level;
 	for (i = ns->level; i >= 0; i--) {
 		nr = alloc_pidmap(tmp);
-		if (IS_ERR_VALUE(nr)) {
+		if (nr < 0) {
 			retval = nr;
 			goto out_free;
 		}
diff --git a/kernel/relay.c b/kernel/relay.c
index 074994b..04d7cf3 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -614,6 +614,7 @@
 
 	kref_put(&chan->kref, relay_destroy_channel);
 	mutex_unlock(&relay_channels_mutex);
+	kfree(chan);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 404c078..7f2cae4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1768,13 +1768,15 @@
 	cookie = lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
+		int wake_flags = 0;
+
 		p = llist_entry(llist, struct task_struct, wake_entry);
 		llist = llist_next(llist);
-		/*
-		 * See ttwu_queue(); we only call ttwu_queue_remote() when
-		 * its a x-cpu wakeup.
-		 */
-		ttwu_do_activate(rq, p, WF_MIGRATED, cookie);
+
+		if (p->sched_remote_wakeup)
+			wake_flags = WF_MIGRATED;
+
+		ttwu_do_activate(rq, p, wake_flags, cookie);
 	}
 
 	lockdep_unpin_lock(&rq->lock, cookie);
@@ -1819,10 +1821,12 @@
 	irq_exit();
 }
 
-static void ttwu_queue_remote(struct task_struct *p, int cpu)
+static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
 {
 	struct rq *rq = cpu_rq(cpu);
 
+	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
+
 	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
 		if (!set_nr_if_polling(rq->idle))
 			smp_send_reschedule(cpu);
@@ -1869,7 +1873,7 @@
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
 		sched_clock_cpu(cpu); /* sync clocks x-cpu */
-		ttwu_queue_remote(p, cpu);
+		ttwu_queue_remote(p, cpu, wake_flags);
 		return;
 	}
 #endif
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 154ae3a..14c4aa2 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -388,7 +390,7 @@
 	mutex_unlock(&global_tunables_lock);
 
 	sugov_policy_free(sg_policy);
-	pr_err("cpufreq: schedutil governor initialization failed (error %d)\n", ret);
+	pr_err("initialization failed (error %d)\n", ret);
 	return ret;
 }
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index bd12c6c..c5aeedf 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -127,7 +127,7 @@
  */
 static void cpuidle_idle_call(void)
 {
-	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_device *dev = cpuidle_get_device();
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 
diff --git a/kernel/signal.c b/kernel/signal.c
index ab122a2..96e9bc4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -224,7 +224,7 @@
 	if (!__ratelimit(&ratelimit_state))
 		return;
 
-	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
+	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
 				current->comm, current->pid, sig);
 }
 
@@ -1089,10 +1089,10 @@
 static void print_fatal_signal(int signr)
 {
 	struct pt_regs *regs = signal_pt_regs();
-	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
+	pr_info("potentially unexpected fatal signal %d.\n", signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
-	printk(KERN_INFO "code at %08lx: ", regs->ip);
+	pr_info("code at %08lx: ", regs->ip);
 	{
 		int i;
 		for (i = 0; i < 16; i++) {
@@ -1100,10 +1100,10 @@
 
 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
 				break;
-			printk(KERN_CONT "%02x ", insn);
+			pr_cont("%02x ", insn);
 		}
 	}
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 #endif
 	preempt_disable();
 	show_regs(regs);
diff --git a/kernel/sys.c b/kernel/sys.c
index cf8ba54..89d5be4 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2246,7 +2246,8 @@
 	case PR_SET_THP_DISABLE:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
-		down_write(&me->mm->mmap_sem);
+		if (down_write_killable(&me->mm->mmap_sem))
+			return -EINTR;
 		if (arg2)
 			me->mm->def_flags |= VM_NOHUGEPAGE;
 		else
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2effd84..87b2fc38 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1149,13 +1149,22 @@
 	},
 	{
 		.procname	= "perf_event_max_stack",
-		.data		= NULL, /* filled in by handler */
+		.data		= &sysctl_perf_event_max_stack,
 		.maxlen		= sizeof(sysctl_perf_event_max_stack),
 		.mode		= 0644,
 		.proc_handler	= perf_event_max_stack_handler,
 		.extra1		= &zero,
 		.extra2		= &six_hundred_forty_kb,
 	},
+	{
+		.procname	= "perf_event_max_contexts_per_stack",
+		.data		= &sysctl_perf_event_max_contexts_per_stack,
+		.maxlen		= sizeof(sysctl_perf_event_max_contexts_per_stack),
+		.mode		= 0644,
+		.proc_handler	= perf_event_max_stack_handler,
+		.extra1		= &zero,
+		.extra2		= &one_thousand,
+	},
 #endif
 #ifdef CONFIG_KMEMCHECK
 	{
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8c7392c..e99df0f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -425,6 +425,7 @@
 {
 	debug_object_free(timer, &hrtimer_debug_descr);
 }
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 
 #else
 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 780bcbe..26f603d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -198,7 +198,7 @@
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
-	file = (struct file *)array->ptrs[index];
+	file = READ_ONCE(array->ptrs[index]);
 	if (unlikely(!file))
 		return -ENOENT;
 
@@ -209,6 +209,10 @@
 	    event->pmu->count)
 		return -EINVAL;
 
+	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
+		     event->attr.type != PERF_TYPE_RAW))
+		return -EINVAL;
+
 	/*
 	 * we don't know if the function is run successfully by the
 	 * return value. It can be judged in other places, such as
@@ -247,7 +251,7 @@
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
-	file = (struct file *)array->ptrs[index];
+	file = READ_ONCE(array->ptrs[index]);
 	if (unlikely(!file))
 		return -ENOENT;
 
@@ -349,7 +353,8 @@
 }
 
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
-static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+					enum bpf_reg_type *reg_type)
 {
 	/* check bounds */
 	if (off < 0 || off >= sizeof(struct pt_regs))
@@ -427,7 +432,8 @@
 	}
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+				    enum bpf_reg_type *reg_type)
 {
 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
 		return false;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e707ab3..b9cfdbf 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1841,6 +1841,9 @@
 
 	  If unsure, say N.
 
+config TEST_UUID
+	tristate "Test functions located in the uuid module at runtime"
+
 config TEST_RHASHTABLE
 	tristate "Perform selftest on resizable hash table"
 	default n
@@ -1849,6 +1852,17 @@
 
 	  If unsure, say N.
 
+config TEST_HASH
+	tristate "Perform selftest on hash functions"
+	default n
+	help
+	  Enable this option to test the kernel's integer (<linux/hash.h>)
+	  and string (<linux/stringhash.h>) hash functions on boot
+	  (or module load).
+
+	  This is intended to help people writing architecture-specific
+	  optimized versions.  If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
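
TEST_HASH builds a module that cross-checks full_name_hash(), hashlen_string() and the <linux/hash.h> integer hashes against each other and verifies that a k-bit request really stays within k bits. A hedged reminder of the kind of caller it is protecting, reducing a value to a small bucket index; the table size is made up:

#include <linux/types.h>
#include <linux/hash.h>

#define EXAMPLE_TABLE_BITS	7	/* 128 buckets */

static unsigned int example_bucket(u64 key)
{
	/* result is guaranteed to be < (1 << EXAMPLE_TABLE_BITS) */
	return hash_64(key, EXAMPLE_TABLE_BITS);
}
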
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index c635a10..533f912 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -22,7 +22,7 @@
 	tristate "KGDB: use kgdb over the serial console"
 	select CONSOLE_POLL
 	select MAGIC_SYSRQ
-	depends on TTY
+	depends on TTY && HW_CONSOLE
 	default y
 	help
 	  Share a serial console with kgdb. Sysrq-g must be used
diff --git a/lib/Makefile b/lib/Makefile
index 42b6918..ff6a7a6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -48,6 +48,7 @@
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
@@ -57,6 +58,7 @@
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 4a1515f..51a76af 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -657,9 +657,9 @@
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		spin_unlock_irqrestore(&free_entries_lock, flags);
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		return NULL;
 	}
 
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 28cb431..0cd5227 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -101,7 +101,7 @@
 #define iterate_and_advance(i, n, v, I, B, K) {			\
 	if (unlikely(i->count < n))				\
 		n = i->count;					\
-	if (n) {						\
+	if (i->count) {						\
 		size_t skip = i->iov_offset;			\
 		if (unlikely(i->type & ITER_BVEC)) {		\
 			const struct bio_vec *bvec;		\
diff --git a/lib/test_hash.c b/lib/test_hash.c
new file mode 100644
index 0000000..c9549c8
--- /dev/null
+++ b/lib/test_hash.c
@@ -0,0 +1,250 @@
+/*
+ * Test cases for <linux/hash.h> and <linux/stringhash.h>
+ * This just verifies that various ways of computing a hash
+ * produce the same thing and, for cases where a k-bit hash
+ * value is requested, is of the requested size.
+ *
+ * We fill a buffer with a 255-byte null-terminated string,
+ * and use both full_name_hash() and hashlen_string() to hash the
+ * substrings from i to j, where 0 <= i < j < 256.
+ *
+ * The returned values are used to check that __hash_32() and
+ * __hash_32_generic() compute the same thing.  Likewise hash_32()
+ * and hash_64().
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/stringhash.h>
+#include <linux/printk.h>
+
+/* 32-bit XORSHIFT generator.  Seed must not be zero. */
+static u32 __init __attribute_const__
+xorshift(u32 seed)
+{
+	seed ^= seed << 13;
+	seed ^= seed >> 17;
+	seed ^= seed << 5;
+	return seed;
+}
+
+/* Given a non-zero x, returns a non-zero byte. */
+static u8 __init __attribute_const__
+mod255(u32 x)
+{
+	x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
+	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x2fd */
+	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x100 */
+	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0xff */
+	return x;
+}
+
+/* Fill the buffer with non-zero bytes. */
+static void __init
+fill_buf(char *buf, size_t len, u32 seed)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		seed = xorshift(seed);
+		buf[i] = mod255(seed);
+	}
+}
+
+/*
+ * Test the various integer hash functions.  h64 (or its low-order bits)
+ * is the integer to hash.  hash_or accumulates the OR of the hash values,
+ * which are later checked to see that they cover all the requested bits.
+ *
+ * Because these functions (as opposed to the string hashes) are all
+ * inline, the code being tested is actually in the module, and you can
+ * recompile and re-test the module without rebooting.
+ */
+static bool __init
+test_int_hash(unsigned long long h64, u32 hash_or[2][33])
+{
+	int k;
+	u32 h0 = (u32)h64, h1, h2;
+
+	/* Test __hash_32 */
+	hash_or[0][0] |= h1 = __hash_32(h0);
+#ifdef HAVE_ARCH__HASH_32
+	hash_or[1][0] |= h2 = __hash_32_generic(h0);
+#if HAVE_ARCH__HASH_32 == 1
+	if (h1 != h2) {
+		pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+			h0, h1, h2);
+		return false;
+	}
+#endif
+#endif
+
+	/* Test k = 1..32 bits */
+	for (k = 1; k <= 32; k++) {
+		u32 const m = ((u32)2 << (k-1)) - 1;	/* Low k bits set */
+
+		/* Test hash_32 */
+		hash_or[0][k] |= h1 = hash_32(h0, k);
+		if (h1 > m) {
+			pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
+			return false;
+		}
+#ifdef HAVE_ARCH_HASH_32
+		h2 = hash_32_generic(h0, k);
+#if HAVE_ARCH_HASH_32 == 1
+		if (h1 != h2) {
+			pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
+				" = %#x", h0, k, h1, h2);
+			return false;
+		}
+#else
+		if (h2 > m) {
+			pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
+				h0, k, h1, m);
+			return false;
+		}
+#endif
+#endif
+		/* Test hash_64 */
+		hash_or[1][k] |= h1 = hash_64(h64, k);
+		if (h1 > m) {
+			pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
+			return false;
+		}
+#ifdef HAVE_ARCH_HASH_64
+		h2 = hash_64_generic(h64, k);
+#if HAVE_ARCH_HASH_64 == 1
+		if (h1 != h2) {
+			pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
+				"= %#x", h64, k, h1, h2);
+			return false;
+		}
+#else
+		if (h2 > m) {
+			pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
+				h64, k, h1, m);
+			return false;
+		}
+#endif
+#endif
+	}
+
+	(void)h2;	/* Suppress unused variable warning */
+	return true;
+}
+
+#define SIZE 256	/* Run time is cubic in SIZE */
+
+static int __init
+test_hash_init(void)
+{
+	char buf[SIZE+1];
+	u32 string_or = 0, hash_or[2][33] = { 0 };
+	unsigned tests = 0;
+	unsigned long long h64 = 0;
+	int i, j;
+
+	fill_buf(buf, SIZE, 1);
+
+	/* Test every possible non-empty substring in the buffer. */
+	for (j = SIZE; j > 0; --j) {
+		buf[j] = '\0';
+
+		for (i = 0; i <= j; i++) {
+			u64 hashlen = hashlen_string(buf+i);
+			u32 h0 = full_name_hash(buf+i, j-i);
+
+			/* Check that hashlen_string gets the length right */
+			if (hashlen_len(hashlen) != j-i) {
+				pr_err("hashlen_string(%d..%d) returned length"
+					" %u, expected %d",
+					i, j, hashlen_len(hashlen), j-i);
+				return -EINVAL;
+			}
+			/* Check that the hashes match */
+			if (hashlen_hash(hashlen) != h0) {
+				pr_err("hashlen_string(%d..%d) = %08x != "
+					"full_name_hash() = %08x",
+					i, j, hashlen_hash(hashlen), h0);
+				return -EINVAL;
+			}
+
+			string_or |= h0;
+			h64 = h64 << 32 | h0;	/* For use with hash_64 */
+			if (!test_int_hash(h64, hash_or))
+				return -EINVAL;
+			tests++;
+		} /* i */
+	} /* j */
+
+	/* The OR of all the hash values should cover all the bits */
+	if (~string_or) {
+		pr_err("OR of all string hash results = %#x != %#x",
+			string_or, -1u);
+		return -EINVAL;
+	}
+	if (~hash_or[0][0]) {
+		pr_err("OR of all __hash_32 results = %#x != %#x",
+			hash_or[0][0], -1u);
+		return -EINVAL;
+	}
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1	/* Test is pointless if results match */
+	if (~hash_or[1][0]) {
+		pr_err("OR of all __hash_32_generic results = %#x != %#x",
+			hash_or[1][0], -1u);
+		return -EINVAL;
+	}
+#endif
+#endif
+
+	/* Likewise for all the i-bit hash values */
+	for (i = 1; i <= 32; i++) {
+		u32 const m = ((u32)2 << (i-1)) - 1;	/* Low i bits set */
+
+		if (hash_or[0][i] != m) {
+			pr_err("OR of all hash_32(%d) results = %#x "
+				"(%#x expected)", i, hash_or[0][i], m);
+			return -EINVAL;
+		}
+		if (hash_or[1][i] != m) {
+			pr_err("OR of all hash_64(%d) results = %#x "
+				"(%#x expected)", i, hash_or[1][i], m);
+			return -EINVAL;
+		}
+	}
+
+	/* Issue notices about skipped tests. */
+#ifndef HAVE_ARCH__HASH_32
+	pr_info("__hash_32() has no arch implementation to test.");
+#elif HAVE_ARCH__HASH_32 != 1
+	pr_info("__hash_32() is arch-specific; not compared to generic.");
+#endif
+#ifndef HAVE_ARCH_HASH_32
+	pr_info("hash_32() has no arch implementation to test.");
+#elif HAVE_ARCH_HASH_32 != 1
+	pr_info("hash_32() is arch-specific; not compared to generic.");
+#endif
+#ifndef HAVE_ARCH_HASH_64
+	pr_info("hash_64() has no arch implementation to test.");
+#elif HAVE_ARCH_HASH_64 != 1
+	pr_info("hash_64() is arch-specific; not compared to generic.");
+#endif
+
+	pr_notice("%u tests passed.", tests);
+
+	return 0;
+}
+
+static void __exit test_hash_exit(void)
+{
+}
+
+module_init(test_hash_init);	/* Does everything */
+module_exit(test_hash_exit);	/* Does nothing */
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644
index 0000000..547d312
--- /dev/null
+++ b/lib/test_uuid.c
@@ -0,0 +1,133 @@
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+	const char *uuid;
+	uuid_le le;
+	uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+	{
+		.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+		.le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+	},
+	{
+		.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+		.le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+	},
+	{
+		.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+		.le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+	},
+};
+
+static const char * const test_uuid_wrong_data[] = {
+	"c33f4995-3701-450e-9fbf206a2e98e576 ",	/* no hyphen(s) */
+	"64b4371c-77c1-48f9-8221-29f054XX023b",	/* invalid character(s) */
+	"0cb4ddff-a545-4401-9d06-688af53e",	/* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+				    const char *data, const char *actual)
+{
+	pr_err("%s test #%u %s %s data: '%s'\n",
+	       prefix,
+	       total_tests,
+	       wrong ? "passed on wrong" : "failed on",
+	       be ? "BE" : "LE",
+	       data);
+	if (actual && *actual)
+		pr_err("%s test #%u actual data: '%s'\n",
+		       prefix,
+		       total_tests,
+		       actual);
+	failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+	uuid_le le;
+	uuid_be be;
+	char buf[48];
+
+	/* LE */
+	total_tests++;
+	if (uuid_le_to_bin(data->uuid, &le))
+		test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+	total_tests++;
+	if (uuid_le_cmp(data->le, le)) {
+		sprintf(buf, "%pUl", &le);
+		test_uuid_failed("cmp", false, false, data->uuid, buf);
+	}
+
+	/* BE */
+	total_tests++;
+	if (uuid_be_to_bin(data->uuid, &be))
+		test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+	total_tests++;
+	if (uuid_be_cmp(data->be, be)) {
+		sprintf(buf, "%pUb", &be);
+		test_uuid_failed("cmp", false, true, data->uuid, buf);
+	}
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+	uuid_le le;
+	uuid_be be;
+
+	/* LE */
+	total_tests++;
+	if (!uuid_le_to_bin(data, &le))
+		test_uuid_failed("negative", true, false, data, NULL);
+
+	/* BE */
+	total_tests++;
+	if (!uuid_be_to_bin(data, &be))
+		test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+		test_uuid_test(&test_uuid_test_data[i]);
+
+	for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+		test_uuid_wrong(test_uuid_wrong_data[i]);
+
+	if (failed_tests == 0)
+		pr_info("all %u tests passed\n", total_tests);
+	else
+		pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+	return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+	/* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/uuid.c b/lib/uuid.c
index e116ae5..37687af 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -106,8 +106,8 @@
 		return -EINVAL;
 
 	for (i = 0; i < 16; i++) {
-		int hi = hex_to_bin(uuid[si[i]] + 0);
-		int lo = hex_to_bin(uuid[si[i]] + 1);
+		int hi = hex_to_bin(uuid[si[i] + 0]);
+		int lo = hex_to_bin(uuid[si[i] + 1]);
 
 		b[ei[i]] = (hi << 4) | lo;
 	}
diff --git a/mm/Kconfig b/mm/Kconfig
index 2664c11..3e2daef 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -648,7 +648,8 @@
 	bool "Defer initialisation of struct pages to kthreads"
 	default n
 	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
-	depends on MEMORY_HOTPLUG
+	depends on NO_BOOTMEM && MEMORY_HOTPLUG
+	depends on !FLATMEM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable
diff --git a/mm/cma.c b/mm/cma.c
index ea506eb..bd0e141 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -183,7 +183,8 @@
 		return -EINVAL;
 
 	/* ensure minimal alignment required by mm core */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	alignment = PAGE_SIZE <<
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
 
 	/* alignment should be aligned with order_per_bit */
 	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -266,8 +267,8 @@
 	 * migratetype page by page allocator's buddy algorithm. In the case,
 	 * you couldn't get a contiguous memory, which is not what we want.
 	 */
-	alignment = max(alignment,
-		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
+			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8024fa..6c707bf 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -126,6 +126,17 @@
 		 */
 		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
 		end_index = (endbyte >> PAGE_SHIFT);
+		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+			/* First page is tricky as 0 - 1 = -1, but pgoff_t
+			 * is unsigned, so the end_index >= start_index
+			 * check below would be true and we'll discard the whole
+			 * file cache which is not what was asked.
+			 */
+			if (end_index == 0)
+				break;
+
+			end_index--;
+		}
 
 		if (end_index >= start_index) {
 			unsigned long count = invalidate_mapping_pages(mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index 9665b1d..00ae878 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -143,13 +143,15 @@
 			return;
 
 	/*
-	 * Track node that only contains shadow entries.
+	 * Track node that only contains shadow entries. DAX mappings contain
+	 * no shadow entries and may contain other exceptional entries so skip
+	 * those.
 	 *
 	 * Avoid acquiring the list_lru lock if already tracked.  The
 	 * list_empty() test is safe as node->private_list is
 	 * protected by mapping->tree_lock.
 	 */
-	if (!workingset_node_pages(node) &&
+	if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
 	    list_empty(&node->private_list)) {
 		node->private_data = mapping;
 		list_lru_add(&workingset_shadow_nodes, &node->private_list);
@@ -580,14 +582,24 @@
 		if (!radix_tree_exceptional_entry(p))
 			return -EEXIST;
 
-		if (WARN_ON(dax_mapping(mapping)))
-			return -EINVAL;
-
-		if (shadowp)
-			*shadowp = p;
 		mapping->nrexceptional--;
-		if (node)
-			workingset_node_shadows_dec(node);
+		if (!dax_mapping(mapping)) {
+			if (shadowp)
+				*shadowp = p;
+			if (node)
+				workingset_node_shadows_dec(node);
+		} else {
+			/* DAX can replace empty locked entry with a hole */
+			WARN_ON_ONCE(p !=
+				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+					 RADIX_DAX_ENTRY_LOCK));
+			/* DAX accounts exceptional entries as normal pages */
+			if (node)
+				workingset_node_pages_dec(node);
+			/* Wakeup waiters for exceptional entry lock */
+			dax_wake_mapping_entry_waiter(mapping, page->index,
+						      false);
+		}
 	}
 	radix_tree_replace_slot(slot, page);
 	mapping->nrpages++;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 41ef754..9ed5853 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1013,6 +1013,7 @@
 	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
 	return VM_FAULT_NOPAGE;
 }
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 949d806..388c2bb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -627,6 +627,7 @@
 {
 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
 }
+EXPORT_SYMBOL_GPL(linear_hugepage_index);
 
 /*
  * Return the size of the pages allocated when backing a VMA. In the majority
@@ -831,8 +832,27 @@
 	 * Only the process that called mmap() has reserves for
 	 * private mappings.
 	 */
-	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-		return true;
+	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+		/*
+		 * Like the shared case above, a hole punch or truncate
+		 * could have been performed on the private mapping.
+		 * Examine the value of chg to determine if reserves
+		 * actually exist or were previously consumed.
+		 * Very Subtle - The value of chg comes from a previous
+		 * call to vma_needs_reserves().  The reserve map for
+		 * private mappings has different (opposite) semantics
+		 * than that of shared mappings.  vma_needs_reserves()
+		 * has already taken this difference in semantics into
+		 * account.  Therefore, the meaning of chg is the same
+		 * as in the shared case above.  Code could easily be
+		 * combined, but keeping it separate draws attention to
+		 * subtle differences.
+		 */
+		if (chg)
+			return false;
+		else
+			return true;
+	}
 
 	return false;
 }
@@ -1815,6 +1835,25 @@
 
 	if (vma->vm_flags & VM_MAYSHARE)
 		return ret;
+	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+		/*
+		 * In most cases, reserves always exist for private mappings.
+		 * However, a file associated with mapping could have been
+		 * hole punched or truncated after reserves were consumed.
+		 * A subsequent fault on such a range will not use reserves.
+		 * Subtle - The reserve map for private mappings has the
+		 * opposite meaning than that of shared mappings.  If NO
+		 * entry is in the reserve map, it means a reservation exists.
+		 * If an entry exists in the reserve map, it means the
+		 * reservation has already been consumed.  As a result, the
+		 * return value of this routine is the opposite of the
+		 * value returned from reserve map manipulation routines above.
+		 */
+		if (ret)
+			return 0;
+		else
+			return 1;
+	}
 	else
 		return ret < 0 ? ret : 0;
 }
diff --git a/mm/internal.h b/mm/internal.h
index f6f3353..a37e5b6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -442,7 +442,7 @@
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
 
-extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
+extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
 
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 18b6a2b..28439ac 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -763,8 +763,8 @@
 
 static int __init kasan_memhotplug_init(void)
 {
-	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-	pr_err("Memory hot-add will be disabled\n");
+	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+	pr_info("Memory hot-add will be disabled\n");
 
 	hotplug_memory_notifier(kasan_mem_notifier, 0);
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 7f7ac51..fb87923 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -77,7 +77,6 @@
 	struct kasan_track track;
 	u32 state : 2;	/* enum kasan_state */
 	u32 alloc_size : 30;
-	u32 reserved;
 };
 
 struct qlist_node {
diff --git a/mm/madvise.c b/mm/madvise.c
index 07427d3..93fb63e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -707,10 +707,12 @@
 		return error;
 
 	write = madvise_need_mmap_write(behavior);
-	if (write)
-		down_write(&current->mm->mmap_sem);
-	else
+	if (write) {
+		if (down_write_killable(&current->mm->mmap_sem))
+			return -EINTR;
+	} else {
 		down_read(&current->mm->mmap_sem);
+	}
 
 	/*
 	 * If the interval [start,end) covers some unmapped address
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b3f16ab..75e7440 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1108,6 +1108,8 @@
 		limit = READ_ONCE(memcg->memsw.limit);
 		if (count <= limit)
 			margin = min(margin, limit - count);
+		else
+			margin = 0;
 	}
 
 	return margin;
@@ -1302,6 +1304,8 @@
 				mem_cgroup_iter_break(memcg, iter);
 				if (chosen)
 					put_task_struct(chosen);
+				/* Set a dummy value to return "true". */
+				chosen = (void *) 1;
 				goto unlock;
 			case OOM_SCAN_OK:
 				break;
@@ -2892,6 +2896,7 @@
 	 * ordering is imposed by list_lru_node->lock taken by
 	 * memcg_drain_all_list_lrus().
 	 */
+	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
 	css_for_each_descendant_pre(css, &memcg->css) {
 		child = mem_cgroup_from_css(css);
 		BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2899,6 +2904,8 @@
 		if (!memcg->use_hierarchy)
 			break;
 	}
+	rcu_read_unlock();
+
 	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
 	memcg_free_cache_id(kmemcg_id);
@@ -4305,24 +4312,6 @@
 	return 0;
 }
 
-/**
- * get_mctgt_type - get target type of moving charge
- * @vma: the vma the pte to be checked belongs
- * @addr: the address corresponding to the pte to be checked
- * @ptent: the pte to be checked
- * @target: the pointer the target page or swap ent will be stored(can be NULL)
- *
- * Returns
- *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
- *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
- *     move charge. if @target is not NULL, the page is stored in target->page
- *     with extra refcnt got(Callers should handle it).
- *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
- *     target for charge migration. if @target is not NULL, the entry is stored
- *     in target->ent.
- *
- * Called with pte lock held.
- */
 union mc_target {
 	struct page	*page;
 	swp_entry_t	ent;
@@ -4511,6 +4500,25 @@
 	return ret;
 }
 
+/**
+ * get_mctgt_type - get target type of moving charge
+ * @vma: the vma the pte to be checked belongs
+ * @addr: the address corresponding to the pte to be checked
+ * @ptent: the pte to be checked
+ * @target: the pointer the target page or swap ent will be stored(can be NULL)
+ *
+ * Returns
+ *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
+ *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
+ *     move charge. if @target is not NULL, the page is stored in target->page
+ *     with extra refcnt got(Callers should handle it).
+ *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
+ *     target for charge migration. if @target is not NULL, the entry is stored
+ *     in target->ent.
+ *
+ * Called with pte lock held.
+ */
+
 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
 		unsigned long addr, pte_t ptent, union mc_target *target)
 {
diff --git a/mm/memory.c b/mm/memory.c
index a1b93d9..15322b7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -63,6 +63,7 @@
 #include <linux/dma-debug.h>
 #include <linux/debugfs.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/dax.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -2492,8 +2493,6 @@
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-
-	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
@@ -2825,7 +2824,8 @@
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 			pgoff_t pgoff, unsigned int flags,
-			struct page *cow_page, struct page **page)
+			struct page *cow_page, struct page **page,
+			void **entry)
 {
 	struct vm_fault vmf;
 	int ret;
@@ -2840,8 +2840,10 @@
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (!vmf.page)
-		goto out;
+	if (ret & VM_FAULT_DAX_LOCKED) {
+		*entry = vmf.entry;
+		return ret;
+	}
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2855,7 +2857,6 @@
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
- out:
 	*page = vmf.page;
 	return ret;
 }
@@ -3048,7 +3049,7 @@
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3071,6 +3072,7 @@
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page, *new_page;
+	void *fault_entry;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
@@ -3088,26 +3090,24 @@
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page,
+			 &fault_entry);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	if (fault_page)
+	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(new_page, fault_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
-		if (fault_page) {
+		if (!(ret & VM_FAULT_DAX_LOCKED)) {
 			unlock_page(fault_page);
 			put_page(fault_page);
 		} else {
-			/*
-			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for read to protect against truncate.
-			 */
-			i_mmap_unlock_read(vma->vm_file->f_mapping);
+			dax_unlock_mapping_entry(vma->vm_file->f_mapping,
+						 pgoff);
 		}
 		goto uncharge_out;
 	}
@@ -3115,15 +3115,11 @@
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
-	if (fault_page) {
+	if (!(ret & VM_FAULT_DAX_LOCKED)) {
 		unlock_page(fault_page);
 		put_page(fault_page);
 	} else {
-		/*
-		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for read to protect against truncate.
-		 */
-		i_mmap_unlock_read(vma->vm_file->f_mapping);
+		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
 	}
 	return ret;
 uncharge_out:
@@ -3143,7 +3139,7 @@
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index caf2a14..e3cbdca 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -263,7 +263,7 @@
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
-void register_page_bootmem_info_node(struct pglist_data *pgdat)
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 {
 	unsigned long i, pfn, end_pfn, nr_pages;
 	int node = pgdat->node_id;
@@ -300,7 +300,7 @@
 		 * multiple nodes we check that this pfn does not already
 		 * reside in some other nodes.
 		 */
-		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
 			register_page_bootmem_info_section(pfn);
 	}
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index 96f0010..ef8dc9f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -617,7 +617,7 @@
 	return error;
 }
 
-static int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
+static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
 {
 	unsigned long locked;
 	unsigned long lock_limit;
@@ -635,7 +635,8 @@
 	lock_limit >>= PAGE_SHIFT;
 	locked = len >> PAGE_SHIFT;
 
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem))
+		return -EINTR;
 
 	locked += current->mm->locked_vm;
 
@@ -678,7 +679,8 @@
 	len = PAGE_ALIGN(len + (offset_in_page(start)));
 	start &= PAGE_MASK;
 
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem))
+		return -EINTR;
 	ret = apply_vma_lock_flags(start, len, 0);
 	up_write(&current->mm->mmap_sem);
 
@@ -748,9 +750,10 @@
 	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
 
-	ret = -ENOMEM;
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem))
+		return -EINTR;
 
+	ret = -ENOMEM;
 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
 	    capable(CAP_IPC_LOCK))
 		ret = apply_mlockall_flags(flags);
@@ -765,7 +768,8 @@
 {
 	int ret;
 
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem))
+		return -EINTR;
 	ret = apply_mlockall_flags(0);
 	up_write(&current->mm->mmap_sem);
 	return ret;
diff --git a/mm/mmap.c b/mm/mmap.c
index b9274a0..de2c176 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -168,7 +168,7 @@
 	return next;
 }
 
-static unsigned long do_brk(unsigned long addr, unsigned long len);
+static int do_brk(unsigned long addr, unsigned long len);
 
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
@@ -178,7 +178,8 @@
 	unsigned long min_brk;
 	bool populate;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 #ifdef CONFIG_COMPAT_BRK
 	/*
@@ -223,7 +224,7 @@
 		goto out;
 
 	/* Ok, looks good - let it rip. */
-	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+	if (do_brk(oldbrk, newbrk-oldbrk) < 0)
 		goto out;
 
 set_brk:
@@ -2493,7 +2494,9 @@
 	int ret;
 	struct mm_struct *mm = current->mm;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	ret = do_munmap(mm, start, len);
 	up_write(&mm->mmap_sem);
 	return ret;
@@ -2502,8 +2505,15 @@
 
 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
+	int ret;
+	struct mm_struct *mm = current->mm;
+
 	profile_munmap(addr);
-	return vm_munmap(addr, len);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+	ret = do_munmap(mm, addr, len);
+	up_write(&mm->mmap_sem);
+	return ret;
 }
 
 
@@ -2535,7 +2545,9 @@
 	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
 		return ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	vma = find_vma(mm, start);
 
 	if (!vma || !(vma->vm_flags & VM_SHARED))
@@ -2613,7 +2625,7 @@
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static unsigned long do_brk(unsigned long addr, unsigned long len)
+static int do_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
@@ -2624,7 +2636,7 @@
 
 	len = PAGE_ALIGN(len);
 	if (!len)
-		return addr;
+		return 0;
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
@@ -2691,20 +2703,22 @@
 	if (flags & VM_LOCKED)
 		mm->locked_vm += (len >> PAGE_SHIFT);
 	vma->vm_flags |= VM_SOFTDIRTY;
-	return addr;
+	return 0;
 }
 
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long ret;
+	int ret;
 	bool populate;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	ret = do_brk(addr, len);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	up_write(&mm->mmap_sem);
-	if (populate)
+	if (populate && !ret)
 		mm_populate(addr, len);
 	return ret;
 }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index b650c54..5019a1e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -379,7 +379,8 @@
 
 	reqprot = prot;
 
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem))
+		return -EINTR;
 
 	vma = find_vma(current->mm, start);
 	error = -ENOMEM;
diff --git a/mm/mremap.c b/mm/mremap.c
index 9dc4999..1f157ad 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -503,7 +503,8 @@
 	if (!new_len)
 		return ret;
 
-	down_write(&current->mm->mmap_sem);
+	if (down_write_killable(&current->mm->mmap_sem))
+		return -EINTR;
 
 	if (flags & MREMAP_FIXED) {
 		ret = mremap_to(addr, old_len, new_addr, new_len,
diff --git a/mm/nommu.c b/mm/nommu.c
index c8bd59a..c2e588802 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1682,7 +1682,7 @@
 	}
 }
 
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long len)
 {
 	return -ENOMEM;
 }
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5bb2f76..acbc432 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -443,13 +443,29 @@
 {
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
+	struct mm_struct *mm = NULL;
 	struct task_struct *p;
 	struct zap_details details = {.check_swap_entries = true,
 				      .ignore_dirty = true};
 	bool ret = true;
 
 	/*
+	 * We have to make sure to not race with the victim exit path
+	 * and cause premature new oom victim selection:
+	 * __oom_reap_task		exit_mm
+	 *   atomic_inc_not_zero
+	 *				  mmput
+	 *				    atomic_dec_and_test
+	 *				  exit_oom_victim
+	 *				[...]
+	 *				out_of_memory
+	 *				  select_bad_process
+	 *				    # no TIF_MEMDIE task selects new victim
+	 *  unmap_page_range # frees some memory
+	 */
+	mutex_lock(&oom_lock);
+
+	/*
 	 * Make sure we find the associated mm_struct even when the particular
 	 * thread has already terminated and cleared its mm.
 	 * We might have race with exit path so consider our work done if there
@@ -457,19 +473,19 @@
 	 */
 	p = find_lock_task_mm(tsk);
 	if (!p)
-		return true;
+		goto unlock_oom;
 
 	mm = p->mm;
 	if (!atomic_inc_not_zero(&mm->mm_users)) {
 		task_unlock(p);
-		return true;
+		goto unlock_oom;
 	}
 
 	task_unlock(p);
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		ret = false;
-		goto out;
+		goto unlock_oom;
 	}
 
 	tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -511,13 +527,15 @@
 	 * to release its memory.
 	 */
 	set_bit(MMF_OOM_REAPED, &mm->flags);
-out:
+unlock_oom:
+	mutex_unlock(&oom_lock);
 	/*
 	 * Drop our reference but make sure the mmput slow path is called from a
 	 * different context because we shouldn't risk we get stuck there and
 	 * put the oom_reaper out of the way.
 	 */
-	mmput_async(mm);
+	if (mm)
+		mmput_async(mm);
 	return ret;
 }
 
@@ -607,12 +625,8 @@
 	if (atomic_read(&mm->mm_users) > 1) {
 		rcu_read_lock();
 		for_each_process(p) {
-			bool exiting;
-
 			if (!process_shares_mm(p, mm))
 				continue;
-			if (same_thread_group(p, tsk))
-				continue;
 			if (fatal_signal_pending(p))
 				continue;
 
@@ -620,10 +634,7 @@
 			 * If the task is exiting make sure the whole thread group
 			 * is exiting and cannot access mm anymore.
 			 */
-			spin_lock_irq(&p->sighand->siglock);
-			exiting = signal_group_exit(p->signal);
-			spin_unlock_irq(&p->sighand->siglock);
-			if (exiting)
+			if (signal_group_exit(p->signal))
 				continue;
 
 			/* Give up */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc..6903b69 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -656,6 +656,9 @@
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
@@ -2609,11 +2615,12 @@
 				page = list_last_entry(list, struct page, lru);
 			else
 				page = list_first_entry(list, struct page, lru);
-		} while (page && check_new_pcp(page));
 
-		__dec_zone_state(zone, NR_ALLOC_BATCH);
-		list_del(&page->lru);
-		pcp->count--;
+			__dec_zone_state(zone, NR_ALLOC_BATCH);
+			list_del(&page->lru);
+			pcp->count--;
+
+		} while (check_new_pcp(page));
 	} else {
 		/*
 		 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@
 		apply_fair = false;
 		fair_skipped = false;
 		reset_alloc_batches(ac->preferred_zoneref->zone);
+		z = ac->preferred_zoneref;
 		goto zonelist_scan;
 	}
 
@@ -3596,6 +3604,17 @@
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Reset the zonelist iterators if memory policies can be ignored.
+	 * These allocations are high priority and system rather than user
+	 * orientated.
+	 */
+	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	}
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@
 
 	/* Allocate without watermarks if the context allows */
 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
-		/*
-		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-		 * the allocation is high priority and these type of
-		 * allocations are system rather than user orientated
-		 */
-		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)
@@ -3808,7 +3821,11 @@
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-	/* The preferred zone is used for statistics later */
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
 	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 					ac.high_zoneidx, ac.nodemask);
 	if (!ac.preferred_zoneref) {
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2d864e6..44a4c02 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -390,8 +390,10 @@
 			 * We know some arch can have a nodes layout such as
 			 * -------------pfn-------------->
 			 * N0 | N1 | N2 | N0 | N1 | N2|....
+			 *
+			 * Take into account DEFERRED_STRUCT_PAGE_INIT.
 			 */
-			if (pfn_to_nid(pfn) != nid)
+			if (early_pfn_to_nid(pfn) != nid)
 				continue;
 			if (init_section_page_ext(pfn, nid))
 				goto oom;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 792b56d..c6cda3e 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -55,6 +55,8 @@
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -62,6 +64,7 @@
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -82,6 +88,8 @@
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 
 	page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp,
+		 * so return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@
 	struct page_ext *new_ext = lookup_page_ext(newpage);
 	int i;
 
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@
 	gfp_t gfp_mask = page_ext->gfp_mask;
 	int mt = gfpflags_to_migratetype(gfp_mask);
 
+	if (unlikely(!page_ext)) {
+		pr_alert("There is no page extension available.\n");
+		return;
+	}
+
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
@@ -251,6 +273,8 @@
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@
 				continue;
 
 			page_ext = lookup_page_ext(page);
+			if (unlikely(!page_ext))
+				continue;
 
 			/* Maybe overraping zone */
 			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 1eae5fa..2e647c6 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -54,6 +54,9 @@
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
-	if (!page_ext)
+	if (unlikely(!page_ext))
 		return false;
 
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
diff --git a/mm/rmap.c b/mm/rmap.c
index 8a83993..0ea5d90 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1098,6 +1098,8 @@
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_VMA(!anon_vma, vma);
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
+		address &= HPAGE_PMD_MASK;
 	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
diff --git a/mm/shmem.c b/mm/shmem.c
index e418a99..a361449 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2645,10 +2645,11 @@
 }
 
 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   const void *value, size_t size, int flags)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, const void *value,
+				   size_t size, int flags)
 {
-	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
+	struct shmem_inode_info *info = SHMEM_I(inode);
 
 	name = xattr_full_name(handler, name);
 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
diff --git a/mm/swap.c b/mm/swap.c
index 9591614..59f5faf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -667,6 +667,24 @@
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+	lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+	if (WARN(!lru_add_drain_wq,
+		"Failed to create workqueue lru_add_drain_wq"))
+		return -ENOMEM;
+
+	return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
 	static DEFINE_MUTEX(lock);
@@ -686,7 +704,7 @@
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
+			queue_work_on(cpu, lru_add_drain_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
 		}
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0d457e7..c99463a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -252,7 +252,10 @@
 void free_page_and_swap_cache(struct page *page)
 {
 	free_swap_cache(page);
-	put_page(page);
+	if (is_huge_zero_page(page))
+		put_huge_zero_page();
+	else
+		put_page(page);
 }
 
 /*
diff --git a/mm/truncate.c b/mm/truncate.c
index b002728..4064f8f 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -34,40 +34,38 @@
 	if (shmem_mapping(mapping))
 		return;
 
-	spin_lock_irq(&mapping->tree_lock);
-
 	if (dax_mapping(mapping)) {
-		if (radix_tree_delete_item(&mapping->page_tree, index, entry))
-			mapping->nrexceptional--;
-	} else {
-		/*
-		 * Regular page slots are stabilized by the page lock even
-		 * without the tree itself locked.  These unlocked entries
-		 * need verification under the tree lock.
-		 */
-		if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
-					&slot))
-			goto unlock;
-		if (*slot != entry)
-			goto unlock;
-		radix_tree_replace_slot(slot, NULL);
-		mapping->nrexceptional--;
-		if (!node)
-			goto unlock;
-		workingset_node_shadows_dec(node);
-		/*
-		 * Don't track node without shadow entries.
-		 *
-		 * Avoid acquiring the list_lru lock if already untracked.
-		 * The list_empty() test is safe as node->private_list is
-		 * protected by mapping->tree_lock.
-		 */
-		if (!workingset_node_shadows(node) &&
-		    !list_empty(&node->private_list))
-			list_lru_del(&workingset_shadow_nodes,
-					&node->private_list);
-		__radix_tree_delete_node(&mapping->page_tree, node);
+		dax_delete_mapping_entry(mapping, index);
+		return;
 	}
+	spin_lock_irq(&mapping->tree_lock);
+	/*
+	 * Regular page slots are stabilized by the page lock even
+	 * without the tree itself locked.  These unlocked entries
+	 * need verification under the tree lock.
+	 */
+	if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
+				&slot))
+		goto unlock;
+	if (*slot != entry)
+		goto unlock;
+	radix_tree_replace_slot(slot, NULL);
+	mapping->nrexceptional--;
+	if (!node)
+		goto unlock;
+	workingset_node_shadows_dec(node);
+	/*
+	 * Don't track node without shadow entries.
+	 *
+	 * Avoid acquiring the list_lru lock if already untracked.
+	 * The list_empty() test is safe as node->private_list is
+	 * protected by mapping->tree_lock.
+	 */
+	if (!workingset_node_shadows(node) &&
+	    !list_empty(&node->private_list))
+		list_lru_del(&workingset_shadow_nodes,
+				&node->private_list);
+	__radix_tree_delete_node(&mapping->page_tree, node);
 unlock:
 	spin_unlock_irq(&mapping->tree_lock);
 }
diff --git a/mm/util.c b/mm/util.c
index 8a1b3a1..917e0e3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -297,7 +297,8 @@
 
 	ret = security_mmap_file(file, prot, flag);
 	if (!ret) {
-		down_write(&mm->mmap_sem);
+		if (down_write_killable(&mm->mmap_sem))
+			return -EINTR;
 		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
 				    &populate);
 		up_write(&mm->mmap_sem);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6e32918..e11475c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -21,6 +21,7 @@
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/list.h>
+#include <linux/notifier.h>
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
@@ -343,6 +344,8 @@
 
 static void purge_vmap_area_lazy(void);
 
+static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -362,6 +365,8 @@
 	BUG_ON(offset_in_page(size));
 	BUG_ON(!is_power_of_2(align));
 
+	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
@@ -467,6 +472,16 @@
 		purged = 1;
 		goto retry;
 	}
+
+	if (gfpflags_allow_blocking(gfp_mask)) {
+		unsigned long freed = 0;
+		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
+		if (freed > 0) {
+			purged = 0;
+			goto retry;
+		}
+	}
+
 	if (printk_ratelimit())
 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
 			size);
@@ -474,6 +489,18 @@
 	return ERR_PTR(-EBUSY);
 }
 
+int register_vmap_purge_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&vmap_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
+
+int unregister_vmap_purge_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
+
 static void __free_vmap_area(struct vmap_area *va)
 {
 	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
@@ -1078,7 +1105,7 @@
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
 
 	BUG_ON(!addr);
@@ -1113,7 +1140,7 @@
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr;
 	void *mem;
 
@@ -1547,14 +1574,15 @@
 		unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long size;		/* In bytes */
 
 	might_sleep();
 
 	if (count > totalram_pages)
 		return NULL;
 
-	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-					__builtin_return_address(0));
+	size = (unsigned long)count << PAGE_SHIFT;
+	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
 	if (!area)
 		return NULL;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77e42ef..cb2a67b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1061,6 +1061,8 @@
 				continue;
 
 			page_ext = lookup_page_ext(page);
+			if (unlikely(!page_ext))
+				continue;
 
 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 				continue;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 34917d5..8f9e89c 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -412,7 +412,7 @@
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
-		bud = (handle - zhdr->first_num) & BUDDY_MASK;
+		bud = handle_to_buddy(handle);
 
 		switch (bud) {
 		case FIRST:
@@ -572,15 +572,19 @@
 			pool->pages_nr--;
 			spin_unlock(&pool->lock);
 			return 0;
-		} else if (zhdr->first_chunks != 0 &&
-			   zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-			/* Full, add to buddied list */
-			list_add(&zhdr->buddy, &pool->buddied);
-		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-			z3fold_compact_page(zhdr);
-			/* add to unbuddied list */
-			freechunks = num_free_chunks(zhdr);
-			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+			if (zhdr->first_chunks != 0 &&
+			    zhdr->last_chunks != 0 &&
+			    zhdr->middle_chunks != 0) {
+				/* Full, add to buddied list */
+				list_add(&zhdr->buddy, &pool->buddied);
+			} else {
+				z3fold_compact_page(zhdr);
+				/* add to unbuddied list */
+				freechunks = num_free_chunks(zhdr);
+				list_add(&zhdr->buddy,
+					 &pool->unbuddied[freechunks]);
+			}
 		}
 
 		/* add to beginning of LRU */
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 72698db..b6d4f25 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -45,6 +45,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -483,16 +485,16 @@
 
 #ifdef CONFIG_ZSMALLOC_STAT
 
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-	if (!debugfs_initialized())
-		return -ENODEV;
+	if (!debugfs_initialized()) {
+		pr_warn("debugfs not available, stat dir not created\n");
+		return;
+	}
 
 	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
 	if (!zs_stat_root)
-		return -ENOMEM;
-
-	return 0;
+		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
 }
 
 static void __exit zs_stat_exit(void)
@@ -577,8 +579,10 @@
 {
 	struct dentry *entry;
 
-	if (!zs_stat_root)
+	if (!zs_stat_root) {
+		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
 		return;
+	}
 
 	entry = debugfs_create_dir(name, zs_stat_root);
 	if (!entry) {
@@ -592,7 +596,8 @@
 	if (!entry) {
 		pr_warn("%s: debugfs file entry <%s> creation failed\n",
 				name, "classes");
-		return;
+		debugfs_remove_recursive(pool->stat_dentry);
+		pool->stat_dentry = NULL;
 	}
 }
 
@@ -602,9 +607,8 @@
 }
 
 #else /* CONFIG_ZSMALLOC_STAT */
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-	return 0;
 }
 
 static void __exit zs_stat_exit(void)
@@ -2011,17 +2015,10 @@
 	zpool_register_driver(&zs_zpool_driver);
 #endif
 
-	ret = zs_stat_init();
-	if (ret) {
-		pr_err("zs stat initialization failed\n");
-		goto stat_fail;
-	}
+	zs_stat_init();
+
 	return 0;
 
-stat_fail:
-#ifdef CONFIG_ZPOOL
-	zpool_unregister_driver(&zs_zpool_driver);
-#endif
 notifier_fail:
 	zs_unregister_cpu_notifier();
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a1e273a..82a116b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -290,6 +290,10 @@
 	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
 		return;
 
+	/* vlan continues to inherit address of lower device */
+	if (vlan_dev_inherit_address(vlandev, dev))
+		goto out;
+
 	/* vlan address was different from the old address and is equal to
 	 * the new address */
 	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -302,6 +306,7 @@
 	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_add(dev, vlandev->dev_addr);
 
+out:
 	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09..cc15579 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e7e6257..86ae75b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -245,6 +245,17 @@
 	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -255,7 +266,8 @@
 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
 		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
@@ -560,8 +572,10 @@
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
 
-	if (is_zero_ether_addr(dev->dev_addr))
-		eth_hw_addr_inherit(dev, real_dev);
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
diff --git a/net/9p/client.c b/net/9p/client.c
index ea79ee9..3fc94a4 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -518,10 +518,10 @@
 		if (err)
 			goto out_err;
 
-		if (p9_is_proto_dotu(c))
+		if (p9_is_proto_dotu(c) && ecode < 512)
 			err = -ecode;
 
-		if (!err || !IS_ERR_VALUE(err)) {
+		if (!err) {
 			err = p9_errstr2errno(ename, strlen(ename));
 
 			p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
@@ -605,10 +605,10 @@
 		if (err)
 			goto out_err;
 
-		if (p9_is_proto_dotu(c))
+		if (p9_is_proto_dotu(c) && ecode < 512)
 			err = -ecode;
 
-		if (!err || !IS_ERR_VALUE(err)) {
+		if (!err) {
 			err = p9_errstr2errno(ename, strlen(ename));
 
 			p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fbd0acf..2fdebab 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -976,7 +976,8 @@
 			release_sock(sk);
 			ax25_disconnect(ax25, 0);
 			lock_sock(sk);
-			ax25_destroy_socket(ax25);
+			if (!sock_flag(ax25->sk, SOCK_DESTROY))
+				ax25_destroy_socket(ax25);
 			break;
 
 		case AX25_STATE_3:
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 951cd57..5237dff 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -102,6 +102,7 @@
 	switch (ax25->state) {
 
 	case AX25_STATE_0:
+	case AX25_STATE_2:
 		/* Magic here: If we listen() and a new link dies before it
 		   is accepted() it isn't 'dead' so doesn't get removed. */
 		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -111,6 +112,7 @@
 				sock_hold(sk);
 				ax25_destroy_socket(ax25);
 				bh_unlock_sock(sk);
+				/* Drop the reference taken above so the socket can be freed */
 				sock_put(sk);
 			} else
 				ax25_destroy_socket(ax25);
@@ -213,7 +215,8 @@
 	case AX25_STATE_2:
 		if (ax25->n2count == ax25->n2) {
 			ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-			ax25_disconnect(ax25, ETIMEDOUT);
+			if (!sock_flag(ax25->sk, SOCK_DESTROY))
+				ax25_disconnect(ax25, ETIMEDOUT);
 			return;
 		} else {
 			ax25->n2count++;
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 004467c9..2c0d6ef 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -38,6 +38,7 @@
 
 	switch (ax25->state) {
 	case AX25_STATE_0:
+	case AX25_STATE_2:
 		/* Magic here: If we listen() and a new link dies before it
 		   is accepted() it isn't 'dead' so doesn't get removed. */
 		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -47,6 +48,7 @@
 				sock_hold(sk);
 				ax25_destroy_socket(ax25);
 				bh_unlock_sock(sk);
+				/* Drop the reference taken above so the socket can be freed */
 				sock_put(sk);
 			} else
 				ax25_destroy_socket(ax25);
@@ -144,7 +146,8 @@
 	case AX25_STATE_2:
 		if (ax25->n2count == ax25->n2) {
 			ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-			ax25_disconnect(ax25, ETIMEDOUT);
+			if (!sock_flag(ax25->sk, SOCK_DESTROY))
+				ax25_disconnect(ax25, ETIMEDOUT);
 			return;
 		} else {
 			ax25->n2count++;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 3b78e84..655a7d4 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,8 @@
 {
 	ax25_clear_queues(ax25);
 
-	ax25_stop_heartbeat(ax25);
+	if (!sock_flag(ax25->sk, SOCK_DESTROY))
+		ax25_stop_heartbeat(ax25);
 	ax25_stop_t1timer(ax25);
 	ax25_stop_t2timer(ax25);
 	ax25_stop_t3timer(ax25);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index dcea4f4..c18080a 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -279,6 +279,8 @@
 	 * change from under us.
 	 */
 	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
 		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete_local(br, NULL, f);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1607977..43d2cd8 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -213,8 +213,7 @@
 }
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);
 
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 	u16 vid = 0;
@@ -222,6 +221,14 @@
 	/* check if vlan is allowed, to avoid spoofing */
 	if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+	__br_handle_local_finish(skb);
 
 	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
 	br_pass_frame_up(skb);
@@ -274,7 +281,9 @@
 			if (p->br->stp_enabled == BR_NO_STP ||
 			    fwd_mask & (1u << dest[5]))
 				goto forward;
-			break;
+			*pskb = skb;
+			__br_handle_local_finish(skb);
+			return RX_HANDLER_PASS;
 
 		case 0x01:	/* IEEE MAC (Pause) */
 			goto drop;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index dcc18c6..55d2bfe 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -651,7 +651,7 @@
 /*
  * true if we have the mon map (and have thus joined the cluster)
  */
-static int have_mon_and_osd_map(struct ceph_client *client)
+static bool have_mon_and_osd_map(struct ceph_client *client)
 {
 	return client->monc.monmap && client->monc.monmap->epoch &&
 	       client->osdc.osdmap && client->osdc.osdmap->epoch;
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
index 139a9cb..3773a4f 100644
--- a/net/ceph/ceph_strings.c
+++ b/net/ceph/ceph_strings.c
@@ -27,6 +27,22 @@
 	}
 }
 
+const char *ceph_osd_watch_op_name(int o)
+{
+	switch (o) {
+	case CEPH_OSD_WATCH_OP_UNWATCH:
+		return "unwatch";
+	case CEPH_OSD_WATCH_OP_WATCH:
+		return "watch";
+	case CEPH_OSD_WATCH_OP_RECONNECT:
+		return "reconnect";
+	case CEPH_OSD_WATCH_OP_PING:
+		return "ping";
+	default:
+		return "???";
+	}
+}
+
 const char *ceph_osd_state_name(int s)
 {
 	switch (s) {
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index b902fbc..e77b04c 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -54,24 +54,25 @@
 {
 	int i;
 	struct ceph_client *client = s->private;
-	struct ceph_osdmap *map = client->osdc.osdmap;
+	struct ceph_osd_client *osdc = &client->osdc;
+	struct ceph_osdmap *map = osdc->osdmap;
 	struct rb_node *n;
 
 	if (map == NULL)
 		return 0;
 
-	seq_printf(s, "epoch %d\n", map->epoch);
-	seq_printf(s, "flags%s%s\n",
-		   (map->flags & CEPH_OSDMAP_NEARFULL) ?  " NEARFULL" : "",
-		   (map->flags & CEPH_OSDMAP_FULL) ?  " FULL" : "");
+	down_read(&osdc->lock);
+	seq_printf(s, "epoch %d flags 0x%x\n", map->epoch, map->flags);
 
 	for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
-		struct ceph_pg_pool_info *pool =
+		struct ceph_pg_pool_info *pi =
 			rb_entry(n, struct ceph_pg_pool_info, node);
 
-		seq_printf(s, "pool %lld pg_num %u (%d) read_tier %lld write_tier %lld\n",
-			   pool->id, pool->pg_num, pool->pg_num_mask,
-			   pool->read_tier, pool->write_tier);
+		seq_printf(s, "pool %lld '%s' type %d size %d min_size %d pg_num %u pg_num_mask %d flags 0x%llx lfor %u read_tier %lld write_tier %lld\n",
+			   pi->id, pi->name, pi->type, pi->size, pi->min_size,
+			   pi->pg_num, pi->pg_num_mask, pi->flags,
+			   pi->last_force_request_resend, pi->read_tier,
+			   pi->write_tier);
 	}
 	for (i = 0; i < map->max_osd; i++) {
 		struct ceph_entity_addr *addr = &map->osd_addr[i];
@@ -103,6 +104,7 @@
 			   pg->pgid.seed, pg->primary_temp.osd);
 	}
 
+	up_read(&osdc->lock);
 	return 0;
 }
 
@@ -126,6 +128,7 @@
 					CEPH_SUBSCRIBE_ONETIME ?  "" : "+"));
 		seq_putc(s, '\n');
 	}
+	seq_printf(s, "fs_cluster_id %d\n", monc->fs_cluster_id);
 
 	for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
 		__u16 op;
@@ -143,43 +146,113 @@
 	return 0;
 }
 
+static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
+{
+	int i;
+
+	seq_printf(s, "osd%d\t%llu.%x\t[", t->osd, t->pgid.pool, t->pgid.seed);
+	for (i = 0; i < t->up.size; i++)
+		seq_printf(s, "%s%d", (!i ? "" : ","), t->up.osds[i]);
+	seq_printf(s, "]/%d\t[", t->up.primary);
+	for (i = 0; i < t->acting.size; i++)
+		seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
+	seq_printf(s, "]/%d\t%*pE\t0x%x", t->acting.primary,
+		   t->target_oid.name_len, t->target_oid.name, t->flags);
+	if (t->paused)
+		seq_puts(s, "\tP");
+}
+
+static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
+{
+	int i;
+
+	seq_printf(s, "%llu\t", req->r_tid);
+	dump_target(s, &req->r_t);
+
+	seq_printf(s, "\t%d\t%u'%llu", req->r_attempts,
+		   le32_to_cpu(req->r_replay_version.epoch),
+		   le64_to_cpu(req->r_replay_version.version));
+
+	for (i = 0; i < req->r_num_ops; i++) {
+		struct ceph_osd_req_op *op = &req->r_ops[i];
+
+		seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
+			   ceph_osd_op_name(op->op));
+		if (op->op == CEPH_OSD_OP_WATCH)
+			seq_printf(s, "-%s",
+				   ceph_osd_watch_op_name(op->watch.op));
+	}
+
+	seq_putc(s, '\n');
+}
+
+static void dump_requests(struct seq_file *s, struct ceph_osd *osd)
+{
+	struct rb_node *n;
+
+	mutex_lock(&osd->lock);
+	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
+		struct ceph_osd_request *req =
+		    rb_entry(n, struct ceph_osd_request, r_node);
+
+		dump_request(s, req);
+	}
+
+	mutex_unlock(&osd->lock);
+}
+
+static void dump_linger_request(struct seq_file *s,
+				struct ceph_osd_linger_request *lreq)
+{
+	seq_printf(s, "%llu\t", lreq->linger_id);
+	dump_target(s, &lreq->t);
+
+	seq_printf(s, "\t%u\t%s%s/%d\n", lreq->register_gen,
+		   lreq->is_watch ? "W" : "N", lreq->committed ? "C" : "",
+		   lreq->last_error);
+}
+
+static void dump_linger_requests(struct seq_file *s, struct ceph_osd *osd)
+{
+	struct rb_node *n;
+
+	mutex_lock(&osd->lock);
+	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
+		struct ceph_osd_linger_request *lreq =
+		    rb_entry(n, struct ceph_osd_linger_request, node);
+
+		dump_linger_request(s, lreq);
+	}
+
+	mutex_unlock(&osd->lock);
+}
+
 static int osdc_show(struct seq_file *s, void *pp)
 {
 	struct ceph_client *client = s->private;
 	struct ceph_osd_client *osdc = &client->osdc;
-	struct rb_node *p;
+	struct rb_node *n;
 
-	mutex_lock(&osdc->request_mutex);
-	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
-		struct ceph_osd_request *req;
-		unsigned int i;
-		int opcode;
+	down_read(&osdc->lock);
+	seq_printf(s, "REQUESTS %d homeless %d\n",
+		   atomic_read(&osdc->num_requests),
+		   atomic_read(&osdc->num_homeless));
+	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
 
-		req = rb_entry(p, struct ceph_osd_request, r_node);
-
-		seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
-			   req->r_osd ? req->r_osd->o_osd : -1,
-			   req->r_pgid.pool, req->r_pgid.seed);
-
-		seq_printf(s, "%.*s", req->r_base_oid.name_len,
-			   req->r_base_oid.name);
-
-		if (req->r_reassert_version.epoch)
-			seq_printf(s, "\t%u'%llu",
-			   (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
-			   le64_to_cpu(req->r_reassert_version.version));
-		else
-			seq_printf(s, "\t");
-
-		for (i = 0; i < req->r_num_ops; i++) {
-			opcode = req->r_ops[i].op;
-			seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
-				   ceph_osd_op_name(opcode));
-		}
-
-		seq_printf(s, "\n");
+		dump_requests(s, osd);
 	}
-	mutex_unlock(&osdc->request_mutex);
+	dump_requests(s, &osdc->homeless_osd);
+
+	seq_puts(s, "LINGER REQUESTS\n");
+	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+
+		dump_linger_requests(s, osd);
+	}
+	dump_linger_requests(s, &osdc->homeless_osd);
+
+	up_read(&osdc->lock);
 	return 0;
 }
 
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index cf638c0..37c38a7 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -260,20 +260,26 @@
 	BUG_ON(num < 1); /* monmap sub is always there */
 	ceph_encode_32(&p, num);
 	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
-		const char *s = ceph_sub_str[i];
+		char buf[32];
+		int len;
 
 		if (!monc->subs[i].want)
 			continue;
 
-		dout("%s %s start %llu flags 0x%x\n", __func__, s,
+		len = sprintf(buf, "%s", ceph_sub_str[i]);
+		if (i == CEPH_SUB_MDSMAP &&
+		    monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
+			len += sprintf(buf + len, ".%d", monc->fs_cluster_id);
+
+		dout("%s %s start %llu flags 0x%x\n", __func__, buf,
 		     le64_to_cpu(monc->subs[i].item.start),
 		     monc->subs[i].item.flags);
-		ceph_encode_string(&p, end, s, strlen(s));
+		ceph_encode_string(&p, end, buf, len);
 		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
 		p += sizeof(monc->subs[i].item);
 	}
 
-	BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
+	BUG_ON(p > end);
 	msg->front.iov_len = p - msg->front.iov_base;
 	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
 	ceph_msg_revoke(msg);
@@ -376,19 +382,13 @@
 }
 EXPORT_SYMBOL(ceph_monc_got_map);
 
-/*
- * Register interest in the next osdmap
- */
-void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
+void ceph_monc_renew_subs(struct ceph_mon_client *monc)
 {
-	dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
 	mutex_lock(&monc->mutex);
-	if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
-				 monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
-		__send_subscribe(monc);
+	__send_subscribe(monc);
 	mutex_unlock(&monc->mutex);
 }
-EXPORT_SYMBOL(ceph_monc_request_next_osdmap);
+EXPORT_SYMBOL(ceph_monc_renew_subs);
 
 /*
  * Wait for an osdmap with a given epoch.
@@ -478,51 +478,17 @@
 /*
  * generic requests (currently statfs, mon_get_version)
  */
-static struct ceph_mon_generic_request *__lookup_generic_req(
-	struct ceph_mon_client *monc, u64 tid)
-{
-	struct ceph_mon_generic_request *req;
-	struct rb_node *n = monc->generic_request_tree.rb_node;
-
-	while (n) {
-		req = rb_entry(n, struct ceph_mon_generic_request, node);
-		if (tid < req->tid)
-			n = n->rb_left;
-		else if (tid > req->tid)
-			n = n->rb_right;
-		else
-			return req;
-	}
-	return NULL;
-}
-
-static void __insert_generic_request(struct ceph_mon_client *monc,
-			    struct ceph_mon_generic_request *new)
-{
-	struct rb_node **p = &monc->generic_request_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct ceph_mon_generic_request *req = NULL;
-
-	while (*p) {
-		parent = *p;
-		req = rb_entry(parent, struct ceph_mon_generic_request, node);
-		if (new->tid < req->tid)
-			p = &(*p)->rb_left;
-		else if (new->tid > req->tid)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&new->node, parent, p);
-	rb_insert_color(&new->node, &monc->generic_request_tree);
-}
+DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)
 
 static void release_generic_request(struct kref *kref)
 {
 	struct ceph_mon_generic_request *req =
 		container_of(kref, struct ceph_mon_generic_request, kref);
 
+	dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
+	     req->reply);
+	WARN_ON(!RB_EMPTY_NODE(&req->node));
+
 	if (req->reply)
 		ceph_msg_put(req->reply);
 	if (req->request)
@@ -533,7 +499,8 @@
 
 static void put_generic_request(struct ceph_mon_generic_request *req)
 {
-	kref_put(&req->kref, release_generic_request);
+	if (req)
+		kref_put(&req->kref, release_generic_request);
 }
 
 static void get_generic_request(struct ceph_mon_generic_request *req)
@@ -541,6 +508,103 @@
 	kref_get(&req->kref);
 }
 
+static struct ceph_mon_generic_request *
+alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
+{
+	struct ceph_mon_generic_request *req;
+
+	req = kzalloc(sizeof(*req), gfp);
+	if (!req)
+		return NULL;
+
+	req->monc = monc;
+	kref_init(&req->kref);
+	RB_CLEAR_NODE(&req->node);
+	init_completion(&req->completion);
+
+	dout("%s greq %p\n", __func__, req);
+	return req;
+}
+
+static void register_generic_request(struct ceph_mon_generic_request *req)
+{
+	struct ceph_mon_client *monc = req->monc;
+
+	WARN_ON(req->tid);
+
+	get_generic_request(req);
+	req->tid = ++monc->last_tid;
+	insert_generic_request(&monc->generic_request_tree, req);
+}
+
+static void send_generic_request(struct ceph_mon_client *monc,
+				 struct ceph_mon_generic_request *req)
+{
+	WARN_ON(!req->tid);
+
+	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+	req->request->hdr.tid = cpu_to_le64(req->tid);
+	ceph_con_send(&monc->con, ceph_msg_get(req->request));
+}
+
+static void __finish_generic_request(struct ceph_mon_generic_request *req)
+{
+	struct ceph_mon_client *monc = req->monc;
+
+	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+	erase_generic_request(&monc->generic_request_tree, req);
+
+	ceph_msg_revoke(req->request);
+	ceph_msg_revoke_incoming(req->reply);
+}
+
+static void finish_generic_request(struct ceph_mon_generic_request *req)
+{
+	__finish_generic_request(req);
+	put_generic_request(req);
+}
+
+static void complete_generic_request(struct ceph_mon_generic_request *req)
+{
+	if (req->complete_cb)
+		req->complete_cb(req);
+	else
+		complete_all(&req->completion);
+	put_generic_request(req);
+}
+
+void cancel_generic_request(struct ceph_mon_generic_request *req)
+{
+	struct ceph_mon_client *monc = req->monc;
+	struct ceph_mon_generic_request *lookup_req;
+
+	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+
+	mutex_lock(&monc->mutex);
+	lookup_req = lookup_generic_request(&monc->generic_request_tree,
+					    req->tid);
+	if (lookup_req) {
+		WARN_ON(lookup_req != req);
+		finish_generic_request(req);
+	}
+
+	mutex_unlock(&monc->mutex);
+}
+
+static int wait_generic_request(struct ceph_mon_generic_request *req)
+{
+	int ret;
+
+	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+	ret = wait_for_completion_interruptible(&req->completion);
+	if (ret)
+		cancel_generic_request(req);
+	else
+		ret = req->result; /* completed */
+
+	return ret;
+}
+
 static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
 					 struct ceph_msg_header *hdr,
 					 int *skip)
@@ -551,7 +615,7 @@
 	struct ceph_msg *m;
 
 	mutex_lock(&monc->mutex);
-	req = __lookup_generic_req(monc, tid);
+	req = lookup_generic_request(&monc->generic_request_tree, tid);
 	if (!req) {
 		dout("get_generic_reply %lld dne\n", tid);
 		*skip = 1;
@@ -570,42 +634,6 @@
 	return m;
 }
 
-static int __do_generic_request(struct ceph_mon_client *monc, u64 tid,
-				struct ceph_mon_generic_request *req)
-{
-	int err;
-
-	/* register request */
-	req->tid = tid != 0 ? tid : ++monc->last_tid;
-	req->request->hdr.tid = cpu_to_le64(req->tid);
-	__insert_generic_request(monc, req);
-	monc->num_generic_requests++;
-	ceph_con_send(&monc->con, ceph_msg_get(req->request));
-	mutex_unlock(&monc->mutex);
-
-	err = wait_for_completion_interruptible(&req->completion);
-
-	mutex_lock(&monc->mutex);
-	rb_erase(&req->node, &monc->generic_request_tree);
-	monc->num_generic_requests--;
-
-	if (!err)
-		err = req->result;
-	return err;
-}
-
-static int do_generic_request(struct ceph_mon_client *monc,
-			      struct ceph_mon_generic_request *req)
-{
-	int err;
-
-	mutex_lock(&monc->mutex);
-	err = __do_generic_request(monc, 0, req);
-	mutex_unlock(&monc->mutex);
-
-	return err;
-}
-
 /*
  * statfs
  */
@@ -616,22 +644,24 @@
 	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
 	u64 tid = le64_to_cpu(msg->hdr.tid);
 
+	dout("%s msg %p tid %llu\n", __func__, msg, tid);
+
 	if (msg->front.iov_len != sizeof(*reply))
 		goto bad;
-	dout("handle_statfs_reply %p tid %llu\n", msg, tid);
 
 	mutex_lock(&monc->mutex);
-	req = __lookup_generic_req(monc, tid);
-	if (req) {
-		*(struct ceph_statfs *)req->buf = reply->st;
-		req->result = 0;
-		get_generic_request(req);
+	req = lookup_generic_request(&monc->generic_request_tree, tid);
+	if (!req) {
+		mutex_unlock(&monc->mutex);
+		return;
 	}
+
+	req->result = 0;
+	*req->u.st = reply->st; /* struct */
+	__finish_generic_request(req);
 	mutex_unlock(&monc->mutex);
-	if (req) {
-		complete_all(&req->completion);
-		put_generic_request(req);
-	}
+
+	complete_generic_request(req);
 	return;
 
 bad:
@@ -646,38 +676,38 @@
 {
 	struct ceph_mon_generic_request *req;
 	struct ceph_mon_statfs *h;
-	int err;
+	int ret = -ENOMEM;
 
-	req = kzalloc(sizeof(*req), GFP_NOFS);
+	req = alloc_generic_request(monc, GFP_NOFS);
 	if (!req)
-		return -ENOMEM;
+		goto out;
 
-	kref_init(&req->kref);
-	req->buf = buf;
-	init_completion(&req->completion);
-
-	err = -ENOMEM;
 	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
 				    true);
 	if (!req->request)
 		goto out;
-	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
-				  true);
+
+	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
 	if (!req->reply)
 		goto out;
 
+	req->u.st = buf;
+
+	mutex_lock(&monc->mutex);
+	register_generic_request(req);
 	/* fill out request */
 	h = req->request->front.iov_base;
 	h->monhdr.have_version = 0;
 	h->monhdr.session_mon = cpu_to_le16(-1);
 	h->monhdr.session_mon_tid = 0;
 	h->fsid = monc->monmap->fsid;
+	send_generic_request(monc, req);
+	mutex_unlock(&monc->mutex);
 
-	err = do_generic_request(monc, req);
-
+	ret = wait_generic_request(req);
 out:
 	put_generic_request(req);
-	return err;
+	return ret;
 }
 EXPORT_SYMBOL(ceph_monc_do_statfs);
 
@@ -690,7 +720,7 @@
 	void *end = p + msg->front_alloc_len;
 	u64 handle;
 
-	dout("%s %p tid %llu\n", __func__, msg, tid);
+	dout("%s msg %p tid %llu\n", __func__, msg, tid);
 
 	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
 	handle = ceph_decode_64(&p);
@@ -698,77 +728,111 @@
 		goto bad;
 
 	mutex_lock(&monc->mutex);
-	req = __lookup_generic_req(monc, handle);
-	if (req) {
-		*(u64 *)req->buf = ceph_decode_64(&p);
-		req->result = 0;
-		get_generic_request(req);
-	}
-	mutex_unlock(&monc->mutex);
-	if (req) {
-		complete_all(&req->completion);
-		put_generic_request(req);
+	req = lookup_generic_request(&monc->generic_request_tree, handle);
+	if (!req) {
+		mutex_unlock(&monc->mutex);
+		return;
 	}
 
+	req->result = 0;
+	req->u.newest = ceph_decode_64(&p);
+	__finish_generic_request(req);
+	mutex_unlock(&monc->mutex);
+
+	complete_generic_request(req);
 	return;
+
 bad:
 	pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
 	ceph_msg_dump(msg);
 }
 
+static struct ceph_mon_generic_request *
+__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+			ceph_monc_callback_t cb, u64 private_data)
+{
+	struct ceph_mon_generic_request *req;
+
+	req = alloc_generic_request(monc, GFP_NOIO);
+	if (!req)
+		goto err_put_req;
+
+	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
+				    sizeof(u64) + sizeof(u32) + strlen(what),
+				    GFP_NOIO, true);
+	if (!req->request)
+		goto err_put_req;
+
+	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
+				  true);
+	if (!req->reply)
+		goto err_put_req;
+
+	req->complete_cb = cb;
+	req->private_data = private_data;
+
+	mutex_lock(&monc->mutex);
+	register_generic_request(req);
+	{
+		void *p = req->request->front.iov_base;
+		void *const end = p + req->request->front_alloc_len;
+
+		ceph_encode_64(&p, req->tid); /* handle */
+		ceph_encode_string(&p, end, what, strlen(what));
+		WARN_ON(p != end);
+	}
+	send_generic_request(monc, req);
+	mutex_unlock(&monc->mutex);
+
+	return req;
+
+err_put_req:
+	put_generic_request(req);
+	return ERR_PTR(-ENOMEM);
+}
+
 /*
  * Send MMonGetVersion and wait for the reply.
  *
  * @what: one of "mdsmap", "osdmap" or "monmap"
  */
-int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
-			     u64 *newest)
+int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+			  u64 *newest)
 {
 	struct ceph_mon_generic_request *req;
-	void *p, *end;
-	u64 tid;
-	int err;
+	int ret;
 
-	req = kzalloc(sizeof(*req), GFP_NOFS);
-	if (!req)
-		return -ENOMEM;
+	req = __ceph_monc_get_version(monc, what, NULL, 0);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
-	kref_init(&req->kref);
-	req->buf = newest;
-	init_completion(&req->completion);
+	ret = wait_generic_request(req);
+	if (!ret)
+		*newest = req->u.newest;
 
-	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
-				    sizeof(u64) + sizeof(u32) + strlen(what),
-				    GFP_NOFS, true);
-	if (!req->request) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
-				  GFP_NOFS, true);
-	if (!req->reply) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	p = req->request->front.iov_base;
-	end = p + req->request->front_alloc_len;
-
-	/* fill out request */
-	mutex_lock(&monc->mutex);
-	tid = ++monc->last_tid;
-	ceph_encode_64(&p, tid); /* handle */
-	ceph_encode_string(&p, end, what, strlen(what));
-
-	err = __do_generic_request(monc, tid, req);
-
-	mutex_unlock(&monc->mutex);
-out:
 	put_generic_request(req);
-	return err;
+	return ret;
 }
-EXPORT_SYMBOL(ceph_monc_do_get_version);
+EXPORT_SYMBOL(ceph_monc_get_version);
+
+/*
+ * Send MMonGetVersion without waiting for the reply.
+ *
+ * @what: one of "mdsmap", "osdmap" or "monmap"
+ */
+int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
+				ceph_monc_callback_t cb, u64 private_data)
+{
+	struct ceph_mon_generic_request *req;
+
+	req = __ceph_monc_get_version(monc, what, cb, private_data);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	put_generic_request(req);
+	return 0;
+}
+EXPORT_SYMBOL(ceph_monc_get_version_async);
 
 /*
  * Resend pending generic requests.
@@ -890,7 +954,7 @@
 	if (!monc->m_subscribe_ack)
 		goto out_auth;
 
-	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
+	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_NOFS,
 					 true);
 	if (!monc->m_subscribe)
 		goto out_subscribe_ack;
@@ -914,9 +978,10 @@
 
 	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
 	monc->generic_request_tree = RB_ROOT;
-	monc->num_generic_requests = 0;
 	monc->last_tid = 0;
 
+	monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;
+
 	return 0;
 
 out_auth_reply:
@@ -954,6 +1019,8 @@
 
 	ceph_auth_destroy(monc->auth);
 
+	WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));
+
 	ceph_msg_put(monc->m_auth);
 	ceph_msg_put(monc->m_auth_reply);
 	ceph_msg_put(monc->m_subscribe);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 40a53a7..8946959 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -19,25 +19,12 @@
 #include <linux/ceph/auth.h>
 #include <linux/ceph/pagelist.h>
 
-#define OSD_OP_FRONT_LEN	4096
 #define OSD_OPREPLY_FRONT_LEN	512
 
 static struct kmem_cache	*ceph_osd_request_cache;
 
 static const struct ceph_connection_operations osd_con_ops;
 
-static void __send_queued(struct ceph_osd_client *osdc);
-static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
-static void __register_request(struct ceph_osd_client *osdc,
-			       struct ceph_osd_request *req);
-static void __unregister_request(struct ceph_osd_client *osdc,
-				 struct ceph_osd_request *req);
-static void __unregister_linger_request(struct ceph_osd_client *osdc,
-					struct ceph_osd_request *req);
-static void __enqueue_request(struct ceph_osd_request *req);
-static void __send_request(struct ceph_osd_client *osdc,
-			   struct ceph_osd_request *req);
-
 /*
  * Implement client access to distributed object storage cluster.
  *
@@ -56,6 +43,52 @@
  * channel with an OSD is reset.
  */
 
+static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
+static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
+static void link_linger(struct ceph_osd *osd,
+			struct ceph_osd_linger_request *lreq);
+static void unlink_linger(struct ceph_osd *osd,
+			  struct ceph_osd_linger_request *lreq);
+
+#if 1
+static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
+{
+	bool wrlocked = true;
+
+	if (unlikely(down_read_trylock(sem))) {
+		wrlocked = false;
+		up_read(sem);
+	}
+
+	return wrlocked;
+}
+static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
+{
+	WARN_ON(!rwsem_is_locked(&osdc->lock));
+}
+static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
+{
+	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
+}
+static inline void verify_osd_locked(struct ceph_osd *osd)
+{
+	struct ceph_osd_client *osdc = osd->o_osdc;
+
+	WARN_ON(!(mutex_is_locked(&osd->lock) &&
+		  rwsem_is_locked(&osdc->lock)) &&
+		!rwsem_is_wrlocked(&osdc->lock));
+}
+static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
+{
+	WARN_ON(!mutex_is_locked(&lreq->lock));
+}
+#else
+static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
+static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
+static inline void verify_osd_locked(struct ceph_osd *osd) { }
+static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
+#endif
+
 /*
  * calculate the mapping of a file extent onto an object, and fill out the
  * request accordingly.  shorten extent as necessary if it crosses an
@@ -144,14 +177,6 @@
 }
 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
 
-struct ceph_osd_data *
-osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
-			unsigned int which)
-{
-	return osd_req_op_data(osd_req, which, cls, response_data);
-}
-EXPORT_SYMBOL(osd_req_op_cls_response_data);	/* ??? */
-
 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
 			unsigned int which, struct page **pages,
 			u64 length, u32 alignment,
@@ -218,6 +243,8 @@
 
 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
 	ceph_osd_data_pagelist_init(osd_data, pagelist);
+	osd_req->r_ops[which].cls.indata_len += pagelist->length;
+	osd_req->r_ops[which].indata_len += pagelist->length;
 }
 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
 
@@ -230,6 +257,8 @@
 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
 				pages_from_pool, own_pages);
+	osd_req->r_ops[which].cls.indata_len += length;
+	osd_req->r_ops[which].indata_len += length;
 }
 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
 
@@ -302,14 +331,76 @@
 	case CEPH_OSD_OP_STAT:
 		ceph_osd_data_release(&op->raw_data_in);
 		break;
+	case CEPH_OSD_OP_NOTIFY_ACK:
+		ceph_osd_data_release(&op->notify_ack.request_data);
+		break;
+	case CEPH_OSD_OP_NOTIFY:
+		ceph_osd_data_release(&op->notify.request_data);
+		ceph_osd_data_release(&op->notify.response_data);
+		break;
 	default:
 		break;
 	}
 }
 
 /*
+ * Assumes @t is zero-initialized.
+ */
+static void target_init(struct ceph_osd_request_target *t)
+{
+	ceph_oid_init(&t->base_oid);
+	ceph_oloc_init(&t->base_oloc);
+	ceph_oid_init(&t->target_oid);
+	ceph_oloc_init(&t->target_oloc);
+
+	ceph_osds_init(&t->acting);
+	ceph_osds_init(&t->up);
+	t->size = -1;
+	t->min_size = -1;
+
+	t->osd = CEPH_HOMELESS_OSD;
+}
+
+static void target_copy(struct ceph_osd_request_target *dest,
+			const struct ceph_osd_request_target *src)
+{
+	ceph_oid_copy(&dest->base_oid, &src->base_oid);
+	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
+	ceph_oid_copy(&dest->target_oid, &src->target_oid);
+	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
+
+	dest->pgid = src->pgid; /* struct */
+	dest->pg_num = src->pg_num;
+	dest->pg_num_mask = src->pg_num_mask;
+	ceph_osds_copy(&dest->acting, &src->acting);
+	ceph_osds_copy(&dest->up, &src->up);
+	dest->size = src->size;
+	dest->min_size = src->min_size;
+	dest->sort_bitwise = src->sort_bitwise;
+
+	dest->flags = src->flags;
+	dest->paused = src->paused;
+
+	dest->osd = src->osd;
+}
+
+static void target_destroy(struct ceph_osd_request_target *t)
+{
+	ceph_oid_destroy(&t->base_oid);
+	ceph_oid_destroy(&t->target_oid);
+}
+
+/*
  * requests
  */
+static void request_release_checks(struct ceph_osd_request *req)
+{
+	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
+	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
+	WARN_ON(!list_empty(&req->r_unsafe_item));
+	WARN_ON(req->r_osd);
+}
+
 static void ceph_osdc_release_request(struct kref *kref)
 {
 	struct ceph_osd_request *req = container_of(kref,
@@ -318,24 +409,19 @@
 
 	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
 	     req->r_request, req->r_reply);
-	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
-	WARN_ON(!list_empty(&req->r_req_lru_item));
-	WARN_ON(!list_empty(&req->r_osd_item));
-	WARN_ON(!list_empty(&req->r_linger_item));
-	WARN_ON(!list_empty(&req->r_linger_osd_item));
-	WARN_ON(req->r_osd);
+	request_release_checks(req);
 
 	if (req->r_request)
 		ceph_msg_put(req->r_request);
-	if (req->r_reply) {
-		ceph_msg_revoke_incoming(req->r_reply);
+	if (req->r_reply)
 		ceph_msg_put(req->r_reply);
-	}
 
 	for (which = 0; which < req->r_num_ops; which++)
 		osd_req_op_data_release(req, which);
 
+	target_destroy(&req->r_t);
 	ceph_put_snap_context(req->r_snapc);
+
 	if (req->r_mempool)
 		mempool_free(req, req->r_osdc->req_mempool);
 	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
@@ -354,12 +440,66 @@
 
 void ceph_osdc_put_request(struct ceph_osd_request *req)
 {
-	dout("%s %p (was %d)\n", __func__, req,
-	     atomic_read(&req->r_kref.refcount));
-	kref_put(&req->r_kref, ceph_osdc_release_request);
+	if (req) {
+		dout("%s %p (was %d)\n", __func__, req,
+		     atomic_read(&req->r_kref.refcount));
+		kref_put(&req->r_kref, ceph_osdc_release_request);
+	}
 }
 EXPORT_SYMBOL(ceph_osdc_put_request);
 
+static void request_init(struct ceph_osd_request *req)
+{
+	/* req only, each op is zeroed in _osd_req_op_init() */
+	memset(req, 0, sizeof(*req));
+
+	kref_init(&req->r_kref);
+	init_completion(&req->r_completion);
+	init_completion(&req->r_safe_completion);
+	RB_CLEAR_NODE(&req->r_node);
+	RB_CLEAR_NODE(&req->r_mc_node);
+	INIT_LIST_HEAD(&req->r_unsafe_item);
+
+	target_init(&req->r_t);
+}
+
+/*
+ * This is ugly, but it allows us to reuse linger registration and ping
+ * requests, keeping the structure of the code around send_linger{_ping}()
+ * reasonable.  Setting up a min_nr=2 mempool for each linger request
+ * and dealing with copying ops (this blasts req only, watch op remains
+ * intact) isn't any better.
+ */
+static void request_reinit(struct ceph_osd_request *req)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	bool mempool = req->r_mempool;
+	unsigned int num_ops = req->r_num_ops;
+	u64 snapid = req->r_snapid;
+	struct ceph_snap_context *snapc = req->r_snapc;
+	bool linger = req->r_linger;
+	struct ceph_msg *request_msg = req->r_request;
+	struct ceph_msg *reply_msg = req->r_reply;
+
+	dout("%s req %p\n", __func__, req);
+	WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
+	request_release_checks(req);
+
+	WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
+	WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
+	target_destroy(&req->r_t);
+
+	request_init(req);
+	req->r_osdc = osdc;
+	req->r_mempool = mempool;
+	req->r_num_ops = num_ops;
+	req->r_snapid = snapid;
+	req->r_snapc = snapc;
+	req->r_linger = linger;
+	req->r_request = request_msg;
+	req->r_reply = reply_msg;
+}
+
 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 					       struct ceph_snap_context *snapc,
 					       unsigned int num_ops,
@@ -367,8 +507,6 @@
 					       gfp_t gfp_flags)
 {
 	struct ceph_osd_request *req;
-	struct ceph_msg *msg;
-	size_t msg_size;
 
 	if (use_mempool) {
 		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
@@ -383,73 +521,65 @@
 	if (unlikely(!req))
 		return NULL;
 
-	/* req only, each op is zeroed in _osd_req_op_init() */
-	memset(req, 0, sizeof(*req));
-
+	request_init(req);
 	req->r_osdc = osdc;
 	req->r_mempool = use_mempool;
 	req->r_num_ops = num_ops;
+	req->r_snapid = CEPH_NOSNAP;
+	req->r_snapc = ceph_get_snap_context(snapc);
 
-	kref_init(&req->r_kref);
-	init_completion(&req->r_completion);
-	init_completion(&req->r_safe_completion);
-	RB_CLEAR_NODE(&req->r_node);
-	INIT_LIST_HEAD(&req->r_unsafe_item);
-	INIT_LIST_HEAD(&req->r_linger_item);
-	INIT_LIST_HEAD(&req->r_linger_osd_item);
-	INIT_LIST_HEAD(&req->r_req_lru_item);
-	INIT_LIST_HEAD(&req->r_osd_item);
+	dout("%s req %p\n", __func__, req);
+	return req;
+}
+EXPORT_SYMBOL(ceph_osdc_alloc_request);
 
-	req->r_base_oloc.pool = -1;
-	req->r_target_oloc.pool = -1;
+int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	struct ceph_msg *msg;
+	int msg_size;
 
-	msg_size = OSD_OPREPLY_FRONT_LEN;
-	if (num_ops > CEPH_OSD_SLAB_OPS) {
-		/* ceph_osd_op and rval */
-		msg_size += (num_ops - CEPH_OSD_SLAB_OPS) *
-			    (sizeof(struct ceph_osd_op) + 4);
-	}
+	WARN_ON(ceph_oid_empty(&req->r_base_oid));
 
-	/* create reply message */
-	if (use_mempool)
-		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
-	else
-		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size,
-				   gfp_flags, true);
-	if (!msg) {
-		ceph_osdc_put_request(req);
-		return NULL;
-	}
-	req->r_reply = msg;
-
+	/* create request message */
 	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
 	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
 	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
 	msg_size += 1 + 8 + 4 + 4; /* pgid */
-	msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
-	msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
+	msg_size += 4 + req->r_base_oid.name_len; /* oid */
+	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
 	msg_size += 8; /* snapid */
 	msg_size += 8; /* snap_seq */
-	msg_size += 4 + 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
+	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
 	msg_size += 4; /* retry_attempt */
 
-	/* create request message; allow space for oid */
-	if (use_mempool)
+	if (req->r_mempool)
 		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
 	else
-		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
-	if (!msg) {
-		ceph_osdc_put_request(req);
-		return NULL;
-	}
+		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
+	if (!msg)
+		return -ENOMEM;
 
 	memset(msg->front.iov_base, 0, msg->front.iov_len);
-
 	req->r_request = msg;
 
-	return req;
+	/* create reply message */
+	msg_size = OSD_OPREPLY_FRONT_LEN;
+	msg_size += req->r_base_oid.name_len;
+	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
+
+	if (req->r_mempool)
+		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
+	else
+		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
+	if (!msg)
+		return -ENOMEM;
+
+	req->r_reply = msg;
+
+	return 0;
 }
-EXPORT_SYMBOL(ceph_osdc_alloc_request);
+EXPORT_SYMBOL(ceph_osdc_alloc_messages);
 
 static bool osd_req_opcode_valid(u16 opcode)
 {
@@ -587,8 +717,6 @@
 
 	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
 
-	op->cls.argc = 0;	/* currently unused */
-
 	op->indata_len = payload_len;
 }
 EXPORT_SYMBOL(osd_req_op_cls_init);
@@ -627,21 +755,19 @@
 }
 EXPORT_SYMBOL(osd_req_op_xattr_init);
 
-void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
-				unsigned int which, u16 opcode,
-				u64 cookie, u64 version, int flag)
+/*
+ * @watch_opcode: CEPH_OSD_WATCH_OP_*
+ */
+static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
+				  u64 cookie, u8 watch_opcode)
 {
-	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
-						      opcode, 0);
+	struct ceph_osd_req_op *op;
 
-	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);
-
+	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
 	op->watch.cookie = cookie;
-	op->watch.ver = version;
-	if (opcode == CEPH_OSD_OP_WATCH && flag)
-		op->watch.flag = (u8)1;
+	op->watch.op = watch_opcode;
+	op->watch.gen = 0;
 }
-EXPORT_SYMBOL(osd_req_op_watch_init);
 
 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
 				unsigned int which,
@@ -686,16 +812,9 @@
 	}
 }
 
-static u64 osd_req_encode_op(struct ceph_osd_request *req,
-			      struct ceph_osd_op *dst, unsigned int which)
+static u32 osd_req_encode_op(struct ceph_osd_op *dst,
+			     const struct ceph_osd_req_op *src)
 {
-	struct ceph_osd_req_op *src;
-	struct ceph_osd_data *osd_data;
-	u64 request_data_len = 0;
-	u64 data_length;
-
-	BUG_ON(which >= req->r_num_ops);
-	src = &req->r_ops[which];
 	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
 		pr_err("unrecognized osd opcode %d\n", src->op);
 
@@ -704,57 +823,36 @@
 
 	switch (src->op) {
 	case CEPH_OSD_OP_STAT:
-		osd_data = &src->raw_data_in;
-		ceph_osdc_msg_data_add(req->r_reply, osd_data);
 		break;
 	case CEPH_OSD_OP_READ:
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_WRITEFULL:
 	case CEPH_OSD_OP_ZERO:
 	case CEPH_OSD_OP_TRUNCATE:
-		if (src->op == CEPH_OSD_OP_WRITE ||
-		    src->op == CEPH_OSD_OP_WRITEFULL)
-			request_data_len = src->extent.length;
 		dst->extent.offset = cpu_to_le64(src->extent.offset);
 		dst->extent.length = cpu_to_le64(src->extent.length);
 		dst->extent.truncate_size =
 			cpu_to_le64(src->extent.truncate_size);
 		dst->extent.truncate_seq =
 			cpu_to_le32(src->extent.truncate_seq);
-		osd_data = &src->extent.osd_data;
-		if (src->op == CEPH_OSD_OP_WRITE ||
-		    src->op == CEPH_OSD_OP_WRITEFULL)
-			ceph_osdc_msg_data_add(req->r_request, osd_data);
-		else
-			ceph_osdc_msg_data_add(req->r_reply, osd_data);
 		break;
 	case CEPH_OSD_OP_CALL:
 		dst->cls.class_len = src->cls.class_len;
 		dst->cls.method_len = src->cls.method_len;
-		osd_data = &src->cls.request_info;
-		ceph_osdc_msg_data_add(req->r_request, osd_data);
-		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST);
-		request_data_len = osd_data->pagelist->length;
-
-		osd_data = &src->cls.request_data;
-		data_length = ceph_osd_data_length(osd_data);
-		if (data_length) {
-			BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
-			dst->cls.indata_len = cpu_to_le32(data_length);
-			ceph_osdc_msg_data_add(req->r_request, osd_data);
-			src->indata_len += data_length;
-			request_data_len += data_length;
-		}
-		osd_data = &src->cls.response_data;
-		ceph_osdc_msg_data_add(req->r_reply, osd_data);
+		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
 		break;
 	case CEPH_OSD_OP_STARTSYNC:
 		break;
-	case CEPH_OSD_OP_NOTIFY_ACK:
 	case CEPH_OSD_OP_WATCH:
 		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
-		dst->watch.ver = cpu_to_le64(src->watch.ver);
-		dst->watch.flag = src->watch.flag;
+		dst->watch.ver = cpu_to_le64(0);
+		dst->watch.op = src->watch.op;
+		dst->watch.gen = cpu_to_le32(src->watch.gen);
+		break;
+	case CEPH_OSD_OP_NOTIFY_ACK:
+		break;
+	case CEPH_OSD_OP_NOTIFY:
+		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
 		break;
 	case CEPH_OSD_OP_SETALLOCHINT:
 		dst->alloc_hint.expected_object_size =
@@ -768,9 +866,6 @@
 		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
 		dst->xattr.cmp_op = src->xattr.cmp_op;
 		dst->xattr.cmp_mode = src->xattr.cmp_mode;
-		osd_data = &src->xattr.osd_data;
-		ceph_osdc_msg_data_add(req->r_request, osd_data);
-		request_data_len = osd_data->pagelist->length;
 		break;
 	case CEPH_OSD_OP_CREATE:
 	case CEPH_OSD_OP_DELETE:
@@ -787,7 +882,7 @@
 	dst->flags = cpu_to_le32(src->flags);
 	dst->payload_len = cpu_to_le32(src->indata_len);
 
-	return request_data_len;
+	return src->indata_len;
 }
 
 /*
@@ -824,17 +919,15 @@
 
 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
 					GFP_NOFS);
-	if (!req)
-		return ERR_PTR(-ENOMEM);
-
-	req->r_flags = flags;
+	if (!req) {
+		r = -ENOMEM;
+		goto fail;
+	}
 
 	/* calculate max write size */
 	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
-	if (r < 0) {
-		ceph_osdc_put_request(req);
-		return ERR_PTR(r);
-	}
+	if (r)
+		goto fail;
 
 	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
 		osd_req_op_init(req, which, opcode, 0);
@@ -854,194 +947,71 @@
 				       truncate_size, truncate_seq);
 	}
 
+	req->r_flags = flags;
 	req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
+	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
 
-	snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
-		 "%llx.%08llx", vino.ino, objnum);
-	req->r_base_oid.name_len = strlen(req->r_base_oid.name);
+	req->r_snapid = vino.snap;
+	if (flags & CEPH_OSD_FLAG_WRITE)
+		req->r_data_offset = off;
+
+	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
+	if (r)
+		goto fail;
 
 	return req;
+
+fail:
+	ceph_osdc_put_request(req);
+	return ERR_PTR(r);
 }
 EXPORT_SYMBOL(ceph_osdc_new_request);
 
 /*
  * We keep osd requests in an rbtree, sorted by ->r_tid.
  */
-static void __insert_request(struct ceph_osd_client *osdc,
-			     struct ceph_osd_request *new)
+DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
+DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
+
+static bool osd_homeless(struct ceph_osd *osd)
 {
-	struct rb_node **p = &osdc->requests.rb_node;
-	struct rb_node *parent = NULL;
-	struct ceph_osd_request *req = NULL;
-
-	while (*p) {
-		parent = *p;
-		req = rb_entry(parent, struct ceph_osd_request, r_node);
-		if (new->r_tid < req->r_tid)
-			p = &(*p)->rb_left;
-		else if (new->r_tid > req->r_tid)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&new->r_node, parent, p);
-	rb_insert_color(&new->r_node, &osdc->requests);
+	return osd->o_osd == CEPH_HOMELESS_OSD;
 }
 
-static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
-						 u64 tid)
+static bool osd_registered(struct ceph_osd *osd)
 {
-	struct ceph_osd_request *req;
-	struct rb_node *n = osdc->requests.rb_node;
+	verify_osdc_locked(osd->o_osdc);
 
-	while (n) {
-		req = rb_entry(n, struct ceph_osd_request, r_node);
-		if (tid < req->r_tid)
-			n = n->rb_left;
-		else if (tid > req->r_tid)
-			n = n->rb_right;
-		else
-			return req;
-	}
-	return NULL;
-}
-
-static struct ceph_osd_request *
-__lookup_request_ge(struct ceph_osd_client *osdc,
-		    u64 tid)
-{
-	struct ceph_osd_request *req;
-	struct rb_node *n = osdc->requests.rb_node;
-
-	while (n) {
-		req = rb_entry(n, struct ceph_osd_request, r_node);
-		if (tid < req->r_tid) {
-			if (!n->rb_left)
-				return req;
-			n = n->rb_left;
-		} else if (tid > req->r_tid) {
-			n = n->rb_right;
-		} else {
-			return req;
-		}
-	}
-	return NULL;
-}
-
-static void __kick_linger_request(struct ceph_osd_request *req)
-{
-	struct ceph_osd_client *osdc = req->r_osdc;
-	struct ceph_osd *osd = req->r_osd;
-
-	/*
-	 * Linger requests need to be resent with a new tid to avoid
-	 * the dup op detection logic on the OSDs.  Achieve this with
-	 * a re-register dance instead of open-coding.
-	 */
-	ceph_osdc_get_request(req);
-	if (!list_empty(&req->r_linger_item))
-		__unregister_linger_request(osdc, req);
-	else
-		__unregister_request(osdc, req);
-	__register_request(osdc, req);
-	ceph_osdc_put_request(req);
-
-	/*
-	 * Unless request has been registered as both normal and
-	 * lingering, __unregister{,_linger}_request clears r_osd.
-	 * However, here we need to preserve r_osd to make sure we
-	 * requeue on the same OSD.
-	 */
-	WARN_ON(req->r_osd || !osd);
-	req->r_osd = osd;
-
-	dout("%s requeueing %p tid %llu\n", __func__, req, req->r_tid);
-	__enqueue_request(req);
+	return !RB_EMPTY_NODE(&osd->o_node);
 }
 
 /*
- * Resubmit requests pending on the given osd.
+ * Assumes @osd is zero-initialized.
  */
-static void __kick_osd_requests(struct ceph_osd_client *osdc,
-				struct ceph_osd *osd)
+static void osd_init(struct ceph_osd *osd)
 {
-	struct ceph_osd_request *req, *nreq;
-	LIST_HEAD(resend);
-	LIST_HEAD(resend_linger);
-	int err;
-
-	dout("%s osd%d\n", __func__, osd->o_osd);
-	err = __reset_osd(osdc, osd);
-	if (err)
-		return;
-
-	/*
-	 * Build up a list of requests to resend by traversing the
-	 * osd's list of requests.  Requests for a given object are
-	 * sent in tid order, and that is also the order they're
-	 * kept on this list.  Therefore all requests that are in
-	 * flight will be found first, followed by all requests that
-	 * have not yet been sent.  And to resend requests while
-	 * preserving this order we will want to put any sent
-	 * requests back on the front of the osd client's unsent
-	 * list.
-	 *
-	 * So we build a separate ordered list of already-sent
-	 * requests for the affected osd and splice it onto the
-	 * front of the osd client's unsent list.  Once we've seen a
-	 * request that has not yet been sent we're done.  Those
-	 * requests are already sitting right where they belong.
-	 */
-	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
-		if (!req->r_sent)
-			break;
-
-		if (!req->r_linger) {
-			dout("%s requeueing %p tid %llu\n", __func__, req,
-			     req->r_tid);
-			list_move_tail(&req->r_req_lru_item, &resend);
-			req->r_flags |= CEPH_OSD_FLAG_RETRY;
-		} else {
-			list_move_tail(&req->r_req_lru_item, &resend_linger);
-		}
-	}
-	list_splice(&resend, &osdc->req_unsent);
-
-	/*
-	 * Both registered and not yet registered linger requests are
-	 * enqueued with a new tid on the same OSD.  We add/move them
-	 * to req_unsent/o_requests at the end to keep things in tid
-	 * order.
-	 */
-	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
-				 r_linger_osd_item) {
-		WARN_ON(!list_empty(&req->r_req_lru_item));
-		__kick_linger_request(req);
-	}
-
-	list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item)
-		__kick_linger_request(req);
+	atomic_set(&osd->o_ref, 1);
+	RB_CLEAR_NODE(&osd->o_node);
+	osd->o_requests = RB_ROOT;
+	osd->o_linger_requests = RB_ROOT;
+	INIT_LIST_HEAD(&osd->o_osd_lru);
+	INIT_LIST_HEAD(&osd->o_keepalive_item);
+	osd->o_incarnation = 1;
+	mutex_init(&osd->lock);
 }
 
-/*
- * If the osd connection drops, we need to resubmit all requests.
- */
-static void osd_reset(struct ceph_connection *con)
+static void osd_cleanup(struct ceph_osd *osd)
 {
-	struct ceph_osd *osd = con->private;
-	struct ceph_osd_client *osdc;
+	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
+	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
+	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
+	WARN_ON(!list_empty(&osd->o_osd_lru));
+	WARN_ON(!list_empty(&osd->o_keepalive_item));
 
-	if (!osd)
-		return;
-	dout("osd_reset osd%d\n", osd->o_osd);
-	osdc = osd->o_osdc;
-	down_read(&osdc->map_sem);
-	mutex_lock(&osdc->request_mutex);
-	__kick_osd_requests(osdc, osd);
-	__send_queued(osdc);
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
+	if (osd->o_auth.authorizer) {
+		WARN_ON(osd_homeless(osd));
+		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
+	}
 }
 
 /*
@@ -1051,22 +1021,15 @@
 {
 	struct ceph_osd *osd;
 
-	osd = kzalloc(sizeof(*osd), GFP_NOFS);
-	if (!osd)
-		return NULL;
+	WARN_ON(onum == CEPH_HOMELESS_OSD);
 
-	atomic_set(&osd->o_ref, 1);
+	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
+	osd_init(osd);
 	osd->o_osdc = osdc;
 	osd->o_osd = onum;
-	RB_CLEAR_NODE(&osd->o_node);
-	INIT_LIST_HEAD(&osd->o_requests);
-	INIT_LIST_HEAD(&osd->o_linger_requests);
-	INIT_LIST_HEAD(&osd->o_osd_lru);
-	osd->o_incarnation = 1;
 
 	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
 
-	INIT_LIST_HEAD(&osd->o_keepalive_item);
 	return osd;
 }
 
@@ -1087,114 +1050,115 @@
 	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
 	     atomic_read(&osd->o_ref) - 1);
 	if (atomic_dec_and_test(&osd->o_ref)) {
-		if (osd->o_auth.authorizer)
-			ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
+		osd_cleanup(osd);
 		kfree(osd);
 	}
 }
 
-/*
- * remove an osd from our map
- */
-static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
+
+static void __move_osd_to_lru(struct ceph_osd *osd)
 {
-	dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
-	WARN_ON(!list_empty(&osd->o_requests));
-	WARN_ON(!list_empty(&osd->o_linger_requests));
+	struct ceph_osd_client *osdc = osd->o_osdc;
 
-	list_del_init(&osd->o_osd_lru);
-	rb_erase(&osd->o_node, &osdc->osds);
-	RB_CLEAR_NODE(&osd->o_node);
-}
-
-static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
-{
-	dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
-
-	if (!RB_EMPTY_NODE(&osd->o_node)) {
-		ceph_con_close(&osd->o_con);
-		__remove_osd(osdc, osd);
-		put_osd(osd);
-	}
-}
-
-static void remove_all_osds(struct ceph_osd_client *osdc)
-{
-	dout("%s %p\n", __func__, osdc);
-	mutex_lock(&osdc->request_mutex);
-	while (!RB_EMPTY_ROOT(&osdc->osds)) {
-		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
-						struct ceph_osd, o_node);
-		remove_osd(osdc, osd);
-	}
-	mutex_unlock(&osdc->request_mutex);
-}
-
-static void __move_osd_to_lru(struct ceph_osd_client *osdc,
-			      struct ceph_osd *osd)
-{
-	dout("%s %p\n", __func__, osd);
+	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
 	BUG_ON(!list_empty(&osd->o_osd_lru));
 
+	spin_lock(&osdc->osd_lru_lock);
 	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
+	spin_unlock(&osdc->osd_lru_lock);
+
 	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
 }
 
-static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc,
-				  struct ceph_osd *osd)
+static void maybe_move_osd_to_lru(struct ceph_osd *osd)
 {
-	dout("%s %p\n", __func__, osd);
-
-	if (list_empty(&osd->o_requests) &&
-	    list_empty(&osd->o_linger_requests))
-		__move_osd_to_lru(osdc, osd);
+	if (RB_EMPTY_ROOT(&osd->o_requests) &&
+	    RB_EMPTY_ROOT(&osd->o_linger_requests))
+		__move_osd_to_lru(osd);
 }
 
 static void __remove_osd_from_lru(struct ceph_osd *osd)
 {
-	dout("__remove_osd_from_lru %p\n", osd);
+	struct ceph_osd_client *osdc = osd->o_osdc;
+
+	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+	spin_lock(&osdc->osd_lru_lock);
 	if (!list_empty(&osd->o_osd_lru))
 		list_del_init(&osd->o_osd_lru);
+	spin_unlock(&osdc->osd_lru_lock);
 }
 
-static void remove_old_osds(struct ceph_osd_client *osdc)
+/*
+ * Close the connection and assign any leftover requests to the
+ * homeless session.
+ */
+static void close_osd(struct ceph_osd *osd)
 {
-	struct ceph_osd *osd, *nosd;
+	struct ceph_osd_client *osdc = osd->o_osdc;
+	struct rb_node *n;
 
-	dout("__remove_old_osds %p\n", osdc);
-	mutex_lock(&osdc->request_mutex);
-	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
-		if (time_before(jiffies, osd->lru_ttl))
-			break;
-		remove_osd(osdc, osd);
+	verify_osdc_wrlocked(osdc);
+	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+	ceph_con_close(&osd->o_con);
+
+	for (n = rb_first(&osd->o_requests); n; ) {
+		struct ceph_osd_request *req =
+		    rb_entry(n, struct ceph_osd_request, r_node);
+
+		n = rb_next(n); /* unlink_request() */
+
+		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
+		unlink_request(osd, req);
+		link_request(&osdc->homeless_osd, req);
 	}
-	mutex_unlock(&osdc->request_mutex);
+	for (n = rb_first(&osd->o_linger_requests); n; ) {
+		struct ceph_osd_linger_request *lreq =
+		    rb_entry(n, struct ceph_osd_linger_request, node);
+
+		n = rb_next(n); /* unlink_linger() */
+
+		dout(" reassigning lreq %p linger_id %llu\n", lreq,
+		     lreq->linger_id);
+		unlink_linger(osd, lreq);
+		link_linger(&osdc->homeless_osd, lreq);
+	}
+
+	__remove_osd_from_lru(osd);
+	erase_osd(&osdc->osds, osd);
+	put_osd(osd);
 }
 
 /*
  * reset osd connect
  */
-static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+static int reopen_osd(struct ceph_osd *osd)
 {
 	struct ceph_entity_addr *peer_addr;
 
-	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
-	if (list_empty(&osd->o_requests) &&
-	    list_empty(&osd->o_linger_requests)) {
-		remove_osd(osdc, osd);
+	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+	if (RB_EMPTY_ROOT(&osd->o_requests) &&
+	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
+		close_osd(osd);
 		return -ENODEV;
 	}
 
-	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
+	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
 	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
 			!ceph_con_opened(&osd->o_con)) {
-		struct ceph_osd_request *req;
+		struct rb_node *n;
 
 		dout("osd addr hasn't changed and connection never opened, "
 		     "letting msgr retry\n");
 		/* touch each r_stamp for handle_timeout()'s benefit */
-		list_for_each_entry(req, &osd->o_requests, r_osd_item)
+		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
+			struct ceph_osd_request *req =
+			    rb_entry(n, struct ceph_osd_request, r_node);
 			req->r_stamp = jiffies;
+		}
 
 		return -EAGAIN;
 	}
@@ -1206,455 +1170,1369 @@
 	return 0;
 }
 
-static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
-{
-	struct rb_node **p = &osdc->osds.rb_node;
-	struct rb_node *parent = NULL;
-	struct ceph_osd *osd = NULL;
-
-	dout("__insert_osd %p osd%d\n", new, new->o_osd);
-	while (*p) {
-		parent = *p;
-		osd = rb_entry(parent, struct ceph_osd, o_node);
-		if (new->o_osd < osd->o_osd)
-			p = &(*p)->rb_left;
-		else if (new->o_osd > osd->o_osd)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&new->o_node, parent, p);
-	rb_insert_color(&new->o_node, &osdc->osds);
-}
-
-static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
+static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
+					  bool wrlocked)
 {
 	struct ceph_osd *osd;
-	struct rb_node *n = osdc->osds.rb_node;
 
-	while (n) {
-		osd = rb_entry(n, struct ceph_osd, o_node);
-		if (o < osd->o_osd)
-			n = n->rb_left;
-		else if (o > osd->o_osd)
-			n = n->rb_right;
-		else
-			return osd;
+	if (wrlocked)
+		verify_osdc_wrlocked(osdc);
+	else
+		verify_osdc_locked(osdc);
+
+	if (o != CEPH_HOMELESS_OSD)
+		osd = lookup_osd(&osdc->osds, o);
+	else
+		osd = &osdc->homeless_osd;
+	if (!osd) {
+		if (!wrlocked)
+			return ERR_PTR(-EAGAIN);
+
+		osd = create_osd(osdc, o);
+		insert_osd(&osdc->osds, osd);
+		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+			      &osdc->osdmap->osd_addr[osd->o_osd]);
 	}
-	return NULL;
-}
 
-static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
-{
-	schedule_delayed_work(&osdc->timeout_work,
-			      osdc->client->options->osd_keepalive_timeout);
-}
-
-static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
-{
-	cancel_delayed_work(&osdc->timeout_work);
+	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
+	return osd;
 }
 
 /*
- * Register request, assign tid.  If this is the first request, set up
- * the timeout event.
+ * Create request <-> OSD session relation.
+ *
+ * @req has to be assigned a tid, @osd may be homeless.
  */
-static void __register_request(struct ceph_osd_client *osdc,
-			       struct ceph_osd_request *req)
+static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
 {
-	req->r_tid = ++osdc->last_tid;
-	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
-	dout("__register_request %p tid %lld\n", req, req->r_tid);
-	__insert_request(osdc, req);
-	ceph_osdc_get_request(req);
-	osdc->num_requests++;
-	if (osdc->num_requests == 1) {
-		dout(" first request, scheduling timeout\n");
-		__schedule_osd_timeout(osdc);
-	}
+	verify_osd_locked(osd);
+	WARN_ON(!req->r_tid || req->r_osd);
+	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
+	     req, req->r_tid);
+
+	if (!osd_homeless(osd))
+		__remove_osd_from_lru(osd);
+	else
+		atomic_inc(&osd->o_osdc->num_homeless);
+
+	get_osd(osd);
+	insert_request(&osd->o_requests, req);
+	req->r_osd = osd;
 }
 
-/*
- * called under osdc->request_mutex
- */
-static void __unregister_request(struct ceph_osd_client *osdc,
-				 struct ceph_osd_request *req)
+static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
 {
-	if (RB_EMPTY_NODE(&req->r_node)) {
-		dout("__unregister_request %p tid %lld not registered\n",
-			req, req->r_tid);
-		return;
-	}
+	verify_osd_locked(osd);
+	WARN_ON(req->r_osd != osd);
+	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
+	     req, req->r_tid);
 
-	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
-	rb_erase(&req->r_node, &osdc->requests);
-	RB_CLEAR_NODE(&req->r_node);
-	osdc->num_requests--;
+	req->r_osd = NULL;
+	erase_request(&osd->o_requests, req);
+	put_osd(osd);
 
-	if (req->r_osd) {
-		/* make sure the original request isn't in flight. */
-		ceph_msg_revoke(req->r_request);
-
-		list_del_init(&req->r_osd_item);
-		maybe_move_osd_to_lru(osdc, req->r_osd);
-		if (list_empty(&req->r_linger_osd_item))
-			req->r_osd = NULL;
-	}
-
-	list_del_init(&req->r_req_lru_item);
-	ceph_osdc_put_request(req);
-
-	if (osdc->num_requests == 0) {
-		dout(" no requests, canceling timeout\n");
-		__cancel_osd_timeout(osdc);
-	}
+	if (!osd_homeless(osd))
+		maybe_move_osd_to_lru(osd);
+	else
+		atomic_dec(&osd->o_osdc->num_homeless);
 }
 
-/*
- * Cancel a previously queued request message
- */
-static void __cancel_request(struct ceph_osd_request *req)
+static bool __pool_full(struct ceph_pg_pool_info *pi)
 {
-	if (req->r_sent && req->r_osd) {
-		ceph_msg_revoke(req->r_request);
-		req->r_sent = 0;
-	}
+	return pi->flags & CEPH_POOL_FLAG_FULL;
 }
 
-static void __register_linger_request(struct ceph_osd_client *osdc,
-				    struct ceph_osd_request *req)
+static bool have_pool_full(struct ceph_osd_client *osdc)
 {
-	dout("%s %p tid %llu\n", __func__, req, req->r_tid);
-	WARN_ON(!req->r_linger);
+	struct rb_node *n;
 
-	ceph_osdc_get_request(req);
-	list_add_tail(&req->r_linger_item, &osdc->req_linger);
-	if (req->r_osd)
-		list_add_tail(&req->r_linger_osd_item,
-			      &req->r_osd->o_linger_requests);
-}
+	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
+		struct ceph_pg_pool_info *pi =
+		    rb_entry(n, struct ceph_pg_pool_info, node);
 
-static void __unregister_linger_request(struct ceph_osd_client *osdc,
-					struct ceph_osd_request *req)
-{
-	WARN_ON(!req->r_linger);
-
-	if (list_empty(&req->r_linger_item)) {
-		dout("%s %p tid %llu not registered\n", __func__, req,
-		     req->r_tid);
-		return;
+		if (__pool_full(pi))
+			return true;
 	}
 
-	dout("%s %p tid %llu\n", __func__, req, req->r_tid);
-	list_del_init(&req->r_linger_item);
-
-	if (req->r_osd) {
-		list_del_init(&req->r_linger_osd_item);
-		maybe_move_osd_to_lru(osdc, req->r_osd);
-		if (list_empty(&req->r_osd_item))
-			req->r_osd = NULL;
-	}
-	ceph_osdc_put_request(req);
+	return false;
 }
 
-void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
-				  struct ceph_osd_request *req)
+static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
 {
-	if (!req->r_linger) {
-		dout("set_request_linger %p\n", req);
-		req->r_linger = 1;
-	}
+	struct ceph_pg_pool_info *pi;
+
+	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
+	if (!pi)
+		return false;
+
+	return __pool_full(pi);
 }
-EXPORT_SYMBOL(ceph_osdc_set_request_linger);
 
 /*
  * Returns whether a request should be blocked from being sent
  * based on the current osdmap and osd_client settings.
- *
- * Caller should hold map_sem for read.
  */
-static bool __req_should_be_paused(struct ceph_osd_client *osdc,
-				   struct ceph_osd_request *req)
+static bool target_should_be_paused(struct ceph_osd_client *osdc,
+				    const struct ceph_osd_request_target *t,
+				    struct ceph_pg_pool_info *pi)
 {
-	bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-	bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-		ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
-	return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
-		(req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
+	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+		       __pool_full(pi);
+
+	WARN_ON(pi->id != t->base_oloc.pool);
+	return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
+	       (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
 }
 
-/*
- * Calculate mapping of a request to a PG.  Takes tiering into account.
- */
-static int __calc_request_pg(struct ceph_osdmap *osdmap,
-			     struct ceph_osd_request *req,
-			     struct ceph_pg *pg_out)
-{
-	bool need_check_tiering;
+enum calc_target_result {
+	CALC_TARGET_NO_ACTION = 0,
+	CALC_TARGET_NEED_RESEND,
+	CALC_TARGET_POOL_DNE,
+};
 
-	need_check_tiering = false;
-	if (req->r_target_oloc.pool == -1) {
-		req->r_target_oloc = req->r_base_oloc; /* struct */
+static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
+					   struct ceph_osd_request_target *t,
+					   u32 *last_force_resend,
+					   bool any_change)
+{
+	struct ceph_pg_pool_info *pi;
+	struct ceph_pg pgid, last_pgid;
+	struct ceph_osds up, acting;
+	bool force_resend = false;
+	bool need_check_tiering = false;
+	bool need_resend = false;
+	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
+	enum calc_target_result ct_res;
+	int ret;
+
+	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
+	if (!pi) {
+		t->osd = CEPH_HOMELESS_OSD;
+		ct_res = CALC_TARGET_POOL_DNE;
+		goto out;
+	}
+
+	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
+		if (last_force_resend &&
+		    *last_force_resend < pi->last_force_request_resend) {
+			*last_force_resend = pi->last_force_request_resend;
+			force_resend = true;
+		} else if (!last_force_resend) {
+			force_resend = true;
+		}
+	}
+	if (ceph_oid_empty(&t->target_oid) || force_resend) {
+		ceph_oid_copy(&t->target_oid, &t->base_oid);
 		need_check_tiering = true;
 	}
-	if (req->r_target_oid.name_len == 0) {
-		ceph_oid_copy(&req->r_target_oid, &req->r_base_oid);
+	if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
+		ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
 		need_check_tiering = true;
 	}
 
 	if (need_check_tiering &&
-	    (req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
-		struct ceph_pg_pool_info *pi;
-
-		pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool);
-		if (pi) {
-			if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
-			    pi->read_tier >= 0)
-				req->r_target_oloc.pool = pi->read_tier;
-			if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
-			    pi->write_tier >= 0)
-				req->r_target_oloc.pool = pi->write_tier;
-		}
-		/* !pi is caught in ceph_oloc_oid_to_pg() */
+	    (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
+		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
+			t->target_oloc.pool = pi->read_tier;
+		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
+			t->target_oloc.pool = pi->write_tier;
 	}
 
-	return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc,
-				   &req->r_target_oid, pg_out);
+	ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
+					&t->target_oloc, &pgid);
+	if (ret) {
+		WARN_ON(ret != -ENOENT);
+		t->osd = CEPH_HOMELESS_OSD;
+		ct_res = CALC_TARGET_POOL_DNE;
+		goto out;
+	}
+	last_pgid.pool = pgid.pool;
+	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
+
+	ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
+	if (any_change &&
+	    ceph_is_new_interval(&t->acting,
+				 &acting,
+				 &t->up,
+				 &up,
+				 t->size,
+				 pi->size,
+				 t->min_size,
+				 pi->min_size,
+				 t->pg_num,
+				 pi->pg_num,
+				 t->sort_bitwise,
+				 sort_bitwise,
+				 &last_pgid))
+		force_resend = true;
+
+	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
+		t->paused = false;
+		need_resend = true;
+	}
+
+	if (ceph_pg_compare(&t->pgid, &pgid) ||
+	    ceph_osds_changed(&t->acting, &acting, any_change) ||
+	    force_resend) {
+		t->pgid = pgid; /* struct */
+		ceph_osds_copy(&t->acting, &acting);
+		ceph_osds_copy(&t->up, &up);
+		t->size = pi->size;
+		t->min_size = pi->min_size;
+		t->pg_num = pi->pg_num;
+		t->pg_num_mask = pi->pg_num_mask;
+		t->sort_bitwise = sort_bitwise;
+
+		t->osd = acting.primary;
+		need_resend = true;
+	}
+
+	ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
+out:
+	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+	return ct_res;
 }
 
-static void __enqueue_request(struct ceph_osd_request *req)
+static void setup_request_data(struct ceph_osd_request *req,
+			       struct ceph_msg *msg)
 {
-	struct ceph_osd_client *osdc = req->r_osdc;
+	u32 data_len = 0;
+	int i;
 
-	dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid,
-	     req->r_osd ? req->r_osd->o_osd : -1);
+	if (!list_empty(&msg->data))
+		return;
 
-	if (req->r_osd) {
-		__remove_osd_from_lru(req->r_osd);
-		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
-		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
-	} else {
-		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
+	WARN_ON(msg->data_length);
+	for (i = 0; i < req->r_num_ops; i++) {
+		struct ceph_osd_req_op *op = &req->r_ops[i];
+
+		switch (op->op) {
+		/* request */
+		case CEPH_OSD_OP_WRITE:
+		case CEPH_OSD_OP_WRITEFULL:
+			WARN_ON(op->indata_len != op->extent.length);
+			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
+			break;
+		case CEPH_OSD_OP_SETXATTR:
+		case CEPH_OSD_OP_CMPXATTR:
+			WARN_ON(op->indata_len != op->xattr.name_len +
+						  op->xattr.value_len);
+			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
+			break;
+		case CEPH_OSD_OP_NOTIFY_ACK:
+			ceph_osdc_msg_data_add(msg,
+					       &op->notify_ack.request_data);
+			break;
+
+		/* reply */
+		case CEPH_OSD_OP_STAT:
+			ceph_osdc_msg_data_add(req->r_reply,
+					       &op->raw_data_in);
+			break;
+		case CEPH_OSD_OP_READ:
+			ceph_osdc_msg_data_add(req->r_reply,
+					       &op->extent.osd_data);
+			break;
+
+		/* both */
+		case CEPH_OSD_OP_CALL:
+			WARN_ON(op->indata_len != op->cls.class_len +
+						  op->cls.method_len +
+						  op->cls.indata_len);
+			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
+			/* optional, can be NONE */
+			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
+			/* optional, can be NONE */
+			ceph_osdc_msg_data_add(req->r_reply,
+					       &op->cls.response_data);
+			break;
+		case CEPH_OSD_OP_NOTIFY:
+			ceph_osdc_msg_data_add(msg,
+					       &op->notify.request_data);
+			ceph_osdc_msg_data_add(req->r_reply,
+					       &op->notify.response_data);
+			break;
+		}
+
+		data_len += op->indata_len;
 	}
+
+	WARN_ON(data_len != msg->data_length);
+}
+
+static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
+{
+	void *p = msg->front.iov_base;
+	void *const end = p + msg->front_alloc_len;
+	u32 data_len = 0;
+	int i;
+
+	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
+		/* snapshots aren't writeable */
+		WARN_ON(req->r_snapid != CEPH_NOSNAP);
+	} else {
+		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
+			req->r_data_offset || req->r_snapc);
+	}
+
+	setup_request_data(req, msg);
+
+	ceph_encode_32(&p, 1); /* client_inc, always 1 */
+	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
+	ceph_encode_32(&p, req->r_flags);
+	ceph_encode_timespec(p, &req->r_mtime);
+	p += sizeof(struct ceph_timespec);
+	/* aka reassert_version */
+	memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
+	p += sizeof(req->r_replay_version);
+
+	/* oloc */
+	ceph_encode_8(&p, 4);
+	ceph_encode_8(&p, 4);
+	ceph_encode_32(&p, 8 + 4 + 4);
+	ceph_encode_64(&p, req->r_t.target_oloc.pool);
+	ceph_encode_32(&p, -1); /* preferred */
+	ceph_encode_32(&p, 0); /* key len */
+
+	/* pgid */
+	ceph_encode_8(&p, 1);
+	ceph_encode_64(&p, req->r_t.pgid.pool);
+	ceph_encode_32(&p, req->r_t.pgid.seed);
+	ceph_encode_32(&p, -1); /* preferred */
+
+	/* oid */
+	ceph_encode_32(&p, req->r_t.target_oid.name_len);
+	memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
+	p += req->r_t.target_oid.name_len;
+
+	/* ops, can imply data */
+	ceph_encode_16(&p, req->r_num_ops);
+	for (i = 0; i < req->r_num_ops; i++) {
+		data_len += osd_req_encode_op(p, &req->r_ops[i]);
+		p += sizeof(struct ceph_osd_op);
+	}
+
+	ceph_encode_64(&p, req->r_snapid); /* snapid */
+	if (req->r_snapc) {
+		ceph_encode_64(&p, req->r_snapc->seq);
+		ceph_encode_32(&p, req->r_snapc->num_snaps);
+		for (i = 0; i < req->r_snapc->num_snaps; i++)
+			ceph_encode_64(&p, req->r_snapc->snaps[i]);
+	} else {
+		ceph_encode_64(&p, 0); /* snap_seq */
+		ceph_encode_32(&p, 0); /* snaps len */
+	}
+
+	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
+
+	BUG_ON(p > end);
+	msg->front.iov_len = p - msg->front.iov_base;
+	msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
+	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+	msg->hdr.data_len = cpu_to_le32(data_len);
+	/*
+	 * The header "data_off" is a hint to the receiver allowing it
+	 * to align received data into its buffers such that there's no
+	 * need to re-copy it before writing it to disk (direct I/O).
+	 */
+	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
+
+	dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+	     req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+	     msg->front.iov_len, data_len);
 }
 
 /*
- * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
- * (as needed), and set the request r_osd appropriately.  If there is
- * no up osd, set r_osd to NULL.  Move the request to the appropriate list
- * (unsent, homeless) or leave on in-flight lru.
- *
- * Return 0 if unchanged, 1 if changed, or negative on error.
- *
- * Caller should hold map_sem for read and request_mutex.
+ * @req has to be assigned a tid and registered.
  */
-static int __map_request(struct ceph_osd_client *osdc,
-			 struct ceph_osd_request *req, int force_resend)
+static void send_request(struct ceph_osd_request *req)
 {
-	struct ceph_pg pgid;
-	int acting[CEPH_PG_MAX_SIZE];
-	int num, o;
-	int err;
-	bool was_paused;
+	struct ceph_osd *osd = req->r_osd;
 
-	dout("map_request %p tid %lld\n", req, req->r_tid);
+	verify_osd_locked(osd);
+	WARN_ON(osd->o_osd != req->r_t.osd);
 
-	err = __calc_request_pg(osdc->osdmap, req, &pgid);
-	if (err) {
-		list_move(&req->r_req_lru_item, &osdc->req_notarget);
-		return err;
-	}
-	req->r_pgid = pgid;
+	/*
+	 * We may have a previously queued request message hanging
+	 * around.  Cancel it to avoid corrupting the msgr.
+	 */
+	if (req->r_sent)
+		ceph_msg_revoke(req->r_request);
 
-	num = ceph_calc_pg_acting(osdc->osdmap, pgid, acting, &o);
-	if (num < 0)
-		num = 0;
+	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
+	if (req->r_attempts)
+		req->r_flags |= CEPH_OSD_FLAG_RETRY;
+	else
+		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
 
-	was_paused = req->r_paused;
-	req->r_paused = __req_should_be_paused(osdc, req);
-	if (was_paused && !req->r_paused)
-		force_resend = 1;
+	encode_request(req, req->r_request);
 
-	if ((!force_resend &&
-	     req->r_osd && req->r_osd->o_osd == o &&
-	     req->r_sent >= req->r_osd->o_incarnation &&
-	     req->r_num_pg_osds == num &&
-	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
-	    (req->r_osd == NULL && o == -1) ||
-	    req->r_paused)
-		return 0;  /* no change */
+	dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
+	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
+	     req->r_t.osd, req->r_flags, req->r_attempts);
 
-	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
-	     req->r_tid, pgid.pool, pgid.seed, o,
-	     req->r_osd ? req->r_osd->o_osd : -1);
+	req->r_t.paused = false;
+	req->r_stamp = jiffies;
+	req->r_attempts++;
 
-	/* record full pg acting set */
-	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
-	req->r_num_pg_osds = num;
+	req->r_sent = osd->o_incarnation;
+	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
+	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
+}
 
-	if (req->r_osd) {
-		__cancel_request(req);
-		list_del_init(&req->r_osd_item);
-		list_del_init(&req->r_linger_osd_item);
-		req->r_osd = NULL;
+static void maybe_request_map(struct ceph_osd_client *osdc)
+{
+	bool continuous = false;
+
+	verify_osdc_locked(osdc);
+	WARN_ON(!osdc->osdmap->epoch);
+
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
+		dout("%s osdc %p continuous\n", __func__, osdc);
+		continuous = true;
+	} else {
+		dout("%s osdc %p onetime\n", __func__, osdc);
 	}
 
-	req->r_osd = __lookup_osd(osdc, o);
-	if (!req->r_osd && o >= 0) {
-		err = -ENOMEM;
-		req->r_osd = create_osd(osdc, o);
-		if (!req->r_osd) {
-			list_move(&req->r_req_lru_item, &osdc->req_notarget);
-			goto out;
+	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
+			       osdc->osdmap->epoch + 1, continuous))
+		ceph_monc_renew_subs(&osdc->client->monc);
+}
+
+static void send_map_check(struct ceph_osd_request *req);
+
+static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	struct ceph_osd *osd;
+	enum calc_target_result ct_res;
+	bool need_send = false;
+	bool promoted = false;
+
+	WARN_ON(req->r_tid || req->r_got_reply);
+	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
+
+again:
+	ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
+	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
+		goto promote;
+
+	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
+	if (IS_ERR(osd)) {
+		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
+		goto promote;
+	}
+
+	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
+	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
+		dout("req %p pausewr\n", req);
+		req->r_t.paused = true;
+		maybe_request_map(osdc);
+	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
+		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
+		dout("req %p pauserd\n", req);
+		req->r_t.paused = true;
+		maybe_request_map(osdc);
+	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
+		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
+				     CEPH_OSD_FLAG_FULL_FORCE)) &&
+		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+		    pool_full(osdc, req->r_t.base_oloc.pool))) {
+		dout("req %p full/pool_full\n", req);
+		pr_warn_ratelimited("FULL or reached pool quota\n");
+		req->r_t.paused = true;
+		maybe_request_map(osdc);
+	} else if (!osd_homeless(osd)) {
+		need_send = true;
+	} else {
+		maybe_request_map(osdc);
+	}
+
+	mutex_lock(&osd->lock);
+	/*
+	 * Assign the tid atomically with send_request() to protect
+	 * multiple writes to the same object from racing with each
+	 * other, resulting in out of order ops on the OSDs.
+	 */
+	req->r_tid = atomic64_inc_return(&osdc->last_tid);
+	link_request(osd, req);
+	if (need_send)
+		send_request(req);
+	mutex_unlock(&osd->lock);
+
+	if (ct_res == CALC_TARGET_POOL_DNE)
+		send_map_check(req);
+
+	if (promoted)
+		downgrade_write(&osdc->lock);
+	return;
+
+promote:
+	up_read(&osdc->lock);
+	down_write(&osdc->lock);
+	wrlocked = true;
+	promoted = true;
+	goto again;
+}
+
+static void account_request(struct ceph_osd_request *req)
+{
+	unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
+
+	if (req->r_flags & CEPH_OSD_FLAG_READ) {
+		WARN_ON(req->r_flags & mask);
+		req->r_flags |= CEPH_OSD_FLAG_ACK;
+	} else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
+		WARN_ON(!(req->r_flags & mask));
+	else
+		WARN_ON(1);
+
+	WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
+	atomic_inc(&req->r_osdc->num_requests);
+}
+
+static void submit_request(struct ceph_osd_request *req, bool wrlocked)
+{
+	ceph_osdc_get_request(req);
+	account_request(req);
+	__submit_request(req, wrlocked);
+}
+
+static void __finish_request(struct ceph_osd_request *req)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	struct ceph_osd *osd = req->r_osd;
+
+	verify_osd_locked(osd);
+	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+
+	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
+	unlink_request(osd, req);
+	atomic_dec(&osdc->num_requests);
+
+	/*
+	 * If an OSD has failed or returned and a request has been sent
+	 * twice, it's possible to get a reply and end up here while the
+	 * request message is queued for delivery.  We will ignore the
+	 * reply, so not a big deal, but better to try and catch it.
+	 */
+	ceph_msg_revoke(req->r_request);
+	ceph_msg_revoke_incoming(req->r_reply);
+}
+
+static void finish_request(struct ceph_osd_request *req)
+{
+	__finish_request(req);
+	ceph_osdc_put_request(req);
+}
+
+static void __complete_request(struct ceph_osd_request *req)
+{
+	if (req->r_callback)
+		req->r_callback(req);
+	else
+		complete_all(&req->r_completion);
+}
+
+/*
+ * Note that this is open-coded in handle_reply(), which has to deal
+ * with ack vs commit, dup acks, etc.
+ */
+static void complete_request(struct ceph_osd_request *req, int err)
+{
+	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
+
+	req->r_result = err;
+	__finish_request(req);
+	__complete_request(req);
+	complete_all(&req->r_safe_completion);
+	ceph_osdc_put_request(req);
+}
+
+static void cancel_map_check(struct ceph_osd_request *req)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	struct ceph_osd_request *lookup_req;
+
+	verify_osdc_wrlocked(osdc);
+
+	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
+	if (!lookup_req)
+		return;
+
+	WARN_ON(lookup_req != req);
+	erase_request_mc(&osdc->map_checks, req);
+	ceph_osdc_put_request(req);
+}
+
+static void cancel_request(struct ceph_osd_request *req)
+{
+	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+
+	cancel_map_check(req);
+	finish_request(req);
+}
+
+static void check_pool_dne(struct ceph_osd_request *req)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	struct ceph_osdmap *map = osdc->osdmap;
+
+	verify_osdc_wrlocked(osdc);
+	WARN_ON(!map->epoch);
+
+	if (req->r_attempts) {
+		/*
+		 * We sent a request earlier, which means that
+		 * previously the pool existed, and now it does not
+		 * (i.e., it was deleted).
+		 */
+		req->r_map_dne_bound = map->epoch;
+		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
+		     req->r_tid);
+	} else {
+		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
+		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
+	}
+
+	if (req->r_map_dne_bound) {
+		if (map->epoch >= req->r_map_dne_bound) {
+			/* we had a new enough map */
+			pr_info_ratelimited("tid %llu pool does not exist\n",
+					    req->r_tid);
+			complete_request(req, -ENOENT);
 		}
+	} else {
+		send_map_check(req);
+	}
+}
 
-		dout("map_request osd %p is osd%d\n", req->r_osd, o);
-		__insert_osd(osdc, req->r_osd);
+static void map_check_cb(struct ceph_mon_generic_request *greq)
+{
+	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
+	struct ceph_osd_request *req;
+	u64 tid = greq->private_data;
 
-		ceph_con_open(&req->r_osd->o_con,
-			      CEPH_ENTITY_TYPE_OSD, o,
-			      &osdc->osdmap->osd_addr[o]);
+	WARN_ON(greq->result || !greq->u.newest);
+
+	down_write(&osdc->lock);
+	req = lookup_request_mc(&osdc->map_checks, tid);
+	if (!req) {
+		dout("%s tid %llu dne\n", __func__, tid);
+		goto out_unlock;
 	}
 
-	__enqueue_request(req);
-	err = 1;   /* osd or pg changed */
+	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
+	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
+	if (!req->r_map_dne_bound)
+		req->r_map_dne_bound = greq->u.newest;
+	erase_request_mc(&osdc->map_checks, req);
+	check_pool_dne(req);
+
+	ceph_osdc_put_request(req);
+out_unlock:
+	up_write(&osdc->lock);
+}
+
+static void send_map_check(struct ceph_osd_request *req)
+{
+	struct ceph_osd_client *osdc = req->r_osdc;
+	struct ceph_osd_request *lookup_req;
+	int ret;
+
+	verify_osdc_wrlocked(osdc);
+
+	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
+	if (lookup_req) {
+		WARN_ON(lookup_req != req);
+		return;
+	}
+
+	ceph_osdc_get_request(req);
+	insert_request_mc(&osdc->map_checks, req);
+	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
+					  map_check_cb, req->r_tid);
+	WARN_ON(ret);
+}
+
+/*
+ * lingering requests, watch/notify v2 infrastructure
+ */
+static void linger_release(struct kref *kref)
+{
+	struct ceph_osd_linger_request *lreq =
+	    container_of(kref, struct ceph_osd_linger_request, kref);
+
+	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
+	     lreq->reg_req, lreq->ping_req);
+	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
+	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
+	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
+	WARN_ON(!list_empty(&lreq->scan_item));
+	WARN_ON(!list_empty(&lreq->pending_lworks));
+	WARN_ON(lreq->osd);
+
+	if (lreq->reg_req)
+		ceph_osdc_put_request(lreq->reg_req);
+	if (lreq->ping_req)
+		ceph_osdc_put_request(lreq->ping_req);
+	target_destroy(&lreq->t);
+	kfree(lreq);
+}
+
+static void linger_put(struct ceph_osd_linger_request *lreq)
+{
+	if (lreq)
+		kref_put(&lreq->kref, linger_release);
+}
+
+static struct ceph_osd_linger_request *
+linger_get(struct ceph_osd_linger_request *lreq)
+{
+	kref_get(&lreq->kref);
+	return lreq;
+}
+
+static struct ceph_osd_linger_request *
+linger_alloc(struct ceph_osd_client *osdc)
+{
+	struct ceph_osd_linger_request *lreq;
+
+	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
+	if (!lreq)
+		return NULL;
+
+	kref_init(&lreq->kref);
+	mutex_init(&lreq->lock);
+	RB_CLEAR_NODE(&lreq->node);
+	RB_CLEAR_NODE(&lreq->osdc_node);
+	RB_CLEAR_NODE(&lreq->mc_node);
+	INIT_LIST_HEAD(&lreq->scan_item);
+	INIT_LIST_HEAD(&lreq->pending_lworks);
+	init_completion(&lreq->reg_commit_wait);
+	init_completion(&lreq->notify_finish_wait);
+
+	lreq->osdc = osdc;
+	target_init(&lreq->t);
+
+	dout("%s lreq %p\n", __func__, lreq);
+	return lreq;
+}
+
+DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
+DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
+DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
+
+/*
+ * Create linger request <-> OSD session relation.
+ *
+ * @lreq has to be registered, @osd may be homeless.
+ */
+static void link_linger(struct ceph_osd *osd,
+			struct ceph_osd_linger_request *lreq)
+{
+	verify_osd_locked(osd);
+	WARN_ON(!lreq->linger_id || lreq->osd);
+	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
+	     osd->o_osd, lreq, lreq->linger_id);
+
+	if (!osd_homeless(osd))
+		__remove_osd_from_lru(osd);
+	else
+		atomic_inc(&osd->o_osdc->num_homeless);
+
+	get_osd(osd);
+	insert_linger(&osd->o_linger_requests, lreq);
+	lreq->osd = osd;
+}
+
+static void unlink_linger(struct ceph_osd *osd,
+			  struct ceph_osd_linger_request *lreq)
+{
+	verify_osd_locked(osd);
+	WARN_ON(lreq->osd != osd);
+	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
+	     osd->o_osd, lreq, lreq->linger_id);
+
+	lreq->osd = NULL;
+	erase_linger(&osd->o_linger_requests, lreq);
+	put_osd(osd);
+
+	if (!osd_homeless(osd))
+		maybe_move_osd_to_lru(osd);
+	else
+		atomic_dec(&osd->o_osdc->num_homeless);
+}
+
+static bool __linger_registered(struct ceph_osd_linger_request *lreq)
+{
+	verify_osdc_locked(lreq->osdc);
+
+	return !RB_EMPTY_NODE(&lreq->osdc_node);
+}
+
+static bool linger_registered(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	bool registered;
+
+	down_read(&osdc->lock);
+	registered = __linger_registered(lreq);
+	up_read(&osdc->lock);
+
+	return registered;
+}
+
+static void linger_register(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+
+	verify_osdc_wrlocked(osdc);
+	WARN_ON(lreq->linger_id);
+
+	linger_get(lreq);
+	lreq->linger_id = ++osdc->last_linger_id;
+	insert_linger_osdc(&osdc->linger_requests, lreq);
+}
+
+static void linger_unregister(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+
+	verify_osdc_wrlocked(osdc);
+
+	erase_linger_osdc(&osdc->linger_requests, lreq);
+	linger_put(lreq);
+}
+
+static void cancel_linger_request(struct ceph_osd_request *req)
+{
+	struct ceph_osd_linger_request *lreq = req->r_priv;
+
+	WARN_ON(!req->r_linger);
+	cancel_request(req);
+	linger_put(lreq);
+}
+
+struct linger_work {
+	struct work_struct work;
+	struct ceph_osd_linger_request *lreq;
+	struct list_head pending_item;
+	unsigned long queued_stamp;
+
+	union {
+		struct {
+			u64 notify_id;
+			u64 notifier_id;
+			void *payload; /* points into @msg front */
+			size_t payload_len;
+
+			struct ceph_msg *msg; /* for ceph_msg_put() */
+		} notify;
+		struct {
+			int err;
+		} error;
+	};
+};
+
+static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
+				       work_func_t workfn)
+{
+	struct linger_work *lwork;
+
+	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
+	if (!lwork)
+		return NULL;
+
+	INIT_WORK(&lwork->work, workfn);
+	INIT_LIST_HEAD(&lwork->pending_item);
+	lwork->lreq = linger_get(lreq);
+
+	return lwork;
+}
+
+static void lwork_free(struct linger_work *lwork)
+{
+	struct ceph_osd_linger_request *lreq = lwork->lreq;
+
+	mutex_lock(&lreq->lock);
+	list_del(&lwork->pending_item);
+	mutex_unlock(&lreq->lock);
+
+	linger_put(lreq);
+	kfree(lwork);
+}
+
+static void lwork_queue(struct linger_work *lwork)
+{
+	struct ceph_osd_linger_request *lreq = lwork->lreq;
+	struct ceph_osd_client *osdc = lreq->osdc;
+
+	verify_lreq_locked(lreq);
+	WARN_ON(!list_empty(&lwork->pending_item));
+
+	lwork->queued_stamp = jiffies;
+	list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
+	queue_work(osdc->notify_wq, &lwork->work);
+}
+
+static void do_watch_notify(struct work_struct *w)
+{
+	struct linger_work *lwork = container_of(w, struct linger_work, work);
+	struct ceph_osd_linger_request *lreq = lwork->lreq;
+
+	if (!linger_registered(lreq)) {
+		dout("%s lreq %p not registered\n", __func__, lreq);
+		goto out;
+	}
+
+	WARN_ON(!lreq->is_watch);
+	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
+	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
+	     lwork->notify.payload_len);
+	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
+		  lwork->notify.notifier_id, lwork->notify.payload,
+		  lwork->notify.payload_len);
 
 out:
+	ceph_msg_put(lwork->notify.msg);
+	lwork_free(lwork);
+}
+
+static void do_watch_error(struct work_struct *w)
+{
+	struct linger_work *lwork = container_of(w, struct linger_work, work);
+	struct ceph_osd_linger_request *lreq = lwork->lreq;
+
+	if (!linger_registered(lreq)) {
+		dout("%s lreq %p not registered\n", __func__, lreq);
+		goto out;
+	}
+
+	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
+	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
+
+out:
+	lwork_free(lwork);
+}
+
+static void queue_watch_error(struct ceph_osd_linger_request *lreq)
+{
+	struct linger_work *lwork;
+
+	lwork = lwork_alloc(lreq, do_watch_error);
+	if (!lwork) {
+		pr_err("failed to allocate error-lwork\n");
+		return;
+	}
+
+	lwork->error.err = lreq->last_error;
+	lwork_queue(lwork);
+}
+
+static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
+				       int result)
+{
+	if (!completion_done(&lreq->reg_commit_wait)) {
+		lreq->reg_commit_error = (result <= 0 ? result : 0);
+		complete_all(&lreq->reg_commit_wait);
+	}
+}
+
+static void linger_commit_cb(struct ceph_osd_request *req)
+{
+	struct ceph_osd_linger_request *lreq = req->r_priv;
+
+	mutex_lock(&lreq->lock);
+	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
+	     lreq->linger_id, req->r_result);
+	WARN_ON(!__linger_registered(lreq));
+	linger_reg_commit_complete(lreq, req->r_result);
+	lreq->committed = true;
+
+	if (!lreq->is_watch) {
+		struct ceph_osd_data *osd_data =
+		    osd_req_op_data(req, 0, notify, response_data);
+		void *p = page_address(osd_data->pages[0]);
+
+		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
+			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
+
+		/* make note of the notify_id */
+		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
+			lreq->notify_id = ceph_decode_64(&p);
+			dout("lreq %p notify_id %llu\n", lreq,
+			     lreq->notify_id);
+		} else {
+			dout("lreq %p no notify_id\n", lreq);
+		}
+	}
+
+	mutex_unlock(&lreq->lock);
+	linger_put(lreq);
+}
+
+static int normalize_watch_error(int err)
+{
+	/*
+	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
+	 * notification and a failure to reconnect because we raced with
+	 * the delete appear the same to the user.
+	 */
+	if (err == -ENOENT)
+		err = -ENOTCONN;
+
 	return err;
 }
 
-/*
- * caller should hold map_sem (for read) and request_mutex
- */
-static void __send_request(struct ceph_osd_client *osdc,
-			   struct ceph_osd_request *req)
+static void linger_reconnect_cb(struct ceph_osd_request *req)
 {
-	void *p;
+	struct ceph_osd_linger_request *lreq = req->r_priv;
 
-	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
-	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
-	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
-
-	/* fill in message content that changes each time we send it */
-	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
-	put_unaligned_le32(req->r_flags, req->r_request_flags);
-	put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool);
-	p = req->r_request_pgid;
-	ceph_encode_64(&p, req->r_pgid.pool);
-	ceph_encode_32(&p, req->r_pgid.seed);
-	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
-	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
-	       sizeof(req->r_reassert_version));
-
-	req->r_stamp = jiffies;
-	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
-
-	ceph_msg_get(req->r_request); /* send consumes a ref */
-
-	req->r_sent = req->r_osd->o_incarnation;
-
-	ceph_con_send(&req->r_osd->o_con, req->r_request);
-}
-
-/*
- * Send any requests in the queue (req_unsent).
- */
-static void __send_queued(struct ceph_osd_client *osdc)
-{
-	struct ceph_osd_request *req, *tmp;
-
-	dout("__send_queued\n");
-	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
-		__send_request(osdc, req);
-}
-
-/*
- * Caller should hold map_sem for read and request_mutex.
- */
-static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
-				     struct ceph_osd_request *req,
-				     bool nofail)
-{
-	int rc;
-
-	__register_request(osdc, req);
-	req->r_sent = 0;
-	req->r_got_reply = 0;
-	rc = __map_request(osdc, req, 0);
-	if (rc < 0) {
-		if (nofail) {
-			dout("osdc_start_request failed map, "
-				" will retry %lld\n", req->r_tid);
-			rc = 0;
-		} else {
-			__unregister_request(osdc, req);
+	mutex_lock(&lreq->lock);
+	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
+	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
+	if (req->r_result < 0) {
+		if (!lreq->last_error) {
+			lreq->last_error = normalize_watch_error(req->r_result);
+			queue_watch_error(lreq);
 		}
-		return rc;
 	}
 
-	if (req->r_osd == NULL) {
-		dout("send_request %p no up osds in pg\n", req);
-		ceph_monc_request_next_osdmap(&osdc->client->monc);
+	mutex_unlock(&lreq->lock);
+	linger_put(lreq);
+}
+
+static void send_linger(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_request *req = lreq->reg_req;
+	struct ceph_osd_req_op *op = &req->r_ops[0];
+
+	verify_osdc_wrlocked(req->r_osdc);
+	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+
+	if (req->r_osd)
+		cancel_linger_request(req);
+
+	request_reinit(req);
+	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+	req->r_flags = lreq->t.flags;
+	req->r_mtime = lreq->mtime;
+
+	mutex_lock(&lreq->lock);
+	if (lreq->is_watch && lreq->committed) {
+		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
+			op->watch.cookie != lreq->linger_id);
+		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
+		op->watch.gen = ++lreq->register_gen;
+		dout("lreq %p reconnect register_gen %u\n", lreq,
+		     op->watch.gen);
+		req->r_callback = linger_reconnect_cb;
 	} else {
-		__send_queued(osdc);
+		if (!lreq->is_watch)
+			lreq->notify_id = 0;
+		else
+			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
+		dout("lreq %p register\n", lreq);
+		req->r_callback = linger_commit_cb;
+	}
+	mutex_unlock(&lreq->lock);
+
+	req->r_priv = linger_get(lreq);
+	req->r_linger = true;
+
+	submit_request(req, true);
+}
+
+static void linger_ping_cb(struct ceph_osd_request *req)
+{
+	struct ceph_osd_linger_request *lreq = req->r_priv;
+
+	mutex_lock(&lreq->lock);
+	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
+	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
+	     lreq->last_error);
+	if (lreq->register_gen == req->r_ops[0].watch.gen) {
+		if (!req->r_result) {
+			lreq->watch_valid_thru = lreq->ping_sent;
+		} else if (!lreq->last_error) {
+			lreq->last_error = normalize_watch_error(req->r_result);
+			queue_watch_error(lreq);
+		}
+	} else {
+		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
+		     lreq->register_gen, req->r_ops[0].watch.gen);
 	}
 
-	return 0;
+	mutex_unlock(&lreq->lock);
+	linger_put(lreq);
+}
+
+static void send_linger_ping(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	struct ceph_osd_request *req = lreq->ping_req;
+	struct ceph_osd_req_op *op = &req->r_ops[0];
+
+	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
+		dout("%s PAUSERD\n", __func__);
+		return;
+	}
+
+	lreq->ping_sent = jiffies;
+	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
+	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
+	     lreq->register_gen);
+
+	if (req->r_osd)
+		cancel_linger_request(req);
+
+	request_reinit(req);
+	target_copy(&req->r_t, &lreq->t);
+
+	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
+		op->watch.cookie != lreq->linger_id ||
+		op->watch.op != CEPH_OSD_WATCH_OP_PING);
+	op->watch.gen = lreq->register_gen;
+	req->r_callback = linger_ping_cb;
+	req->r_priv = linger_get(lreq);
+	req->r_linger = true;
+
+	ceph_osdc_get_request(req);
+	account_request(req);
+	req->r_tid = atomic64_inc_return(&osdc->last_tid);
+	link_request(lreq->osd, req);
+	send_request(req);
+}
+
+static void linger_submit(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	struct ceph_osd *osd;
+
+	calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
+	osd = lookup_create_osd(osdc, lreq->t.osd, true);
+	link_linger(osd, lreq);
+
+	send_linger(lreq);
+}
+
+static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	struct ceph_osd_linger_request *lookup_lreq;
+
+	verify_osdc_wrlocked(osdc);
+
+	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
+				       lreq->linger_id);
+	if (!lookup_lreq)
+		return;
+
+	WARN_ON(lookup_lreq != lreq);
+	erase_linger_mc(&osdc->linger_map_checks, lreq);
+	linger_put(lreq);
 }
 
 /*
- * Timeout callback, called every N seconds when 1 or more osd
- * requests has been active for more than N seconds.  When this
- * happens, we ping all OSDs with requests who have timed out to
- * ensure any communications channel reset is detected.  Reset the
- * request timeouts another N seconds in the future as we go.
- * Reschedule the timeout event another N seconds in future (unless
- * there are no open requests).
+ * @lreq has to be both registered and linked.
+ */
+static void __linger_cancel(struct ceph_osd_linger_request *lreq)
+{
+	if (lreq->is_watch && lreq->ping_req->r_osd)
+		cancel_linger_request(lreq->ping_req);
+	if (lreq->reg_req->r_osd)
+		cancel_linger_request(lreq->reg_req);
+	cancel_linger_map_check(lreq);
+	unlink_linger(lreq->osd, lreq);
+	linger_unregister(lreq);
+}
+
+static void linger_cancel(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+
+	down_write(&osdc->lock);
+	if (__linger_registered(lreq))
+		__linger_cancel(lreq);
+	up_write(&osdc->lock);
+}
+
+static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
+
+static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	struct ceph_osdmap *map = osdc->osdmap;
+
+	verify_osdc_wrlocked(osdc);
+	WARN_ON(!map->epoch);
+
+	if (lreq->register_gen) {
+		lreq->map_dne_bound = map->epoch;
+		dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
+		     lreq, lreq->linger_id);
+	} else {
+		dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
+		     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
+		     map->epoch);
+	}
+
+	if (lreq->map_dne_bound) {
+		if (map->epoch >= lreq->map_dne_bound) {
+			/* we had a new enough map */
+			pr_info("linger_id %llu pool does not exist\n",
+				lreq->linger_id);
+			linger_reg_commit_complete(lreq, -ENOENT);
+			__linger_cancel(lreq);
+		}
+	} else {
+		send_linger_map_check(lreq);
+	}
+}
+
+static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
+{
+	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
+	struct ceph_osd_linger_request *lreq;
+	u64 linger_id = greq->private_data;
+
+	WARN_ON(greq->result || !greq->u.newest);
+
+	down_write(&osdc->lock);
+	lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
+	if (!lreq) {
+		dout("%s linger_id %llu dne\n", __func__, linger_id);
+		goto out_unlock;
+	}
+
+	dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
+	     __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
+	     greq->u.newest);
+	if (!lreq->map_dne_bound)
+		lreq->map_dne_bound = greq->u.newest;
+	erase_linger_mc(&osdc->linger_map_checks, lreq);
+	check_linger_pool_dne(lreq);
+
+	linger_put(lreq);
+out_unlock:
+	up_write(&osdc->lock);
+}
+
+static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	struct ceph_osd_linger_request *lookup_lreq;
+	int ret;
+
+	verify_osdc_wrlocked(osdc);
+
+	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
+				       lreq->linger_id);
+	if (lookup_lreq) {
+		WARN_ON(lookup_lreq != lreq);
+		return;
+	}
+
+	linger_get(lreq);
+	insert_linger_mc(&osdc->linger_map_checks, lreq);
+	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
+					  linger_map_check_cb, lreq->linger_id);
+	WARN_ON(ret);
+}
+
+static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
+{
+	int ret;
+
+	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
+	return ret ?: lreq->reg_commit_error;
+}
+
+static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
+{
+	int ret;
+
+	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
+	return ret ?: lreq->notify_finish_error;
+}
+
+/*
+ * Timeout callback, called every N seconds.  When 1 or more OSD
+ * requests have been active for more than N seconds, we send a keepalive
+ * (tag + timestamp) to their OSDs to ensure any communications channel
+ * reset is detected.
  */
 static void handle_timeout(struct work_struct *work)
 {
 	struct ceph_osd_client *osdc =
 		container_of(work, struct ceph_osd_client, timeout_work.work);
 	struct ceph_options *opts = osdc->client->options;
-	struct ceph_osd_request *req;
-	struct ceph_osd *osd;
-	struct list_head slow_osds;
-	dout("timeout\n");
-	down_read(&osdc->map_sem);
+	unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
+	LIST_HEAD(slow_osds);
+	struct rb_node *n, *p;
 
-	ceph_monc_request_next_osdmap(&osdc->client->monc);
-
-	mutex_lock(&osdc->request_mutex);
+	dout("%s osdc %p\n", __func__, osdc);
+	down_write(&osdc->lock);
 
 	/*
 	 * ping osds that are a bit slow.  this ensures that if there
 	 * is a break in the TCP connection we will notice, and reopen
 	 * a connection with that osd (from the fault callback).
 	 */
-	INIT_LIST_HEAD(&slow_osds);
-	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
-		if (time_before(jiffies,
-				req->r_stamp + opts->osd_keepalive_timeout))
-			break;
+	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+		bool found = false;
 
-		osd = req->r_osd;
-		BUG_ON(!osd);
-		dout(" tid %llu is slow, will send keepalive on osd%d\n",
-		     req->r_tid, osd->o_osd);
-		list_move_tail(&osd->o_keepalive_item, &slow_osds);
+		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
+			struct ceph_osd_request *req =
+			    rb_entry(p, struct ceph_osd_request, r_node);
+
+			if (time_before(req->r_stamp, cutoff)) {
+				dout(" req %p tid %llu on osd%d is laggy\n",
+				     req, req->r_tid, osd->o_osd);
+				found = true;
+			}
+		}
+		for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
+			struct ceph_osd_linger_request *lreq =
+			    rb_entry(p, struct ceph_osd_linger_request, node);
+
+			dout(" lreq %p linger_id %llu is served by osd%d\n",
+			     lreq, lreq->linger_id, osd->o_osd);
+			found = true;
+
+			mutex_lock(&lreq->lock);
+			if (lreq->is_watch && lreq->committed && !lreq->last_error)
+				send_linger_ping(lreq);
+			mutex_unlock(&lreq->lock);
+		}
+
+		if (found)
+			list_move_tail(&osd->o_keepalive_item, &slow_osds);
 	}
+
+	if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
+		maybe_request_map(osdc);
+
 	while (!list_empty(&slow_osds)) {
-		osd = list_entry(slow_osds.next, struct ceph_osd,
-				 o_keepalive_item);
+		struct ceph_osd *osd = list_first_entry(&slow_osds,
+							struct ceph_osd,
+							o_keepalive_item);
 		list_del_init(&osd->o_keepalive_item);
 		ceph_con_keepalive(&osd->o_con);
 	}
 
-	__schedule_osd_timeout(osdc);
-	__send_queued(osdc);
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
+	up_write(&osdc->lock);
+	schedule_delayed_work(&osdc->timeout_work,
+			      osdc->client->options->osd_keepalive_timeout);
 }
 
 static void handle_osds_timeout(struct work_struct *work)
@@ -1663,12 +2541,20 @@
 		container_of(work, struct ceph_osd_client,
 			     osds_timeout_work.work);
 	unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
+	struct ceph_osd *osd, *nosd;
 
-	dout("osds timeout\n");
-	down_read(&osdc->map_sem);
-	remove_old_osds(osdc);
-	up_read(&osdc->map_sem);
+	dout("%s osdc %p\n", __func__, osdc);
+	down_write(&osdc->lock);
+	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
+		if (time_before(jiffies, osd->lru_ttl))
+			break;
 
+		WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
+		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
+		close_osd(osd);
+	}
+
+	up_write(&osdc->lock);
 	schedule_delayed_work(&osdc->osds_timeout_work,
 			      round_jiffies_relative(delay));
 }
@@ -1776,107 +2662,76 @@
 	goto out;
 }
 
-static void complete_request(struct ceph_osd_request *req)
-{
-	complete_all(&req->r_safe_completion);  /* fsync waiter */
-}
+struct MOSDOpReply {
+	struct ceph_pg pgid;
+	u64 flags;
+	int result;
+	u32 epoch;
+	int num_ops;
+	u32 outdata_len[CEPH_OSD_MAX_OPS];
+	s32 rval[CEPH_OSD_MAX_OPS];
+	int retry_attempt;
+	struct ceph_eversion replay_version;
+	u64 user_version;
+	struct ceph_request_redirect redirect;
+};
 
-/*
- * handle osd op reply.  either call the callback if it is specified,
- * or do the completion to wake up the waiting thread.
- */
-static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
 {
-	void *p, *end;
-	struct ceph_osd_request *req;
-	struct ceph_request_redirect redir;
-	u64 tid;
-	int object_len;
-	unsigned int numops;
-	int payload_len, flags;
-	s32 result;
-	s32 retry_attempt;
-	struct ceph_pg pg;
-	int err;
-	u32 reassert_epoch;
-	u64 reassert_version;
-	u32 osdmap_epoch;
-	int already_completed;
-	u32 bytes;
+	void *p = msg->front.iov_base;
+	void *const end = p + msg->front.iov_len;
+	u16 version = le16_to_cpu(msg->hdr.version);
+	struct ceph_eversion bad_replay_version;
 	u8 decode_redir;
-	unsigned int i;
+	u32 len;
+	int ret;
+	int i;
 
-	tid = le64_to_cpu(msg->hdr.tid);
-	dout("handle_reply %p tid %llu\n", msg, tid);
+	ceph_decode_32_safe(&p, end, len, e_inval);
+	ceph_decode_need(&p, end, len, e_inval);
+	p += len; /* skip oid */
 
-	p = msg->front.iov_base;
-	end = p + msg->front.iov_len;
+	ret = ceph_decode_pgid(&p, end, &m->pgid);
+	if (ret)
+		return ret;
 
-	ceph_decode_need(&p, end, 4, bad);
-	object_len = ceph_decode_32(&p);
-	ceph_decode_need(&p, end, object_len, bad);
-	p += object_len;
+	ceph_decode_64_safe(&p, end, m->flags, e_inval);
+	ceph_decode_32_safe(&p, end, m->result, e_inval);
+	ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
+	memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
+	p += sizeof(bad_replay_version);
+	ceph_decode_32_safe(&p, end, m->epoch, e_inval);
 
-	err = ceph_decode_pgid(&p, end, &pg);
-	if (err)
-		goto bad;
+	ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
+	if (m->num_ops > ARRAY_SIZE(m->outdata_len))
+		goto e_inval;
 
-	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
-	flags = ceph_decode_64(&p);
-	result = ceph_decode_32(&p);
-	reassert_epoch = ceph_decode_32(&p);
-	reassert_version = ceph_decode_64(&p);
-	osdmap_epoch = ceph_decode_32(&p);
-
-	/* lookup */
-	down_read(&osdc->map_sem);
-	mutex_lock(&osdc->request_mutex);
-	req = __lookup_request(osdc, tid);
-	if (req == NULL) {
-		dout("handle_reply tid %llu dne\n", tid);
-		goto bad_mutex;
-	}
-	ceph_osdc_get_request(req);
-
-	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
-	     req, result);
-
-	ceph_decode_need(&p, end, 4, bad_put);
-	numops = ceph_decode_32(&p);
-	if (numops > CEPH_OSD_MAX_OPS)
-		goto bad_put;
-	if (numops != req->r_num_ops)
-		goto bad_put;
-	payload_len = 0;
-	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put);
-	for (i = 0; i < numops; i++) {
+	ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
+			 e_inval);
+	for (i = 0; i < m->num_ops; i++) {
 		struct ceph_osd_op *op = p;
-		int len;
 
-		len = le32_to_cpu(op->payload_len);
-		req->r_ops[i].outdata_len = len;
-		dout(" op %d has %d bytes\n", i, len);
-		payload_len += len;
+		m->outdata_len[i] = le32_to_cpu(op->payload_len);
 		p += sizeof(*op);
 	}
-	bytes = le32_to_cpu(msg->hdr.data_len);
-	if (payload_len != bytes) {
-		pr_warn("sum of op payload lens %d != data_len %d\n",
-			payload_len, bytes);
-		goto bad_put;
+
+	ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
+	for (i = 0; i < m->num_ops; i++)
+		ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
+
+	if (version >= 5) {
+		ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
+		memcpy(&m->replay_version, p, sizeof(m->replay_version));
+		p += sizeof(m->replay_version);
+		ceph_decode_64_safe(&p, end, m->user_version, e_inval);
+	} else {
+		m->replay_version = bad_replay_version; /* struct */
+		m->user_version = le64_to_cpu(m->replay_version.version);
 	}
 
-	ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
-	retry_attempt = ceph_decode_32(&p);
-	for (i = 0; i < numops; i++)
-		req->r_ops[i].rval = ceph_decode_32(&p);
-
-	if (le16_to_cpu(msg->hdr.version) >= 6) {
-		p += 8 + 4; /* skip replay_version */
-		p += 8; /* skip user_version */
-
-		if (le16_to_cpu(msg->hdr.version) >= 7)
-			ceph_decode_8_safe(&p, end, decode_redir, bad_put);
+	if (version >= 6) {
+		if (version >= 7)
+			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
 		else
 			decode_redir = 1;
 	} else {
@@ -1884,227 +2739,409 @@
 	}
 
 	if (decode_redir) {
-		err = ceph_redirect_decode(&p, end, &redir);
-		if (err)
-			goto bad_put;
+		ret = ceph_redirect_decode(&p, end, &m->redirect);
+		if (ret)
+			return ret;
 	} else {
-		redir.oloc.pool = -1;
+		ceph_oloc_init(&m->redirect.oloc);
 	}
 
-	if (redir.oloc.pool != -1) {
-		dout("redirect pool %lld\n", redir.oloc.pool);
+	return 0;
 
-		__unregister_request(osdc, req);
-
-		req->r_target_oloc = redir.oloc; /* struct */
-
-		/*
-		 * Start redirect requests with nofail=true.  If
-		 * mapping fails, request will end up on the notarget
-		 * list, waiting for the new osdmap (which can take
-		 * a while), even though the original request mapped
-		 * successfully.  In the future we might want to follow
-		 * original request's nofail setting here.
-		 */
-		err = __ceph_osdc_start_request(osdc, req, true);
-		BUG_ON(err);
-
-		goto out_unlock;
-	}
-
-	already_completed = req->r_got_reply;
-	if (!req->r_got_reply) {
-		req->r_result = result;
-		dout("handle_reply result %d bytes %d\n", req->r_result,
-		     bytes);
-		if (req->r_result == 0)
-			req->r_result = bytes;
-
-		/* in case this is a write and we need to replay, */
-		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
-		req->r_reassert_version.version = cpu_to_le64(reassert_version);
-
-		req->r_got_reply = 1;
-	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
-		dout("handle_reply tid %llu dup ack\n", tid);
-		goto out_unlock;
-	}
-
-	dout("handle_reply tid %llu flags %d\n", tid, flags);
-
-	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
-		__register_linger_request(osdc, req);
-
-	/* either this is a read, or we got the safe response */
-	if (result < 0 ||
-	    (flags & CEPH_OSD_FLAG_ONDISK) ||
-	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
-		__unregister_request(osdc, req);
-
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
-
-	if (!already_completed) {
-		if (req->r_unsafe_callback &&
-		    result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK))
-			req->r_unsafe_callback(req, true);
-		if (req->r_callback)
-			req->r_callback(req, msg);
-		else
-			complete_all(&req->r_completion);
-	}
-
-	if (flags & CEPH_OSD_FLAG_ONDISK) {
-		if (req->r_unsafe_callback && already_completed)
-			req->r_unsafe_callback(req, false);
-		complete_request(req);
-	}
-
-out:
-	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
-	ceph_osdc_put_request(req);
-	return;
-out_unlock:
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
-	goto out;
-
-bad_put:
-	req->r_result = -EIO;
-	__unregister_request(osdc, req);
-	if (req->r_callback)
-		req->r_callback(req, msg);
-	else
-		complete_all(&req->r_completion);
-	complete_request(req);
-	ceph_osdc_put_request(req);
-bad_mutex:
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
-bad:
-	pr_err("corrupt osd_op_reply got %d %d\n",
-	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
-	ceph_msg_dump(msg);
-}
-
-static void reset_changed_osds(struct ceph_osd_client *osdc)
-{
-	struct rb_node *p, *n;
-
-	dout("%s %p\n", __func__, osdc);
-	for (p = rb_first(&osdc->osds); p; p = n) {
-		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
-
-		n = rb_next(p);
-		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
-		    memcmp(&osd->o_con.peer_addr,
-			   ceph_osd_addr(osdc->osdmap,
-					 osd->o_osd),
-			   sizeof(struct ceph_entity_addr)) != 0)
-			__reset_osd(osdc, osd);
-	}
+e_inval:
+	return -EINVAL;
 }
 
 /*
- * Requeue requests whose mapping to an OSD has changed.  If requests map to
- * no osd, request a new map.
- *
- * Caller should hold map_sem for read.
+ * We are done with @req if
+ *   - @m is a safe reply, or
+ *   - @m is an unsafe reply and we didn't want a safe one
  */
-static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
-			  bool force_resend_writes)
+static bool done_request(const struct ceph_osd_request *req,
+			 const struct MOSDOpReply *m)
 {
-	struct ceph_osd_request *req, *nreq;
-	struct rb_node *p;
-	int needmap = 0;
-	int err;
-	bool force_resend_req;
+	return (m->result < 0 ||
+		(m->flags & CEPH_OSD_FLAG_ONDISK) ||
+		!(req->r_flags & CEPH_OSD_FLAG_ONDISK));
+}
 
-	dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
-		force_resend_writes ? " (force resend writes)" : "");
-	mutex_lock(&osdc->request_mutex);
-	for (p = rb_first(&osdc->requests); p; ) {
-		req = rb_entry(p, struct ceph_osd_request, r_node);
-		p = rb_next(p);
+/*
+ * handle osd op reply.  either call the callback if it is specified,
+ * or do the completion to wake up the waiting thread.
+ *
+ * ->r_unsafe_callback is set?	yes			no
+ *
+ * first reply is OK (needed	r_cb/r_completion,	r_cb/r_completion,
+ * any or needed/got safe)	r_safe_completion	r_safe_completion
+ *
+ * first reply is unsafe	r_unsafe_cb(true)	(nothing)
+ *
+ * when we get the safe reply	r_unsafe_cb(false),	r_cb/r_completion,
+ *				r_safe_completion	r_safe_completion
+ */
+static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
+{
+	struct ceph_osd_client *osdc = osd->o_osdc;
+	struct ceph_osd_request *req;
+	struct MOSDOpReply m;
+	u64 tid = le64_to_cpu(msg->hdr.tid);
+	u32 data_len = 0;
+	bool already_acked;
+	int ret;
+	int i;
 
-		/*
-		 * For linger requests that have not yet been
-		 * registered, move them to the linger list; they'll
-		 * be sent to the osd in the loop below.  Unregister
-		 * the request before re-registering it as a linger
-		 * request to ensure the __map_request() below
-		 * will decide it needs to be sent.
-		 */
-		if (req->r_linger && list_empty(&req->r_linger_item)) {
-			dout("%p tid %llu restart on osd%d\n",
-			     req, req->r_tid,
-			     req->r_osd ? req->r_osd->o_osd : -1);
-			ceph_osdc_get_request(req);
-			__unregister_request(osdc, req);
-			__register_linger_request(osdc, req);
-			ceph_osdc_put_request(req);
-			continue;
+	dout("%s msg %p tid %llu\n", __func__, msg, tid);
+
+	down_read(&osdc->lock);
+	if (!osd_registered(osd)) {
+		dout("%s osd%d unknown\n", __func__, osd->o_osd);
+		goto out_unlock_osdc;
+	}
+	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
+
+	mutex_lock(&osd->lock);
+	req = lookup_request(&osd->o_requests, tid);
+	if (!req) {
+		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
+		goto out_unlock_session;
+	}
+
+	ret = decode_MOSDOpReply(msg, &m);
+	if (ret) {
+		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
+		       req->r_tid, ret);
+		ceph_msg_dump(msg);
+		goto fail_request;
+	}
+	dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
+	     __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
+	     m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
+	     le64_to_cpu(m.replay_version.version), m.user_version);
+
+	if (m.retry_attempt >= 0) {
+		if (m.retry_attempt != req->r_attempts - 1) {
+			dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
+			     req, req->r_tid, m.retry_attempt,
+			     req->r_attempts - 1);
+			goto out_unlock_session;
 		}
+	} else {
+		WARN_ON(1); /* MOSDOpReply v4 is assumed */
+	}
 
-		force_resend_req = force_resend ||
-			(force_resend_writes &&
-				req->r_flags & CEPH_OSD_FLAG_WRITE);
-		err = __map_request(osdc, req, force_resend_req);
-		if (err < 0)
-			continue;  /* error */
-		if (req->r_osd == NULL) {
-			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
-			needmap++;  /* request a newer map */
-		} else if (err > 0) {
-			if (!req->r_linger) {
-				dout("%p tid %llu requeued on osd%d\n", req,
-				     req->r_tid,
-				     req->r_osd ? req->r_osd->o_osd : -1);
-				req->r_flags |= CEPH_OSD_FLAG_RETRY;
-			}
+	if (!ceph_oloc_empty(&m.redirect.oloc)) {
+		dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
+		     m.redirect.oloc.pool);
+		unlink_request(osd, req);
+		mutex_unlock(&osd->lock);
+
+		ceph_oloc_copy(&req->r_t.target_oloc, &m.redirect.oloc);
+		req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
+		req->r_tid = 0;
+		__submit_request(req, false);
+		goto out_unlock_osdc;
+	}
+
+	if (m.num_ops != req->r_num_ops) {
+		pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
+		       req->r_num_ops, req->r_tid);
+		goto fail_request;
+	}
+	for (i = 0; i < req->r_num_ops; i++) {
+		dout(" req %p tid %llu op %d rval %d len %u\n", req,
+		     req->r_tid, i, m.rval[i], m.outdata_len[i]);
+		req->r_ops[i].rval = m.rval[i];
+		req->r_ops[i].outdata_len = m.outdata_len[i];
+		data_len += m.outdata_len[i];
+	}
+	if (data_len != le32_to_cpu(msg->hdr.data_len)) {
+		pr_err("sum of lens %u != %u for tid %llu\n", data_len,
+		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
+		goto fail_request;
+	}
+	dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
+	     req, req->r_tid, req->r_got_reply, m.result, data_len);
+
+	already_acked = req->r_got_reply;
+	if (!already_acked) {
+		req->r_result = m.result ?: data_len;
+		req->r_replay_version = m.replay_version; /* struct */
+		req->r_got_reply = true;
+	} else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
+		dout("req %p tid %llu dup ack\n", req, req->r_tid);
+		goto out_unlock_session;
+	}
+
+	if (done_request(req, &m)) {
+		__finish_request(req);
+		if (req->r_linger) {
+			WARN_ON(req->r_unsafe_callback);
+			dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
+			__complete_request(req);
 		}
 	}
 
-	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
-				 r_linger_item) {
-		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
+	mutex_unlock(&osd->lock);
+	up_read(&osdc->lock);
 
-		err = __map_request(osdc, req,
-				    force_resend || force_resend_writes);
-		dout("__map_request returned %d\n", err);
-		if (err < 0)
-			continue;  /* hrm! */
-		if (req->r_osd == NULL || err > 0) {
-			if (req->r_osd == NULL) {
-				dout("lingering %p tid %llu maps to no osd\n",
-				     req, req->r_tid);
-				/*
-				 * A homeless lingering request makes
-				 * no sense, as it's job is to keep
-				 * a particular OSD connection open.
-				 * Request a newer map and kick the
-				 * request, knowing that it won't be
-				 * resent until we actually get a map
-				 * that can tell us where to send it.
-				 */
-				needmap++;
-			}
-
-			dout("kicking lingering %p tid %llu osd%d\n", req,
-			     req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
-			__register_request(osdc, req);
-			__unregister_linger_request(osdc, req);
+	if (done_request(req, &m)) {
+		if (already_acked && req->r_unsafe_callback) {
+			dout("req %p tid %llu safe-cb\n", req, req->r_tid);
+			req->r_unsafe_callback(req, false);
+		} else if (!req->r_linger) {
+			dout("req %p tid %llu cb\n", req, req->r_tid);
+			__complete_request(req);
+		}
+		if (m.flags & CEPH_OSD_FLAG_ONDISK)
+			complete_all(&req->r_safe_completion);
+		ceph_osdc_put_request(req);
+	} else {
+		if (req->r_unsafe_callback) {
+			dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
+			req->r_unsafe_callback(req, true);
+		} else {
+			WARN_ON(1);
 		}
 	}
-	reset_changed_osds(osdc);
-	mutex_unlock(&osdc->request_mutex);
 
-	if (needmap) {
-		dout("%d requests for down osds, need new map\n", needmap);
-		ceph_monc_request_next_osdmap(&osdc->client->monc);
+	return;
+
+fail_request:
+	complete_request(req, -EIO);
+out_unlock_session:
+	mutex_unlock(&osd->lock);
+out_unlock_osdc:
+	up_read(&osdc->lock);
+}
+
+static void set_pool_was_full(struct ceph_osd_client *osdc)
+{
+	struct rb_node *n;
+
+	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
+		struct ceph_pg_pool_info *pi =
+		    rb_entry(n, struct ceph_pg_pool_info, node);
+
+		pi->was_full = __pool_full(pi);
 	}
 }
 
+static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
+{
+	struct ceph_pg_pool_info *pi;
+
+	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
+	if (!pi)
+		return false;
+
+	return pi->was_full && !__pool_full(pi);
+}
+
+static enum calc_target_result
+recalc_linger_target(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_client *osdc = lreq->osdc;
+	enum calc_target_result ct_res;
+
+	ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
+	if (ct_res == CALC_TARGET_NEED_RESEND) {
+		struct ceph_osd *osd;
+
+		osd = lookup_create_osd(osdc, lreq->t.osd, true);
+		if (osd != lreq->osd) {
+			unlink_linger(lreq->osd, lreq);
+			link_linger(osd, lreq);
+		}
+	}
+
+	return ct_res;
+}
+
+/*
+ * Requeue requests whose mapping to an OSD has changed.
+ */
+static void scan_requests(struct ceph_osd *osd,
+			  bool force_resend,
+			  bool cleared_full,
+			  bool check_pool_cleared_full,
+			  struct rb_root *need_resend,
+			  struct list_head *need_resend_linger)
+{
+	struct ceph_osd_client *osdc = osd->o_osdc;
+	struct rb_node *n;
+	bool force_resend_writes;
+
+	for (n = rb_first(&osd->o_linger_requests); n; ) {
+		struct ceph_osd_linger_request *lreq =
+		    rb_entry(n, struct ceph_osd_linger_request, node);
+		enum calc_target_result ct_res;
+
+		n = rb_next(n); /* recalc_linger_target() */
+
+		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
+		     lreq->linger_id);
+		ct_res = recalc_linger_target(lreq);
+		switch (ct_res) {
+		case CALC_TARGET_NO_ACTION:
+			force_resend_writes = cleared_full ||
+			    (check_pool_cleared_full &&
+			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
+			if (!force_resend && !force_resend_writes)
+				break;
+
+			/* fall through */
+		case CALC_TARGET_NEED_RESEND:
+			cancel_linger_map_check(lreq);
+			/*
+			 * scan_requests() for the previous epoch(s)
+			 * may have already added it to the list, since
+			 * it's not unlinked here.
+			 */
+			if (list_empty(&lreq->scan_item))
+				list_add_tail(&lreq->scan_item, need_resend_linger);
+			break;
+		case CALC_TARGET_POOL_DNE:
+			check_linger_pool_dne(lreq);
+			break;
+		}
+	}
+
+	for (n = rb_first(&osd->o_requests); n; ) {
+		struct ceph_osd_request *req =
+		    rb_entry(n, struct ceph_osd_request, r_node);
+		enum calc_target_result ct_res;
+
+		n = rb_next(n); /* unlink_request(), check_pool_dne() */
+
+		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+		ct_res = calc_target(osdc, &req->r_t,
+				     &req->r_last_force_resend, false);
+		switch (ct_res) {
+		case CALC_TARGET_NO_ACTION:
+			force_resend_writes = cleared_full ||
+			    (check_pool_cleared_full &&
+			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
+			if (!force_resend &&
+			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
+			     !force_resend_writes))
+				break;
+
+			/* fall through */
+		case CALC_TARGET_NEED_RESEND:
+			cancel_map_check(req);
+			unlink_request(osd, req);
+			insert_request(need_resend, req);
+			break;
+		case CALC_TARGET_POOL_DNE:
+			check_pool_dne(req);
+			break;
+		}
+	}
+}
+
+static int handle_one_map(struct ceph_osd_client *osdc,
+			  void *p, void *end, bool incremental,
+			  struct rb_root *need_resend,
+			  struct list_head *need_resend_linger)
+{
+	struct ceph_osdmap *newmap;
+	struct rb_node *n;
+	bool skipped_map = false;
+	bool was_full;
+
+	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
+	set_pool_was_full(osdc);
+
+	if (incremental)
+		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
+	else
+		newmap = ceph_osdmap_decode(&p, end);
+	if (IS_ERR(newmap))
+		return PTR_ERR(newmap);
+
+	if (newmap != osdc->osdmap) {
+		/*
+		 * Preserve ->was_full before destroying the old map.
+		 * For pools that weren't in the old map, ->was_full
+		 * should be false.
+		 */
+		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
+			struct ceph_pg_pool_info *pi =
+			    rb_entry(n, struct ceph_pg_pool_info, node);
+			struct ceph_pg_pool_info *old_pi;
+
+			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
+			if (old_pi)
+				pi->was_full = old_pi->was_full;
+			else
+				WARN_ON(pi->was_full);
+		}
+
+		if (osdc->osdmap->epoch &&
+		    osdc->osdmap->epoch + 1 < newmap->epoch) {
+			WARN_ON(incremental);
+			skipped_map = true;
+		}
+
+		ceph_osdmap_destroy(osdc->osdmap);
+		osdc->osdmap = newmap;
+	}
+
+	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
+	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
+		      need_resend, need_resend_linger);
+
+	for (n = rb_first(&osdc->osds); n; ) {
+		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+
+		n = rb_next(n); /* close_osd() */
+
+		scan_requests(osd, skipped_map, was_full, true, need_resend,
+			      need_resend_linger);
+		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
+		    memcmp(&osd->o_con.peer_addr,
+			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
+			   sizeof(struct ceph_entity_addr)))
+			close_osd(osd);
+	}
+
+	return 0;
+}
+
+static void kick_requests(struct ceph_osd_client *osdc,
+			  struct rb_root *need_resend,
+			  struct list_head *need_resend_linger)
+{
+	struct ceph_osd_linger_request *lreq, *nlreq;
+	struct rb_node *n;
+
+	for (n = rb_first(need_resend); n; ) {
+		struct ceph_osd_request *req =
+		    rb_entry(n, struct ceph_osd_request, r_node);
+		struct ceph_osd *osd;
+
+		n = rb_next(n);
+		erase_request(need_resend, req); /* before link_request() */
+
+		WARN_ON(req->r_osd);
+		calc_target(osdc, &req->r_t, NULL, false);
+		osd = lookup_create_osd(osdc, req->r_t.osd, true);
+		link_request(osd, req);
+		if (!req->r_linger) {
+			if (!osd_homeless(osd) && !req->r_t.paused)
+				send_request(req);
+		} else {
+			cancel_linger_request(req);
+		}
+	}
+
+	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
+		if (!osd_homeless(lreq->osd))
+			send_linger(lreq);
+
+		list_del_init(&lreq->scan_item);
+	}
+}
 
 /*
  * Process updated osd map.
@@ -2115,27 +3152,31 @@
  */
 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 {
-	void *p, *end, *next;
+	void *p = msg->front.iov_base;
+	void *const end = p + msg->front.iov_len;
 	u32 nr_maps, maplen;
 	u32 epoch;
-	struct ceph_osdmap *newmap = NULL, *oldmap;
-	int err;
 	struct ceph_fsid fsid;
-	bool was_full;
+	struct rb_root need_resend = RB_ROOT;
+	LIST_HEAD(need_resend_linger);
+	bool handled_incremental = false;
+	bool was_pauserd, was_pausewr;
+	bool pauserd, pausewr;
+	int err;
 
-	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
-	p = msg->front.iov_base;
-	end = p + msg->front.iov_len;
+	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
+	down_write(&osdc->lock);
 
 	/* verify fsid */
 	ceph_decode_need(&p, end, sizeof(fsid), bad);
 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
 	if (ceph_check_fsid(osdc->client, &fsid) < 0)
-		return;
+		goto bad;
 
-	down_write(&osdc->map_sem);
-
-	was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+		      have_pool_full(osdc);
 
 	/* incremental maps */
 	ceph_decode_32_safe(&p, end, nr_maps, bad);
@@ -2145,34 +3186,23 @@
 		epoch = ceph_decode_32(&p);
 		maplen = ceph_decode_32(&p);
 		ceph_decode_need(&p, end, maplen, bad);
-		next = p + maplen;
-		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
+		if (osdc->osdmap->epoch &&
+		    osdc->osdmap->epoch + 1 == epoch) {
 			dout("applying incremental map %u len %d\n",
 			     epoch, maplen);
-			newmap = osdmap_apply_incremental(&p, next,
-							  osdc->osdmap,
-							  &osdc->client->msgr);
-			if (IS_ERR(newmap)) {
-				err = PTR_ERR(newmap);
+			err = handle_one_map(osdc, p, p + maplen, true,
+					     &need_resend, &need_resend_linger);
+			if (err)
 				goto bad;
-			}
-			BUG_ON(!newmap);
-			if (newmap != osdc->osdmap) {
-				ceph_osdmap_destroy(osdc->osdmap);
-				osdc->osdmap = newmap;
-			}
-			was_full = was_full ||
-				ceph_osdmap_flag(osdc->osdmap,
-						 CEPH_OSDMAP_FULL);
-			kick_requests(osdc, 0, was_full);
+			handled_incremental = true;
 		} else {
 			dout("ignoring incremental map %u len %d\n",
 			     epoch, maplen);
 		}
-		p = next;
+		p += maplen;
 		nr_maps--;
 	}
-	if (newmap)
+	if (handled_incremental)
 		goto done;
 
 	/* full maps */
@@ -2186,205 +3216,99 @@
 		if (nr_maps > 1) {
 			dout("skipping non-latest full map %u len %d\n",
 			     epoch, maplen);
-		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
+		} else if (osdc->osdmap->epoch >= epoch) {
 			dout("skipping full map %u len %d, "
 			     "older than our %u\n", epoch, maplen,
 			     osdc->osdmap->epoch);
 		} else {
-			int skipped_map = 0;
-
 			dout("taking full map %u len %d\n", epoch, maplen);
-			newmap = ceph_osdmap_decode(&p, p+maplen);
-			if (IS_ERR(newmap)) {
-				err = PTR_ERR(newmap);
+			err = handle_one_map(osdc, p, p + maplen, false,
+					     &need_resend, &need_resend_linger);
+			if (err)
 				goto bad;
-			}
-			BUG_ON(!newmap);
-			oldmap = osdc->osdmap;
-			osdc->osdmap = newmap;
-			if (oldmap) {
-				if (oldmap->epoch + 1 < newmap->epoch)
-					skipped_map = 1;
-				ceph_osdmap_destroy(oldmap);
-			}
-			was_full = was_full ||
-				ceph_osdmap_flag(osdc->osdmap,
-						 CEPH_OSDMAP_FULL);
-			kick_requests(osdc, skipped_map, was_full);
 		}
 		p += maplen;
 		nr_maps--;
 	}
 
-	if (!osdc->osdmap)
-		goto bad;
 done:
-	downgrade_write(&osdc->map_sem);
-	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
-			  osdc->osdmap->epoch);
-
 	/*
 	 * subscribe to subsequent osdmap updates if full to ensure
 	 * we find out when we are no longer full and stop returning
 	 * ENOSPC.
 	 */
-	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
-		ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
-		ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
-		ceph_monc_request_next_osdmap(&osdc->client->monc);
+	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+		  have_pool_full(osdc);
+	if (was_pauserd || was_pausewr || pauserd || pausewr)
+		maybe_request_map(osdc);
 
-	mutex_lock(&osdc->request_mutex);
-	__send_queued(osdc);
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
+	kick_requests(osdc, &need_resend, &need_resend_linger);
+
+	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
+			  osdc->osdmap->epoch);
+	up_write(&osdc->lock);
 	wake_up_all(&osdc->client->auth_wq);
 	return;
 
 bad:
 	pr_err("osdc handle_map corrupt msg\n");
 	ceph_msg_dump(msg);
-	up_write(&osdc->map_sem);
+	up_write(&osdc->lock);
 }
 
 /*
- * watch/notify callback event infrastructure
- *
- * These callbacks are used both for watch and notify operations.
+ * Resubmit requests pending on the given osd.
  */
-static void __release_event(struct kref *kref)
+static void kick_osd_requests(struct ceph_osd *osd)
 {
-	struct ceph_osd_event *event =
-		container_of(kref, struct ceph_osd_event, kref);
+	struct rb_node *n;
 
-	dout("__release_event %p\n", event);
-	kfree(event);
-}
+	for (n = rb_first(&osd->o_requests); n; ) {
+		struct ceph_osd_request *req =
+		    rb_entry(n, struct ceph_osd_request, r_node);
 
-static void get_event(struct ceph_osd_event *event)
-{
-	kref_get(&event->kref);
-}
+		n = rb_next(n); /* cancel_linger_request() */
 
-void ceph_osdc_put_event(struct ceph_osd_event *event)
-{
-	kref_put(&event->kref, __release_event);
-}
-EXPORT_SYMBOL(ceph_osdc_put_event);
-
-static void __insert_event(struct ceph_osd_client *osdc,
-			     struct ceph_osd_event *new)
-{
-	struct rb_node **p = &osdc->event_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct ceph_osd_event *event = NULL;
-
-	while (*p) {
-		parent = *p;
-		event = rb_entry(parent, struct ceph_osd_event, node);
-		if (new->cookie < event->cookie)
-			p = &(*p)->rb_left;
-		else if (new->cookie > event->cookie)
-			p = &(*p)->rb_right;
-		else
-			BUG();
+		if (!req->r_linger) {
+			if (!req->r_t.paused)
+				send_request(req);
+		} else {
+			cancel_linger_request(req);
+		}
 	}
+	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
+		struct ceph_osd_linger_request *lreq =
+		    rb_entry(n, struct ceph_osd_linger_request, node);
 
-	rb_link_node(&new->node, parent, p);
-	rb_insert_color(&new->node, &osdc->event_tree);
-}
-
-static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
-					        u64 cookie)
-{
-	struct rb_node **p = &osdc->event_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct ceph_osd_event *event = NULL;
-
-	while (*p) {
-		parent = *p;
-		event = rb_entry(parent, struct ceph_osd_event, node);
-		if (cookie < event->cookie)
-			p = &(*p)->rb_left;
-		else if (cookie > event->cookie)
-			p = &(*p)->rb_right;
-		else
-			return event;
-	}
-	return NULL;
-}
-
-static void __remove_event(struct ceph_osd_event *event)
-{
-	struct ceph_osd_client *osdc = event->osdc;
-
-	if (!RB_EMPTY_NODE(&event->node)) {
-		dout("__remove_event removed %p\n", event);
-		rb_erase(&event->node, &osdc->event_tree);
-		ceph_osdc_put_event(event);
-	} else {
-		dout("__remove_event didn't remove %p\n", event);
+		send_linger(lreq);
 	}
 }
 
-int ceph_osdc_create_event(struct ceph_osd_client *osdc,
-			   void (*event_cb)(u64, u64, u8, void *),
-			   void *data, struct ceph_osd_event **pevent)
+/*
+ * If the osd connection drops, we need to resubmit all requests.
+ */
+static void osd_fault(struct ceph_connection *con)
 {
-	struct ceph_osd_event *event;
+	struct ceph_osd *osd = con->private;
+	struct ceph_osd_client *osdc = osd->o_osdc;
 
-	event = kmalloc(sizeof(*event), GFP_NOIO);
-	if (!event)
-		return -ENOMEM;
+	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
 
-	dout("create_event %p\n", event);
-	event->cb = event_cb;
-	event->one_shot = 0;
-	event->data = data;
-	event->osdc = osdc;
-	INIT_LIST_HEAD(&event->osd_node);
-	RB_CLEAR_NODE(&event->node);
-	kref_init(&event->kref);   /* one ref for us */
-	kref_get(&event->kref);    /* one ref for the caller */
+	down_write(&osdc->lock);
+	if (!osd_registered(osd)) {
+		dout("%s osd%d unknown\n", __func__, osd->o_osd);
+		goto out_unlock;
+	}
 
-	spin_lock(&osdc->event_lock);
-	event->cookie = ++osdc->event_count;
-	__insert_event(osdc, event);
-	spin_unlock(&osdc->event_lock);
+	if (!reopen_osd(osd))
+		kick_osd_requests(osd);
+	maybe_request_map(osdc);
 
-	*pevent = event;
-	return 0;
+out_unlock:
+	up_write(&osdc->lock);
 }
-EXPORT_SYMBOL(ceph_osdc_create_event);
-
-void ceph_osdc_cancel_event(struct ceph_osd_event *event)
-{
-	struct ceph_osd_client *osdc = event->osdc;
-
-	dout("cancel_event %p\n", event);
-	spin_lock(&osdc->event_lock);
-	__remove_event(event);
-	spin_unlock(&osdc->event_lock);
-	ceph_osdc_put_event(event); /* caller's */
-}
-EXPORT_SYMBOL(ceph_osdc_cancel_event);
-
-
-static void do_event_work(struct work_struct *work)
-{
-	struct ceph_osd_event_work *event_work =
-		container_of(work, struct ceph_osd_event_work, work);
-	struct ceph_osd_event *event = event_work->event;
-	u64 ver = event_work->ver;
-	u64 notify_id = event_work->notify_id;
-	u8 opcode = event_work->opcode;
-
-	dout("do_event_work completing %p\n", event);
-	event->cb(ver, notify_id, opcode, event->data);
-	dout("do_event_work completed %p\n", event);
-	ceph_osdc_put_event(event);
-	kfree(event_work);
-}
-
 
 /*
  * Process osd watch notifications
@@ -2392,47 +3316,97 @@
 static void handle_watch_notify(struct ceph_osd_client *osdc,
 				struct ceph_msg *msg)
 {
-	void *p, *end;
-	u8 proto_ver;
-	u64 cookie, ver, notify_id;
-	u8 opcode;
-	struct ceph_osd_event *event;
-	struct ceph_osd_event_work *event_work;
-
-	p = msg->front.iov_base;
-	end = p + msg->front.iov_len;
+	void *p = msg->front.iov_base;
+	void *const end = p + msg->front.iov_len;
+	struct ceph_osd_linger_request *lreq;
+	struct linger_work *lwork;
+	u8 proto_ver, opcode;
+	u64 cookie, notify_id;
+	u64 notifier_id = 0;
+	s32 return_code = 0;
+	void *payload = NULL;
+	u32 payload_len = 0;
 
 	ceph_decode_8_safe(&p, end, proto_ver, bad);
 	ceph_decode_8_safe(&p, end, opcode, bad);
 	ceph_decode_64_safe(&p, end, cookie, bad);
-	ceph_decode_64_safe(&p, end, ver, bad);
+	p += 8; /* skip ver */
 	ceph_decode_64_safe(&p, end, notify_id, bad);
 
-	spin_lock(&osdc->event_lock);
-	event = __find_event(osdc, cookie);
-	if (event) {
-		BUG_ON(event->one_shot);
-		get_event(event);
+	if (proto_ver >= 1) {
+		ceph_decode_32_safe(&p, end, payload_len, bad);
+		ceph_decode_need(&p, end, payload_len, bad);
+		payload = p;
+		p += payload_len;
 	}
-	spin_unlock(&osdc->event_lock);
-	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
-	     cookie, ver, event);
-	if (event) {
-		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
-		if (!event_work) {
-			pr_err("couldn't allocate event_work\n");
-			ceph_osdc_put_event(event);
-			return;
+
+	if (le16_to_cpu(msg->hdr.version) >= 2)
+		ceph_decode_32_safe(&p, end, return_code, bad);
+
+	if (le16_to_cpu(msg->hdr.version) >= 3)
+		ceph_decode_64_safe(&p, end, notifier_id, bad);
+
+	down_read(&osdc->lock);
+	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
+	if (!lreq) {
+		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
+		     cookie);
+		goto out_unlock_osdc;
+	}
+
+	mutex_lock(&lreq->lock);
+	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
+	     opcode, cookie, lreq, lreq->is_watch);
+	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
+		if (!lreq->last_error) {
+			lreq->last_error = -ENOTCONN;
+			queue_watch_error(lreq);
 		}
-		INIT_WORK(&event_work->work, do_event_work);
-		event_work->event = event;
-		event_work->ver = ver;
-		event_work->notify_id = notify_id;
-		event_work->opcode = opcode;
+	} else if (!lreq->is_watch) {
+		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
+		if (lreq->notify_id && lreq->notify_id != notify_id) {
+			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
+			     lreq->notify_id, notify_id);
+		} else if (!completion_done(&lreq->notify_finish_wait)) {
+			struct ceph_msg_data *data =
+			    list_first_entry_or_null(&msg->data,
+						     struct ceph_msg_data,
+						     links);
 
-		queue_work(osdc->notify_wq, &event_work->work);
+			if (data) {
+				if (lreq->preply_pages) {
+					WARN_ON(data->type !=
+							CEPH_MSG_DATA_PAGES);
+					*lreq->preply_pages = data->pages;
+					*lreq->preply_len = data->length;
+				} else {
+					ceph_release_page_vector(data->pages,
+					       calc_pages_for(0, data->length));
+				}
+			}
+			lreq->notify_finish_error = return_code;
+			complete_all(&lreq->notify_finish_wait);
+		}
+	} else {
+		/* CEPH_WATCH_EVENT_NOTIFY */
+		lwork = lwork_alloc(lreq, do_watch_notify);
+		if (!lwork) {
+			pr_err("failed to allocate notify-lwork\n");
+			goto out_unlock_lreq;
+		}
+
+		lwork->notify.notify_id = notify_id;
+		lwork->notify.notifier_id = notifier_id;
+		lwork->notify.payload = payload;
+		lwork->notify.payload_len = payload_len;
+		lwork->notify.msg = ceph_msg_get(msg);
+		lwork_queue(lwork);
 	}
 
+out_unlock_lreq:
+	mutex_unlock(&lreq->lock);
+out_unlock_osdc:
+	up_read(&osdc->lock);
 	return;
 
 bad:
@@ -2440,122 +3414,17 @@
 }
 
 /*
- * build new request AND message
- *
- */
-void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
-				struct ceph_snap_context *snapc, u64 snap_id,
-				struct timespec *mtime)
-{
-	struct ceph_msg *msg = req->r_request;
-	void *p;
-	size_t msg_size;
-	int flags = req->r_flags;
-	u64 data_len;
-	unsigned int i;
-
-	req->r_snapid = snap_id;
-	req->r_snapc = ceph_get_snap_context(snapc);
-
-	/* encode request */
-	msg->hdr.version = cpu_to_le16(4);
-
-	p = msg->front.iov_base;
-	ceph_encode_32(&p, 1);   /* client_inc  is always 1 */
-	req->r_request_osdmap_epoch = p;
-	p += 4;
-	req->r_request_flags = p;
-	p += 4;
-	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
-		ceph_encode_timespec(p, mtime);
-	p += sizeof(struct ceph_timespec);
-	req->r_request_reassert_version = p;
-	p += sizeof(struct ceph_eversion); /* will get filled in */
-
-	/* oloc */
-	ceph_encode_8(&p, 4);
-	ceph_encode_8(&p, 4);
-	ceph_encode_32(&p, 8 + 4 + 4);
-	req->r_request_pool = p;
-	p += 8;
-	ceph_encode_32(&p, -1);  /* preferred */
-	ceph_encode_32(&p, 0);   /* key len */
-
-	ceph_encode_8(&p, 1);
-	req->r_request_pgid = p;
-	p += 8 + 4;
-	ceph_encode_32(&p, -1);  /* preferred */
-
-	/* oid */
-	ceph_encode_32(&p, req->r_base_oid.name_len);
-	memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len);
-	dout("oid '%.*s' len %d\n", req->r_base_oid.name_len,
-	     req->r_base_oid.name, req->r_base_oid.name_len);
-	p += req->r_base_oid.name_len;
-
-	/* ops--can imply data */
-	ceph_encode_16(&p, (u16)req->r_num_ops);
-	data_len = 0;
-	for (i = 0; i < req->r_num_ops; i++) {
-		data_len += osd_req_encode_op(req, p, i);
-		p += sizeof(struct ceph_osd_op);
-	}
-
-	/* snaps */
-	ceph_encode_64(&p, req->r_snapid);
-	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
-	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
-	if (req->r_snapc) {
-		for (i = 0; i < snapc->num_snaps; i++) {
-			ceph_encode_64(&p, req->r_snapc->snaps[i]);
-		}
-	}
-
-	req->r_request_attempts = p;
-	p += 4;
-
-	/* data */
-	if (flags & CEPH_OSD_FLAG_WRITE) {
-		u16 data_off;
-
-		/*
-		 * The header "data_off" is a hint to the receiver
-		 * allowing it to align received data into its
-		 * buffers such that there's no need to re-copy
-		 * it before writing it to disk (direct I/O).
-		 */
-		data_off = (u16) (off & 0xffff);
-		req->r_request->hdr.data_off = cpu_to_le16(data_off);
-	}
-	req->r_request->hdr.data_len = cpu_to_le32(data_len);
-
-	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
-	msg_size = p - msg->front.iov_base;
-	msg->front.iov_len = msg_size;
-	msg->hdr.front_len = cpu_to_le32(msg_size);
-
-	dout("build_request msg_size was %d\n", (int)msg_size);
-}
-EXPORT_SYMBOL(ceph_osdc_build_request);
-
-/*
  * Register request, send initial attempt.
  */
 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 			    struct ceph_osd_request *req,
 			    bool nofail)
 {
-	int rc;
+	down_read(&osdc->lock);
+	submit_request(req, false);
+	up_read(&osdc->lock);
 
-	down_read(&osdc->map_sem);
-	mutex_lock(&osdc->request_mutex);
-
-	rc = __ceph_osdc_start_request(osdc, req, nofail);
-
-	mutex_unlock(&osdc->request_mutex);
-	up_read(&osdc->map_sem);
-
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL(ceph_osdc_start_request);
 
@@ -2568,37 +3437,44 @@
 {
 	struct ceph_osd_client *osdc = req->r_osdc;
 
-	mutex_lock(&osdc->request_mutex);
-	if (req->r_linger)
-		__unregister_linger_request(osdc, req);
-	__unregister_request(osdc, req);
-	mutex_unlock(&osdc->request_mutex);
-
-	dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid);
+	down_write(&osdc->lock);
+	if (req->r_osd)
+		cancel_request(req);
+	up_write(&osdc->lock);
 }
 EXPORT_SYMBOL(ceph_osdc_cancel_request);
 
 /*
+ * @timeout: in jiffies, 0 means "wait forever"
+ */
+static int wait_request_timeout(struct ceph_osd_request *req,
+				unsigned long timeout)
+{
+	long left;
+
+	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+	left = wait_for_completion_killable_timeout(&req->r_completion,
+						ceph_timeout_jiffies(timeout));
+	if (left <= 0) {
+		left = left ?: -ETIMEDOUT;
+		ceph_osdc_cancel_request(req);
+
+		/* kludge - need to wake ceph_osdc_sync() */
+		complete_all(&req->r_safe_completion);
+	} else {
+		left = req->r_result; /* completed */
+	}
+
+	return left;
+}
+
+/*
  * wait for a request to complete
  */
 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
 			   struct ceph_osd_request *req)
 {
-	int rc;
-
-	dout("%s %p tid %llu\n", __func__, req, req->r_tid);
-
-	rc = wait_for_completion_interruptible(&req->r_completion);
-	if (rc < 0) {
-		dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid);
-		ceph_osdc_cancel_request(req);
-		complete_request(req);
-		return rc;
-	}
-
-	dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid,
-	     req->r_result);
-	return req->r_result;
+	return wait_request_timeout(req, 0);
 }
 EXPORT_SYMBOL(ceph_osdc_wait_request);
 
@@ -2607,35 +3483,381 @@
  */
 void ceph_osdc_sync(struct ceph_osd_client *osdc)
 {
-	struct ceph_osd_request *req;
-	u64 last_tid, next_tid = 0;
+	struct rb_node *n, *p;
+	u64 last_tid = atomic64_read(&osdc->last_tid);
 
-	mutex_lock(&osdc->request_mutex);
-	last_tid = osdc->last_tid;
-	while (1) {
-		req = __lookup_request_ge(osdc, next_tid);
-		if (!req)
-			break;
-		if (req->r_tid > last_tid)
-			break;
+again:
+	down_read(&osdc->lock);
+	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
 
-		next_tid = req->r_tid + 1;
-		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
-			continue;
+		mutex_lock(&osd->lock);
+		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
+			struct ceph_osd_request *req =
+			    rb_entry(p, struct ceph_osd_request, r_node);
 
-		ceph_osdc_get_request(req);
-		mutex_unlock(&osdc->request_mutex);
-		dout("sync waiting on tid %llu (last is %llu)\n",
-		     req->r_tid, last_tid);
-		wait_for_completion(&req->r_safe_completion);
-		mutex_lock(&osdc->request_mutex);
-		ceph_osdc_put_request(req);
+			if (req->r_tid > last_tid)
+				break;
+
+			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
+				continue;
+
+			ceph_osdc_get_request(req);
+			mutex_unlock(&osd->lock);
+			up_read(&osdc->lock);
+			dout("%s waiting on req %p tid %llu last_tid %llu\n",
+			     __func__, req, req->r_tid, last_tid);
+			wait_for_completion(&req->r_safe_completion);
+			ceph_osdc_put_request(req);
+			goto again;
+		}
+
+		mutex_unlock(&osd->lock);
 	}
-	mutex_unlock(&osdc->request_mutex);
-	dout("sync done (thru tid %llu)\n", last_tid);
+
+	up_read(&osdc->lock);
+	dout("%s done last_tid %llu\n", __func__, last_tid);
 }
 EXPORT_SYMBOL(ceph_osdc_sync);
 
+static struct ceph_osd_request *
+alloc_linger_request(struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_osd_request *req;
+
+	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
+	if (!req)
+		return NULL;
+
+	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+
+	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
+		ceph_osdc_put_request(req);
+		return NULL;
+	}
+
+	return req;
+}
+
+/*
+ * Returns a handle, caller owns a ref.
+ */
+struct ceph_osd_linger_request *
+ceph_osdc_watch(struct ceph_osd_client *osdc,
+		struct ceph_object_id *oid,
+		struct ceph_object_locator *oloc,
+		rados_watchcb2_t wcb,
+		rados_watcherrcb_t errcb,
+		void *data)
+{
+	struct ceph_osd_linger_request *lreq;
+	int ret;
+
+	lreq = linger_alloc(osdc);
+	if (!lreq)
+		return ERR_PTR(-ENOMEM);
+
+	lreq->is_watch = true;
+	lreq->wcb = wcb;
+	lreq->errcb = errcb;
+	lreq->data = data;
+	lreq->watch_valid_thru = jiffies;
+
+	ceph_oid_copy(&lreq->t.base_oid, oid);
+	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
+	lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+	lreq->mtime = CURRENT_TIME;
+
+	lreq->reg_req = alloc_linger_request(lreq);
+	if (!lreq->reg_req) {
+		ret = -ENOMEM;
+		goto err_put_lreq;
+	}
+
+	lreq->ping_req = alloc_linger_request(lreq);
+	if (!lreq->ping_req) {
+		ret = -ENOMEM;
+		goto err_put_lreq;
+	}
+
+	down_write(&osdc->lock);
+	linger_register(lreq); /* before osd_req_op_* */
+	osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
+			      CEPH_OSD_WATCH_OP_WATCH);
+	osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
+			      CEPH_OSD_WATCH_OP_PING);
+	linger_submit(lreq);
+	up_write(&osdc->lock);
+
+	ret = linger_reg_commit_wait(lreq);
+	if (ret) {
+		linger_cancel(lreq);
+		goto err_put_lreq;
+	}
+
+	return lreq;
+
+err_put_lreq:
+	linger_put(lreq);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ceph_osdc_watch);
+
+/*
+ * Releases a ref.
+ *
+ * Times out after mount_timeout to preserve rbd unmap behaviour
+ * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
+ * with mount_timeout").
+ */
+int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
+		      struct ceph_osd_linger_request *lreq)
+{
+	struct ceph_options *opts = osdc->client->options;
+	struct ceph_osd_request *req;
+	int ret;
+
+	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
+	if (!req)
+		return -ENOMEM;
+
+	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+	req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+	req->r_mtime = CURRENT_TIME;
+	osd_req_op_watch_init(req, 0, lreq->linger_id,
+			      CEPH_OSD_WATCH_OP_UNWATCH);
+
+	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+	if (ret)
+		goto out_put_req;
+
+	ceph_osdc_start_request(osdc, req, false);
+	linger_cancel(lreq);
+	linger_put(lreq);
+	ret = wait_request_timeout(req, opts->mount_timeout);
+
+out_put_req:
+	ceph_osdc_put_request(req);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_unwatch);
+
+static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
+				      u64 notify_id, u64 cookie, void *payload,
+				      size_t payload_len)
+{
+	struct ceph_osd_req_op *op;
+	struct ceph_pagelist *pl;
+	int ret;
+
+	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
+
+	pl = kmalloc(sizeof(*pl), GFP_NOIO);
+	if (!pl)
+		return -ENOMEM;
+
+	ceph_pagelist_init(pl);
+	ret = ceph_pagelist_encode_64(pl, notify_id);
+	ret |= ceph_pagelist_encode_64(pl, cookie);
+	if (payload) {
+		ret |= ceph_pagelist_encode_32(pl, payload_len);
+		ret |= ceph_pagelist_append(pl, payload, payload_len);
+	} else {
+		ret |= ceph_pagelist_encode_32(pl, 0);
+	}
+	if (ret) {
+		ceph_pagelist_release(pl);
+		return -ENOMEM;
+	}
+
+	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
+	op->indata_len = pl->length;
+	return 0;
+}
+
+int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
+			 struct ceph_object_id *oid,
+			 struct ceph_object_locator *oloc,
+			 u64 notify_id,
+			 u64 cookie,
+			 void *payload,
+			 size_t payload_len)
+{
+	struct ceph_osd_request *req;
+	int ret;
+
+	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
+	if (!req)
+		return -ENOMEM;
+
+	ceph_oid_copy(&req->r_base_oid, oid);
+	ceph_oloc_copy(&req->r_base_oloc, oloc);
+	req->r_flags = CEPH_OSD_FLAG_READ;
+
+	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+	if (ret)
+		goto out_put_req;
+
+	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
+					 payload_len);
+	if (ret)
+		goto out_put_req;
+
+	ceph_osdc_start_request(osdc, req, false);
+	ret = ceph_osdc_wait_request(osdc, req);
+
+out_put_req:
+	ceph_osdc_put_request(req);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_notify_ack);
+
+static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
+				  u64 cookie, u32 prot_ver, u32 timeout,
+				  void *payload, size_t payload_len)
+{
+	struct ceph_osd_req_op *op;
+	struct ceph_pagelist *pl;
+	int ret;
+
+	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
+	op->notify.cookie = cookie;
+
+	pl = kmalloc(sizeof(*pl), GFP_NOIO);
+	if (!pl)
+		return -ENOMEM;
+
+	ceph_pagelist_init(pl);
+	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
+	ret |= ceph_pagelist_encode_32(pl, timeout);
+	ret |= ceph_pagelist_encode_32(pl, payload_len);
+	ret |= ceph_pagelist_append(pl, payload, payload_len);
+	if (ret) {
+		ceph_pagelist_release(pl);
+		return -ENOMEM;
+	}
+
+	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
+	op->indata_len = pl->length;
+	return 0;
+}
+
+/*
+ * @timeout: in seconds
+ *
+ * @preply_{pages,len} are initialized both on success and error.
+ * The caller is responsible for:
+ *
+ *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
+ */
+int ceph_osdc_notify(struct ceph_osd_client *osdc,
+		     struct ceph_object_id *oid,
+		     struct ceph_object_locator *oloc,
+		     void *payload,
+		     size_t payload_len,
+		     u32 timeout,
+		     struct page ***preply_pages,
+		     size_t *preply_len)
+{
+	struct ceph_osd_linger_request *lreq;
+	struct page **pages;
+	int ret;
+
+	WARN_ON(!timeout);
+	if (preply_pages) {
+		*preply_pages = NULL;
+		*preply_len = 0;
+	}
+
+	lreq = linger_alloc(osdc);
+	if (!lreq)
+		return -ENOMEM;
+
+	lreq->preply_pages = preply_pages;
+	lreq->preply_len = preply_len;
+
+	ceph_oid_copy(&lreq->t.base_oid, oid);
+	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
+	lreq->t.flags = CEPH_OSD_FLAG_READ;
+
+	lreq->reg_req = alloc_linger_request(lreq);
+	if (!lreq->reg_req) {
+		ret = -ENOMEM;
+		goto out_put_lreq;
+	}
+
+	/* for notify_id */
+	pages = ceph_alloc_page_vector(1, GFP_NOIO);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto out_put_lreq;
+	}
+
+	down_write(&osdc->lock);
+	linger_register(lreq); /* before osd_req_op_* */
+	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
+				     timeout, payload, payload_len);
+	if (ret) {
+		linger_unregister(lreq);
+		up_write(&osdc->lock);
+		ceph_release_page_vector(pages, 1);
+		goto out_put_lreq;
+	}
+	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
+						 response_data),
+				 pages, PAGE_SIZE, 0, false, true);
+	linger_submit(lreq);
+	up_write(&osdc->lock);
+
+	ret = linger_reg_commit_wait(lreq);
+	if (!ret)
+		ret = linger_notify_finish_wait(lreq);
+	else
+		dout("lreq %p failed to initiate notify %d\n", lreq, ret);
+
+	linger_cancel(lreq);
+out_put_lreq:
+	linger_put(lreq);
+	return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_notify);
+
+/*
+ * Return the number of milliseconds since the watch was last
+ * confirmed, or an error.  If there is an error, the watch is no
+ * longer valid, and should be destroyed with ceph_osdc_unwatch().
+ */
+int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
+			  struct ceph_osd_linger_request *lreq)
+{
+	unsigned long stamp, age;
+	int ret;
+
+	down_read(&osdc->lock);
+	mutex_lock(&lreq->lock);
+	stamp = lreq->watch_valid_thru;
+	if (!list_empty(&lreq->pending_lworks)) {
+		struct linger_work *lwork =
+		    list_first_entry(&lreq->pending_lworks,
+				     struct linger_work,
+				     pending_item);
+
+		if (time_before(lwork->queued_stamp, stamp))
+			stamp = lwork->queued_stamp;
+	}
+	age = jiffies - stamp;
+	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
+	     lreq, lreq->linger_id, age, lreq->last_error);
+	/* we are truncating to msecs, so return a safe upper bound */
+	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
+
+	mutex_unlock(&lreq->lock);
+	up_read(&osdc->lock);
+	return ret;
+}
+
 /*
  * Call all pending notify callbacks - for use after a watch is
  * unregistered, to make sure no more callbacks for it will be invoked
@@ -2646,6 +3868,13 @@
 }
 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
 
+void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
+{
+	down_read(&osdc->lock);
+	maybe_request_map(osdc);
+	up_read(&osdc->lock);
+}
+EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
 
 /*
  * init, shutdown
@@ -2656,43 +3885,35 @@
 
 	dout("init\n");
 	osdc->client = client;
-	osdc->osdmap = NULL;
-	init_rwsem(&osdc->map_sem);
-	init_completion(&osdc->map_waiters);
-	osdc->last_requested_map = 0;
-	mutex_init(&osdc->request_mutex);
-	osdc->last_tid = 0;
+	init_rwsem(&osdc->lock);
 	osdc->osds = RB_ROOT;
 	INIT_LIST_HEAD(&osdc->osd_lru);
-	osdc->requests = RB_ROOT;
-	INIT_LIST_HEAD(&osdc->req_lru);
-	INIT_LIST_HEAD(&osdc->req_unsent);
-	INIT_LIST_HEAD(&osdc->req_notarget);
-	INIT_LIST_HEAD(&osdc->req_linger);
-	osdc->num_requests = 0;
+	spin_lock_init(&osdc->osd_lru_lock);
+	osd_init(&osdc->homeless_osd);
+	osdc->homeless_osd.o_osdc = osdc;
+	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
+	osdc->linger_requests = RB_ROOT;
+	osdc->map_checks = RB_ROOT;
+	osdc->linger_map_checks = RB_ROOT;
 	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
 	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
-	spin_lock_init(&osdc->event_lock);
-	osdc->event_tree = RB_ROOT;
-	osdc->event_count = 0;
-
-	schedule_delayed_work(&osdc->osds_timeout_work,
-	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));
 
 	err = -ENOMEM;
+	osdc->osdmap = ceph_osdmap_alloc();
+	if (!osdc->osdmap)
+		goto out;
+
 	osdc->req_mempool = mempool_create_slab_pool(10,
 						     ceph_osd_request_cache);
 	if (!osdc->req_mempool)
-		goto out;
+		goto out_map;
 
 	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
-				OSD_OP_FRONT_LEN, 10, true,
-				"osd_op");
+				PAGE_SIZE, 10, true, "osd_op");
 	if (err < 0)
 		goto out_mempool;
 	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
-				OSD_OPREPLY_FRONT_LEN, 10, true,
-				"osd_op_reply");
+				PAGE_SIZE, 10, true, "osd_op_reply");
 	if (err < 0)
 		goto out_msgpool;
 
@@ -2701,6 +3922,11 @@
 	if (!osdc->notify_wq)
 		goto out_msgpool_reply;
 
+	schedule_delayed_work(&osdc->timeout_work,
+			      osdc->client->options->osd_keepalive_timeout);
+	schedule_delayed_work(&osdc->osds_timeout_work,
+	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));
+
 	return 0;
 
 out_msgpool_reply:
@@ -2709,6 +3935,8 @@
 	ceph_msgpool_destroy(&osdc->msgpool_op);
 out_mempool:
 	mempool_destroy(osdc->req_mempool);
+out_map:
+	ceph_osdmap_destroy(osdc->osdmap);
 out:
 	return err;
 }
@@ -2719,11 +3947,25 @@
 	destroy_workqueue(osdc->notify_wq);
 	cancel_delayed_work_sync(&osdc->timeout_work);
 	cancel_delayed_work_sync(&osdc->osds_timeout_work);
-	if (osdc->osdmap) {
-		ceph_osdmap_destroy(osdc->osdmap);
-		osdc->osdmap = NULL;
+
+	down_write(&osdc->lock);
+	while (!RB_EMPTY_ROOT(&osdc->osds)) {
+		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+						struct ceph_osd, o_node);
+		close_osd(osd);
 	}
-	remove_all_osds(osdc);
+	up_write(&osdc->lock);
+	WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
+	osd_cleanup(&osdc->homeless_osd);
+
+	WARN_ON(!list_empty(&osdc->osd_lru));
+	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
+	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
+	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
+	WARN_ON(atomic_read(&osdc->num_requests));
+	WARN_ON(atomic_read(&osdc->num_homeless));
+
+	ceph_osdmap_destroy(osdc->osdmap);
 	mempool_destroy(osdc->req_mempool);
 	ceph_msgpool_destroy(&osdc->msgpool_op);
 	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
@@ -2752,15 +3994,12 @@
 		return PTR_ERR(req);
 
 	/* it may be a short read due to an object boundary */
-
 	osd_req_op_extent_osd_data_pages(req, 0,
 				pages, *plen, page_align, false, false);
 
 	dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
 	     off, *plen, *plen, page_align);
 
-	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
-
 	rc = ceph_osdc_start_request(osdc, req, false);
 	if (!rc)
 		rc = ceph_osdc_wait_request(osdc, req);
@@ -2786,7 +4025,6 @@
 	int rc = 0;
 	int page_align = off & ~PAGE_MASK;
 
-	BUG_ON(vino.snap != CEPH_NOSNAP);	/* snapshots aren't writeable */
 	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 				    CEPH_OSD_OP_WRITE,
 				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
@@ -2800,8 +4038,7 @@
 				false, false);
 	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
 
-	ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);
-
+	req->r_mtime = *mtime;
 	rc = ceph_osdc_start_request(osdc, req, true);
 	if (!rc)
 		rc = ceph_osdc_wait_request(osdc, req);
@@ -2841,19 +4078,15 @@
 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
 	struct ceph_osd *osd = con->private;
-	struct ceph_osd_client *osdc;
+	struct ceph_osd_client *osdc = osd->o_osdc;
 	int type = le16_to_cpu(msg->hdr.type);
 
-	if (!osd)
-		goto out;
-	osdc = osd->o_osdc;
-
 	switch (type) {
 	case CEPH_MSG_OSD_MAP:
 		ceph_osdc_handle_map(osdc, msg);
 		break;
 	case CEPH_MSG_OSD_OPREPLY:
-		handle_reply(osdc, msg);
+		handle_reply(osd, msg);
 		break;
 	case CEPH_MSG_WATCH_NOTIFY:
 		handle_watch_notify(osdc, msg);
@@ -2863,7 +4096,7 @@
 		pr_err("received unknown message type %d %s\n", type,
 		       ceph_msg_type_name(type));
 	}
-out:
+
 	ceph_msg_put(msg);
 }
 
@@ -2878,21 +4111,27 @@
 {
 	struct ceph_osd *osd = con->private;
 	struct ceph_osd_client *osdc = osd->o_osdc;
-	struct ceph_msg *m;
+	struct ceph_msg *m = NULL;
 	struct ceph_osd_request *req;
 	int front_len = le32_to_cpu(hdr->front_len);
 	int data_len = le32_to_cpu(hdr->data_len);
-	u64 tid;
+	u64 tid = le64_to_cpu(hdr->tid);
 
-	tid = le64_to_cpu(hdr->tid);
-	mutex_lock(&osdc->request_mutex);
-	req = __lookup_request(osdc, tid);
+	down_read(&osdc->lock);
+	if (!osd_registered(osd)) {
+		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
+		*skip = 1;
+		goto out_unlock_osdc;
+	}
+	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
+
+	mutex_lock(&osd->lock);
+	req = lookup_request(&osd->o_requests, tid);
 	if (!req) {
 		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
 		     osd->o_osd, tid);
-		m = NULL;
 		*skip = 1;
-		goto out;
+		goto out_unlock_session;
 	}
 
 	ceph_msg_revoke_incoming(req->r_reply);
@@ -2904,7 +4143,7 @@
 		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
 				 false);
 		if (!m)
-			goto out;
+			goto out_unlock_session;
 		ceph_msg_put(req->r_reply);
 		req->r_reply = m;
 	}
@@ -2915,14 +4154,49 @@
 			req->r_reply->data_length);
 		m = NULL;
 		*skip = 1;
-		goto out;
+		goto out_unlock_session;
 	}
 
 	m = ceph_msg_get(req->r_reply);
 	dout("get_reply tid %lld %p\n", tid, m);
 
-out:
-	mutex_unlock(&osdc->request_mutex);
+out_unlock_session:
+	mutex_unlock(&osd->lock);
+out_unlock_osdc:
+	up_read(&osdc->lock);
+	return m;
+}
+
+/*
+ * TODO: switch to a msg-owned pagelist
+ */
+static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
+{
+	struct ceph_msg *m;
+	int type = le16_to_cpu(hdr->type);
+	u32 front_len = le32_to_cpu(hdr->front_len);
+	u32 data_len = le32_to_cpu(hdr->data_len);
+
+	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
+	if (!m)
+		return NULL;
+
+	if (data_len) {
+		struct page **pages;
+		struct ceph_osd_data osd_data;
+
+		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
+					       GFP_NOIO);
+		if (IS_ERR(pages)) {
+			ceph_msg_put(m);
+			return NULL;
+		}
+
+		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
+					 false);
+		ceph_osdc_msg_data_add(m, &osd_data);
+	}
+
 	return m;
 }
 
@@ -2932,18 +4206,17 @@
 {
 	struct ceph_osd *osd = con->private;
 	int type = le16_to_cpu(hdr->type);
-	int front = le32_to_cpu(hdr->front_len);
 
 	*skip = 0;
 	switch (type) {
 	case CEPH_MSG_OSD_MAP:
 	case CEPH_MSG_WATCH_NOTIFY:
-		return ceph_msg_new(type, front, GFP_NOFS, false);
+		return alloc_msg_with_page_vector(hdr);
 	case CEPH_MSG_OSD_OPREPLY:
 		return get_reply(con, hdr, skip);
 	default:
-		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
-			osd->o_osd);
+		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
+			osd->o_osd, type);
 		*skip = 1;
 		return NULL;
 	}
@@ -3047,5 +4320,5 @@
 	.alloc_msg = alloc_msg,
 	.sign_message = osd_sign_message,
 	.check_message_signature = osd_check_message_signature,
-	.fault = osd_reset,
+	.fault = osd_fault,
 };
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 243574c..03062bb 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -380,23 +380,24 @@
 	return ERR_PTR(err);
 }
 
+int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
+{
+	if (lhs->pool < rhs->pool)
+		return -1;
+	if (lhs->pool > rhs->pool)
+		return 1;
+	if (lhs->seed < rhs->seed)
+		return -1;
+	if (lhs->seed > rhs->seed)
+		return 1;
+
+	return 0;
+}
+
 /*
  * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
  * to a set of osds) and primary_temp (explicit primary setting)
  */
-static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
-{
-	if (l.pool < r.pool)
-		return -1;
-	if (l.pool > r.pool)
-		return 1;
-	if (l.seed < r.seed)
-		return -1;
-	if (l.seed > r.seed)
-		return 1;
-	return 0;
-}
-
 static int __insert_pg_mapping(struct ceph_pg_mapping *new,
 			       struct rb_root *root)
 {
@@ -409,7 +410,7 @@
 	while (*p) {
 		parent = *p;
 		pg = rb_entry(parent, struct ceph_pg_mapping, node);
-		c = pgid_cmp(new->pgid, pg->pgid);
+		c = ceph_pg_compare(&new->pgid, &pg->pgid);
 		if (c < 0)
 			p = &(*p)->rb_left;
 		else if (c > 0)
@@ -432,7 +433,7 @@
 
 	while (n) {
 		pg = rb_entry(n, struct ceph_pg_mapping, node);
-		c = pgid_cmp(pgid, pg->pgid);
+		c = ceph_pg_compare(&pgid, &pg->pgid);
 		if (c < 0) {
 			n = n->rb_left;
 		} else if (c > 0) {
@@ -596,7 +597,9 @@
 	*p += 4;  /* skip crash_replay_interval */
 
 	if (ev >= 7)
-		*p += 1;  /* skip min_size */
+		pi->min_size = ceph_decode_8(p);
+	else
+		pi->min_size = pi->size - pi->size / 2;
 
 	if (ev >= 8)
 		*p += 8 + 8;  /* skip quota_max_* */
@@ -616,6 +619,50 @@
 		pi->write_tier = -1;
 	}
 
+	if (ev >= 10) {
+		/* skip properties */
+		num = ceph_decode_32(p);
+		while (num--) {
+			len = ceph_decode_32(p);
+			*p += len; /* key */
+			len = ceph_decode_32(p);
+			*p += len; /* val */
+		}
+	}
+
+	if (ev >= 11) {
+		/* skip hit_set_params */
+		*p += 1 + 1; /* versions */
+		len = ceph_decode_32(p);
+		*p += len;
+
+		*p += 4; /* skip hit_set_period */
+		*p += 4; /* skip hit_set_count */
+	}
+
+	if (ev >= 12)
+		*p += 4; /* skip stripe_width */
+
+	if (ev >= 13) {
+		*p += 8; /* skip target_max_bytes */
+		*p += 8; /* skip target_max_objects */
+		*p += 4; /* skip cache_target_dirty_ratio_micro */
+		*p += 4; /* skip cache_target_full_ratio_micro */
+		*p += 4; /* skip cache_min_flush_age */
+		*p += 4; /* skip cache_min_evict_age */
+	}
+
+	if (ev >= 14) {
+		/* skip erasure_code_profile */
+		len = ceph_decode_32(p);
+		*p += len;
+	}
+
+	if (ev >= 15)
+		pi->last_force_request_resend = ceph_decode_32(p);
+	else
+		pi->last_force_request_resend = 0;
+
 	/* ignore the rest */
 
 	*p = pool_end;
@@ -660,6 +707,23 @@
 /*
  * osd map
  */
+struct ceph_osdmap *ceph_osdmap_alloc(void)
+{
+	struct ceph_osdmap *map;
+
+	map = kzalloc(sizeof(*map), GFP_NOIO);
+	if (!map)
+		return NULL;
+
+	map->pg_pools = RB_ROOT;
+	map->pool_max = -1;
+	map->pg_temp = RB_ROOT;
+	map->primary_temp = RB_ROOT;
+	mutex_init(&map->crush_scratch_mutex);
+
+	return map;
+}
+
 void ceph_osdmap_destroy(struct ceph_osdmap *map)
 {
 	dout("osdmap_destroy %p\n", map);
@@ -1183,14 +1247,10 @@
 	struct ceph_osdmap *map;
 	int ret;
 
-	map = kzalloc(sizeof(*map), GFP_NOFS);
+	map = ceph_osdmap_alloc();
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	map->pg_temp = RB_ROOT;
-	map->primary_temp = RB_ROOT;
-	mutex_init(&map->crush_scratch_mutex);
-
 	ret = osdmap_decode(p, end, map);
 	if (ret) {
 		ceph_osdmap_destroy(map);
@@ -1204,8 +1264,7 @@
  * decode and apply an incremental map update.
  */
 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
-					     struct ceph_osdmap *map,
-					     struct ceph_messenger *msgr)
+					     struct ceph_osdmap *map)
 {
 	struct crush_map *newcrush = NULL;
 	struct ceph_fsid fsid;
@@ -1381,8 +1440,252 @@
 	return ERR_PTR(err);
 }
 
+void ceph_oid_copy(struct ceph_object_id *dest,
+		   const struct ceph_object_id *src)
+{
+	WARN_ON(!ceph_oid_empty(dest));
 
+	if (src->name != src->inline_name) {
+		/* very rare, see ceph_object_id definition */
+		dest->name = kmalloc(src->name_len + 1,
+				     GFP_NOIO | __GFP_NOFAIL);
+	}
 
+	memcpy(dest->name, src->name, src->name_len + 1);
+	dest->name_len = src->name_len;
+}
+EXPORT_SYMBOL(ceph_oid_copy);
+
+static __printf(2, 0)
+int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
+{
+	int len;
+
+	WARN_ON(!ceph_oid_empty(oid));
+
+	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
+	if (len >= sizeof(oid->inline_name))
+		return len;
+
+	oid->name_len = len;
+	return 0;
+}
+
+/*
+ * If oid doesn't fit into inline buffer, BUG.
+ */
+void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	BUG_ON(oid_printf_vargs(oid, fmt, ap));
+	va_end(ap);
+}
+EXPORT_SYMBOL(ceph_oid_printf);
+
+static __printf(3, 0)
+int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
+		      const char *fmt, va_list ap)
+{
+	va_list aq;
+	int len;
+
+	va_copy(aq, ap);
+	len = oid_printf_vargs(oid, fmt, aq);
+	va_end(aq);
+
+	if (len) {
+		char *external_name;
+
+		external_name = kmalloc(len + 1, gfp);
+		if (!external_name)
+			return -ENOMEM;
+
+		oid->name = external_name;
+		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
+		oid->name_len = len;
+	}
+
+	return 0;
+}
+
+/*
+ * If oid doesn't fit into inline buffer, allocate.
+ */
+int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
+		     const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+EXPORT_SYMBOL(ceph_oid_aprintf);
+
+void ceph_oid_destroy(struct ceph_object_id *oid)
+{
+	if (oid->name != oid->inline_name)
+		kfree(oid->name);
+}
+EXPORT_SYMBOL(ceph_oid_destroy);
+
+/*
+ * osds only
+ */
+static bool __osds_equal(const struct ceph_osds *lhs,
+			 const struct ceph_osds *rhs)
+{
+	if (lhs->size == rhs->size &&
+	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
+		return true;
+
+	return false;
+}
+
+/*
+ * osds + primary
+ */
+static bool osds_equal(const struct ceph_osds *lhs,
+		       const struct ceph_osds *rhs)
+{
+	if (__osds_equal(lhs, rhs) &&
+	    lhs->primary == rhs->primary)
+		return true;
+
+	return false;
+}
+
+static bool osds_valid(const struct ceph_osds *set)
+{
+	/* non-empty set */
+	if (set->size > 0 && set->primary >= 0)
+		return true;
+
+	/* empty can_shift_osds set */
+	if (!set->size && set->primary == -1)
+		return true;
+
+	/* empty !can_shift_osds set - all NONE */
+	if (set->size > 0 && set->primary == -1) {
+		int i;
+
+		for (i = 0; i < set->size; i++) {
+			if (set->osds[i] != CRUSH_ITEM_NONE)
+				break;
+		}
+		if (i == set->size)
+			return true;
+	}
+
+	return false;
+}
+
+void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
+{
+	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
+	dest->size = src->size;
+	dest->primary = src->primary;
+}
+
+static bool is_split(const struct ceph_pg *pgid,
+		     u32 old_pg_num,
+		     u32 new_pg_num)
+{
+	int old_bits = calc_bits_of(old_pg_num);
+	int old_mask = (1 << old_bits) - 1;
+	int n;
+
+	WARN_ON(pgid->seed >= old_pg_num);
+	if (new_pg_num <= old_pg_num)
+		return false;
+
+	for (n = 1; ; n++) {
+		int next_bit = n << (old_bits - 1);
+		u32 s = next_bit | pgid->seed;
+
+		if (s < old_pg_num || s == pgid->seed)
+			continue;
+		if (s >= new_pg_num)
+			break;
+
+		s = ceph_stable_mod(s, old_pg_num, old_mask);
+		if (s == pgid->seed)
+			return true;
+	}
+
+	return false;
+}
+
+bool ceph_is_new_interval(const struct ceph_osds *old_acting,
+			  const struct ceph_osds *new_acting,
+			  const struct ceph_osds *old_up,
+			  const struct ceph_osds *new_up,
+			  int old_size,
+			  int new_size,
+			  int old_min_size,
+			  int new_min_size,
+			  u32 old_pg_num,
+			  u32 new_pg_num,
+			  bool old_sort_bitwise,
+			  bool new_sort_bitwise,
+			  const struct ceph_pg *pgid)
+{
+	return !osds_equal(old_acting, new_acting) ||
+	       !osds_equal(old_up, new_up) ||
+	       old_size != new_size ||
+	       old_min_size != new_min_size ||
+	       is_split(pgid, old_pg_num, new_pg_num) ||
+	       old_sort_bitwise != new_sort_bitwise;
+}
+
+static int calc_pg_rank(int osd, const struct ceph_osds *acting)
+{
+	int i;
+
+	for (i = 0; i < acting->size; i++) {
+		if (acting->osds[i] == osd)
+			return i;
+	}
+
+	return -1;
+}
+
+static bool primary_changed(const struct ceph_osds *old_acting,
+			    const struct ceph_osds *new_acting)
+{
+	if (!old_acting->size && !new_acting->size)
+		return false; /* both still empty */
+
+	if (!old_acting->size ^ !new_acting->size)
+		return true; /* was empty, now not, or vice versa */
+
+	if (old_acting->primary != new_acting->primary)
+		return true; /* primary changed */
+
+	if (calc_pg_rank(old_acting->primary, old_acting) !=
+	    calc_pg_rank(new_acting->primary, new_acting))
+		return true;
+
+	return false; /* same primary (tho replicas may have changed) */
+}
+
+bool ceph_osds_changed(const struct ceph_osds *old_acting,
+		       const struct ceph_osds *new_acting,
+		       bool any_change)
+{
+	if (primary_changed(old_acting, new_acting))
+		return true;
+
+	if (any_change && !__osds_equal(old_acting, new_acting))
+		return true;
+
+	return false;
+}
 
 /*
  * calculate file layout from given offset, length.
@@ -1455,30 +1758,71 @@
 EXPORT_SYMBOL(ceph_calc_file_object_mapping);
 
 /*
- * Calculate mapping of a (oloc, oid) pair to a PG.  Should only be
- * called with target's (oloc, oid), since tiering isn't taken into
- * account.
+ * Map an object into a PG.
+ *
+ * Should only be called with target_oid and target_oloc (as opposed to
+ * base_oid and base_oloc), since tiering isn't taken into account.
  */
-int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
-			struct ceph_object_locator *oloc,
-			struct ceph_object_id *oid,
-			struct ceph_pg *pg_out)
+int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
+			      struct ceph_object_id *oid,
+			      struct ceph_object_locator *oloc,
+			      struct ceph_pg *raw_pgid)
 {
 	struct ceph_pg_pool_info *pi;
 
-	pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
+	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
 	if (!pi)
-		return -EIO;
+		return -ENOENT;
 
-	pg_out->pool = oloc->pool;
-	pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
-				     oid->name_len);
+	raw_pgid->pool = oloc->pool;
+	raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
+				       oid->name_len);
 
-	dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
-	     pg_out->pool, pg_out->seed);
+	dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+	     raw_pgid->pool, raw_pgid->seed);
 	return 0;
 }
-EXPORT_SYMBOL(ceph_oloc_oid_to_pg);
+EXPORT_SYMBOL(ceph_object_locator_to_pg);
+
+/*
+ * Map a raw PG (full precision ps) into an actual PG.
+ */
+static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
+			 const struct ceph_pg *raw_pgid,
+			 struct ceph_pg *pgid)
+{
+	pgid->pool = raw_pgid->pool;
+	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
+				     pi->pg_num_mask);
+}
+
+/*
+ * Map a raw PG (full precision ps) into a placement ps (placement
+ * seed).  Include pool id in that value so that different pools don't
+ * use the same seeds.
+ */
+static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
+			 const struct ceph_pg *raw_pgid)
+{
+	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
+		/* hash pool id and seed so that pool PGs do not overlap */
+		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
+				      ceph_stable_mod(raw_pgid->seed,
+						      pi->pgp_num,
+						      pi->pgp_num_mask),
+				      raw_pgid->pool);
+	} else {
+		/*
+		 * legacy behavior: add ps and pool together.  this is
+		 * not a great approach because the PGs from each pool
+		 * will overlap on top of each other: 0.5 == 1.4 ==
+		 * 2.3 == ...
+		 */
+		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
+				       pi->pgp_num_mask) +
+		       (unsigned)raw_pgid->pool;
+	}
+}
 
 static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
 		    int *result, int result_max,
@@ -1497,84 +1841,92 @@
 }
 
 /*
- * Calculate raw (crush) set for given pgid.
+ * Calculate raw set (CRUSH output) for given PG.  The result may
+ * contain nonexistent OSDs.  ->primary is undefined for a raw set.
  *
- * Return raw set length, or error.
+ * Placement seed (CRUSH input) is returned through @ppps.
  */
-static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
-			  struct ceph_pg_pool_info *pool,
-			  struct ceph_pg pgid, u32 pps, int *osds)
+static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
+			   struct ceph_pg_pool_info *pi,
+			   const struct ceph_pg *raw_pgid,
+			   struct ceph_osds *raw,
+			   u32 *ppps)
 {
+	u32 pps = raw_pg_to_pps(pi, raw_pgid);
 	int ruleno;
 	int len;
 
-	/* crush */
-	ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
-				 pool->type, pool->size);
+	ceph_osds_init(raw);
+	if (ppps)
+		*ppps = pps;
+
+	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
+				 pi->size);
 	if (ruleno < 0) {
 		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
-		       pgid.pool, pool->crush_ruleset, pool->type,
-		       pool->size);
-		return -ENOENT;
+		       pi->id, pi->crush_ruleset, pi->type, pi->size);
+		return;
 	}
 
-	len = do_crush(osdmap, ruleno, pps, osds,
-		       min_t(int, pool->size, CEPH_PG_MAX_SIZE),
+	len = do_crush(osdmap, ruleno, pps, raw->osds,
+		       min_t(int, pi->size, ARRAY_SIZE(raw->osds)),
 		       osdmap->osd_weight, osdmap->max_osd);
 	if (len < 0) {
 		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
-		       len, ruleno, pgid.pool, pool->crush_ruleset,
-		       pool->type, pool->size);
-		return len;
+		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
+		       pi->size);
+		return;
 	}
 
-	return len;
+	raw->size = len;
 }
 
 /*
- * Given raw set, calculate up set and up primary.
+ * Given raw set, calculate up set and up primary.  By definition of an
+ * up set, the result won't contain nonexistent or down OSDs.
  *
- * Return up set length.  *primary is set to up primary osd id, or -1
- * if up set is empty.
+ * This is done in-place - on return @set is the up set.  If it's
+ * empty, ->primary will remain undefined.
  */
-static int raw_to_up_osds(struct ceph_osdmap *osdmap,
-			  struct ceph_pg_pool_info *pool,
-			  int *osds, int len, int *primary)
+static void raw_to_up_osds(struct ceph_osdmap *osdmap,
+			   struct ceph_pg_pool_info *pi,
+			   struct ceph_osds *set)
 {
-	int up_primary = -1;
 	int i;
 
-	if (ceph_can_shift_osds(pool)) {
+	/* ->primary is undefined for a raw set */
+	BUG_ON(set->primary != -1);
+
+	if (ceph_can_shift_osds(pi)) {
 		int removed = 0;
 
-		for (i = 0; i < len; i++) {
-			if (ceph_osd_is_down(osdmap, osds[i])) {
+		/* shift left */
+		for (i = 0; i < set->size; i++) {
+			if (ceph_osd_is_down(osdmap, set->osds[i])) {
 				removed++;
 				continue;
 			}
 			if (removed)
-				osds[i - removed] = osds[i];
+				set->osds[i - removed] = set->osds[i];
 		}
-
-		len -= removed;
-		if (len > 0)
-			up_primary = osds[0];
+		set->size -= removed;
+		if (set->size > 0)
+			set->primary = set->osds[0];
 	} else {
-		for (i = len - 1; i >= 0; i--) {
-			if (ceph_osd_is_down(osdmap, osds[i]))
-				osds[i] = CRUSH_ITEM_NONE;
+		/* set down/dne devices to NONE */
+		for (i = set->size - 1; i >= 0; i--) {
+			if (ceph_osd_is_down(osdmap, set->osds[i]))
+				set->osds[i] = CRUSH_ITEM_NONE;
 			else
-				up_primary = osds[i];
+				set->primary = set->osds[i];
 		}
 	}
-
-	*primary = up_primary;
-	return len;
 }
 
-static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
-				   struct ceph_pg_pool_info *pool,
-				   int *osds, int len, int *primary)
+static void apply_primary_affinity(struct ceph_osdmap *osdmap,
+				   struct ceph_pg_pool_info *pi,
+				   u32 pps,
+				   struct ceph_osds *up)
 {
 	int i;
 	int pos = -1;
@@ -1586,8 +1938,8 @@
 	if (!osdmap->osd_primary_affinity)
 		return;
 
-	for (i = 0; i < len; i++) {
-		int osd = osds[i];
+	for (i = 0; i < up->size; i++) {
+		int osd = up->osds[i];
 
 		if (osd != CRUSH_ITEM_NONE &&
 		    osdmap->osd_primary_affinity[osd] !=
@@ -1595,7 +1947,7 @@
 			break;
 		}
 	}
-	if (i == len)
+	if (i == up->size)
 		return;
 
 	/*
@@ -1603,8 +1955,8 @@
 	 * osd into the hash/rng so that a proportional fraction of an
 	 * osd's pgs get rejected as primary.
 	 */
-	for (i = 0; i < len; i++) {
-		int osd = osds[i];
+	for (i = 0; i < up->size; i++) {
+		int osd = up->osds[i];
 		u32 aff;
 
 		if (osd == CRUSH_ITEM_NONE)
@@ -1629,135 +1981,110 @@
 	if (pos < 0)
 		return;
 
-	*primary = osds[pos];
+	up->primary = up->osds[pos];
 
-	if (ceph_can_shift_osds(pool) && pos > 0) {
+	if (ceph_can_shift_osds(pi) && pos > 0) {
 		/* move the new primary to the front */
 		for (i = pos; i > 0; i--)
-			osds[i] = osds[i - 1];
-		osds[0] = *primary;
+			up->osds[i] = up->osds[i - 1];
+		up->osds[0] = up->primary;
 	}
 }
 
 /*
- * Given up set, apply pg_temp and primary_temp mappings.
+ * Get pg_temp and primary_temp mappings for given PG.
  *
- * Return acting set length.  *primary is set to acting primary osd id,
- * or -1 if acting set is empty.
+ * Note that a PG may have none, only pg_temp, only primary_temp or
+ * both pg_temp and primary_temp mappings.  This means @temp isn't
+ * always a valid OSD set on return: in the "only primary_temp" case,
+ * @temp will have its ->primary >= 0 but ->size == 0.
  */
-static int apply_temps(struct ceph_osdmap *osdmap,
-		       struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
-		       int *osds, int len, int *primary)
+static void get_temp_osds(struct ceph_osdmap *osdmap,
+			  struct ceph_pg_pool_info *pi,
+			  const struct ceph_pg *raw_pgid,
+			  struct ceph_osds *temp)
 {
+	struct ceph_pg pgid;
 	struct ceph_pg_mapping *pg;
-	int temp_len;
-	int temp_primary;
 	int i;
 
-	/* raw_pg -> pg */
-	pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
-				    pool->pg_num_mask);
+	raw_pg_to_pg(pi, raw_pgid, &pgid);
+	ceph_osds_init(temp);
 
 	/* pg_temp? */
 	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
 	if (pg) {
-		temp_len = 0;
-		temp_primary = -1;
-
 		for (i = 0; i < pg->pg_temp.len; i++) {
 			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
-				if (ceph_can_shift_osds(pool))
+				if (ceph_can_shift_osds(pi))
 					continue;
-				else
-					osds[temp_len++] = CRUSH_ITEM_NONE;
+
+				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
 			} else {
-				osds[temp_len++] = pg->pg_temp.osds[i];
+				temp->osds[temp->size++] = pg->pg_temp.osds[i];
 			}
 		}
 
 		/* apply pg_temp's primary */
-		for (i = 0; i < temp_len; i++) {
-			if (osds[i] != CRUSH_ITEM_NONE) {
-				temp_primary = osds[i];
+		for (i = 0; i < temp->size; i++) {
+			if (temp->osds[i] != CRUSH_ITEM_NONE) {
+				temp->primary = temp->osds[i];
 				break;
 			}
 		}
-	} else {
-		temp_len = len;
-		temp_primary = *primary;
 	}
 
 	/* primary_temp? */
 	pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
 	if (pg)
-		temp_primary = pg->primary_temp.osd;
-
-	*primary = temp_primary;
-	return temp_len;
+		temp->primary = pg->primary_temp.osd;
 }
 
 /*
- * Calculate acting set for given pgid.
+ * Map a PG to its acting set as well as its up set.
  *
- * Return acting set length, or error.  *primary is set to acting
- * primary osd id, or -1 if acting set is empty or on error.
+ * Acting set is used for data mapping purposes, while up set can be
+ * recorded for detecting interval changes and deciding whether to
+ * resend a request.
  */
-int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
-			int *osds, int *primary)
+void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
+			       const struct ceph_pg *raw_pgid,
+			       struct ceph_osds *up,
+			       struct ceph_osds *acting)
 {
-	struct ceph_pg_pool_info *pool;
+	struct ceph_pg_pool_info *pi;
 	u32 pps;
-	int len;
 
-	pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
-	if (!pool) {
-		*primary = -1;
-		return -ENOENT;
+	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
+	if (!pi) {
+		ceph_osds_init(up);
+		ceph_osds_init(acting);
+		goto out;
 	}
 
-	if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
-		/* hash pool id and seed so that pool PGs do not overlap */
-		pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
-				     ceph_stable_mod(pgid.seed, pool->pgp_num,
-						     pool->pgp_num_mask),
-				     pgid.pool);
-	} else {
-		/*
-		 * legacy behavior: add ps and pool together.  this is
-		 * not a great approach because the PGs from each pool
-		 * will overlap on top of each other: 0.5 == 1.4 ==
-		 * 2.3 == ...
-		 */
-		pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
-				      pool->pgp_num_mask) +
-			(unsigned)pgid.pool;
+	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
+	raw_to_up_osds(osdmap, pi, up);
+	apply_primary_affinity(osdmap, pi, pps, up);
+	get_temp_osds(osdmap, pi, raw_pgid, acting);
+	if (!acting->size) {
+		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
+		acting->size = up->size;
+		if (acting->primary == -1)
+			acting->primary = up->primary;
 	}
-
-	len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
-	if (len < 0) {
-		*primary = -1;
-		return len;
-	}
-
-	len = raw_to_up_osds(osdmap, pool, osds, len, primary);
-
-	apply_primary_affinity(osdmap, pps, pool, osds, len, primary);
-
-	len = apply_temps(osdmap, pool, pgid, osds, len, primary);
-
-	return len;
+out:
+	WARN_ON(!osds_valid(up) || !osds_valid(acting));
 }
 
 /*
- * Return primary osd for given pgid, or -1 if none.
+ * Return acting primary for given PG, or -1 if none.
  */
-int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
+int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
+			      const struct ceph_pg *raw_pgid)
 {
-	int osds[CEPH_PG_MAX_SIZE];
-	int primary;
+	struct ceph_osds up, acting;
 
-	ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
-
-	return primary;
+	ceph_pg_to_up_acting_osds(osdmap, raw_pgid, &up, &acting);
+	return acting.primary;
 }
-EXPORT_SYMBOL(ceph_calc_pg_primary);
+EXPORT_SYMBOL(ceph_pg_to_acting_primary);
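
The osdmap rework above replaces the old int-array/out-parameter interface with
struct ceph_osds and splits the mapping into pg_to_raw_osds() -> raw_to_up_osds()
-> apply_primary_affinity() -> get_temp_osds(), all driven by
ceph_pg_to_up_acting_osds().  A minimal sketch of how a caller could resolve an
object to its acting primary with the renamed helpers; the function name here is
illustrative, not part of the patch, and osdmap/oid/oloc are assumed to be set up:

	#include <linux/ceph/osdmap.h>

	static int example_find_primary(struct ceph_osdmap *osdmap,
					struct ceph_object_id *oid,
					struct ceph_object_locator *oloc)
	{
		struct ceph_pg raw_pgid;
		int ret;

		ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
		if (ret)
			return ret;	/* -ENOENT: pool does not exist */

		/* returns -1 if the acting set is currently empty */
		return ceph_pg_to_acting_primary(osdmap, &raw_pgid);
	}

Note that the raw PG keeps the full-precision seed; the reduction against
pg_num/pgp_num (ceph_stable_mod()) happens later, inside raw_pg_to_pg() and
raw_pg_to_pps().
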
diff --git a/net/compat.c b/net/compat.c
index 5cfd26a..1cd2ec0 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -309,8 +309,8 @@
 	__scm_destroy(scm);
 }
 
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-				char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
 {
 	struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
 	struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -323,6 +323,19 @@
 	    __get_user(ptr, &fprog32->filter) ||
 	    __put_user(len, &kfprog->len) ||
 	    __put_user(compat_ptr(ptr), &kfprog->filter))
+		return NULL;
+
+	return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+				char __user *optval, unsigned int optlen)
+{
+	struct sock_fprog __user *kfprog;
+
+	kfprog = get_compat_bpf_fprog(optval);
+	if (!kfprog)
 		return -EFAULT;
 
 	return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@@ -354,7 +367,8 @@
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
 				char __user *optval, unsigned int optlen)
 {
-	if (optname == SO_ATTACH_FILTER)
+	if (optname == SO_ATTACH_FILTER ||
+	    optname == SO_ATTACH_REUSEPORT_CBPF)
 		return do_set_attach_filter(sock, level, optname,
 					    optval, optlen);
 	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
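
get_compat_bpf_fprog() factors the 32-bit -> 64-bit sock_fprog translation out of
do_set_attach_filter() and exports it, so other compat setsockopt handlers can
reuse it instead of open-coding the copy.  A hedged sketch of a caller, assuming
compat syscall context where optval points at a struct compat_sock_fprog
(function name hypothetical):

	#include <net/compat.h>
	#include <net/sock.h>
	#include <linux/filter.h>

	static int example_attach_cbpf(struct socket *sock, char __user *optval)
	{
		struct sock_fprog __user *kfprog;

		kfprog = get_compat_bpf_fprog(optval);
		if (!kfprog)
			return -EFAULT;	/* reading len/filter pointer failed */

		return sock_setsockopt(sock, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
				       (char __user *)kfprog,
				       sizeof(struct sock_fprog));
	}
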
diff --git a/net/core/filter.c b/net/core/filter.c
index 68adb5f..c4b330c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2085,7 +2085,8 @@
 }
 
 static bool sk_filter_is_valid_access(int off, int size,
-				      enum bpf_access_type type)
+				      enum bpf_access_type type,
+				      enum bpf_reg_type *reg_type)
 {
 	switch (off) {
 	case offsetof(struct __sk_buff, tc_classid):
@@ -2108,7 +2109,8 @@
 }
 
 static bool tc_cls_act_is_valid_access(int off, int size,
-				       enum bpf_access_type type)
+				       enum bpf_access_type type,
+				       enum bpf_reg_type *reg_type)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -2123,6 +2125,16 @@
 			return false;
 		}
 	}
+
+	switch (off) {
+	case offsetof(struct __sk_buff, data):
+		*reg_type = PTR_TO_PACKET;
+		break;
+	case offsetof(struct __sk_buff, data_end):
+		*reg_type = PTR_TO_PACKET_END;
+		break;
+	}
+
 	return __is_valid_access(off, size, type);
 }
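
Reporting *reg_type = PTR_TO_PACKET / PTR_TO_PACKET_END for loads of
__sk_buff->data and ->data_end is what lets the verifier enforce bounds checks on
direct packet access from cls/act programs.  A hedged example of the pattern such
a program has to follow (restricted C; the section name and the hypothetical
program are conventions for tc, not part of this patch):

	#include <linux/bpf.h>
	#include <linux/if_ether.h>
	#include <linux/ip.h>
	#include <linux/in.h>
	#include <linux/pkt_cls.h>

	__attribute__((section("classifier"), used))
	int drop_non_tcp(struct __sk_buff *skb)
	{
		void *data = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;
		struct ethhdr *eth = data;
		struct iphdr *iph = data + sizeof(*eth);

		/* Without this check the verifier rejects the loads below,
		 * because data/data_end carry PTR_TO_PACKET{,_END} types. */
		if (data + sizeof(*eth) + sizeof(*iph) > data_end)
			return TC_ACT_OK;

		return iph->protocol == IPPROTO_TCP ? TC_ACT_OK : TC_ACT_SHOT;
	}
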
 
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index f96ee8b..be873e4 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -47,6 +47,7 @@
  * @xstats_type: TLV type for backward compatibility xstats TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
@@ -87,6 +88,7 @@
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 2b3f76f..7a0b616 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "net-sysfs.h"
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8604ae2..8b02df0 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2245,10 +2245,8 @@
 	hrtimer_set_expires(&t.timer, spin_until);
 
 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-	if (remaining <= 0) {
-		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
-		return;
-	}
+	if (remaining <= 0)
+		goto out;
 
 	start_time = ktime_get();
 	if (remaining < 100000) {
@@ -2273,7 +2271,9 @@
 	}
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
 	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+	destroy_hrtimer_on_stack(&t.timer);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
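
The pktgen hunk above funnels the early-return path through a common out: label
so destroy_hrtimer_on_stack() runs on every exit; with CONFIG_DEBUG_OBJECTS an
on-stack hrtimer that goes out of scope while still tracked triggers a warning.
A condensed sketch of the pattern (single wait, retry loop and error handling
trimmed, function name hypothetical):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>
	#include <linux/sched.h>

	static void example_wait_until(ktime_t spin_until)
	{
		struct hrtimer_sleeper t;

		hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(&t, current);
		hrtimer_set_expires(&t.timer, spin_until);

		if (ktime_to_ns(hrtimer_expires_remaining(&t.timer)) <= 0)
			goto out;	/* already expired, nothing to wait for */

		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
		if (t.task)
			schedule();
		hrtimer_cancel(&t.timer);
		__set_current_state(TASK_RUNNING);
	out:
		destroy_hrtimer_on_stack(&t.timer);	/* must be reached on all paths */
	}
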
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index ca207db..116187b 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1289,8 +1289,8 @@
 				     nl802154_dev_addr_policy))
 		return -EINVAL;
 
-	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
-	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
 	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
 	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
 		return -EINVAL;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4779374..d95631d 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,11 @@
 	void *tmp;
 };
 
+struct esp_output_extra {
+	__be32 seqhi;
+	u32 esphoff;
+};
+
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@@ -35,11 +40,11 @@
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
 {
 	unsigned int len;
 
-	len = seqhilen;
+	len = extralen;
 
 	len += crypto_aead_ivsize(aead);
 
@@ -57,15 +62,16 @@
 	return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
 {
-	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
 }
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
 {
 	return crypto_aead_ivsize(aead) ?
-	       PTR_ALIGN((u8 *)tmp + seqhilen,
-			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+	       PTR_ALIGN((u8 *)tmp + extralen,
+			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
 }
 
 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@@ -99,7 +105,7 @@
 {
 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
 	void *tmp = ESP_SKB_CB(skb)->tmp;
-	__be32 *seqhi = esp_tmp_seqhi(tmp);
+	__be32 *seqhi = esp_tmp_extra(tmp);
 
 	esph->seq_no = esph->spi;
 	esph->spi = *seqhi;
@@ -107,7 +113,11 @@
 
 static void esp_output_restore_header(struct sk_buff *skb)
 {
-	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+	void *tmp = ESP_SKB_CB(skb)->tmp;
+	struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+				sizeof(__be32));
 }
 
 static void esp_output_done_esn(struct crypto_async_request *base, int err)
@@ -121,6 +131,7 @@
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
+	struct esp_output_extra *extra;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
 	struct aead_request *req;
@@ -137,8 +148,7 @@
 	int tfclen;
 	int nfrags;
 	int assoclen;
-	int seqhilen;
-	__be32 *seqhi;
+	int extralen;
 	__be64 seqno;
 
 	/* skb is pure payload to encrypt */
@@ -166,21 +176,21 @@
 	nfrags = err;
 
 	assoclen = sizeof(*esph);
-	seqhilen = 0;
+	extralen = 0;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
-		seqhilen += sizeof(__be32);
-		assoclen += seqhilen;
+		extralen += sizeof(*extra);
+		assoclen += sizeof(__be32);
 	}
 
-	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+	tmp = esp_alloc_tmp(aead, nfrags, extralen);
 	if (!tmp) {
 		err = -ENOMEM;
 		goto error;
 	}
 
-	seqhi = esp_tmp_seqhi(tmp);
-	iv = esp_tmp_iv(aead, tmp, seqhilen);
+	extra = esp_tmp_extra(tmp);
+	iv = esp_tmp_iv(aead, tmp, extralen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
 
@@ -247,8 +257,10 @@
 	 * encryption.
 	 */
 	if ((x->props.flags & XFRM_STATE_ESN)) {
-		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
-		*seqhi = esph->spi;
+		extra->esphoff = (unsigned char *)esph -
+				 skb_transport_header(skb);
+		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+		extra->seqhi = esph->spi;
 		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	}
@@ -445,7 +457,7 @@
 		goto out;
 
 	ESP_SKB_CB(skb)->tmp = tmp;
-	seqhi = esp_tmp_seqhi(tmp);
+	seqhi = esp_tmp_extra(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4c39f4f..de1d119 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -62,26 +62,26 @@
 
 /* Fills in tpi and returns header length to be pulled. */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-		     bool *csum_err, __be16 proto)
+		     bool *csum_err, __be16 proto, int nhs)
 {
 	const struct gre_base_hdr *greh;
 	__be32 *options;
 	int hdr_len;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+	if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
 		return -EINVAL;
 
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	greh = (struct gre_base_hdr *)(skb->data + nhs);
 	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
 		return -EINVAL;
 
 	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
 	hdr_len = gre_calc_hlen(tpi->flags);
 
-	if (!pskb_may_pull(skb, hdr_len))
+	if (!pskb_may_pull(skb, nhs + hdr_len))
 		return -EINVAL;
 
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	greh = (struct gre_base_hdr *)(skb->data + nhs);
 	tpi->proto = greh->protocol;
 
 	options = (__be32 *)(greh + 1);
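
The new nhs argument lets gre_parse_header() work when skb->data does not start
at the GRE header: in the ICMP error path the GRE header sits behind the embedded
IPv4 header, so the ip_gre.c hunk below passes iph->ihl * 4 instead of 0.  A
short illustrative sketch of that call style (function name hypothetical):

	static int example_parse_from_icmp_err(struct sk_buff *skb,
					       struct tnl_ptk_info *tpi)
	{
		const struct iphdr *iph = (const struct iphdr *)skb->data;
		bool csum_err = false;

		/* skip the inner IPv4 header that precedes the GRE base header */
		return gre_parse_header(skb, tpi, &csum_err, htons(ETH_P_IP),
					iph->ihl * 4);
	}
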
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4d2025f..1d000af 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -49,12 +49,6 @@
 #include <net/gre.h>
 #include <net/dst_metadata.h>
 
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
 /*
    Problems & solutions
    --------------------
@@ -217,12 +211,14 @@
 	 * by themselves???
 	 */
 
+	const struct iphdr *iph = (struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct tnl_ptk_info tpi;
 	bool csum_err = false;
 
-	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+			     iph->ihl * 4) < 0) {
 		if (!csum_err)		/* ignore csum errors. */
 			return;
 	}
@@ -338,7 +334,7 @@
 	}
 #endif
 
-	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 	if (hdr_len < 0)
 		goto drop;
 
@@ -1121,6 +1117,7 @@
 {
 	struct nlattr *tb[IFLA_MAX + 1];
 	struct net_device *dev;
+	LIST_HEAD(list_kill);
 	struct ip_tunnel *t;
 	int err;
 
@@ -1136,8 +1133,10 @@
 	t->collect_md = true;
 
 	err = ipgre_newlink(net, dev, tb, NULL);
-	if (err < 0)
-		goto out;
+	if (err < 0) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
 
 	/* openvswitch users expect packet sizes to be unrestricted,
 	 * so set the largest MTU we can.
@@ -1146,9 +1145,14 @@
 	if (err)
 		goto out;
 
+	err = rtnl_configure_link(dev, NULL);
+	if (err < 0)
+		goto out;
+
 	return dev;
 out:
-	free_netdev(dev);
+	ip_tunnel_dellink(dev, &list_kill);
+	unregister_netdevice_many(&list_kill);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2ed9dd2..1d71c40 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -127,7 +127,9 @@
 static __be32 ic_netmask = NONE;	/* Netmask for local subnet */
 __be32 ic_gateway = NONE;	/* Gateway IP address */
 
-__be32 ic_addrservaddr = NONE;	/* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE;	/* IP Address of the IP addresses'server */
+#endif
 
 __be32 ic_servaddr = NONE;	/* Boot server IP address */
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d56c055..ca5e8ea 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -391,9 +391,9 @@
 	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
 }
 
-static inline int compute_score(struct sock *sk, struct net *net,
-				__be32 saddr, unsigned short hnum, __be16 sport,
-				__be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+			 __be32 saddr, __be16 sport,
+			 __be32 daddr, unsigned short hnum, int dif)
 {
 	int score;
 	struct inet_sock *inet;
@@ -434,52 +434,6 @@
 	return score;
 }
 
-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
-				 __be32 saddr, __be16 sport,
-				 __be32 daddr, unsigned int hnum, int dif)
-{
-	int score;
-	struct inet_sock *inet;
-
-	if (!net_eq(sock_net(sk), net) ||
-	    ipv6_only_sock(sk))
-		return -1;
-
-	inet = inet_sk(sk);
-
-	if (inet->inet_rcv_saddr != daddr ||
-	    inet->inet_num != hnum)
-		return -1;
-
-	score = (sk->sk_family == PF_INET) ? 2 : 1;
-
-	if (inet->inet_daddr) {
-		if (inet->inet_daddr != saddr)
-			return -1;
-		score += 4;
-	}
-
-	if (inet->inet_dport) {
-		if (inet->inet_dport != sport)
-			return -1;
-		score += 4;
-	}
-
-	if (sk->sk_bound_dev_if) {
-		if (sk->sk_bound_dev_if != dif)
-			return -1;
-		score += 4;
-	}
-
-	if (sk->sk_incoming_cpu == raw_smp_processor_id())
-		score++;
-
-	return score;
-}
-
 static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
 		       const __u16 lport, const __be32 faddr,
 		       const __be16 fport)
@@ -492,11 +446,11 @@
 			      udp_ehash_secret + net_hash_mix(net));
 }
 
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
 		__be32 saddr, __be16 sport,
 		__be32 daddr, unsigned int hnum, int dif,
-		struct udp_hslot *hslot2, unsigned int slot2,
+		struct udp_hslot *hslot2,
 		struct sk_buff *skb)
 {
 	struct sock *sk, *result;
@@ -506,7 +460,7 @@
 	result = NULL;
 	badness = 0;
 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-		score = compute_score2(sk, net, saddr, sport,
+		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif);
 		if (score > badness) {
 			reuseport = sk->sk_reuseport;
@@ -554,17 +508,22 @@
 
 		result = udp4_lib_lookup2(net, saddr, sport,
 					  daddr, hnum, dif,
-					  hslot2, slot2, skb);
+					  hslot2, skb);
 		if (!result) {
+			unsigned int old_slot2 = slot2;
 			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 			slot2 = hash2 & udptable->mask;
+			/* avoid searching the same slot again. */
+			if (unlikely(slot2 == old_slot2))
+				return result;
+
 			hslot2 = &udptable->hash2[slot2];
 			if (hslot->count < hslot2->count)
 				goto begin;
 
 			result = udp4_lib_lookup2(net, saddr, sport,
-						  htonl(INADDR_ANY), hnum, dif,
-						  hslot2, slot2, skb);
+						  daddr, hnum, dif,
+						  hslot2, skb);
 		}
 		return result;
 	}
@@ -572,8 +531,8 @@
 	result = NULL;
 	badness = 0;
 	sk_for_each_rcu(sk, &hslot->head) {
-		score = compute_score(sk, net, saddr, hnum, sport,
-				      daddr, dport, dif);
+		score = compute_score(sk, net, saddr, sport,
+				      daddr, hnum, dif);
 		if (score > badness) {
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
@@ -1618,12 +1577,12 @@
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
 			goto csum_error;
-		if (sk_filter(sk, skb))
-			goto drop;
-	}
+
+	if (sk_filter(sk, skb))
+		goto drop;
 
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
@@ -1755,8 +1714,11 @@
 			return err;
 	}
 
-	return skb_checksum_init_zero_check(skb, proto, uh->check,
-					    inet_compute_pseudo);
+	/* Note, we are only interested in != 0 or == 0, thus the
+	 * force to int.
+	 */
+	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+							 inet_compute_pseudo);
 }
 
 /*
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 3f84113..2343e4f 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -232,6 +232,15 @@
 
 	  Saying M here will produce a module called ip6_gre. If unsure, say N.
 
+config IPV6_FOU
+	tristate
+	default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+	tristate
+	default NET_FOU_IP_TUNNELS && IPV6_FOU
+	select IPV6_TUNNEL
+
 config IPV6_MULTIPLE_TABLES
 	bool "IPv6: Multiple Routing Tables"
 	select FIB_RULES
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 7ec3129..6d8ea09 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -42,7 +42,7 @@
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
 
 obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
 obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index c972d0b..9ea249b 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -69,7 +69,7 @@
 }
 EXPORT_SYMBOL(gue6_build_header);
 
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
 
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
 	.encap_hlen = fou_encap_hlen,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4527285..a4fa840 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -98,7 +98,7 @@
 
 	if (!(type & ICMPV6_INFOMSG_MASK))
 		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
-			ping_err(skb, offset, info);
+			ping_err(skb, offset, ntohl(info));
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index b2025bf..c0cbcb2 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -78,9 +78,12 @@
 	 * we accept a checksum of zero here. When we find the socket
 	 * for the UDP packet we'll check if that socket allows zero checksum
 	 * for IPv6 (set by socket option).
+	 *
+	 * Note, we are only interested in != 0 or == 0, thus the
+	 * force to int.
 	 */
-	return skb_checksum_init_zero_check(skb, proto, uh->check,
-					   ip6_compute_pseudo);
+	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+							 ip6_compute_pseudo);
 }
 EXPORT_SYMBOL(udp6_csum_init);
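
The (__force int) casts in udp4_csum_init() and udp6_csum_init() are for sparse:
skb_checksum_init_zero_check() evaluates to a __sum16, which is a __bitwise type,
and converting it implicitly to the functions' plain int return type would draw a
warning.  Since the callers only test zero/non-zero, the cast is safe; a minimal
illustration of the same idea:

	#include <linux/types.h>	/* __sum16 is __u16 __bitwise */

	static int example_csum_bad(__sum16 csum)
	{
		/* without __force, sparse warns about the restricted type */
		return (__force int)csum != 0;
	}
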
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f4ac284..776d145 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -468,7 +468,7 @@
 	bool csum_err = false;
 	int hdr_len;
 
-	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
 	if (hdr_len < 0)
 		goto drop;
 
@@ -1256,6 +1256,8 @@
 	if (ret)
 		return ret;
 
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
 	tunnel = netdev_priv(dev);
 
 	ip6gre_tnl_link_config(tunnel, 1);
@@ -1289,6 +1291,7 @@
 
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cbf127a..635b8d3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1071,17 +1071,12 @@
 					 const struct in6_addr *final_dst)
 {
 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-	int err;
 
 	dst = ip6_sk_dst_check(sk, dst, fl6);
+	if (!dst)
+		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
 
-	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
-	if (err)
-		return ERR_PTR(err);
-	if (final_dst)
-		fl6->daddr = *final_dst;
-
-	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+	return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index 6989c70..4a84b5a 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -33,6 +33,7 @@
 	fl6.daddr = *gw;
 	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		dst_release(dst);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 0a5a255..0619ac7 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -560,13 +560,13 @@
 
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				 t->parms.link, 0, IPPROTO_IPV6, 0);
+				 t->parms.link, 0, iph->protocol, 0);
 		err = 0;
 		goto out;
 	}
 	if (type == ICMP_REDIRECT) {
 		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-			      IPPROTO_IPV6, 0);
+			      iph->protocol, 0);
 		err = 0;
 		goto out;
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 79e33e0..2255d2b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -738,7 +738,7 @@
 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 				 int oif, struct tcp_md5sig_key *key, int rst,
-				 u8 tclass, u32 label)
+				 u8 tclass, __be32 label)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcphdr *t1;
@@ -911,7 +911,7 @@
 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
-			    u32 label)
+			    __be32 label)
 {
 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 			     tclass, label);
@@ -1721,7 +1721,9 @@
 	destp = ntohs(inet->inet_dport);
 	srcp  = ntohs(inet->inet_sport);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		timer_active	= 1;
 		timer_expires	= icsk->icsk_timeout;
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2da1896..005dc82 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -115,11 +115,10 @@
 	udp_lib_rehash(sk, new_hash);
 }
 
-static inline int compute_score(struct sock *sk, struct net *net,
-				unsigned short hnum,
-				const struct in6_addr *saddr, __be16 sport,
-				const struct in6_addr *daddr, __be16 dport,
-				int dif)
+static int compute_score(struct sock *sk, struct net *net,
+			 const struct in6_addr *saddr, __be16 sport,
+			 const struct in6_addr *daddr, unsigned short hnum,
+			 int dif)
 {
 	int score;
 	struct inet_sock *inet;
@@ -162,54 +161,11 @@
 	return score;
 }
 
-static inline int compute_score2(struct sock *sk, struct net *net,
-				 const struct in6_addr *saddr, __be16 sport,
-				 const struct in6_addr *daddr,
-				 unsigned short hnum, int dif)
-{
-	int score;
-	struct inet_sock *inet;
-
-	if (!net_eq(sock_net(sk), net) ||
-	    udp_sk(sk)->udp_port_hash != hnum ||
-	    sk->sk_family != PF_INET6)
-		return -1;
-
-	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
-		return -1;
-
-	score = 0;
-	inet = inet_sk(sk);
-
-	if (inet->inet_dport) {
-		if (inet->inet_dport != sport)
-			return -1;
-		score++;
-	}
-
-	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
-		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
-			return -1;
-		score++;
-	}
-
-	if (sk->sk_bound_dev_if) {
-		if (sk->sk_bound_dev_if != dif)
-			return -1;
-		score++;
-	}
-
-	if (sk->sk_incoming_cpu == raw_smp_processor_id())
-		score++;
-
-	return score;
-}
-
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp6_lib_lookup2(struct net *net,
 		const struct in6_addr *saddr, __be16 sport,
 		const struct in6_addr *daddr, unsigned int hnum, int dif,
-		struct udp_hslot *hslot2, unsigned int slot2,
+		struct udp_hslot *hslot2,
 		struct sk_buff *skb)
 {
 	struct sock *sk, *result;
@@ -219,7 +175,7 @@
 	result = NULL;
 	badness = -1;
 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-		score = compute_score2(sk, net, saddr, sport,
+		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif);
 		if (score > badness) {
 			reuseport = sk->sk_reuseport;
@@ -268,17 +224,22 @@
 
 		result = udp6_lib_lookup2(net, saddr, sport,
 					  daddr, hnum, dif,
-					  hslot2, slot2, skb);
+					  hslot2, skb);
 		if (!result) {
+			unsigned int old_slot2 = slot2;
 			hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
 			slot2 = hash2 & udptable->mask;
+			/* avoid searching the same slot again. */
+			if (unlikely(slot2 == old_slot2))
+				return result;
+
 			hslot2 = &udptable->hash2[slot2];
 			if (hslot->count < hslot2->count)
 				goto begin;
 
 			result = udp6_lib_lookup2(net, saddr, sport,
-						  &in6addr_any, hnum, dif,
-						  hslot2, slot2, skb);
+						  daddr, hnum, dif,
+						  hslot2, skb);
 		}
 		return result;
 	}
@@ -286,7 +247,7 @@
 	result = NULL;
 	badness = -1;
 	sk_for_each_rcu(sk, &hslot->head) {
-		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
+		score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
 		if (score > badness) {
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
@@ -653,12 +614,12 @@
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter)) {
-		if (udp_lib_checksum_complete(skb))
-			goto csum_error;
-		if (sk_filter(sk, skb))
-			goto drop;
-	}
+	if (rcu_access_pointer(sk->sk_filter) &&
+	    udp_lib_checksum_complete(skb))
+		goto csum_error;
+
+	if (sk_filter(sk, skb))
+		goto drop;
 
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index 7380087..fda7f47 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -241,6 +241,7 @@
 	.open		= kcm_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
+	.release	= seq_release_net,
 };
 
 static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 6edfa99..1e40dac 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1581,7 +1581,7 @@
 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 	tunnel->encap = encap;
 	if (encap == L2TP_ENCAPTYPE_UDP) {
-		struct udp_tunnel_sock_cfg udp_cfg;
+		struct udp_tunnel_sock_cfg udp_cfg = { };
 
 		udp_cfg.sk_user_data = tunnel;
 		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c6f5df1b..6c54e03 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -128,6 +128,7 @@
  */
 static int l2tp_ip6_recv(struct sk_buff *skb)
 {
+	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	u32 session_id;
 	u32 tunnel_id;
@@ -154,7 +155,7 @@
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(&init_net, NULL, session_id);
+	session = l2tp_session_find(net, NULL, session_id);
 	if (session == NULL)
 		goto discard;
 
@@ -188,14 +189,14 @@
 		goto discard;
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-	tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+	tunnel = l2tp_tunnel_find(net, tunnel_id);
 	if (tunnel != NULL)
 		sk = tunnel->sock;
 	else {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
 					    0, tunnel_id);
 		read_unlock_bh(&l2tp_ip6_lock);
 	}
@@ -263,6 +264,7 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+	struct net *net = sock_net(sk);
 	__be32 v4addr = 0;
 	int addr_type;
 	int err;
@@ -286,7 +288,7 @@
 
 	err = -EADDRINUSE;
 	read_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
 				   sk->sk_bound_dev_if, addr->l2tp_conn_id))
 		goto out_in_use;
 	read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@
 	return 0;
 
 drop:
-	IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
 	return -1;
 }
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 5dba899..1824708 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -444,10 +444,9 @@
 		break;
 
 	case LAPB_FRMR:
-		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
 			 lapb->dev, frame->pf,
-			 skb->data[0], skb->data[1], skb->data[2],
-			 skb->data[3], skb->data[4]);
+			 skb->data);
 		lapb_establish_data_link(lapb);
 		lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
 		lapb_requeue_frames(lapb);
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index ba4d015..482c94d 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -148,9 +148,7 @@
 		}
 	}
 
-	lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
-		 lapb->dev, lapb->state,
-		 skb->data[0], skb->data[1], skb->data[2]);
+	lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
 
 	if (!lapb_data_transmit(lapb, skb))
 		kfree_skb(skb);
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 9d0a426..3c1914d 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -113,9 +113,7 @@
 {
 	frame->type = LAPB_ILLEGAL;
 
-	lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
-		 lapb->dev, lapb->state,
-		 skb->data[0], skb->data[1], skb->data[2]);
+	lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
 
 	/* We always need to look at 2 bytes, sometimes we need
 	 * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+		lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
 			 lapb->dev, lapb->state,
-			 skb->data[1], skb->data[2], skb->data[3],
-			 skb->data[4], skb->data[5]);
+			 &skb->data[1]);
 	} else {
 		dptr    = skb_put(skb, 4);
 		*dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
-			 lapb->dev, lapb->state, skb->data[1],
-			 skb->data[2], skb->data[3]);
+		lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
+			 lapb->dev, lapb->state, &skb->data[1]);
 	}
 
 	lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
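
The lapb hunks above swap hand-rolled byte-by-byte dumps for printk's %ph
extension: %*ph prints the first N bytes of a buffer (N given in the format, up
to 64) as space-separated hex, so "%3ph"/"%5ph" with one pointer argument
replaces several %02X conversions.  A small hedged example:

	#include <linux/printk.h>
	#include <linux/types.h>

	static void example_dump(void)
	{
		u8 hdr[5] = { 0x01, 0x3f, 0x97, 0x00, 0x10 };

		/* prints "hdr: 01 3f 97 00 10" - one argument instead of five */
		pr_debug("hdr: %5ph\n", hdr);
	}
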
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 4c6404e..21b1fdf 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -161,6 +161,10 @@
 		del_timer_sync(&sta->mesh->plink_timer);
 	}
 
+	/* make sure no readers can access nexthop sta from here on */
+	mesh_path_flush_by_nexthop(sta);
+	synchronize_net();
+
 	if (changed)
 		ieee80211_mbss_info_change_notify(sdata, changed);
 }
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c8b8ccc..78b0ef3 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -280,7 +280,7 @@
 	u8 sa_offs, da_offs, pn_offs;
 	u8 band;
 	u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
-	       sizeof(rfc1042_header)];
+	       sizeof(rfc1042_header)] __aligned(2);
 
 	struct rcu_head rcu_head;
 };
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 2cb3c62..096a451 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -762,7 +762,7 @@
  *	If available, return 1, otherwise invalidate this connection
  *	template and return 0.
  */
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
 {
 	struct ip_vs_dest *dest = ct->dest;
 	struct netns_ipvs *ipvs = ct->ipvs;
@@ -772,7 +772,8 @@
 	 */
 	if ((dest == NULL) ||
 	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-	    expire_quiescent_template(ipvs, dest)) {
+	    expire_quiescent_template(ipvs, dest) ||
+	    (cdest && (dest != cdest))) {
 		IP_VS_DBG_BUF(9, "check_template: dest not available for "
 			      "protocol %s s:%s:%d v:%s:%d "
 			      "-> d:%s:%d\n",
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1207f20..2c1b498 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -321,7 +321,7 @@
 
 	/* Check if a template already exists */
 	ct = ip_vs_ct_in_get(&param);
-	if (!ct || !ip_vs_check_template(ct)) {
+	if (!ct || !ip_vs_check_template(ct, NULL)) {
 		struct ip_vs_scheduler *sched;
 
 		/*
@@ -1154,7 +1154,8 @@
 						  vport, &param) < 0)
 			return NULL;
 		ct = ip_vs_ct_in_get(&param);
-		if (!ct) {
+		/* check if template exists and points to the same dest */
+		if (!ct || !ip_vs_check_template(ct, dest)) {
 			ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
 					    IP_VS_CONN_F_TEMPLATE, dest, 0);
 			if (!ct) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index db2312e..f204274 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1544,6 +1544,8 @@
 	nf_conntrack_tstamp_fini();
 	nf_conntrack_acct_fini();
 	nf_conntrack_expect_fini();
+
+	kmem_cache_destroy(nf_conntrack_cachep);
 }
 
 /*
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 883c691..19efeba 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -632,6 +632,7 @@
 			if (ret) {
 				pr_err("failed to register helper for pf: %d port: %d\n",
 				       ftp[i][j].tuple.src.l3num, ports[i]);
+				ports_c = i;
 				nf_conntrack_ftp_fini();
 				return ret;
 			}
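
The ports_c = i; line added here (the same one-line fix recurs in the irc, sane,
sip and tftp helpers further down) records how many port instances were
registered successfully before bailing out, so the _fini() unregister loop only
walks helpers that actually exist.  A generic sketch of the register/rollback
pattern, with hypothetical names and a simplified single helper array:

	#include <linux/init.h>
	#include <net/netfilter/nf_conntrack_helper.h>

	static struct nf_conntrack_helper helper[8];	/* hypothetical */
	static unsigned int ports_count = 8;
	static unsigned int ports_c;			/* registered so far */

	static void example_fini(void)
	{
		unsigned int i;

		for (i = 0; i < ports_c; i++)	/* only what was registered */
			nf_conntrack_helper_unregister(&helper[i]);
	}

	static int __init example_init(void)
	{
		unsigned int i;
		int ret;

		for (i = 0; i < ports_count; i++) {
			ret = nf_conntrack_helper_register(&helper[i]);
			if (ret) {
				ports_c = i;	/* the fix: remember the count */
				example_fini();
				return ret;
			}
			ports_c = i + 1;
		}
		return 0;
	}
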
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index f703adb..196cb39 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -361,9 +361,10 @@
 
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
-	int ret = 0;
-	struct nf_conntrack_helper *cur;
+	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
 	unsigned int h = helper_hash(&me->tuple);
+	struct nf_conntrack_helper *cur;
+	int ret = 0;
 
 	BUG_ON(me->expect_policy == NULL);
 	BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -371,9 +372,7 @@
 
 	mutex_lock(&nf_ct_helper_mutex);
 	hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
-		if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
-		    cur->tuple.src.l3num == me->tuple.src.l3num &&
-		    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+		if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
 			ret = -EEXIST;
 			goto out;
 		}
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 8b6da27..f97ac61 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -271,6 +271,7 @@
 		if (ret) {
 			pr_err("failed to register helper for pf: %u port: %u\n",
 			       irc[i].tuple.src.l3num, ports[i]);
+			ports_c = i;
 			nf_conntrack_irc_fini();
 			return ret;
 		}
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 7523a57..3fcbaab 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -223,6 +223,7 @@
 			if (ret) {
 				pr_err("failed to register helper for pf: %d port: %d\n",
 				       sane[i][j].tuple.src.l3num, ports[i]);
+				ports_c = i;
 				nf_conntrack_sane_fini();
 				return ret;
 			}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 3e06402..f72ba55 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1669,6 +1669,7 @@
 			if (ret) {
 				pr_err("failed to register helper for pf: %u port: %u\n",
 				       sip[i][j].tuple.src.l3num, ports[i]);
+				ports_c = i;
 				nf_conntrack_sip_fini();
 				return ret;
 			}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f87e84e..c026c47 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -487,8 +487,6 @@
 	{ }
 };
 
-#define NET_NF_CONNTRACK_MAX 2089
-
 static struct ctl_table nf_ct_netfilter_table[] = {
 	{
 		.procname	= "nf_conntrack_max",
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 36f9640..2e65b543 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -142,6 +142,7 @@
 			if (ret) {
 				pr_err("failed to register helper for pf: %u port: %u\n",
 				       tftp[i][j].tuple.src.l3num, ports[i]);
+				ports_c = i;
 				nf_conntrack_tftp_fini();
 				return ret;
 			}
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5baa8e2..b19ad20 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -26,23 +26,21 @@
  * Once the queue is registered it must reinject all packets it
  * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
 {
 	/* should never happen, we only have one queueing backend in kernel */
-	WARN_ON(rcu_access_pointer(queue_handler));
-	rcu_assign_pointer(queue_handler, qh);
+	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+	rcu_assign_pointer(net->nf.queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
 {
-	RCU_INIT_POINTER(queue_handler, NULL);
-	synchronize_rcu();
+	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
@@ -103,7 +101,7 @@
 	const struct nf_queue_handler *qh;
 
 	rcu_read_lock();
-	qh = rcu_dereference(queue_handler);
+	qh = rcu_dereference(net->nf.queue_handler);
 	if (qh)
 		qh->nf_hook_drop(net, ops);
 	rcu_read_unlock();
@@ -122,9 +120,10 @@
 	struct nf_queue_entry *entry = NULL;
 	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
+	struct net *net = state->net;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
-	qh = rcu_dereference(queue_handler);
+	qh = rcu_dereference(net->nf.queue_handler);
 	if (!qh) {
 		status = -ESRCH;
 		goto err;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4d292b9..2c88187 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2647,6 +2647,8 @@
 	/* Only accept unspec with dump */
 	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
 		return -EAFNOSUPPORT;
+	if (!nla[NFTA_SET_TABLE])
+		return -EINVAL;
 
 	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
 	if (IS_ERR(set))
@@ -2944,24 +2946,20 @@
 		 * jumps are already validated for that chain.
 		 */
 		list_for_each_entry(i, &set->bindings, list) {
-			if (binding->flags & NFT_SET_MAP &&
+			if (i->flags & NFT_SET_MAP &&
 			    i->chain == binding->chain)
 				goto bind;
 		}
 
+		iter.genmask	= nft_genmask_next(ctx->net);
 		iter.skip 	= 0;
 		iter.count	= 0;
 		iter.err	= 0;
 		iter.fn		= nf_tables_bind_check_setelem;
 
 		set->ops->walk(ctx, set, &iter);
-		if (iter.err < 0) {
-			/* Destroy anonymous sets if binding fails */
-			if (set->flags & NFT_SET_ANONYMOUS)
-				nf_tables_set_destroy(ctx, set);
-
+		if (iter.err < 0)
 			return iter.err;
-		}
 	}
 bind:
 	binding->chain = ctx->chain;
@@ -3190,12 +3188,13 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	args.cb		= cb;
-	args.skb	= skb;
-	args.iter.skip	= cb->args[0];
-	args.iter.count	= 0;
-	args.iter.err   = 0;
-	args.iter.fn	= nf_tables_dump_setelem;
+	args.cb			= cb;
+	args.skb		= skb;
+	args.iter.genmask	= nft_genmask_cur(ctx.net);
+	args.iter.skip		= cb->args[0];
+	args.iter.count		= 0;
+	args.iter.err		= 0;
+	args.iter.fn		= nf_tables_dump_setelem;
 	set->ops->walk(&ctx, set, &args.iter);
 
 	nla_nest_end(skb, nest);
@@ -4282,6 +4281,7 @@
 			    binding->chain != chain)
 				continue;
 
+			iter.genmask	= nft_genmask_next(ctx->net);
 			iter.skip 	= 0;
 			iter.count	= 0;
 			iter.err	= 0;
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index e9f8dff..fb8b589 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -143,7 +143,7 @@
 	list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
 
 		/* This rule is not active, skip. */
-		if (unlikely(rule->genmask & (1 << gencursor)))
+		if (unlikely(rule->genmask & gencursor))
 			continue;
 
 		rulenum++;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index aa93877..5d36a09 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -557,7 +557,7 @@
 
 	if (entskb->tstamp.tv64) {
 		struct nfqnl_msg_packet_timestamp ts;
-		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
 		ts.sec = cpu_to_be64(kts.tv_sec);
 		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1482,21 +1482,29 @@
 			 net->nf.proc_netfilter, &nfqnl_file_ops))
 		return -ENOMEM;
 #endif
+	nf_register_queue_handler(net, &nfqh);
 	return 0;
 }
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+	nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
 	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
 }
 
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+	synchronize_rcu();
+}
+
 static struct pernet_operations nfnl_queue_net_ops = {
-	.init	= nfnl_queue_net_init,
-	.exit	= nfnl_queue_net_exit,
-	.id	= &nfnl_queue_net_id,
-	.size	= sizeof(struct nfnl_queue_net),
+	.init		= nfnl_queue_net_init,
+	.exit		= nfnl_queue_net_exit,
+	.exit_batch	= nfnl_queue_net_exit_batch,
+	.id		= &nfnl_queue_net_id,
+	.size		= sizeof(struct nfnl_queue_net),
 };
 
 static int __init nfnetlink_queue_init(void)
@@ -1517,7 +1525,6 @@
 	}
 
 	register_netdevice_notifier(&nfqnl_dev_notifier);
-	nf_register_queue_handler(&nfqh);
 	return status;
 
 cleanup_netlink_notifier:
@@ -1529,7 +1536,6 @@
 
 static void __exit nfnetlink_queue_fini(void)
 {
-	nf_unregister_queue_handler();
 	unregister_netdevice_notifier(&nfqnl_dev_notifier);
 	nfnetlink_subsys_unregister(&nfqnl_subsys);
 	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 6fa0165..f39c53a 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -189,7 +189,6 @@
 	struct nft_hash_elem *he;
 	struct rhashtable_iter hti;
 	struct nft_set_elem elem;
-	u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 	int err;
 
 	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
@@ -218,7 +217,7 @@
 			goto cont;
 		if (nft_set_elem_expired(&he->ext))
 			goto cont;
-		if (!nft_set_elem_active(&he->ext, genmask))
+		if (!nft_set_elem_active(&he->ext, iter->genmask))
 			goto cont;
 
 		elem.priv = he;
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index f762094..7201d57 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -211,7 +211,6 @@
 	struct nft_rbtree_elem *rbe;
 	struct nft_set_elem elem;
 	struct rb_node *node;
-	u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 
 	spin_lock_bh(&nft_rbtree_lock);
 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
@@ -219,7 +218,7 @@
 
 		if (iter->count < iter->skip)
 			goto cont;
-		if (!nft_set_elem_active(&rbe->ext, genmask))
+		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
 			goto cont;
 
 		elem.priv = rbe;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c69c892..2675d58 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -612,7 +612,7 @@
 		return -EINVAL;
 
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-	    target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
 		return -EINVAL;
 
 	/* compat_xt_entry match has less strict aligment requirements,
@@ -694,7 +694,7 @@
 		return -EINVAL;
 
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-	    target_offset + sizeof(struct xt_standard_target) != next_offset)
+	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
 		return -EINVAL;
 
 	return xt_check_entry_match(elems, base + target_offset,
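
The two hunks above compare the end of the standard target against the XT_ALIGN()/COMPAT_XT_ALIGN() of the offset, because entries in the ruleset blob are padded to the xt alignment and next_offset points at the padded end. A small userspace sketch of the difference; the 8-byte alignment and the offsets below are assumed purely for illustration:

#include <stdio.h>

#define ALIGN_TO		8u	/* assumed stand-in for __alignof__(struct _xt_align) */
#define XT_ALIGN_DEMO(s)	(((s) + ALIGN_TO - 1) & ~(ALIGN_TO - 1))

int main(void)
{
	unsigned int target_offset = 112;	/* hypothetical entry layout */
	unsigned int target_size = 36;		/* not a multiple of the alignment */
	unsigned int next_offset = target_offset + XT_ALIGN_DEMO(target_size);

	/* pre-fix check: rejects a correctly padded entry */
	printf("unaligned check: %s\n",
	       target_offset + target_size == next_offset ? "ok" : "-EINVAL");

	/* fixed check: compare the padded end, as the hunks above now do */
	printf("aligned check:   %s\n",
	       XT_ALIGN_DEMO(target_offset + target_size) == next_offset ? "ok" : "-EINVAL");
	return 0;
}
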
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 879185f..9a3eb7a 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -137,11 +137,23 @@
 	return !!key->eth.type;
 }
 
+static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+			     __be16 ethertype)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		__be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+					~skb->csum);
+	}
+
+	hdr->h_proto = ethertype;
+}
+
 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 		     const struct ovs_action_push_mpls *mpls)
 {
 	__be32 *new_mpls_lse;
-	struct ethhdr *hdr;
 
 	/* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
 	if (skb->encapsulation)
@@ -160,9 +172,7 @@
 
 	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
 
-	hdr = eth_hdr(skb);
-	hdr->h_proto = mpls->mpls_ethertype;
-
+	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
 	if (!skb->inner_protocol)
 		skb_set_inner_protocol(skb, skb->protocol);
 	skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@
 	 * field correctly in the presence of VLAN tags.
 	 */
 	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
-	hdr->h_proto = ethertype;
+	update_ethertype(skb, hdr, ethertype);
 	if (eth_p_mpls(skb->protocol))
 		skb->protocol = ethertype;
 
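
update_ethertype() above folds the h_proto rewrite into skb->csum with a two-element diff[] of { ~old, new }. The sketch below demonstrates only the underlying one's-complement identity — adding ~old and new to an existing sum replaces old's contribution with new's — and checks it against a full recomputation; it does not model __wsum, the outer complements, or CHECKSUM_COMPLETE coverage:

#include <stdio.h>
#include <stdint.h>

/* one's-complement sum of 16-bit words with end-around carry */
static uint16_t ocsum16(const uint16_t *w, size_t n, uint32_t sum)
{
	while (n--)
		sum += *w++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t buf[] = { 0x0800, 0x1234, 0xabcd, 0x00ff };	/* arbitrary payload words */
	uint16_t old_proto = buf[0], new_proto = 0x8847;	/* e.g. switching to an MPLS ethertype */
	uint16_t diff[2], sum, patched;

	sum = ocsum16(buf, 4, 0);

	/* incremental update: fold { ~old, new } into the existing sum */
	diff[0] = (uint16_t)~old_proto;
	diff[1] = new_proto;
	patched = ocsum16(diff, 2, sum);

	/* full recomputation over the modified buffer must agree */
	buf[0] = new_proto;
	printf("incremental %#06x, recomputed %#06x\n", patched, ocsum16(buf, 4, 0));
	return 0;
}
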
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4040eb9..9bff6ef 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,6 +93,7 @@
 #include <net/inet_common.h>
 #endif
 #include <linux/bpf.h>
+#include <net/compat.h>
 
 #include "internal.h"
 
@@ -3940,6 +3941,27 @@
 }
 
 
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+				    char __user *optval, unsigned int optlen)
+{
+	struct packet_sock *po = pkt_sk(sock->sk);
+
+	if (level != SOL_PACKET)
+		return -ENOPROTOOPT;
+
+	if (optname == PACKET_FANOUT_DATA &&
+	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+		optval = (char __user *)get_compat_bpf_fprog(optval);
+		if (!optval)
+			return -EFAULT;
+		optlen = sizeof(struct sock_fprog);
+	}
+
+	return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
 static int packet_notifier(struct notifier_block *this,
 			   unsigned long msg, void *ptr)
 {
@@ -4416,6 +4438,9 @@
 	.shutdown =	sock_no_shutdown,
 	.setsockopt =	packet_setsockopt,
 	.getsockopt =	packet_getsockopt,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = compat_packet_setsockopt,
+#endif
 	.sendmsg =	packet_sendmsg,
 	.recvmsg =	packet_recvmsg,
 	.mmap =		packet_mmap,
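
The compat_setsockopt hook above exists because a 32-bit caller's struct sock_fprog carries a 32-bit filter pointer, so PACKET_FANOUT_DATA has to be translated before the native packet_setsockopt() path can use it. A userspace sketch of that layout mismatch and the translation step; the demo_* structs and converter are illustrative stand-ins, not the real sock_fprog or get_compat_bpf_fprog():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_filter_insn { uint16_t code; uint8_t jt, jf; uint32_t k; };

struct demo_compat_fprog {		/* layout a 32-bit task passes in */
	uint16_t len;
	uint32_t filter;		/* 32-bit user pointer */
};

struct demo_native_fprog {		/* layout the native handler expects */
	uint16_t len;
	struct demo_filter_insn *filter;
};

static void demo_convert(const struct demo_compat_fprog *in,
			 struct demo_native_fprog *out)
{
	out->len = in->len;
	out->filter = (struct demo_filter_insn *)(uintptr_t)in->filter;
}

int main(void)
{
	struct demo_compat_fprog cf = { .len = 1, .filter = 0x1000 };	/* fake user address */
	struct demo_native_fprog nf;

	demo_convert(&cf, &nf);
	printf("compat: %zu bytes, native: %zu bytes (pointer at offset %zu)\n",
	       sizeof(cf), sizeof(nf), offsetof(struct demo_native_fprog, filter));
	printf("converted: len=%u filter=%p\n", (unsigned int)nf.len, (void *)nf.filter);
	return 0;
}
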
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 310cabc..7c2a65a 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -111,7 +111,7 @@
 		}
 	}
 
-	if (conn->c_version < RDS_PROTOCOL(3,1)) {
+	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
 		printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
 		       " no longer supported\n",
 		       &conn->c_faddr,
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 6b12b68..814173b 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -95,8 +95,9 @@
  */
 static void rds_loop_inc_free(struct rds_incoming *inc)
 {
-        struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
-        rds_message_put(rm);
+	struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+
+	rds_message_put(rm);
 }
 
 /* we need to at least give the thread something to succeed */
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 80256b0..387df5f 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -74,6 +74,7 @@
 	RDS_CONN_CONNECTING,
 	RDS_CONN_DISCONNECTING,
 	RDS_CONN_UP,
+	RDS_CONN_RESETTING,
 	RDS_CONN_ERROR,
 };
 
@@ -813,6 +814,7 @@
 void rds_shutdown_worker(struct work_struct *);
 void rds_send_worker(struct work_struct *);
 void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
 void rds_connect_complete(struct rds_connection *conn);
 
 /* transport.c */
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c0be1ec..8413f6c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -561,5 +561,7 @@
 		minfo.fport = inc->i_hdr.h_dport;
 	}
 
+	minfo.flags = 0;
+
 	rds_info_copy(iter, &minfo, sizeof(minfo));
 }
diff --git a/net/rds/send.c b/net/rds/send.c
index c9cdb35..b1962f8 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -99,6 +99,7 @@
 	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
 	spin_unlock_irqrestore(&conn->c_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rds_send_reset);
 
 static int acquire_in_xmit(struct rds_connection *conn)
 {
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c173f69..e381bbc 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -102,7 +102,8 @@
 	rds_sysctl_reconnect_min = msecs_to_jiffies(1);
 	rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
 
-	rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
+	rds_sysctl_reg_table =
+		register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
 	if (!rds_sysctl_reg_table)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 86187da..74ee126 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -126,9 +126,81 @@
 }
 
 /*
- * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches the connection to the new sock and
+ * releases the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks.  Send and receive trust that
+ * it is set.  The absence of the RDS_CONN_UP bit protects those paths
+ * from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+			     struct rds_connection *conn)
+{
+	struct rds_tcp_connection *tc = conn->c_transport_data;
+	struct socket *osock = tc->t_sock;
+
+	if (!osock)
+		goto newsock;
+
+	/* Need to resolve a duelling SYN between peers.
+	 * We have an outstanding SYN to this peer, which may
+	 * potentially have transitioned to the RDS_CONN_UP state,
+	 * so we must quiesce any send threads before resetting
+	 * c_transport_data. We quiesce these threads by setting
+	 * c_state to something other than RDS_CONN_UP, and then
+	 * waiting for any existing threads in rds_send_xmit to
+	 * complete release_in_xmit(). (Subsequent threads entering
+	 * rds_send_xmit() will bail on !rds_conn_up()).
+	 *
+	 * However an incoming syn-ack at this point would end up
+	 * marking the conn as RDS_CONN_UP, and would again permit
+	 * rds_send_xmit() threads through, so ideally we would
+	 * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+	 * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+	 * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+	 * would not get set. As a result, we set c_state to
+	 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
+	 * cannot mark rds_conn_path_up() in the window before lock_sock().
+	 */
+	atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+	wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+	lock_sock(osock->sk);
+	/* reset receive side state for rds_tcp_data_recv() for osock  */
+	if (tc->t_tinc) {
+		rds_inc_put(&tc->t_tinc->ti_inc);
+		tc->t_tinc = NULL;
+	}
+	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+	tc->t_tinc_data_rem = 0;
+	tc->t_sock = NULL;
+
+	write_lock_bh(&osock->sk->sk_callback_lock);
+
+	osock->sk->sk_user_data = NULL;
+	osock->sk->sk_data_ready = tc->t_orig_data_ready;
+	osock->sk->sk_write_space = tc->t_orig_write_space;
+	osock->sk->sk_state_change = tc->t_orig_state_change;
+	write_unlock_bh(&osock->sk->sk_callback_lock);
+	release_sock(osock->sk);
+	sock_release(osock);
+newsock:
+	rds_send_reset(conn);
+	lock_sock(sock->sk);
+	write_lock_bh(&sock->sk->sk_callback_lock);
+	tc->t_sock = sock;
+	sock->sk->sk_user_data = conn;
+	sock->sk->sk_data_ready = rds_tcp_data_ready;
+	sock->sk->sk_write_space = rds_tcp_write_space;
+	sock->sk->sk_state_change = rds_tcp_state_change;
+
+	write_unlock_bh(&sock->sk->sk_callback_lock);
+	release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with the data path.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
 {
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 41c2283..7940bab 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -50,6 +50,7 @@
 void rds_tcp_tune(struct socket *sock);
 void rds_tcp_nonagle(struct socket *sock);
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
 void rds_tcp_restore_callbacks(struct socket *sock,
 			       struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
@@ -82,7 +83,7 @@
 void rds_tcp_xmit_prepare(struct rds_connection *conn);
 void rds_tcp_xmit_complete(struct rds_connection *conn);
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
-	         unsigned int hdr_off, unsigned int sg, unsigned int off);
+		 unsigned int hdr_off, unsigned int sg, unsigned int off);
 void rds_tcp_write_space(struct sock *sk);
 
 /* tcp_stats.c */
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index fb82e0a..f6e95d6 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -54,19 +54,19 @@
 
 	rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
 
-	switch(sk->sk_state) {
-		/* ignore connecting sockets as they make progress */
-		case TCP_SYN_SENT:
-		case TCP_SYN_RECV:
-			break;
-		case TCP_ESTABLISHED:
-			rds_connect_complete(conn);
-			break;
-		case TCP_CLOSE_WAIT:
-		case TCP_CLOSE:
-			rds_conn_drop(conn);
-		default:
-			break;
+	switch (sk->sk_state) {
+	/* ignore connecting sockets as they make progress */
+	case TCP_SYN_SENT:
+	case TCP_SYN_RECV:
+		break;
+	case TCP_ESTABLISHED:
+		rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+		break;
+	case TCP_CLOSE_WAIT:
+	case TCP_CLOSE:
+		rds_conn_drop(conn);
+	default:
+		break;
 	}
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 4bf4bef..245542c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -78,7 +78,6 @@
 	struct inet_sock *inet;
 	struct rds_tcp_connection *rs_tcp = NULL;
 	int conn_state;
-	struct sock *nsk;
 
 	if (!sock) /* module unload or netns delete in progress */
 		return -ENETUNREACH;
@@ -136,26 +135,21 @@
 		    !conn->c_outgoing) {
 			goto rst_nsk;
 		} else {
-			atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
-			wait_event(conn->c_waitq,
-				   !test_bit(RDS_IN_XMIT, &conn->c_flags));
-			rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+			rds_tcp_reset_callbacks(new_sock, conn);
 			conn->c_outgoing = 0;
+			/* rds_connect_path_complete() marks RDS_CONN_UP */
+			rds_connect_path_complete(conn, RDS_CONN_RESETTING);
 		}
+	} else {
+		rds_tcp_set_callbacks(new_sock, conn);
+		rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
 	}
-	rds_tcp_set_callbacks(new_sock, conn);
-	rds_connect_complete(conn); /* marks RDS_CONN_UP */
 	new_sock = NULL;
 	ret = 0;
 	goto out;
 rst_nsk:
 	/* reset the newly returned accept sock and bail */
-	nsk = new_sock->sk;
-	rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-	nsk->sk_user_data = NULL;
-	nsk->sk_prot->disconnect(nsk, 0);
-	tcp_done(nsk);
-	new_sock = NULL;
+	kernel_sock_shutdown(new_sock, SHUT_RDWR);
 	ret = 0;
 out:
 	if (rs_tcp)
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index c3196f9..6e6a711 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -171,7 +171,7 @@
 	while (left) {
 		if (!tinc) {
 			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
-					        arg->gfp);
+						arg->gfp);
 			if (!tinc) {
 				desc->error = -ENOMEM;
 				goto out;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 22d0f20..618be69 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -66,19 +66,19 @@
 static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
 {
 	struct kvec vec = {
-                .iov_base = data,
-                .iov_len = len,
+		.iov_base = data,
+		.iov_len = len,
 	};
-        struct msghdr msg = {
-                .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
-        };
+	struct msghdr msg = {
+		.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
+	};
 
 	return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
 }
 
 /* the core send_sem serializes this with other xmit and shutdown */
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
-	         unsigned int hdr_off, unsigned int sg, unsigned int off)
+		 unsigned int hdr_off, unsigned int sg, unsigned int off)
 {
 	struct rds_tcp_connection *tc = conn->c_transport_data;
 	int done = 0;
@@ -196,7 +196,7 @@
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 454aa6d..4a32304 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -71,9 +71,9 @@
 struct workqueue_struct *rds_wq;
 EXPORT_SYMBOL_GPL(rds_wq);
 
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
 {
-	if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+	if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
 		printk(KERN_WARNING "%s: Cannot transition to state UP, "
 				"current state is %d\n",
 				__func__,
@@ -90,6 +90,12 @@
 	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 	queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 }
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+	rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
 EXPORT_SYMBOL_GPL(rds_connect_complete);
 
 /*
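
The threads.c hunk generalizes the transition to UP by passing in the expected current state and keeps rds_connect_complete() as a thin wrapper, so existing callers stay unchanged while tcp_listen.c can now complete from RDS_CONN_RESETTING. A toy sketch of that wrapper-over-generalized-helper pattern, with illustrative enum values rather than the RDS definitions:

#include <stdio.h>

enum demo_state { DEMO_CONNECTING, DEMO_RESETTING, DEMO_UP };

static enum demo_state state = DEMO_CONNECTING;

/* generalized helper: the caller says which state it expects to come from */
static int demo_path_complete(enum demo_state curr)
{
	if (state != curr) {
		fprintf(stderr, "cannot transition to UP, current state is %d\n", (int)state);
		return -1;
	}
	state = DEMO_UP;
	return 0;
}

/* old entry point kept as a wrapper, so existing callers need no change */
static int demo_connect_complete(void)
{
	return demo_path_complete(DEMO_CONNECTING);
}

int main(void)
{
	printf("from CONNECTING: %d\n", demo_connect_complete());
	state = DEMO_RESETTING;
	printf("from RESETTING:  %d\n", demo_path_complete(DEMO_RESETTING));
	return 0;
}
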
diff --git a/net/rds/transport.c b/net/rds/transport.c
index f3afd1d..2ffd3e30 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -140,8 +140,7 @@
 	rds_info_iter_unmap(iter);
 	down_read(&rds_trans_sem);
 
-	for (i = 0; i < RDS_TRANS_COUNT; i++)
-	{
+	for (i = 0; i < RDS_TRANS_COUNT; i++) {
 		trans = transports[i];
 		if (!trans || !trans->stats_info_copy)
 			continue;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6b726a0..bab56ed 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1162,9 +1162,7 @@
 	/* pin the cipher we need so that the crypto layer doesn't invoke
 	 * keventd to go get it */
 	rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(rxkad_ci))
-		return PTR_ERR(rxkad_ci);
-	return 0;
+	return PTR_ERR_OR_ZERO(rxkad_ci);
 }
 
 /*
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 336774a..c7a0b0d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1118,7 +1118,7 @@
 		nla_nest_end(skb, nest);
 		ret = skb->len;
 	} else
-		nla_nest_cancel(skb, nest);
+		nlmsg_trim(skb, b);
 
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 	if (NETLINK_CB(cb->skb).portid && ret)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 658046d..ea4a2fe 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -106,9 +106,9 @@
 }
 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
 
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-	mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
 	if (!mi->metaval)
 		return -ENOMEM;
 
@@ -116,9 +116,9 @@
 }
 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
 
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-	mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
 	if (!mi->metaval)
 		return -ENOMEM;
 
@@ -240,10 +240,10 @@
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
 */
 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-				void *val, int len)
+				void *val, int len, bool exists)
 {
 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
 	int ret = 0;
@@ -251,11 +251,13 @@
 	if (!ops) {
 		ret = -ENOENT;
 #ifdef CONFIG_MODULES
-		spin_unlock_bh(&ife->tcf_lock);
+		if (exists)
+			spin_unlock_bh(&ife->tcf_lock);
 		rtnl_unlock();
 		request_module("ifemeta%u", metaid);
 		rtnl_lock();
-		spin_lock_bh(&ife->tcf_lock);
+		if (exists)
+			spin_lock_bh(&ife->tcf_lock);
 		ops = find_ife_oplist(metaid);
 #endif
 	}
@@ -272,10 +274,10 @@
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
 */
 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-			int len)
+			int len, bool atomic)
 {
 	struct tcf_meta_info *mi = NULL;
 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
@@ -284,7 +286,7 @@
 	if (!ops)
 		return -ENOENT;
 
-	mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
 	if (!mi) {
 		/*put back what find_ife_oplist took */
 		module_put(ops->owner);
@@ -294,7 +296,7 @@
 	mi->metaid = metaid;
 	mi->ops = ops;
 	if (len > 0) {
-		ret = ops->alloc(mi, metaval);
+		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
 		if (ret != 0) {
 			kfree(mi);
 			module_put(ops->owner);
@@ -313,11 +315,13 @@
 	int rc = 0;
 	int installed = 0;
 
+	read_lock(&ife_mod_lock);
 	list_for_each_entry(o, &ifeoplist, list) {
-		rc = add_metainfo(ife, o->metaid, NULL, 0);
+		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
 		if (rc == 0)
 			installed += 1;
 	}
+	read_unlock(&ife_mod_lock);
 
 	if (installed)
 		return 0;
@@ -385,8 +389,9 @@
 	spin_unlock_bh(&ife->tcf_lock);
 }
 
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for an existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+			     bool exists)
 {
 	int len = 0;
 	int rc = 0;
@@ -398,11 +403,11 @@
 			val = nla_data(tb[i]);
 			len = nla_len(tb[i]);
 
-			rc = load_metaops_and_vet(ife, i, val, len);
+			rc = load_metaops_and_vet(ife, i, val, len, exists);
 			if (rc != 0)
 				return rc;
 
-			rc = add_metainfo(ife, i, val, len);
+			rc = add_metainfo(ife, i, val, len, exists);
 			if (rc)
 				return rc;
 		}
@@ -474,7 +479,8 @@
 			saddr = nla_data(tb[TCA_IFE_SMAC]);
 	}
 
-	spin_lock_bh(&ife->tcf_lock);
+	if (exists)
+		spin_lock_bh(&ife->tcf_lock);
 	ife->tcf_action = parm->action;
 
 	if (parm->flags & IFE_ENCODE) {
@@ -504,11 +510,12 @@
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(a, bind);
 
-			spin_unlock_bh(&ife->tcf_lock);
+			if (exists)
+				spin_unlock_bh(&ife->tcf_lock);
 			return err;
 		}
 
-		err = populate_metalist(ife, tb2);
+		err = populate_metalist(ife, tb2, exists);
 		if (err)
 			goto metadata_parse_err;
 
@@ -523,12 +530,14 @@
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(a, bind);
 
-			spin_unlock_bh(&ife->tcf_lock);
+			if (exists)
+				spin_unlock_bh(&ife->tcf_lock);
 			return err;
 		}
 	}
 
-	spin_unlock_bh(&ife->tcf_lock);
+	if (exists)
+		spin_unlock_bh(&ife->tcf_lock);
 
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(tn, a);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 9f002ad..d4bd19e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -121,10 +121,13 @@
 	}
 
 	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+		if (exists)
+			tcf_hash_release(a, bind);
 		return -EINVAL;
+	}
 
-	if (!tcf_hash_check(tn, index, a, bind)) {
+	if (!exists) {
 		ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
 				      false);
 		if (ret)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b884dae..c557789 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -38,7 +38,7 @@
 	bool			peak_present;
 };
 #define to_police(pc)	\
-	container_of(pc, struct tcf_police, common)
+	container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 
@@ -119,14 +119,12 @@
 				 struct nlattr *est, struct tc_action *a,
 				 int ovr, int bind)
 {
-	unsigned int h;
 	int ret = 0, err;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 	struct tc_action_net *tn = net_generic(net, police_net_id);
-	struct tcf_hashinfo *hinfo = tn->hinfo;
 	int size;
 
 	if (nla == NULL)
@@ -145,7 +143,7 @@
 
 	if (parm->index) {
 		if (tcf_hash_search(tn, a, parm->index)) {
-			police = to_police(a->priv);
+			police = to_police(a);
 			if (bind) {
 				police->tcf_bindcnt += 1;
 				police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@
 			/* not replacing */
 			return -EEXIST;
 		}
+	} else {
+		ret = tcf_hash_create(tn, parm->index, NULL, a,
+				      sizeof(*police), bind, false);
+		if (ret)
+			return ret;
+		ret = ACT_P_CREATED;
 	}
 
-	police = kzalloc(sizeof(*police), GFP_KERNEL);
-	if (police == NULL)
-		return -ENOMEM;
-	ret = ACT_P_CREATED;
-	police->tcf_refcnt = 1;
-	spin_lock_init(&police->tcf_lock);
-	if (bind)
-		police->tcf_bindcnt = 1;
+	police = to_police(a);
 override:
 	if (parm->rate.rate) {
 		err = -ENOMEM;
@@ -237,16 +234,8 @@
 		return ret;
 
 	police->tcfp_t_c = ktime_get_ns();
-	police->tcf_index = parm->index ? parm->index :
-		tcf_hash_new_index(tn);
-	police->tcf_tm.install = jiffies;
-	police->tcf_tm.lastuse = jiffies;
-	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-	spin_lock_bh(&hinfo->lock);
-	hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-	spin_unlock_bh(&hinfo->lock);
+	tcf_hash_insert(tn, a);
 
-	a->priv = police;
 	return ret;
 
 failure_unlock:
@@ -255,7 +244,7 @@
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
 	if (ret == ACT_P_CREATED)
-		kfree(police);
+		tcf_hash_cleanup(a, est);
 	return err;
 }
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 730aaca..b3b7978f 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -171,7 +171,7 @@
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, flags))
+	if (!tc_should_offload(dev, tp, flags))
 		return;
 
 	offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_STATS;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 079b43b..ffe593e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
 		offload.cls_u32->knode.handle = handle;
 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@
 	struct tc_to_netdev offload;
 	int err;
 
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
+
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-		offload.cls_u32->hnode.divisor = h->divisor;
-		offload.cls_u32->hnode.handle = h->handle;
-		offload.cls_u32->hnode.prio = h->prio;
+	offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+	offload.cls_u32->hnode.divisor = h->divisor;
+	offload.cls_u32->hnode.handle = h->handle;
+	offload.cls_u32->hnode.prio = h->prio;
 
-		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-						    tp->protocol, &offload);
-		if (tc_skip_sw(flags))
-			return err;
-	}
+	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					    tp->protocol, &offload);
+	if (tc_skip_sw(flags))
+		return err;
 
 	return 0;
 }
@@ -484,7 +485,7 @@
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
 		offload.cls_u32->hnode.divisor = h->divisor;
 		offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-		offload.cls_u32->knode.handle = n->handle;
-		offload.cls_u32->knode.fshift = n->fshift;
-#ifdef CONFIG_CLS_U32_MARK
-		offload.cls_u32->knode.val = n->val;
-		offload.cls_u32->knode.mask = n->mask;
-#else
-		offload.cls_u32->knode.val = 0;
-		offload.cls_u32->knode.mask = 0;
-#endif
-		offload.cls_u32->knode.sel = &n->sel;
-		offload.cls_u32->knode.exts = &n->exts;
-		if (n->ht_down)
-			offload.cls_u32->knode.link_handle = n->ht_down->handle;
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
 
-		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-						    tp->protocol, &offload);
-		if (tc_skip_sw(flags))
-			return err;
-	}
+	offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+	offload.cls_u32->knode.handle = n->handle;
+	offload.cls_u32->knode.fshift = n->fshift;
+#ifdef CONFIG_CLS_U32_MARK
+	offload.cls_u32->knode.val = n->val;
+	offload.cls_u32->knode.mask = n->mask;
+#else
+	offload.cls_u32->knode.val = 0;
+	offload.cls_u32->knode.mask = 0;
+#endif
+	offload.cls_u32->knode.sel = &n->sel;
+	offload.cls_u32->knode.exts = &n->exts;
+	if (n->ht_down)
+		offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					    tp->protocol, &offload);
+	if (tc_skip_sw(flags))
+		return err;
 
 	return 0;
 }
@@ -863,7 +865,7 @@
 	if (tb[TCA_U32_FLAGS]) {
 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
 		if (!tc_flags_valid(flags))
-			return err;
+			return -EINVAL;
 	}
 
 	n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@
 		ht->divisor = divisor;
 		ht->handle = handle;
 		ht->prio = tp->prio;
+
+		err = u32_replace_hw_hnode(tp, ht, flags);
+		if (err) {
+			kfree(ht);
+			return err;
+		}
+
 		RCU_INIT_POINTER(ht->next, tp_c->hlist);
 		rcu_assign_pointer(tp_c->hlist, ht);
 		*arg = (unsigned long)ht;
 
-		u32_replace_hw_hnode(tp, ht, flags);
 		return 0;
 	}
 
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a63e879..bf8af2c 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -375,6 +375,7 @@
 		cl->deficit = cl->quantum;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return err;
 }
@@ -407,6 +408,7 @@
 
 			bstats_update(&cl->bstats, skb);
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -428,6 +430,7 @@
 		if (cl->qdisc->ops->drop) {
 			len = cl->qdisc->ops->drop(cl->qdisc);
 			if (len > 0) {
+				sch->qstats.backlog -= len;
 				sch->q.qlen--;
 				if (cl->qdisc->q.qlen == 0)
 					list_del(&cl->alist);
@@ -463,6 +466,7 @@
 			qdisc_reset(cl->qdisc);
 		}
 	}
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
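
This sch_drr hunk, and the hfsc/prio/red/tbf hunks further down, all add the same bookkeeping: sch->qstats.backlog must track bytes wherever sch->q.qlen tracks packets, including on drop and reset. A toy queue showing the invariant; the fields are illustrative, not struct Qdisc:

#include <stdio.h>

struct toy_qdisc {
	unsigned int qlen;	/* packets queued */
	unsigned int backlog;	/* bytes queued */
};

static void toy_enqueue(struct toy_qdisc *q, unsigned int len)
{
	q->qlen++;
	q->backlog += len;	/* counterpart of qdisc_qstats_backlog_inc() */
}

static void toy_dequeue(struct toy_qdisc *q, unsigned int len)
{
	q->qlen--;
	q->backlog -= len;	/* counterpart of qdisc_qstats_backlog_dec() */
}

static void toy_reset(struct toy_qdisc *q)
{
	q->qlen = 0;
	q->backlog = 0;		/* both counters are cleared together */
}

int main(void)
{
	struct toy_qdisc q = { 0, 0 };

	toy_enqueue(&q, 1500);
	toy_enqueue(&q, 60);
	toy_dequeue(&q, 1500);
	printf("after dequeue: qlen=%u backlog=%u\n", q.qlen, q.backlog);
	toy_reset(&q);
	printf("after reset:   qlen=%u backlog=%u\n", q.qlen, q.backlog);
	return 0;
}
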
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2177eac..2e4bd2c 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -37,14 +37,18 @@
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
+	unsigned int prev_backlog;
+
 	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
+	prev_backlog = sch->qstats.backlog;
 	/* queue full, remove one skb to fulfill the limit */
 	__qdisc_queue_drop_head(sch, &sch->q);
 	qdisc_qstats_drop(sch);
 	qdisc_enqueue_tail(skb, sch);
 
+	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
 	return NET_XMIT_CN;
 }
 
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6883a89..da250b2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -199,6 +199,7 @@
 	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	unsigned int pkt_len;
 	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@
 	prev_backlog = sch->qstats.backlog;
 	prev_qlen = sch->q.qlen;
 
+	/* save this packet length as it might be dropped by fq_codel_drop() */
+	pkt_len = qdisc_pkt_len(skb);
 	/* fq_codel_drop() is quite expensive, as it performs a linear search
 	 * in q->backlogs[] to find a fat flow.
 	 * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@
 	 */
 	ret = fq_codel_drop(sch, q->drop_batch_size);
 
-	q->drop_overlimit += prev_qlen - sch->q.qlen;
+	prev_qlen -= sch->q.qlen;
+	prev_backlog -= sch->qstats.backlog;
+	q->drop_overlimit += prev_qlen;
 	if (memory_limited)
-		q->drop_overmemory += prev_qlen - sch->q.qlen;
-	/* As we dropped packet(s), better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-				  prev_backlog - sch->qstats.backlog);
+		q->drop_overmemory += prev_qlen;
 
-	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	/* As we dropped packet(s), better let upper stack know this.
+	 * If we dropped a packet for this flow, return NET_XMIT_CN,
+	 * but in this case, our parents won't increase their backlogs.
+	 */
+	if (ret == idx) {
+		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+					  prev_backlog - pkt_len);
+		return NET_XMIT_CN;
+	}
+	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+	return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@
 		qs.backlog = q->backlogs[idx];
 		qs.drops = flow->dropped;
 	}
-	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
 	if (idx < q->flows_cnt)
 		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 269dd71..f9e0e9c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -49,6 +49,7 @@
 {
 	q->gso_skb = skb;
 	q->qstats.requeues++;
+	qdisc_qstats_backlog_inc(q, skb);
 	q->q.qlen++;	/* it's still part of the queue */
 	__netif_schedule(q);
 
@@ -92,6 +93,7 @@
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
+			qdisc_qstats_backlog_dec(q, skb);
 			q->q.qlen--;
 		} else
 			skb = NULL;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d783d7c..1ac9f9f 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1529,6 +1529,7 @@
 	q->eligible = RB_ROOT;
 	INIT_LIST_HEAD(&q->droplist);
 	qdisc_watchdog_cancel(&q->watchdog);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
@@ -1559,14 +1560,6 @@
 	struct hfsc_sched *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
-	struct hfsc_class *cl;
-	unsigned int i;
-
-	sch->qstats.backlog = 0;
-	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
-			sch->qstats.backlog += cl->qdisc->qstats.backlog;
-	}
 
 	qopt.defcls = q->defcls;
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@
 
 	qdisc_unthrottled(sch);
 	qdisc_bstats_update(sch, skb);
+	qdisc_qstats_backlog_dec(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
@@ -1695,6 +1690,7 @@
 			}
 			cl->qstats.drops++;
 			qdisc_qstats_drop(sch);
+			sch->qstats.backlog -= len;
 			sch->q.qlen--;
 			return len;
 		}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d4b4218..62f9d81 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1007,7 +1007,9 @@
 	struct htb_sched *q = container_of(work, struct htb_sched, work);
 	struct Qdisc *sch = q->watchdog.qdisc;
 
+	rcu_read_lock();
 	__netif_schedule(qdisc_root(sch));
+	rcu_read_unlock();
 }
 
 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 10adbc6..8fe6999 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@
 	return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+	return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
 					 unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@
 	.put		=	ingress_put,
 	.walk		=	ingress_walk,
 	.tcf_chain	=	ingress_find_tcf,
+	.tcf_cl_offload	=	ingress_cl_offload,
 	.bind_tcf	=	ingress_bind_filter,
 	.unbind_tcf	=	ingress_put,
 };
@@ -110,6 +116,11 @@
 	}
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
 					unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@
 	.put		=	ingress_put,
 	.walk		=	ingress_walk,
 	.tcf_chain	=	clsact_find_tcf,
+	.tcf_cl_offload	=	clsact_cl_offload,
 	.bind_tcf	=	clsact_bind_filter,
 	.unbind_tcf	=	ingress_put,
 };
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 205bed0..178f163 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -650,14 +650,14 @@
 #endif
 
 			if (q->qdisc) {
+				unsigned int pkt_len = qdisc_pkt_len(skb);
 				int err = qdisc_enqueue(skb, q->qdisc);
 
-				if (unlikely(err != NET_XMIT_SUCCESS)) {
-					if (net_xmit_drop_count(err)) {
-						qdisc_qstats_drop(sch);
-						qdisc_tree_reduce_backlog(sch, 1,
-									  qdisc_pkt_len(skb));
-					}
+				if (err != NET_XMIT_SUCCESS &&
+				    net_xmit_drop_count(err)) {
+					qdisc_qstats_drop(sch);
+					qdisc_tree_reduce_backlog(sch, 1,
+								  pkt_len);
 				}
 				goto tfifo_dequeue;
 			}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fee1b15..a356450 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -85,6 +85,7 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -117,6 +118,7 @@
 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
 		if (skb) {
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -135,6 +137,7 @@
 	for (prio = q->bands-1; prio >= 0; prio--) {
 		qdisc = q->queues[prio];
 		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+			sch->qstats.backlog -= len;
 			sch->q.qlen--;
 			return len;
 		}
@@ -151,6 +154,7 @@
 
 	for (prio = 0; prio < q->bands; prio++)
 		qdisc_reset(q->queues[prio]);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
@@ -168,8 +172,9 @@
 static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *queues[TCQ_PRIO_BANDS];
+	int oldbands = q->bands, i;
 	struct tc_prio_qopt *qopt;
-	int i;
 
 	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
@@ -183,62 +188,42 @@
 			return -EINVAL;
 	}
 
+	/* Before commit, make sure we can allocate all new qdiscs */
+	for (i = oldbands; i < qopt->bands; i++) {
+		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					      TC_H_MAKE(sch->handle, i + 1));
+		if (!queues[i]) {
+			while (i > oldbands)
+				qdisc_destroy(queues[--i]);
+			return -ENOMEM;
+		}
+	}
+
 	sch_tree_lock(sch);
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+	for (i = q->bands; i < oldbands; i++) {
 		struct Qdisc *child = q->queues[i];
-		q->queues[i] = &noop_qdisc;
-		if (child != &noop_qdisc) {
-			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
-			qdisc_destroy(child);
-		}
+
+		qdisc_tree_reduce_backlog(child, child->q.qlen,
+					  child->qstats.backlog);
+		qdisc_destroy(child);
 	}
+
+	for (i = oldbands; i < q->bands; i++)
+		q->queues[i] = queues[i];
+
 	sch_tree_unlock(sch);
-
-	for (i = 0; i < q->bands; i++) {
-		if (q->queues[i] == &noop_qdisc) {
-			struct Qdisc *child, *old;
-
-			child = qdisc_create_dflt(sch->dev_queue,
-						  &pfifo_qdisc_ops,
-						  TC_H_MAKE(sch->handle, i + 1));
-			if (child) {
-				sch_tree_lock(sch);
-				old = q->queues[i];
-				q->queues[i] = child;
-
-				if (old != &noop_qdisc) {
-					qdisc_tree_reduce_backlog(old,
-								  old->q.qlen,
-								  old->qstats.backlog);
-					qdisc_destroy(old);
-				}
-				sch_tree_unlock(sch);
-			}
-		}
-	}
 	return 0;
 }
 
 static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct prio_sched_data *q = qdisc_priv(sch);
-	int i;
-
-	for (i = 0; i < TCQ_PRIO_BANDS; i++)
-		q->queues[i] = &noop_qdisc;
-
-	if (opt == NULL) {
+	if (!opt)
 		return -EINVAL;
-	} else {
-		int err;
 
-		if ((err = prio_tune(sch, opt)) != 0)
-			return err;
-	}
-	return 0;
+	return prio_tune(sch, opt);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
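
The prio_tune() rewrite above moves all qdisc_create_dflt() calls in front of sch_tree_lock(): every new band is allocated first, a partial failure is rolled back without touching live state, and only the commit happens under the lock. A userspace sketch of that allocate-then-commit shape, with illustrative names and malloc() standing in for qdisc creation:

#include <stdio.h>
#include <stdlib.h>

#define MAX_BANDS 16

static void *live[MAX_BANDS];
static int live_bands;

static int retune(int new_bands)
{
	void *staged[MAX_BANDS];
	int i;

	if (new_bands < 0 || new_bands > MAX_BANDS)
		return -1;

	/* before commit, make sure we can allocate all new entries */
	for (i = live_bands; i < new_bands; i++) {
		staged[i] = malloc(64);
		if (!staged[i]) {
			while (i > live_bands)
				free(staged[--i]);	/* roll back; live state untouched */
			return -1;
		}
	}

	/* commit: shrink (free surplus bands) or grow (adopt staged bands) */
	for (i = new_bands; i < live_bands; i++)
		free(live[i]);
	for (i = live_bands; i < new_bands; i++)
		live[i] = staged[i];
	live_bands = new_bands;
	return 0;
}

int main(void)
{
	int err = retune(4);

	printf("grow to 4:   err=%d bands=%d\n", err, live_bands);
	err = retune(2);
	printf("shrink to 2: err=%d bands=%d\n", err, live_bands);
	retune(0);	/* release the remaining bands */
	return 0;
}
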
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8d2d8d9..f18857f 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1235,8 +1235,10 @@
 			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
 		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
 				     qdisc_pkt_len(skb));
-		if (err)
-			return err;
+		if (err) {
+			cl->qstats.drops++;
+			return qdisc_drop(skb, sch);
+		}
 	}
 
 	err = qdisc_enqueue(skb, cl->qdisc);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 8c0508c..91578bd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,6 +97,7 @@
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -118,6 +119,7 @@
 	skb = child->dequeue(child);
 	if (skb) {
 		qdisc_bstats_update(sch, skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
 		if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@
 	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
 		q->stats.other++;
 		qdisc_qstats_drop(sch);
+		sch->qstats.backlog -= len;
 		sch->q.qlen--;
 		return len;
 	}
@@ -158,6 +161,7 @@
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 83b90b5..3161e49 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -207,6 +207,7 @@
 		return ret;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
@@ -217,6 +218,7 @@
 	unsigned int len = 0;
 
 	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+		sch->qstats.backlog -= len;
 		sch->q.qlen--;
 		qdisc_qstats_drop(sch);
 	}
@@ -263,6 +265,7 @@
 			q->t_c = now;
 			q->tokens = toks;
 			q->ptokens = ptoks;
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			qdisc_unthrottled(sch);
 			qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	q->t_c = ktime_get_ns();
 	q->tokens = q->buffer;
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 1ce724b..f69edcf 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -3,12 +3,6 @@
 #include <linux/sock_diag.h>
 #include <net/sctp/sctp.h>
 
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
-				      struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
-				    struct inet_diag_msg *r, int ext,
-				    struct user_namespace *user_ns);
-
 static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 			       void *info);
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 777d032..67154b8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4220,6 +4220,7 @@
 		info->sctpi_s_disable_fragments = sp->disable_fragments;
 		info->sctpi_s_v4mapped = sp->v4mapped;
 		info->sctpi_s_frag_interleave = sp->frag_interleave;
+		info->sctpi_s_type = sp->type;
 
 		return 0;
 	}
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 02f5367..040ff62 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,7 +543,7 @@
  */
 struct rpc_cred *
 rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
-		int flags)
+		int flags, gfp_t gfp)
 {
 	LIST_HEAD(free);
 	struct rpc_cred_cache *cache = auth->au_credcache;
@@ -580,7 +580,7 @@
 	if (flags & RPCAUTH_LOOKUP_RCU)
 		return ERR_PTR(-ECHILD);
 
-	new = auth->au_ops->crcreate(auth, acred, flags);
+	new = auth->au_ops->crcreate(auth, acred, flags, gfp);
 	if (IS_ERR(new)) {
 		cred = new;
 		goto out;
@@ -703,8 +703,7 @@
 		new = rpcauth_bind_new_cred(task, lookupflags);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
-	if (req->rq_cred != NULL)
-		put_rpccred(req->rq_cred);
+	put_rpccred(req->rq_cred);
 	req->rq_cred = new;
 	return 0;
 }
@@ -712,6 +711,8 @@
 void
 put_rpccred(struct rpc_cred *cred)
 {
+	if (cred == NULL)
+		return;
 	/* Fast path for unhashed credentials */
 	if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
 		if (atomic_dec_and_test(&cred->cr_count))
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 41248b1..54dd3fd 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -38,6 +38,13 @@
 }
 EXPORT_SYMBOL_GPL(rpc_lookup_cred);
 
+struct rpc_cred *
+rpc_lookup_generic_cred(struct auth_cred *acred, int flags, gfp_t gfp)
+{
+	return rpcauth_lookup_credcache(&generic_auth, acred, flags, gfp);
+}
+EXPORT_SYMBOL_GPL(rpc_lookup_generic_cred);
+
 struct rpc_cred *rpc_lookup_cred_nonblock(void)
 {
 	return rpcauth_lookupcred(&generic_auth, RPCAUTH_LOOKUP_RCU);
@@ -77,15 +84,15 @@
 static struct rpc_cred *
 generic_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 {
-	return rpcauth_lookup_credcache(&generic_auth, acred, flags);
+	return rpcauth_lookup_credcache(&generic_auth, acred, flags, GFP_KERNEL);
 }
 
 static struct rpc_cred *
-generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
 {
 	struct generic_cred *gcred;
 
-	gcred = kmalloc(sizeof(*gcred), GFP_KERNEL);
+	gcred = kmalloc(sizeof(*gcred), gfp);
 	if (gcred == NULL)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 15612ff..e64ae93 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1299,11 +1299,11 @@
 static struct rpc_cred *
 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 {
-	return rpcauth_lookup_credcache(auth, acred, flags);
+	return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
 }
 
 static struct rpc_cred *
-gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
 {
 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
 	struct gss_cred	*cred = NULL;
@@ -1313,7 +1313,7 @@
 		__func__, from_kuid(&init_user_ns, acred->uid),
 		auth->au_flavor);
 
-	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
+	if (!(cred = kzalloc(sizeof(*cred), gfp)))
 		goto out_err;
 
 	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1095be9..e085f5a 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -569,10 +569,9 @@
 	struct rsc *found;
 
 	memset(&rsci, 0, sizeof(rsci));
-	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
-		return NULL;
+	rsci.handle.data = handle->data;
+	rsci.handle.len = handle->len;
 	found = rsc_lookup(cd, &rsci);
-	rsc_free(&rsci);
 	if (!found)
 		return NULL;
 	if (cache_check(cd, &found->h, NULL))
@@ -857,8 +856,8 @@
 		goto out;
 	if (svc_getnl(&buf->head[0]) != seq)
 		goto out;
-	/* trim off the mic at the end before returning */
-	xdr_buf_trim(buf, mic.len + 4);
+	/* trim off the mic and padding at the end before returning */
+	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
 	stat = 0;
 out:
 	kfree(mic.data);
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 0d3dd36..9f65452 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -52,11 +52,11 @@
 static struct rpc_cred *
 unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 {
-	return rpcauth_lookup_credcache(auth, acred, flags);
+	return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
 }
 
 static struct rpc_cred *
-unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
 {
 	struct unx_cred	*cred;
 	unsigned int groups = 0;
@@ -66,7 +66,7 @@
 			from_kuid(&init_user_ns, acred->uid),
 			from_kgid(&init_user_ns, acred->gid));
 
-	if (!(cred = kmalloc(sizeof(*cred), GFP_NOFS)))
+	if (!(cred = kmalloc(sizeof(*cred), gfp)))
 		return ERR_PTR(-ENOMEM);
 
 	rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7e0c9bf..06b4df9 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1414,6 +1414,23 @@
 EXPORT_SYMBOL_GPL(rpc_max_payload);
 
 /**
+ * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
+ * @clnt: RPC client to query
+ */
+size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt *xprt;
+	size_t ret;
+
+	rcu_read_lock();
+	xprt = rcu_dereference(clnt->cl_xprt);
+	ret = xprt->ops->bc_maxpayload(xprt);
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_max_bc_payload);
+
+/**
  * rpc_get_timeout - Get timeout for transport in units of HZ
  * @clnt: RPC client to query
  */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 7422f28..f5572e31 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -244,13 +244,12 @@
 	svc_xprt_received(new);
 }
 
-int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 		    struct net *net, const int family,
 		    const unsigned short port, int flags)
 {
 	struct svc_xprt_class *xcl;
 
-	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
 	spin_lock(&svc_xprt_class_lock);
 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
 		struct svc_xprt *newxprt;
@@ -274,12 +273,28 @@
 	}
  err:
 	spin_unlock(&svc_xprt_class_lock);
-	dprintk("svc: transport %s not found\n", xprt_name);
-
 	/* This errno is exposed to user space.  Provide a reasonable
 	 * perror msg for a bad transport. */
 	return -EPROTONOSUPPORT;
 }
+
+int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+		    struct net *net, const int family,
+		    const unsigned short port, int flags)
+{
+	int err;
+
+	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
+	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
+	if (err == -EPROTONOSUPPORT) {
+		request_module("svc%s", xprt_name);
+		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
+	}
+	if (err)
+		dprintk("svc: transport %s not found, err %d\n",
+			xprt_name, err);
+	return err;
+}
 EXPORT_SYMBOL_GPL(svc_create_xprt);
 
 /*
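
svc_create_xprt() above now retries the class lookup once after request_module("svc%s", xprt_name), so a transport whose module has not been loaded yet can still be created. A userspace sketch of that lookup/load/retry shape; the registry and loader below are illustrative stand-ins for svc_xprt_class_list and request_module():

#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char *registered;		/* the one class our toy registry knows */

static int create_xprt_once(const char *name)
{
	if (registered && strcmp(registered, name) == 0)
		return 0;
	return -EPROTONOSUPPORT;
}

static void demo_request_module(const char *name)
{
	/* pretend loading "svc<name>" registered the rdma class */
	if (strcmp(name, "rdma") == 0)
		registered = "rdma";
}

static int create_xprt(const char *name)
{
	int err = create_xprt_once(name);

	if (err == -EPROTONOSUPPORT) {
		demo_request_module(name);
		err = create_xprt_once(name);	/* exactly one retry after the load attempt */
	}
	if (err)
		fprintf(stderr, "transport %s not found, err %d\n", name, err);
	return err;
}

int main(void)
{
	printf("rdma:  %d\n", create_xprt("rdma"));
	printf("bogus: %d\n", create_xprt("bogus"));
	return 0;
}
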
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 6bdb386..c4f3cc0 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -797,6 +797,8 @@
 		xdr_set_iov(xdr, buf->head, buf->len);
 	else if (buf->page_len != 0)
 		xdr_set_page_base(xdr, 0, buf->len);
+	else
+		xdr_set_iov(xdr, buf->head, buf->len);
 	if (p != NULL && p > xdr->p && xdr->end >= p) {
 		xdr->nwords -= p - xdr->p;
 		xdr->p = p;
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 2dcd764..87762d9 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -192,6 +192,22 @@
 }
 
 /**
+ * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
+ * @xprt: transport
+ *
+ * Returns maximum size, in bytes, of a backchannel message
+ */
+size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
+{
+	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+	size_t maxmsg;
+
+	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
+	return maxmsg - RPCRDMA_HDRLEN_MIN;
+}
+
+/**
  * rpcrdma_bc_marshal_reply - Send backwards direction reply
  * @rqst: buffer containing RPC reply data
  *
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index b289e10..6326ebe 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -35,10 +35,71 @@
 /* Maximum scatter/gather per FMR */
 #define RPCRDMA_MAX_FMR_SGES	(64)
 
+static struct workqueue_struct *fmr_recovery_wq;
+
+#define FMR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND)
+
+int
+fmr_alloc_recovery_wq(void)
+{
+	fmr_recovery_wq = alloc_workqueue("fmr_recovery", WQ_UNBOUND, 0);
+	return !fmr_recovery_wq ? -ENOMEM : 0;
+}
+
+void
+fmr_destroy_recovery_wq(void)
+{
+	struct workqueue_struct *wq;
+
+	if (!fmr_recovery_wq)
+		return;
+
+	wq = fmr_recovery_wq;
+	fmr_recovery_wq = NULL;
+	destroy_workqueue(wq);
+}
+
+static int
+__fmr_unmap(struct rpcrdma_mw *mw)
+{
+	LIST_HEAD(l);
+
+	list_add(&mw->fmr.fmr->list, &l);
+	return ib_unmap_fmr(&l);
+}
+
+/* Deferred reset of a single FMR. Generate a fresh rkey by
+ * replacing the MR. There's no recovery if this fails.
+ */
+static void
+__fmr_recovery_worker(struct work_struct *work)
+{
+	struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
+					    mw_work);
+	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+
+	__fmr_unmap(mw);
+	rpcrdma_put_mw(r_xprt, mw);
+	return;
+}
+
+/* A broken MR was discovered in a context that can't sleep.
+ * Defer recovery to the recovery worker.
+ */
+static void
+__fmr_queue_recovery(struct rpcrdma_mw *mw)
+{
+	INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
+	queue_work(fmr_recovery_wq, &mw->mw_work);
+}
+
 static int
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	    struct rpcrdma_create_data_internal *cdata)
 {
+	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
+						      RPCRDMA_MAX_DATA_SEGS /
+						      RPCRDMA_MAX_FMR_SGES));
 	return 0;
 }
 
@@ -48,7 +109,7 @@
 fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
 {
 	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-		     rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
+		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
 }
 
 static int
@@ -89,6 +150,7 @@
 		if (IS_ERR(r->fmr.fmr))
 			goto out_fmr_err;
 
+		r->mw_xprt = r_xprt;
 		list_add(&r->mw_list, &buf->rb_mws);
 		list_add(&r->mw_all, &buf->rb_all);
 	}
@@ -104,15 +166,6 @@
 	return rc;
 }
 
-static int
-__fmr_unmap(struct rpcrdma_mw *r)
-{
-	LIST_HEAD(l);
-
-	list_add(&r->fmr.fmr->list, &l);
-	return ib_unmap_fmr(&l);
-}
-
 /* Use the ib_map_phys_fmr() verb to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
@@ -183,15 +236,10 @@
 __fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
 	struct ib_device *device = r_xprt->rx_ia.ri_device;
-	struct rpcrdma_mw *mw = seg->rl_mw;
 	int nsegs = seg->mr_nsegs;
 
-	seg->rl_mw = NULL;
-
 	while (nsegs--)
 		rpcrdma_unmap_one(device, seg++);
-
-	rpcrdma_put_mw(r_xprt, mw);
 }
 
 /* Invalidate all memory regions that were registered for "req".
@@ -234,42 +282,50 @@
 		seg = &req->rl_segments[i];
 
 		__fmr_dma_unmap(r_xprt, seg);
+		rpcrdma_put_mw(r_xprt, seg->rl_mw);
 
 		i += seg->mr_nsegs;
 		seg->mr_nsegs = 0;
+		seg->rl_mw = NULL;
 	}
 
 	req->rl_nchunks = 0;
 }
 
-/* Use the ib_unmap_fmr() verb to prevent further remote
- * access via RDMA READ or RDMA WRITE.
+/* Use a slow, safe mechanism to invalidate all memory regions
+ * that were registered for "req".
+ *
+ * In the asynchronous case, DMA unmapping occurs first here
+ * because the rpcrdma_mr_seg is released immediately after this
+ * call. Its contents won't be available in __fmr_dma_unmap later.
+ * FIXME.
  */
-static int
-fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+static void
+fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+		  bool sync)
 {
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_mr_seg *seg1 = seg;
-	struct rpcrdma_mw *mw = seg1->rl_mw;
-	int rc, nsegs = seg->mr_nsegs;
+	struct rpcrdma_mr_seg *seg;
+	struct rpcrdma_mw *mw;
+	unsigned int i;
 
-	dprintk("RPC:       %s: FMR %p\n", __func__, mw);
+	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
+		seg = &req->rl_segments[i];
+		mw = seg->rl_mw;
 
-	seg1->rl_mw = NULL;
-	while (seg1->mr_nsegs--)
-		rpcrdma_unmap_one(ia->ri_device, seg++);
-	rc = __fmr_unmap(mw);
-	if (rc)
-		goto out_err;
-	rpcrdma_put_mw(r_xprt, mw);
-	return nsegs;
+		if (sync) {
+			/* ORDER */
+			__fmr_unmap(mw);
+			__fmr_dma_unmap(r_xprt, seg);
+			rpcrdma_put_mw(r_xprt, mw);
+		} else {
+			__fmr_dma_unmap(r_xprt, seg);
+			__fmr_queue_recovery(mw);
+		}
 
-out_err:
-	/* The FMR is abandoned, but remains in rb_all. fmr_op_destroy
-	 * will attempt to release it when the transport is destroyed.
-	 */
-	dprintk("RPC:       %s: ib_unmap_fmr status %i\n", __func__, rc);
-	return nsegs;
+		i += seg->mr_nsegs;
+		seg->mr_nsegs = 0;
+		seg->rl_mw = NULL;
+	}
 }
 
 static void
@@ -295,7 +351,7 @@
 const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
 	.ro_map				= fmr_op_map,
 	.ro_unmap_sync			= fmr_op_unmap_sync,
-	.ro_unmap			= fmr_op_unmap,
+	.ro_unmap_safe			= fmr_op_unmap_safe,
 	.ro_open			= fmr_op_open,
 	.ro_maxpages			= fmr_op_maxpages,
 	.ro_init			= fmr_op_init,
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 94c3fa9..c094754 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -98,6 +98,47 @@
 	destroy_workqueue(wq);
 }
 
+static int
+__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+{
+	struct rpcrdma_frmr *f = &r->frmr;
+	int rc;
+
+	rc = ib_dereg_mr(f->fr_mr);
+	if (rc) {
+		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
+			rc, r);
+		return rc;
+	}
+
+	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
+			       ia->ri_max_frmr_depth);
+	if (IS_ERR(f->fr_mr)) {
+		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
+			PTR_ERR(f->fr_mr), r);
+		return PTR_ERR(f->fr_mr);
+	}
+
+	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
+	f->fr_state = FRMR_IS_INVALID;
+	return 0;
+}
+
+static void
+__frwr_reset_and_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
+{
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct rpcrdma_frmr *f = &mw->frmr;
+	int rc;
+
+	rc = __frwr_reset_mr(ia, mw);
+	ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents, f->fr_dir);
+	if (rc)
+		return;
+
+	rpcrdma_put_mw(r_xprt, mw);
+}
+
 /* Deferred reset of a single FRMR. Generate a fresh rkey by
  * replacing the MR.
  *
@@ -109,26 +150,10 @@
 __frwr_recovery_worker(struct work_struct *work)
 {
 	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
-					    frmr.fr_work);
-	struct rpcrdma_xprt *r_xprt = r->frmr.fr_xprt;
-	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
-	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
+					    mw_work);
 
-	if (ib_dereg_mr(r->frmr.fr_mr))
-		goto out_fail;
-
-	r->frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
-	if (IS_ERR(r->frmr.fr_mr))
-		goto out_fail;
-
-	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
-	r->frmr.fr_state = FRMR_IS_INVALID;
-	rpcrdma_put_mw(r_xprt, r);
+	__frwr_reset_and_unmap(r->mw_xprt, r);
 	return;
-
-out_fail:
-	pr_warn("RPC:       %s: FRMR %p unrecovered\n",
-		__func__, r);
 }
 
 /* A broken MR was discovered in a context that can't sleep.
@@ -137,8 +162,8 @@
 static void
 __frwr_queue_recovery(struct rpcrdma_mw *r)
 {
-	INIT_WORK(&r->frmr.fr_work, __frwr_recovery_worker);
-	queue_work(frwr_recovery_wq, &r->frmr.fr_work);
+	INIT_WORK(&r->mw_work, __frwr_recovery_worker);
+	queue_work(frwr_recovery_wq, &r->mw_work);
 }
 
 static int
@@ -152,11 +177,11 @@
 	if (IS_ERR(f->fr_mr))
 		goto out_mr_err;
 
-	f->sg = kcalloc(depth, sizeof(*f->sg), GFP_KERNEL);
-	if (!f->sg)
+	f->fr_sg = kcalloc(depth, sizeof(*f->fr_sg), GFP_KERNEL);
+	if (!f->fr_sg)
 		goto out_list_err;
 
-	sg_init_table(f->sg, depth);
+	sg_init_table(f->fr_sg, depth);
 
 	init_completion(&f->fr_linv_done);
 
@@ -185,7 +210,7 @@
 	if (rc)
 		dprintk("RPC:       %s: ib_dereg_mr status %i\n",
 			__func__, rc);
-	kfree(r->frmr.sg);
+	kfree(r->frmr.fr_sg);
 }
 
 static int
@@ -231,6 +256,9 @@
 					       depth;
 	}
 
+	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
+						      RPCRDMA_MAX_DATA_SEGS /
+						      ia->ri_max_frmr_depth));
 	return 0;
 }
 
@@ -243,7 +271,7 @@
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
 	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
+		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
 }
 
 static void
@@ -350,9 +378,9 @@
 			return rc;
 		}
 
+		r->mw_xprt = r_xprt;
 		list_add(&r->mw_list, &buf->rb_mws);
 		list_add(&r->mw_all, &buf->rb_all);
-		r->frmr.fr_xprt = r_xprt;
 	}
 
 	return 0;
@@ -396,12 +424,12 @@
 
 	for (i = 0; i < nsegs;) {
 		if (seg->mr_page)
-			sg_set_page(&frmr->sg[i],
+			sg_set_page(&frmr->fr_sg[i],
 				    seg->mr_page,
 				    seg->mr_len,
 				    offset_in_page(seg->mr_offset));
 		else
-			sg_set_buf(&frmr->sg[i], seg->mr_offset,
+			sg_set_buf(&frmr->fr_sg[i], seg->mr_offset,
 				   seg->mr_len);
 
 		++seg;
@@ -412,25 +440,26 @@
 		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
 			break;
 	}
-	frmr->sg_nents = i;
+	frmr->fr_nents = i;
+	frmr->fr_dir = direction;
 
-	dma_nents = ib_dma_map_sg(device, frmr->sg, frmr->sg_nents, direction);
+	dma_nents = ib_dma_map_sg(device, frmr->fr_sg, frmr->fr_nents, direction);
 	if (!dma_nents) {
 		pr_err("RPC:       %s: failed to dma map sg %p sg_nents %u\n",
-		       __func__, frmr->sg, frmr->sg_nents);
+		       __func__, frmr->fr_sg, frmr->fr_nents);
 		return -ENOMEM;
 	}
 
-	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
-	if (unlikely(n != frmr->sg_nents)) {
+	n = ib_map_mr_sg(mr, frmr->fr_sg, frmr->fr_nents, NULL, PAGE_SIZE);
+	if (unlikely(n != frmr->fr_nents)) {
 		pr_err("RPC:       %s: failed to map mr %p (%u/%u)\n",
-		       __func__, frmr->fr_mr, n, frmr->sg_nents);
+		       __func__, frmr->fr_mr, n, frmr->fr_nents);
 		rc = n < 0 ? n : -EINVAL;
 		goto out_senderr;
 	}
 
 	dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
-		__func__, mw, frmr->sg_nents, mr->length);
+		__func__, mw, frmr->fr_nents, mr->length);
 
 	key = (u8)(mr->rkey & 0x000000FF);
 	ib_update_fast_reg_key(mr, ++key);
@@ -452,18 +481,16 @@
 	if (rc)
 		goto out_senderr;
 
-	seg1->mr_dir = direction;
 	seg1->rl_mw = mw;
 	seg1->mr_rkey = mr->rkey;
 	seg1->mr_base = mr->iova;
-	seg1->mr_nsegs = frmr->sg_nents;
+	seg1->mr_nsegs = frmr->fr_nents;
 	seg1->mr_len = mr->length;
 
-	return frmr->sg_nents;
+	return frmr->fr_nents;
 
 out_senderr:
 	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
-	ib_dma_unmap_sg(device, frmr->sg, dma_nents, direction);
 	__frwr_queue_recovery(mw);
 	return rc;
 }
@@ -487,24 +514,6 @@
 	return invalidate_wr;
 }
 
-static void
-__frwr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
-		 int rc)
-{
-	struct ib_device *device = r_xprt->rx_ia.ri_device;
-	struct rpcrdma_mw *mw = seg->rl_mw;
-	struct rpcrdma_frmr *f = &mw->frmr;
-
-	seg->rl_mw = NULL;
-
-	ib_dma_unmap_sg(device, f->sg, f->sg_nents, seg->mr_dir);
-
-	if (!rc)
-		rpcrdma_put_mw(r_xprt, mw);
-	else
-		__frwr_queue_recovery(mw);
-}
-
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the
@@ -518,6 +527,7 @@
 	struct rpcrdma_mr_seg *seg;
 	unsigned int i, nchunks;
 	struct rpcrdma_frmr *f;
+	struct rpcrdma_mw *mw;
 	int rc;
 
 	dprintk("RPC:       %s: req %p\n", __func__, req);
@@ -558,11 +568,8 @@
 	 * unless ri_id->qp is a valid pointer.
 	 */
 	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
-	if (rc) {
-		pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
-		rdma_disconnect(ia->ri_id);
-		goto unmap;
-	}
+	if (rc)
+		goto reset_mrs;
 
 	wait_for_completion(&f->fr_linv_done);
 
@@ -572,56 +579,65 @@
 unmap:
 	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
 		seg = &req->rl_segments[i];
+		mw = seg->rl_mw;
+		seg->rl_mw = NULL;
 
-		__frwr_dma_unmap(r_xprt, seg, rc);
+		ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents,
+				f->fr_dir);
+		rpcrdma_put_mw(r_xprt, mw);
 
 		i += seg->mr_nsegs;
 		seg->mr_nsegs = 0;
 	}
 
 	req->rl_nchunks = 0;
+	return;
+
+reset_mrs:
+	pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
+
+	/* Find and reset the MRs in the LOCAL_INV WRs that did not
+	 * get posted. This is synchronous, and slow.
+	 */
+	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
+		seg = &req->rl_segments[i];
+		mw = seg->rl_mw;
+		f = &mw->frmr;
+
+		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
+			__frwr_reset_mr(ia, mw);
+			bad_wr = bad_wr->next;
+		}
+
+		i += seg->mr_nsegs;
+	}
+	goto unmap;
 }
 
-/* Post a LOCAL_INV Work Request to prevent further remote access
- * via RDMA READ or RDMA WRITE.
+/* Use a slow, safe mechanism to invalidate all memory regions
+ * that were registered for "req".
  */
-static int
-frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+static void
+frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+		   bool sync)
 {
-	struct rpcrdma_mr_seg *seg1 = seg;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_mw *mw = seg1->rl_mw;
-	struct rpcrdma_frmr *frmr = &mw->frmr;
-	struct ib_send_wr *invalidate_wr, *bad_wr;
-	int rc, nsegs = seg->mr_nsegs;
+	struct rpcrdma_mr_seg *seg;
+	struct rpcrdma_mw *mw;
+	unsigned int i;
 
-	dprintk("RPC:       %s: FRMR %p\n", __func__, mw);
+	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
+		seg = &req->rl_segments[i];
+		mw = seg->rl_mw;
 
-	seg1->rl_mw = NULL;
-	frmr->fr_state = FRMR_IS_INVALID;
-	invalidate_wr = &mw->frmr.fr_invwr;
+		if (sync)
+			__frwr_reset_and_unmap(r_xprt, mw);
+		else
+			__frwr_queue_recovery(mw);
 
-	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
-	frmr->fr_cqe.done = frwr_wc_localinv;
-	invalidate_wr->wr_cqe = &frmr->fr_cqe;
-	invalidate_wr->opcode = IB_WR_LOCAL_INV;
-	invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
-	DECR_CQCOUNT(&r_xprt->rx_ep);
-
-	ib_dma_unmap_sg(ia->ri_device, frmr->sg, frmr->sg_nents, seg1->mr_dir);
-	read_lock(&ia->ri_qplock);
-	rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
-	read_unlock(&ia->ri_qplock);
-	if (rc)
-		goto out_err;
-
-	rpcrdma_put_mw(r_xprt, mw);
-	return nsegs;
-
-out_err:
-	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
-	__frwr_queue_recovery(mw);
-	return nsegs;
+		i += seg->mr_nsegs;
+		seg->mr_nsegs = 0;
+		seg->rl_mw = NULL;
+	}
 }
 
 static void
@@ -643,7 +659,7 @@
 const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
 	.ro_map				= frwr_op_map,
 	.ro_unmap_sync			= frwr_op_unmap_sync,
-	.ro_unmap			= frwr_op_unmap,
+	.ro_unmap_safe			= frwr_op_unmap_safe,
 	.ro_open			= frwr_op_open,
 	.ro_maxpages			= frwr_op_maxpages,
 	.ro_init			= frwr_op_init,
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 481b9b6..3750596 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -36,8 +36,11 @@
 		       __func__, PTR_ERR(mr));
 		return -ENOMEM;
 	}
-
 	ia->ri_dma_mr = mr;
+
+	rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int,
+						      RPCRDMA_MAX_DATA_SEGS,
+						      RPCRDMA_MAX_HDR_SEGS));
 	return 0;
 }
 
@@ -47,7 +50,7 @@
 physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
 {
 	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-		     rpcrdma_max_segments(r_xprt));
+		     RPCRDMA_MAX_HDR_SEGS);
 }
 
 static int
@@ -71,17 +74,6 @@
 	return 1;
 }
 
-/* Unmap a memory region, but leave it registered.
- */
-static int
-physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
-{
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
-	rpcrdma_unmap_one(ia->ri_device, seg);
-	return 1;
-}
-
 /* DMA unmap all memory regions that were mapped for "req".
  */
 static void
@@ -94,6 +86,25 @@
 		rpcrdma_unmap_one(device, &req->rl_segments[i++]);
 }
 
+/* Use a slow, safe mechanism to invalidate all memory regions
+ * that were registered for "req".
+ *
+ * For physical memory registration, there is no good way to
+ * fence a single MR that has been advertised to the server. The
+ * client has already handed the server an R_key that cannot be
+ * invalidated and is shared by all MRs on this connection.
+ * Tearing down the PD might be the only safe choice, but it's
+ * not clear that a freshly acquired DMA R_key would be different
+ * than the one used by the PD that was just destroyed.
+ * FIXME.
+ */
+static void
+physical_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+		       bool sync)
+{
+	physical_op_unmap_sync(r_xprt, req);
+}
+
 static void
 physical_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -102,7 +113,7 @@
 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
 	.ro_map				= physical_op_map,
 	.ro_unmap_sync			= physical_op_unmap_sync,
-	.ro_unmap			= physical_op_unmap,
+	.ro_unmap_safe			= physical_op_unmap_safe,
 	.ro_open			= physical_op_open,
 	.ro_maxpages			= physical_op_maxpages,
 	.ro_init			= physical_op_init,
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 888823b..35a8109 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -61,26 +61,84 @@
 	rpcrdma_replych
 };
 
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 static const char transfertypes[][12] = {
-	"pure inline",	/* no chunks */
-	" read chunk",	/* some argument via rdma read */
-	"*read chunk",	/* entire request via rdma read */
-	"write chunk",	/* some result via rdma write */
+	"inline",	/* no chunks */
+	"read list",	/* some argument via rdma read */
+	"*read list",	/* entire request via rdma read */
+	"write list",	/* some result via rdma write */
 	"reply chunk"	/* entire reply via rdma write */
 };
-#endif
+
+/* Returns size of largest RPC-over-RDMA header in a Call message
+ *
+ * The largest Call header contains a full-size Read list and a
+ * minimal Reply chunk.
+ */
+static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
+{
+	unsigned int size;
+
+	/* Fixed header fields and list discriminators */
+	size = RPCRDMA_HDRLEN_MIN;
+
+	/* Maximum Read list size */
+	maxsegs += 2;	/* segment for head and tail buffers */
+	size = maxsegs * sizeof(struct rpcrdma_read_chunk);
+
+	/* Minimal Read chunk size */
+	size += sizeof(__be32);	/* segment count */
+	size += sizeof(struct rpcrdma_segment);
+	size += sizeof(__be32);	/* list discriminator */
+
+	dprintk("RPC:       %s: max call header size = %u\n",
+		__func__, size);
+	return size;
+}
+
+/* Returns size of largest RPC-over-RDMA header in a Reply message
+ *
+ * There is only one Write list or one Reply chunk per Reply
+ * message.  The larger list is the Write list.
+ */
+static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
+{
+	unsigned int size;
+
+	/* Fixed header fields and list discriminators */
+	size = RPCRDMA_HDRLEN_MIN;
+
+	/* Maximum Write list size */
+	maxsegs += 2;	/* segment for head and tail buffers */
+	size = sizeof(__be32);		/* segment count */
+	size += maxsegs * sizeof(struct rpcrdma_segment);
+	size += sizeof(__be32);	/* list discriminator */
+
+	dprintk("RPC:       %s: max reply header size = %u\n",
+		__func__, size);
+	return size;
+}
+
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *ia,
+				  struct rpcrdma_create_data_internal *cdata,
+				  unsigned int maxsegs)
+{
+	ia->ri_max_inline_write = cdata->inline_wsize -
+				  rpcrdma_max_call_header_size(maxsegs);
+	ia->ri_max_inline_read = cdata->inline_rsize -
+				 rpcrdma_max_reply_header_size(maxsegs);
+}
 
 /* The client can send a request inline as long as the RPCRDMA header
  * plus the RPC call fit under the transport's inline limit. If the
  * combined call message size exceeds that limit, the client must use
  * the read chunk list for this operation.
  */
-static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
+static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
+				struct rpc_rqst *rqst)
 {
-	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
+	return rqst->rq_snd_buf.len <= ia->ri_max_inline_write;
 }
 
 /* The client can't know how large the actual reply will be. Thus it
@@ -89,11 +147,12 @@
  * limit, the client must provide a write list or a reply chunk for
  * this request.
  */
-static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
+static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
+				   struct rpc_rqst *rqst)
 {
-	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
+	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
 }
 
 static int
@@ -226,23 +285,16 @@
 	return n;
 }
 
-/*
- * Create read/write chunk lists, and reply chunks, for RDMA
- *
- *   Assume check against THRESHOLD has been done, and chunks are required.
- *   Assume only encoding one list entry for read|write chunks. The NFSv3
- *     protocol is simple enough to allow this as it only has a single "bulk
- *     result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
- *     RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
- *
- * When used for a single reply chunk (which is a special write
- * chunk used for the entire reply, rather than just the data), it
- * is used primarily for READDIR and READLINK which would otherwise
- * be severely size-limited by a small rdma inline read max. The server
- * response will come back as an RDMA Write, followed by a message
- * of type RDMA_NOMSG carrying the xid and length. As a result, reply
- * chunks do not provide data alignment, however they do not require
- * "fixup" (moving the response to the upper layer buffer) either.
+static inline __be32 *
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr_seg *seg)
+{
+	*iptr++ = cpu_to_be32(seg->mr_rkey);
+	*iptr++ = cpu_to_be32(seg->mr_len);
+	return xdr_encode_hyper(iptr, seg->mr_base);
+}
+
+/* XDR-encode the Read list. Supports encoding a list of read
+ * segments that belong to a single read chunk.
  *
  * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
  *
@@ -250,131 +302,190 @@
  *   N elements, position P (same P for all chunks of same arg!):
  *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
  *
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Read list, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
+			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
+			 __be32 *iptr, enum rpcrdma_chunktype rtype)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+	unsigned int pos;
+	int n, nsegs;
+
+	if (rtype == rpcrdma_noch) {
+		*iptr++ = xdr_zero;	/* item not present */
+		return iptr;
+	}
+
+	pos = rqst->rq_snd_buf.head[0].iov_len;
+	if (rtype == rpcrdma_areadch)
+		pos = 0;
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg,
+				     RPCRDMA_MAX_SEGS - req->rl_nchunks);
+	if (nsegs < 0)
+		return ERR_PTR(nsegs);
+
+	do {
+		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, false);
+		if (n <= 0)
+			return ERR_PTR(n);
+
+		*iptr++ = xdr_one;	/* item present */
+
+		/* All read segments in this chunk
+		 * have the same "position".
+		 */
+		*iptr++ = cpu_to_be32(pos);
+		iptr = xdr_encode_rdma_segment(iptr, seg);
+
+		dprintk("RPC: %5u %s: read segment pos %u "
+			"%d@0x%016llx:0x%08x (%s)\n",
+			rqst->rq_task->tk_pid, __func__, pos,
+			seg->mr_len, (unsigned long long)seg->mr_base,
+			seg->mr_rkey, n < nsegs ? "more" : "last");
+
+		r_xprt->rx_stats.read_chunk_count++;
+		req->rl_nchunks++;
+		seg += n;
+		nsegs -= n;
+	} while (nsegs);
+	req->rl_nextseg = seg;
+
+	/* Finish Read list */
+	*iptr++ = xdr_zero;	/* Next item not present */
+	return iptr;
+}
+
+/* XDR-encode the Write list. Supports encoding a list containing
+ * one array of plain segments that belong to a single write chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
  *  Write chunklist (a list of (one) counted array):
  *   N elements:
  *    1 - N - HLOO - HLOO - ... - HLOO - 0
  *
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Write list, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+			  struct rpc_rqst *rqst, __be32 *iptr,
+			  enum rpcrdma_chunktype wtype)
+{
+	struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+	int n, nsegs, nchunks;
+	__be32 *segcount;
+
+	if (wtype != rpcrdma_writech) {
+		*iptr++ = xdr_zero;	/* no Write list present */
+		return iptr;
+	}
+
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
+				     rqst->rq_rcv_buf.head[0].iov_len,
+				     wtype, seg,
+				     RPCRDMA_MAX_SEGS - req->rl_nchunks);
+	if (nsegs < 0)
+		return ERR_PTR(nsegs);
+
+	*iptr++ = xdr_one;	/* Write list present */
+	segcount = iptr++;	/* save location of segment count */
+
+	nchunks = 0;
+	do {
+		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
+		if (n <= 0)
+			return ERR_PTR(n);
+
+		iptr = xdr_encode_rdma_segment(iptr, seg);
+
+		dprintk("RPC: %5u %s: write segment "
+			"%d@0x%016llx:0x%08x (%s)\n",
+			rqst->rq_task->tk_pid, __func__,
+			seg->mr_len, (unsigned long long)seg->mr_base,
+			seg->mr_rkey, n < nsegs ? "more" : "last");
+
+		r_xprt->rx_stats.write_chunk_count++;
+		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+		req->rl_nchunks++;
+		nchunks++;
+		seg   += n;
+		nsegs -= n;
+	} while (nsegs);
+	req->rl_nextseg = seg;
+
+	/* Update count of segments in this Write chunk */
+	*segcount = cpu_to_be32(nchunks);
+
+	/* Finish Write list */
+	*iptr++ = xdr_zero;	/* Next item not present */
+	return iptr;
+}
+
+/* XDR-encode the Reply chunk. Supports encoding an array of plain
+ * segments that belong to a single write (reply) chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
  *  Reply chunk (a counted array):
  *   N elements:
  *    1 - N - HLOO - HLOO - ... - HLOO
  *
- * Returns positive RPC/RDMA header size, or negative errno.
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Reply chunk, or an error pointer.
  */
-
-static ssize_t
-rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
-		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
+static __be32 *
+rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
+			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
+			   __be32 *iptr, enum rpcrdma_chunktype wtype)
 {
-	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
-	int n, nsegs, nchunks = 0;
-	unsigned int pos;
-	struct rpcrdma_mr_seg *seg = req->rl_segments;
-	struct rpcrdma_read_chunk *cur_rchunk = NULL;
-	struct rpcrdma_write_array *warray = NULL;
-	struct rpcrdma_write_chunk *cur_wchunk = NULL;
-	__be32 *iptr = headerp->rm_body.rm_chunks;
-	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);
+	struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+	int n, nsegs, nchunks;
+	__be32 *segcount;
 
-	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
-		/* a read chunk - server will RDMA Read our memory */
-		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
-	} else {
-		/* a write or reply chunk - server will RDMA Write our memory */
-		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
-		if (type == rpcrdma_replych)
-			*iptr++ = xdr_zero;	/* a NULL write chunk list */
-		warray = (struct rpcrdma_write_array *) iptr;
-		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
+	if (wtype != rpcrdma_replych) {
+		*iptr++ = xdr_zero;	/* no Reply chunk present */
+		return iptr;
 	}
 
-	if (type == rpcrdma_replych || type == rpcrdma_areadch)
-		pos = 0;
-	else
-		pos = target->head[0].iov_len;
-
-	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
+	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
+				     RPCRDMA_MAX_SEGS - req->rl_nchunks);
 	if (nsegs < 0)
-		return nsegs;
+		return ERR_PTR(nsegs);
 
-	map = r_xprt->rx_ia.ri_ops->ro_map;
+	*iptr++ = xdr_one;	/* Reply chunk present */
+	segcount = iptr++;	/* save location of segment count */
+
+	nchunks = 0;
 	do {
-		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
+		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
 		if (n <= 0)
-			goto out;
-		if (cur_rchunk) {	/* read */
-			cur_rchunk->rc_discrim = xdr_one;
-			/* all read chunks have the same "position" */
-			cur_rchunk->rc_position = cpu_to_be32(pos);
-			cur_rchunk->rc_target.rs_handle =
-						cpu_to_be32(seg->mr_rkey);
-			cur_rchunk->rc_target.rs_length =
-						cpu_to_be32(seg->mr_len);
-			xdr_encode_hyper(
-					(__be32 *)&cur_rchunk->rc_target.rs_offset,
-					seg->mr_base);
-			dprintk("RPC:       %s: read chunk "
-				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
-				seg->mr_len, (unsigned long long)seg->mr_base,
-				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
-			cur_rchunk++;
-			r_xprt->rx_stats.read_chunk_count++;
-		} else {		/* write/reply */
-			cur_wchunk->wc_target.rs_handle =
-						cpu_to_be32(seg->mr_rkey);
-			cur_wchunk->wc_target.rs_length =
-						cpu_to_be32(seg->mr_len);
-			xdr_encode_hyper(
-					(__be32 *)&cur_wchunk->wc_target.rs_offset,
-					seg->mr_base);
-			dprintk("RPC:       %s: %s chunk "
-				"elem %d@0x%llx:0x%x (%s)\n", __func__,
-				(type == rpcrdma_replych) ? "reply" : "write",
-				seg->mr_len, (unsigned long long)seg->mr_base,
-				seg->mr_rkey, n < nsegs ? "more" : "last");
-			cur_wchunk++;
-			if (type == rpcrdma_replych)
-				r_xprt->rx_stats.reply_chunk_count++;
-			else
-				r_xprt->rx_stats.write_chunk_count++;
-			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
-		}
+			return ERR_PTR(n);
+
+		iptr = xdr_encode_rdma_segment(iptr, seg);
+
+		dprintk("RPC: %5u %s: reply segment "
+			"%d@0x%016llx:0x%08x (%s)\n",
+			rqst->rq_task->tk_pid, __func__,
+			seg->mr_len, (unsigned long long)seg->mr_base,
+			seg->mr_rkey, n < nsegs ? "more" : "last");
+
+		r_xprt->rx_stats.reply_chunk_count++;
+		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+		req->rl_nchunks++;
 		nchunks++;
 		seg   += n;
 		nsegs -= n;
 	} while (nsegs);
+	req->rl_nextseg = seg;
 
-	/* success. all failures return above */
-	req->rl_nchunks = nchunks;
+	/* Update count of segments in the Reply chunk */
+	*segcount = cpu_to_be32(nchunks);
 
-	/*
-	 * finish off header. If write, marshal discrim and nchunks.
-	 */
-	if (cur_rchunk) {
-		iptr = (__be32 *) cur_rchunk;
-		*iptr++ = xdr_zero;	/* finish the read chunk list */
-		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
-		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
-	} else {
-		warray->wc_discrim = xdr_one;
-		warray->wc_nchunks = cpu_to_be32(nchunks);
-		iptr = (__be32 *) cur_wchunk;
-		if (type == rpcrdma_writech) {
-			*iptr++ = xdr_zero; /* finish the write chunk list */
-			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
-		}
-	}
-
-	/*
-	 * Return header size.
-	 */
-	return (unsigned char *)iptr - (unsigned char *)headerp;
-
-out:
-	for (pos = 0; nchunks--;)
-		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
-						      &req->rl_segments[pos]);
-	return n;
+	return iptr;
 }
 
 /*
@@ -440,13 +551,10 @@
  * Marshal a request: the primary job of this routine is to choose
  * the transfer modes. See comments below.
  *
- * Uses multiple RDMA IOVs for a request:
- *  [0] -- RPC RDMA header, which uses memory from the *start* of the
- *         preregistered buffer that already holds the RPC data in
- *         its middle.
- *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
- *  [2] -- optional padding.
- *  [3] -- if padded, header only in [1] and data here.
+ * Prepares up to two IOVs per Call message:
+ *
+ *  [0] -- RPC RDMA header
+ *  [1] -- the RPC header/data
  *
  * Returns zero on success, otherwise a negative errno.
  */
@@ -457,24 +565,17 @@
 	struct rpc_xprt *xprt = rqst->rq_xprt;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	char *base;
-	size_t rpclen;
-	ssize_t hdrlen;
 	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
+	ssize_t hdrlen;
+	size_t rpclen;
+	__be32 *iptr;
 
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
 		return rpcrdma_bc_marshal_reply(rqst);
 #endif
 
-	/*
-	 * rpclen gets amount of data in first buffer, which is the
-	 * pre-registered buffer.
-	 */
-	base = rqst->rq_svec[0].iov_base;
-	rpclen = rqst->rq_svec[0].iov_len;
-
 	headerp = rdmab_to_msg(req->rl_rdmabuf);
 	/* don't byte-swap XID, it's already done in request */
 	headerp->rm_xid = rqst->rq_xid;
@@ -485,15 +586,16 @@
 	/*
 	 * Chunks needed for results?
 	 *
-	 * o Read ops return data as write chunk(s), header as inline.
 	 * o If the expected result is under the inline threshold, all ops
 	 *   return as inline.
+	 * o Large read ops return data as write chunk(s), header as
+	 *   inline.
 	 * o Large non-read ops return as a single reply chunk.
 	 */
-	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
-		wtype = rpcrdma_writech;
-	else if (rpcrdma_results_inline(rqst))
+	if (rpcrdma_results_inline(r_xprt, rqst))
 		wtype = rpcrdma_noch;
+	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
+		wtype = rpcrdma_writech;
 	else
 		wtype = rpcrdma_replych;
 
@@ -511,10 +613,14 @@
 	 * that both has a data payload, and whose non-data arguments
 	 * by themselves are larger than the inline threshold.
 	 */
-	if (rpcrdma_args_inline(rqst)) {
+	if (rpcrdma_args_inline(r_xprt, rqst)) {
 		rtype = rpcrdma_noch;
+		rpcrdma_inline_pullup(rqst);
+		rpclen = rqst->rq_svec[0].iov_len;
 	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
 		rtype = rpcrdma_readch;
+		rpclen = rqst->rq_svec[0].iov_len;
+		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
 	} else {
 		r_xprt->rx_stats.nomsg_call_count++;
 		headerp->rm_type = htonl(RDMA_NOMSG);
@@ -522,57 +628,50 @@
 		rpclen = 0;
 	}
 
-	/* The following simplification is not true forever */
-	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
-		wtype = rpcrdma_noch;
-	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
-		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
-			__func__);
-		return -EIO;
-	}
-
-	hdrlen = RPCRDMA_HDRLEN_MIN;
-
-	/*
-	 * Pull up any extra send data into the preregistered buffer.
-	 * When padding is in use and applies to the transfer, insert
-	 * it and change the message type.
+	/* This implementation supports the following combinations
+	 * of chunk lists in one RPC-over-RDMA Call message:
+	 *
+	 *   - Read list
+	 *   - Write list
+	 *   - Reply chunk
+	 *   - Read list + Reply chunk
+	 *
+	 * It might not yet support the following combinations:
+	 *
+	 *   - Read list + Write list
+	 *
+	 * It does not support the following combinations:
+	 *
+	 *   - Write list + Reply chunk
+	 *   - Read list + Write list + Reply chunk
+	 *
+	 * This implementation supports only a single chunk in each
+	 * Read or Write list. Thus for example the client cannot
+	 * send a Call message with a Position Zero Read chunk and a
+	 * regular Read chunk at the same time.
 	 */
-	if (rtype == rpcrdma_noch) {
+	req->rl_nchunks = 0;
+	req->rl_nextseg = req->rl_segments;
+	iptr = headerp->rm_body.rm_chunks;
+	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
+	if (IS_ERR(iptr))
+		goto out_unmap;
+	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
+	if (IS_ERR(iptr))
+		goto out_unmap;
+	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
+	if (IS_ERR(iptr))
+		goto out_unmap;
+	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
 
-		rpcrdma_inline_pullup(rqst);
+	if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
+		goto out_overflow;
 
-		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
-		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
-		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
-		/* new length after pullup */
-		rpclen = rqst->rq_svec[0].iov_len;
-	} else if (rtype == rpcrdma_readch)
-		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
-	if (rtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
-					       headerp, rtype);
-		wtype = rtype;	/* simplify dprintk */
+	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
+		rqst->rq_task->tk_pid, __func__,
+		transfertypes[rtype], transfertypes[wtype],
+		hdrlen, rpclen);
 
-	} else if (wtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
-					       headerp, wtype);
-	}
-	if (hdrlen < 0)
-		return hdrlen;
-
-	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
-		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[wtype], hdrlen, rpclen,
-		headerp, base, rdmab_lkey(req->rl_rdmabuf));
-
-	/*
-	 * initialize send_iov's - normally only two: rdma chunk header and
-	 * single preregistered RPC header buffer, but if padding is present,
-	 * then use a preregistered (and zeroed) pad buffer between the RPC
-	 * header and any write data. In all non-rdma cases, any following
-	 * data has been copied into the RPC header buffer.
-	 */
 	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
 	req->rl_send_iov[0].length = hdrlen;
 	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
@@ -587,6 +686,18 @@
 
 	req->rl_niovs = 2;
 	return 0;
+
+out_overflow:
+	pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n",
+		hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]);
+	/* Terminate this RPC. Chunks registered above will be
+	 * released by xprt_release -> xprt_rdma_free.
+	 */
+	return -EIO;
+
+out_unmap:
+	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
+	return PTR_ERR(iptr);
 }
 
 /*
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 765bca4..0ba9887 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -145,19 +145,32 @@
 	return (__be32 *)&ary->wc_array[nchunks];
 }
 
-int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
+/**
+ * svc_rdma_xdr_decode_req - Parse incoming RPC-over-RDMA header
+ * @rq_arg: Receive buffer
+ *
+ * On entry, xdr->head[0].iov_base points to first byte in the
+ * RPC-over-RDMA header.
+ *
+ * On successful exit, head[0] points to first byte past the
+ * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
+ * The length of the RPC-over-RDMA header is returned.
+ */
+int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
 {
+	struct rpcrdma_msg *rmsgp;
 	__be32 *va, *vaend;
 	unsigned int len;
 	u32 hdr_len;
 
 	/* Verify that there's enough bytes for header + something */
-	if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_ERR) {
+	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR) {
 		dprintk("svcrdma: header too short = %d\n",
-			rqstp->rq_arg.len);
+			rq_arg->len);
 		return -EINVAL;
 	}
 
+	rmsgp = (struct rpcrdma_msg *)rq_arg->head[0].iov_base;
 	if (rmsgp->rm_vers != rpcrdma_version) {
 		dprintk("%s: bad version %u\n", __func__,
 			be32_to_cpu(rmsgp->rm_vers));
@@ -189,10 +202,10 @@
 			be32_to_cpu(rmsgp->rm_body.rm_padded.rm_thresh);
 
 		va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
-		rqstp->rq_arg.head[0].iov_base = va;
+		rq_arg->head[0].iov_base = va;
 		len = (u32)((unsigned long)va - (unsigned long)rmsgp);
-		rqstp->rq_arg.head[0].iov_len -= len;
-		if (len > rqstp->rq_arg.len)
+		rq_arg->head[0].iov_len -= len;
+		if (len > rq_arg->len)
 			return -EINVAL;
 		return len;
 	default:
@@ -205,7 +218,7 @@
 	 * chunk list and a reply chunk list.
 	 */
 	va = &rmsgp->rm_body.rm_chunks[0];
-	vaend = (__be32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
+	vaend = (__be32 *)((unsigned long)rmsgp + rq_arg->len);
 	va = decode_read_list(va, vaend);
 	if (!va) {
 		dprintk("svcrdma: failed to decode read list\n");
@@ -222,10 +235,9 @@
 		return -EINVAL;
 	}
 
-	rqstp->rq_arg.head[0].iov_base = va;
+	rq_arg->head[0].iov_base = va;
 	hdr_len = (unsigned long)va - (unsigned long)rmsgp;
-	rqstp->rq_arg.head[0].iov_len -= hdr_len;
-
+	rq_arg->head[0].iov_len -= hdr_len;
 	return hdr_len;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index fbe7444..2c25606 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -447,10 +447,8 @@
 	head->arg.len = rqstp->rq_arg.len;
 	head->arg.buflen = rqstp->rq_arg.buflen;
 
-	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-	position = be32_to_cpu(ch->rc_position);
-
 	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+	position = be32_to_cpu(ch->rc_position);
 	if (position == 0) {
 		head->arg.pages = &head->pages[0];
 		page_offset = head->byte_len;
@@ -488,7 +486,7 @@
 	if (page_offset & 3) {
 		u32 pad = 4 - (page_offset & 3);
 
-		head->arg.page_len += pad;
+		head->arg.tail[0].iov_len += pad;
 		head->arg.len += pad;
 		head->arg.buflen += pad;
 		page_offset += pad;
@@ -510,11 +508,10 @@
 	return ret;
 }
 
-static int rdma_read_complete(struct svc_rqst *rqstp,
-			      struct svc_rdma_op_ctxt *head)
+static void rdma_read_complete(struct svc_rqst *rqstp,
+			       struct svc_rdma_op_ctxt *head)
 {
 	int page_no;
-	int ret;
 
 	/* Copy RPC pages */
 	for (page_no = 0; page_no < head->count; page_no++) {
@@ -550,23 +547,6 @@
 	rqstp->rq_arg.tail[0] = head->arg.tail[0];
 	rqstp->rq_arg.len = head->arg.len;
 	rqstp->rq_arg.buflen = head->arg.buflen;
-
-	/* Free the context */
-	svc_rdma_put_context(head, 0);
-
-	/* XXX: What should this be? */
-	rqstp->rq_prot = IPPROTO_MAX;
-	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);
-
-	ret = rqstp->rq_arg.head[0].iov_len
-		+ rqstp->rq_arg.page_len
-		+ rqstp->rq_arg.tail[0].iov_len;
-	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
-		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
-		ret, rqstp->rq_arg.len,	rqstp->rq_arg.head[0].iov_base,
-		rqstp->rq_arg.head[0].iov_len);
-
-	return ret;
 }
 
 /* By convention, backchannel calls arrive via rdma_msg type
@@ -624,7 +604,8 @@
 				  dto_q);
 		list_del_init(&ctxt->dto_q);
 		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
-		return rdma_read_complete(rqstp, ctxt);
+		rdma_read_complete(rqstp, ctxt);
+		goto complete;
 	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
 		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
 				  struct svc_rdma_op_ctxt,
@@ -655,7 +636,7 @@
 
 	/* Decode the RDMA header. */
 	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
-	ret = svc_rdma_xdr_decode_req(rmsgp, rqstp);
+	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
 	if (ret < 0)
 		goto out_err;
 	if (ret == 0)
@@ -682,6 +663,7 @@
 		return 0;
 	}
 
+complete:
 	ret = rqstp->rq_arg.head[0].iov_len
 		+ rqstp->rq_arg.page_len
 		+ rqstp->rq_arg.tail[0].iov_len;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 4f1b1c4..54d53330 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -463,25 +463,21 @@
 		      struct svc_rqst *rqstp,
 		      struct page *page,
 		      struct rpcrdma_msg *rdma_resp,
-		      struct svc_rdma_op_ctxt *ctxt,
 		      struct svc_rdma_req_map *vec,
 		      int byte_count)
 {
+	struct svc_rdma_op_ctxt *ctxt;
 	struct ib_send_wr send_wr;
 	u32 xdr_off;
 	int sge_no;
 	int sge_bytes;
 	int page_no;
 	int pages;
-	int ret;
-
-	ret = svc_rdma_repost_recv(rdma, GFP_KERNEL);
-	if (ret) {
-		svc_rdma_put_context(ctxt, 0);
-		return -ENOTCONN;
-	}
+	int ret = -EIO;
 
 	/* Prepare the context */
+	ctxt = svc_rdma_get_context(rdma);
+	ctxt->direction = DMA_TO_DEVICE;
 	ctxt->pages[0] = page;
 	ctxt->count = 1;
 
@@ -565,8 +561,7 @@
  err:
 	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 1);
-	pr_err("svcrdma: failed to send reply, rc=%d\n", ret);
-	return -EIO;
+	return ret;
 }
 
 void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
@@ -585,7 +580,6 @@
 	int ret;
 	int inline_bytes;
 	struct page *res_page;
-	struct svc_rdma_op_ctxt *ctxt;
 	struct svc_rdma_req_map *vec;
 
 	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
@@ -598,8 +592,6 @@
 	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
 
 	/* Build an req vec for the XDR */
-	ctxt = svc_rdma_get_context(rdma);
-	ctxt->direction = DMA_TO_DEVICE;
 	vec = svc_rdma_get_req_map(rdma);
 	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
 	if (ret)
@@ -635,7 +627,12 @@
 		inline_bytes -= ret;
 	}
 
-	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
+	/* Post a fresh Receive buffer _before_ sending the reply */
+	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
+	if (ret)
+		goto err1;
+
+	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
 			 inline_bytes);
 	if (ret < 0)
 		goto err1;
@@ -648,7 +645,8 @@
 	put_page(res_page);
  err0:
 	svc_rdma_put_req_map(rdma, vec);
-	svc_rdma_put_context(ctxt, 0);
+	pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
+	       ret);
 	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 	return -ENOTCONN;
 }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9066896..dd94401 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -789,7 +789,7 @@
 	int ret;
 
 	dprintk("svcrdma: Creating RDMA socket\n");
-	if (sa->sa_family != AF_INET) {
+	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
 		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
 		return ERR_PTR(-EAFNOSUPPORT);
 	}
@@ -805,6 +805,16 @@
 		goto err0;
 	}
 
+	/* Allow both IPv4 and IPv6 sockets to bind a single port
+	 * at the same time.
+	 */
+#if IS_ENABLED(CONFIG_IPV6)
+	ret = rdma_set_afonly(listen_id, 1);
+	if (ret) {
+		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
+		goto err1;
+	}
+#endif
 	ret = rdma_bind_addr(listen_id, sa);
 	if (ret) {
 		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
@@ -1073,7 +1083,7 @@
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
 
 	/* Post receive buffers */
-	for (i = 0; i < newxprt->sc_rq_depth; i++) {
+	for (i = 0; i < newxprt->sc_max_requests; i++) {
 		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
 		if (ret) {
 			dprintk("svcrdma: failure posting receive buffers\n");
@@ -1170,6 +1180,9 @@
 
 	dprintk("svcrdma: %s(%p)\n", __func__, rdma);
 
+	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
+		ib_drain_qp(rdma->sc_qp);
+
 	/* We should only be called from kref_put */
 	if (atomic_read(&xprt->xpt_ref.refcount) != 0)
 		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index b1b009f..99d2e5b 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -73,6 +73,8 @@
 
 static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
 static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
+static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
+static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
 static unsigned int zero;
 static unsigned int max_padding = PAGE_SIZE;
 static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
@@ -96,6 +98,8 @@
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
+		.extra1		= &min_inline_size,
+		.extra2		= &max_inline_size,
 	},
 	{
 		.procname	= "rdma_max_inline_write",
@@ -103,6 +107,8 @@
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
+		.extra1		= &min_inline_size,
+		.extra2		= &max_inline_size,
 	},
 	{
 		.procname	= "rdma_inline_write_padding",
@@ -508,6 +514,7 @@
 out:
 	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
 	req->rl_connect_cookie = 0;	/* our reserved value */
+	req->rl_task = task;
 	return req->rl_sendbuf->rg_base;
 
 out_rdmabuf:
@@ -564,7 +571,6 @@
 	struct rpcrdma_req *req;
 	struct rpcrdma_xprt *r_xprt;
 	struct rpcrdma_regbuf *rb;
-	int i;
 
 	if (buffer == NULL)
 		return;
@@ -578,11 +584,8 @@
 
 	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);
 
-	for (i = 0; req->rl_nchunks;) {
-		--req->rl_nchunks;
-		i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
-						    &req->rl_segments[i]);
-	}
+	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req,
+					    !RPC_IS_ASYNC(req->rl_task));
 
 	rpcrdma_buffer_put(req);
 }
@@ -707,6 +710,7 @@
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	.bc_setup		= xprt_rdma_bc_setup,
 	.bc_up			= xprt_rdma_bc_up,
+	.bc_maxpayload		= xprt_rdma_bc_maxpayload,
 	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
 	.bc_destroy		= xprt_rdma_bc_destroy,
 #endif
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index f5ed9f9..b044d98a 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -203,15 +203,6 @@
 	goto out_schedule;
 }
 
-static void
-rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
-{
-	struct ib_wc wc;
-
-	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
-		rpcrdma_receive_wc(NULL, &wc);
-}
-
 static int
 rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
@@ -374,23 +365,6 @@
 }
 
 /*
- * Drain any cq, prior to teardown.
- */
-static void
-rpcrdma_clean_cq(struct ib_cq *cq)
-{
-	struct ib_wc wc;
-	int count = 0;
-
-	while (1 == ib_poll_cq(cq, 1, &wc))
-		++count;
-
-	if (count)
-		dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
-			__func__, count, wc.opcode);
-}
-
-/*
  * Exported functions.
  */
 
@@ -459,7 +433,6 @@
 	dprintk("RPC:       %s: memory registration strategy is '%s'\n",
 		__func__, ia->ri_ops->ro_displayname);
 
-	rwlock_init(&ia->ri_qplock);
 	return 0;
 
 out3:
@@ -515,7 +488,7 @@
 			__func__);
 		return -ENOMEM;
 	}
-	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS;
+	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;
 
 	/* check provider's send/recv wr limits */
 	if (cdata->max_requests > max_qp_wr)
@@ -526,11 +499,13 @@
 	ep->rep_attr.srq = NULL;
 	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
 	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
+	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
 	rc = ia->ri_ops->ro_open(ia, ep, cdata);
 	if (rc)
 		return rc;
 	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
 	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
+	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
 	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
 	ep->rep_attr.cap.max_recv_sge = 1;
 	ep->rep_attr.cap.max_inline_data = 0;
@@ -578,6 +553,7 @@
 	ep->rep_attr.recv_cq = recvcq;
 
 	/* Initialize cma parameters */
+	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));
 
 	/* RPC/RDMA does not use private data */
 	ep->rep_remote_cma.private_data = NULL;
@@ -591,7 +567,16 @@
 		ep->rep_remote_cma.responder_resources =
 						ia->ri_device->attrs.max_qp_rd_atom;
 
-	ep->rep_remote_cma.retry_count = 7;
+	/* Limit transport retries so client can detect server
+	 * GID changes quickly. RPC layer handles re-establishing
+	 * transport connection and retransmission.
+	 */
+	ep->rep_remote_cma.retry_count = 6;
+
+	/* RPC-over-RDMA handles its own flow control. In addition,
+	 * make all RNR NAKs visible so we know that RPC-over-RDMA
+	 * flow control is working correctly (no NAKs should be seen).
+	 */
 	ep->rep_remote_cma.flow_control = 0;
 	ep->rep_remote_cma.rnr_retry_count = 0;
 
@@ -622,13 +607,8 @@
 
 	cancel_delayed_work_sync(&ep->rep_connect_worker);
 
-	if (ia->ri_id->qp)
-		rpcrdma_ep_disconnect(ep, ia);
-
-	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
-	rpcrdma_clean_cq(ep->rep_attr.send_cq);
-
 	if (ia->ri_id->qp) {
+		rpcrdma_ep_disconnect(ep, ia);
 		rdma_destroy_qp(ia->ri_id);
 		ia->ri_id->qp = NULL;
 	}
@@ -659,7 +639,6 @@
 		dprintk("RPC:       %s: reconnecting...\n", __func__);
 
 		rpcrdma_ep_disconnect(ep, ia);
-		rpcrdma_flush_cqs(ep);
 
 		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
 		id = rpcrdma_create_id(xprt, ia,
@@ -692,10 +671,8 @@
 			goto out;
 		}
 
-		write_lock(&ia->ri_qplock);
 		old = ia->ri_id;
 		ia->ri_id = id;
-		write_unlock(&ia->ri_qplock);
 
 		rdma_destroy_qp(old);
 		rpcrdma_destroy_id(old);
@@ -785,7 +762,6 @@
 {
 	int rc;
 
-	rpcrdma_flush_cqs(ep);
 	rc = rdma_disconnect(ia->ri_id);
 	if (!rc) {
 		/* returns without wait if not connected */
@@ -797,6 +773,8 @@
 		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
 		ep->rep_connected = rc;
 	}
+
+	ib_drain_qp(ia->ri_id->qp);
 }
 
 struct rpcrdma_req *
@@ -1271,25 +1249,3 @@
 	rpcrdma_recv_buffer_put(rep);
 	return rc;
 }
-
-/* How many chunk list items fit within our inline buffers?
- */
-unsigned int
-rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
-{
-	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	int bytes, segments;
-
-	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
-	bytes -= RPCRDMA_HDRLEN_MIN;
-	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
-		pr_warn("RPC:       %s: inline threshold too small\n",
-			__func__);
-		return 0;
-	}
-
-	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
-	dprintk("RPC:       %s: max chunk list size = %d segments\n",
-		__func__, segments);
-	return segments;
-}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 2ebc743..95cdc66 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -65,7 +65,6 @@
  */
 struct rpcrdma_ia {
 	const struct rpcrdma_memreg_ops	*ri_ops;
-	rwlock_t		ri_qplock;
 	struct ib_device	*ri_device;
 	struct rdma_cm_id 	*ri_id;
 	struct ib_pd		*ri_pd;
@@ -73,6 +72,8 @@
 	struct completion	ri_done;
 	int			ri_async_rc;
 	unsigned int		ri_max_frmr_depth;
+	unsigned int		ri_max_inline_write;
+	unsigned int		ri_max_inline_read;
 	struct ib_qp_attr	ri_qp_attr;
 	struct ib_qp_init_attr	ri_qp_init_attr;
 };
@@ -144,6 +145,26 @@
 
 #define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
 
+/* To ensure a transport can always make forward progress,
+ * the number of RDMA segments allowed in header chunk lists
+ * is capped at 8. This prevents less-capable devices and
+ * memory registrations from overrunning the Send buffer
+ * while building chunk lists.
+ *
+ * Elements of the Read list take up more room than the
+ * Write list or Reply chunk. 8 read segments means the Read
+ * list (or Write list or Reply chunk) cannot consume more
+ * than
+ *
+ * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
+ *
+ * And the fixed part of the header is another 24 bytes.
+ *
+ * The smallest inline threshold is 1024 bytes, ensuring that
+ * at least 750 bytes are available for RPC messages.
+ */
+#define RPCRDMA_MAX_HDR_SEGS	(8)
+
 /*
  * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
@@ -162,7 +183,9 @@
  */
 
 #define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)
-#define RPCRDMA_MAX_SEGS 	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
+
+/* data segments + head/tail for Call + head/tail for Reply */
+#define RPCRDMA_MAX_SEGS 	(RPCRDMA_MAX_DATA_SEGS + 4)
 
 struct rpcrdma_buffer;
 
@@ -198,14 +221,13 @@
 };
 
 struct rpcrdma_frmr {
-	struct scatterlist		*sg;
-	int				sg_nents;
+	struct scatterlist		*fr_sg;
+	int				fr_nents;
+	enum dma_data_direction		fr_dir;
 	struct ib_mr			*fr_mr;
 	struct ib_cqe			fr_cqe;
 	enum rpcrdma_frmr_state		fr_state;
 	struct completion		fr_linv_done;
-	struct work_struct		fr_work;
-	struct rpcrdma_xprt		*fr_xprt;
 	union {
 		struct ib_reg_wr	fr_regwr;
 		struct ib_send_wr	fr_invwr;
@@ -222,6 +244,8 @@
 		struct rpcrdma_fmr	fmr;
 		struct rpcrdma_frmr	frmr;
 	};
+	struct work_struct	mw_work;
+	struct rpcrdma_xprt	*mw_xprt;
 	struct list_head	mw_list;
 	struct list_head	mw_all;
 };
@@ -270,12 +294,14 @@
 	unsigned int		rl_niovs;
 	unsigned int		rl_nchunks;
 	unsigned int		rl_connect_cookie;
+	struct rpc_task		*rl_task;
 	struct rpcrdma_buffer	*rl_buffer;
 	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
 	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
 	struct rpcrdma_regbuf	*rl_rdmabuf;
 	struct rpcrdma_regbuf	*rl_sendbuf;
 	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
+	struct rpcrdma_mr_seg	*rl_nextseg;
 
 	struct ib_cqe		rl_cqe;
 	struct list_head	rl_all;
@@ -372,8 +398,8 @@
 				  struct rpcrdma_mr_seg *, int, bool);
 	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
 					 struct rpcrdma_req *);
-	int		(*ro_unmap)(struct rpcrdma_xprt *,
-				    struct rpcrdma_mr_seg *);
+	void		(*ro_unmap_safe)(struct rpcrdma_xprt *,
+					 struct rpcrdma_req *, bool);
 	int		(*ro_open)(struct rpcrdma_ia *,
 				   struct rpcrdma_ep *,
 				   struct rpcrdma_create_data_internal *);
@@ -456,7 +482,6 @@
 void rpcrdma_free_regbuf(struct rpcrdma_ia *,
 			 struct rpcrdma_regbuf *);
 
-unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
 int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
 
 int frwr_alloc_recovery_wq(void);
@@ -519,6 +544,9 @@
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *,
+				  struct rpcrdma_create_data_internal *,
+				  unsigned int);
 
 /* RPC/RDMA module init - xprtrdma/transport.c
  */
@@ -534,6 +562,7 @@
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
 int xprt_rdma_bc_up(struct svc_serv *, struct net *);
+size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
 int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
 void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
 int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b90c539..2d3e0c4 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1364,6 +1364,11 @@
 		return ret;
 	return 0;
 }
+
+static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
+{
+	return PAGE_SIZE;
+}
 #else
 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
 					struct xdr_skb_reader *desc)
@@ -2661,6 +2666,7 @@
 #ifdef CONFIG_SUNRPC_BACKCHANNEL
 	.bc_setup		= xprt_setup_bc,
 	.bc_up			= xs_tcp_bc_up,
+	.bc_maxpayload		= xs_tcp_bc_maxpayload,
 	.bc_free_rqst		= xprt_free_bc_rqst,
 	.bc_destroy		= xprt_destroy_bc,
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 6f11c62..bf8f05c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -405,7 +405,7 @@
 		return 0;
 
 	/* Send RESET message even if bearer is detached from device */
-	tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+	tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
 	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
 		goto drop;
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7059c94..67b6ab9 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -704,7 +704,8 @@
  */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-	int mtyp, rc = 0;
+	int mtyp = 0;
+	int rc = 0;
 	bool state = false;
 	bool probe = false;
 	bool setup = false;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8740930..17201aa 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,6 +41,8 @@
 #include "name_table.h"
 
 #define MAX_FORWARD_SIZE 1024
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_TAILROOM 16
 
 static unsigned int align(unsigned int i)
 {
@@ -505,6 +507,10 @@
 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
 	}
 
+	if (skb_cloned(_skb) &&
+	    pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+		goto exit;
+
 	/* Now reverse the concerned fields */
 	msg_set_errcode(hdr, err);
 	msg_set_origport(hdr, msg_destport(&ohdr));
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 024da8a..7cf52fb 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -94,17 +94,6 @@
 
 #define TIPC_MEDIA_INFO_OFFSET	5
 
-/**
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- *       are word aligned for quicker access
- */
-#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-
 struct tipc_skb_cb {
 	void *handle;
 	struct sk_buff *tail;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index f795b1d..3ad9fab 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -604,7 +604,8 @@
 
 	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-	strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+	nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+		    TIPC_MAX_LINK_NAME);
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
 			    &link_info, sizeof(link_info));
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 88bfcd7..c49b8df 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -796,9 +796,11 @@
  * @tsk: receiving socket
  * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+			      struct sk_buff_head *xmitq)
 {
 	struct sock *sk = &tsk->sk;
+	u32 onode = tsk_own_node(tsk);
 	struct tipc_msg *hdr = buf_msg(skb);
 	int mtyp = msg_type(hdr);
 	bool conn_cong;
@@ -811,7 +813,8 @@
 
 	if (mtyp == CONN_PROBE) {
 		msg_set_type(hdr, CONN_PROBE_REPLY);
-		tipc_sk_respond(sk, skb, TIPC_OK);
+		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+			__skb_queue_tail(xmitq, skb);
 		return;
 	} else if (mtyp == CONN_ACK) {
 		conn_cong = tsk_conn_cong(tsk);
@@ -1686,7 +1689,8 @@
  *
  * Returns true if message was added to socket receive queue, otherwise false
  */
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+		       struct sk_buff_head *xmitq)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_sock *tsk = tipc_sk(sk);
@@ -1696,7 +1700,7 @@
 	int usr = msg_user(hdr);
 
 	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
-		tipc_sk_proto_rcv(tsk, skb);
+		tipc_sk_proto_rcv(tsk, skb, xmitq);
 		return false;
 	}
 
@@ -1739,7 +1743,8 @@
 	return true;
 
 reject:
-	tipc_sk_respond(sk, skb, err);
+	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+		__skb_queue_tail(xmitq, skb);
 	return false;
 }
 
@@ -1755,9 +1760,24 @@
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	unsigned int truesize = skb->truesize;
+	struct sk_buff_head xmitq;
+	u32 dnode, selector;
 
-	if (likely(filter_rcv(sk, skb)))
+	__skb_queue_head_init(&xmitq);
+
+	if (likely(filter_rcv(sk, skb, &xmitq))) {
 		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+		return 0;
+	}
+
+	if (skb_queue_empty(&xmitq))
+		return 0;
+
+	/* Send response/rejected message */
+	skb = __skb_dequeue(&xmitq);
+	dnode = msg_destnode(buf_msg(skb));
+	selector = msg_origport(buf_msg(skb));
+	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
 	return 0;
 }
 
@@ -1771,12 +1791,13 @@
  * Caller must hold socket lock
  */
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-			    u32 dport)
+			    u32 dport, struct sk_buff_head *xmitq)
 {
+	unsigned long time_limit = jiffies + 2;
+	struct sk_buff *skb;
 	unsigned int lim;
 	atomic_t *dcnt;
-	struct sk_buff *skb;
-	unsigned long time_limit = jiffies + 2;
+	u32 onode;
 
 	while (skb_queue_len(inputq)) {
 		if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1788,7 +1809,7 @@
 
 		/* Add message directly to receive queue if possible */
 		if (!sock_owned_by_user(sk)) {
-			filter_rcv(sk, skb);
+			filter_rcv(sk, skb, xmitq);
 			continue;
 		}
 
@@ -1801,7 +1822,9 @@
 			continue;
 
 		/* Overload => reject message back to sender */
-		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+		onode = tipc_own_addr(sock_net(sk));
+		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+			__skb_queue_tail(xmitq, skb);
 		break;
 	}
 }
@@ -1814,12 +1837,14 @@
  */
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
+	struct sk_buff_head xmitq;
 	u32 dnode, dport = 0;
 	int err;
 	struct tipc_sock *tsk;
 	struct sock *sk;
 	struct sk_buff *skb;
 
+	__skb_queue_head_init(&xmitq);
 	while (skb_queue_len(inputq)) {
 		dport = tipc_skb_peek_port(inputq, dport);
 		tsk = tipc_sk_lookup(net, dport);
@@ -1827,9 +1852,14 @@
 		if (likely(tsk)) {
 			sk = &tsk->sk;
 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-				tipc_sk_enqueue(inputq, sk, dport);
+				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
 				spin_unlock_bh(&sk->sk_lock.slock);
 			}
+			/* Send pending response/rejected messages, if any */
+			while ((skb = __skb_dequeue(&xmitq))) {
+				dnode = msg_destnode(buf_msg(skb));
+				tipc_node_xmit_skb(net, skb, dnode, dport);
+			}
 			sock_put(sk);
 			continue;
 		}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d25c82b..ecca389 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -363,8 +363,6 @@
 	WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
 	WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
 	WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
-	WARN_ON(ops->set_tx_power && !ops->get_tx_power);
-	WARN_ON(ops->set_antenna && !ops->get_antenna);
 
 	alloc_size = sizeof(*rdev) + sizeof_priv;
 
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6250b1c..dbb2738e 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -958,8 +958,29 @@
 			return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->netdev_ops->ndo_do_ioctl)
-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+	if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+			int ret = 0;
+			struct iwreq iwr_lcl;
+			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
+			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+			iwr_lcl.u.data.length = iwp_compat->length;
+			iwr_lcl.u.data.flags = iwp_compat->flags;
+
+			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+			iwp_compat->length = iwr_lcl.u.data.length;
+			iwp_compat->flags = iwr_lcl.u.data.flags;
+
+			return ret;
+		} else
+#endif
+			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+	}
 	return -EOPNOTSUPP;
 }
 
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index b2ab2a9..0f82314 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -7,6 +7,7 @@
 squote  := '
 empty   :=
 space   := $(empty) $(empty)
+space_escape := _-_SPACE_-_
 
 ###
 # Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -226,10 +227,10 @@
 # See Documentation/kbuild/makefiles.txt for more info
 
 ifneq ($(KBUILD_NOCMDDEP),1)
-# Check if both arguments has same arguments. Result is empty string if equal.
-# User may override this check using make KBUILD_NOCMDDEP=1
-arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
-                    $(filter-out $(cmd_$@),   $(cmd_$(1))) )
+# Check if both arguments are the same including their order. Result is empty
+# string if equal. User may override this check using make KBUILD_NOCMDDEP=1
+arg-check = $(filter-out $(subst $(space),$(space_escape),$(strip $(cmd_$@))), \
+                         $(subst $(space),$(space_escape),$(strip $(cmd_$1))))
 else
 arg-check = $(if $(strip $(cmd_$@)),,1)
 endif
@@ -256,10 +257,42 @@
 # Execute the command and also postprocess generated .d dependencies file.
 if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ),                  \
 	@set -e;                                                             \
+	$(cmd_and_fixdep), @:)
+
+ifndef CONFIG_TRIM_UNUSED_KSYMS
+
+cmd_and_fixdep =                                                             \
 	$(echo-cmd) $(cmd_$(1));                                             \
 	scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;\
 	rm -f $(depfile);                                                    \
-	mv -f $(dot-target).tmp $(dot-target).cmd, @:)
+	mv -f $(dot-target).tmp $(dot-target).cmd;
+
+else
+
+# Filter out exported kernel symbol names from the preprocessor output.
+# See also __KSYM_DEPS__ in include/linux/export.h.
+# We disable the depfile generation here, so as not to overwrite the existing
+# depfile while fixdep is parsing it.
+flags_nodeps = $(filter-out -Wp$(comma)-M%, $($(1)))
+ksym_dep_filter =                                                            \
+	case "$(1)" in                                                       \
+	  cc_*_c|cpp_i_c)                                                    \
+	    $(CPP) $(call flags_nodeps,c_flags) -D__KSYM_DEPS__ $< ;;        \
+	  as_*_S|cpp_s_S)                                                    \
+	    $(CPP) $(call flags_nodeps,a_flags) -D__KSYM_DEPS__ $< ;;        \
+	  boot*|build*|*cpp_lds_S|dtc|host*|vdso*) : ;;                      \
+	  *) echo "Don't know how to preprocess $(1)" >&2; false ;;          \
+	esac | tr ";" "\n" | sed -rn 's/^.*=== __KSYM_(.*) ===.*$$/KSYM_\1/p'
+
+cmd_and_fixdep =                                                             \
+	$(echo-cmd) $(cmd_$(1));                                             \
+	$(ksym_dep_filter) |                                                 \
+		scripts/basic/fixdep -e $(depfile) $@ '$(make-cmd)'          \
+			> $(dot-target).tmp;	                             \
+	rm -f $(depfile);                                                    \
+	mv -f $(dot-target).tmp $(dot-target).cmd;
+
+endif
 
 # Usage: $(call if_changed_rule,foo)
 # Will check if $(cmd_foo) or any of the prerequisites changed,
@@ -341,8 +374,6 @@
 #
 ###############################################################################
 #
-space_escape := %%%SPACE%%%
-#
 define config_filename
 ifneq ($$(CONFIG_$(1)),"")
 $(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1)))))))
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index e1bc190..0d1ca5b 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -152,11 +152,11 @@
 $(obj)/%.s: $(src)/%.c FORCE
 	$(call if_changed_dep,cc_s_c)
 
-quiet_cmd_cc_i_c = CPP $(quiet_modtag) $@
-cmd_cc_i_c       = $(CPP) $(c_flags)   -o $@ $<
+quiet_cmd_cpp_i_c = CPP $(quiet_modtag) $@
+cmd_cpp_i_c       = $(CPP) $(c_flags) -o $@ $<
 
 $(obj)/%.i: $(src)/%.c FORCE
-	$(call if_changed_dep,cc_i_c)
+	$(call if_changed_dep,cpp_i_c)
 
 cmd_gensymtypes =                                                           \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
@@ -266,26 +266,24 @@
 
 define rule_cc_o_c
 	$(call echo-cmd,checksrc) $(cmd_checksrc)			  \
-	$(call echo-cmd,cc_o_c) $(cmd_cc_o_c);				  \
+	$(call cmd_and_fixdep,cc_o_c)					  \
 	$(cmd_modversions)						  \
-	$(cmd_objtool)						  \
-	$(call echo-cmd,record_mcount)					  \
-	$(cmd_record_mcount)						  \
-	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' >    \
-	                                              $(dot-target).tmp;  \
-	rm -f $(depfile);						  \
-	mv -f $(dot-target).tmp $(dot-target).cmd
+	$(cmd_objtool)						          \
+	$(call echo-cmd,record_mcount) $(cmd_record_mcount)
 endef
 
 define rule_as_o_S
-	$(call echo-cmd,as_o_S) $(cmd_as_o_S);				  \
-	$(cmd_objtool)						  \
-	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' >    \
-	                                              $(dot-target).tmp;  \
-	rm -f $(depfile);						  \
-	mv -f $(dot-target).tmp $(dot-target).cmd
+	$(call cmd_and_fixdep,as_o_S)					  \
+	$(cmd_objtool)
 endef
 
+# List module undefined symbols (or empty line if not enabled)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+cmd_undef_syms = $(NM) $@ | sed -n 's/^ \+U //p' | xargs echo
+else
+cmd_undef_syms = echo
+endif
+
 # Built-in and composite module parts
 $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE
 	$(call cmd,force_checksrc)
@@ -296,7 +294,8 @@
 $(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
-	@{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod)
+	@{ echo $(@:.o=.ko); echo $@; \
+	   $(cmd_undef_syms); } > $(MODVERDIR)/$(@F:.o=.mod)
 
 quiet_cmd_cc_lst_c = MKLST   $@
       cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \
@@ -314,11 +313,11 @@
 $(real-objs-m)      : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
 $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
 
-quiet_cmd_as_s_S = CPP $(quiet_modtag) $@
-cmd_as_s_S       = $(CPP) $(a_flags)   -o $@ $<
+quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@
+cmd_cpp_s_S       = $(CPP) $(a_flags) -o $@ $<
 
 $(obj)/%.s: $(src)/%.S FORCE
-	$(call if_changed_dep,as_s_S)
+	$(call if_changed_dep,cpp_s_S)
 
 quiet_cmd_as_o_S = AS $(quiet_modtag)  $@
 cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
@@ -426,7 +425,8 @@
 
 $(multi-used-m): FORCE
 	$(call if_changed,link_multi-m)
-	@{ echo $(@:.o=.ko); echo $(link_multi_deps); } > $(MODVERDIR)/$(@F:.o=.mod)
+	@{ echo $(@:.o=.ko); echo $(link_multi_deps); \
+	   $(cmd_undef_syms); } > $(MODVERDIR)/$(@F:.o=.mod)
 $(call multi_depend, $(multi-used-m), .o, -objs -y -m)
 
 targets += $(multi-used-y) $(multi-used-m)
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index f9e47a7..53449a6 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -24,6 +24,7 @@
 warning-1 += -Wold-style-definition
 warning-1 += $(call cc-option, -Wmissing-include-dirs)
 warning-1 += $(call cc-option, -Wunused-but-set-variable)
+warning-1 += $(call cc-option, -Wunused-const-variable)
 warning-1 += $(call cc-disable-warning, missing-field-initializers)
 warning-1 += $(call cc-disable-warning, sign-compare)
 
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index ed1b7c4..e7df0f5 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -96,10 +96,10 @@
 # Note: Files that end up in two or more modules are compiled without the
 #       KBUILD_MODNAME definition. The reason is that any made-up name would
 #       differ in different configs.
-name-fix = $(subst $(comma),_,$(subst -,_,$1))
-basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
+name-fix = $(squote)$(quote)$(subst $(comma),_,$(subst -,_,$1))$(quote)$(squote)
+basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget))
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
-                 -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
+                 -DKBUILD_MODNAME=$(call name-fix,$(modname)))
 
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
@@ -162,7 +162,7 @@
 
 c_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
 		 $(__c_flags) $(modkern_cflags)                           \
-		 -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
+		 $(basename_flags) $(modname_flags)
 
 a_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
 		 $(__a_flags) $(modkern_aflags)
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
new file mode 100755
index 0000000..8dc1918
--- /dev/null
+++ b/scripts/adjust_autoksyms.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Script to create/update include/generated/autoksyms.h and dependency files
+#
+# Copyright:	(C) 2016  Linaro Limited
+# Created by:	Nicolas Pitre, January 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Create/update the include/generated/autoksyms.h file from the list
+# of all module's needed symbols as recorded on the third line of
+# .tmp_versions/*.mod files.
+#
+# For each symbol being added or removed, the corresponding dependency
+# file's timestamp is updated to force a rebuild of the affected source
+# file. All arguments passed to this script are assumed to be a command
+# to be exec'd to trigger a rebuild of those files.
+
+set -e
+
+cur_ksyms_file="include/generated/autoksyms.h"
+new_ksyms_file="include/generated/autoksyms.h.tmpnew"
+
+info() {
+	if [ "$quiet" != "silent_" ]; then
+		printf "  %-7s %s\n" "$1" "$2"
+	fi
+}
+
+info "CHK" "$cur_ksyms_file"
+
+# Use "make V=1" to debug this script.
+case "$KBUILD_VERBOSE" in
+*1*)
+	set -x
+	;;
+esac
+
+# We need access to CONFIG_ symbols
+case "${KCONFIG_CONFIG}" in
+*/*)
+	. "${KCONFIG_CONFIG}"
+	;;
+*)
+	# Force using a file from the current directory
+	. "./${KCONFIG_CONFIG}"
+esac
+
+# In case it doesn't exist yet...
+if [ ! -e "$cur_ksyms_file" ]; then touch "$cur_ksyms_file"; fi
+
+# Generate a new ksym list file with symbols needed by the current
+# set of modules.
+cat > "$new_ksyms_file" << EOT
+/*
+ * Automatically generated file; DO NOT EDIT.
+ */
+
+EOT
+sed -ns -e '3{s/ /\n/g;/^$/!p;}' "$MODVERDIR"/*.mod | sort -u |
+while read sym; do
+	if [ -n "$CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX" ]; then
+		sym="${sym#_}"
+	fi
+	echo "#define __KSYM_${sym} 1"
+done >> "$new_ksyms_file"
+
+# Special case for modversions (see modpost.c)
+if [ -n "$CONFIG_MODVERSIONS" ]; then
+	echo "#define __KSYM_module_layout 1" >> "$new_ksyms_file"
+fi
+
+# Extract changes between old and new list and touch corresponding
+# dependency files.
+changed=$(
+count=0
+sort "$cur_ksyms_file" "$new_ksyms_file" | uniq -u |
+sed -n 's/^#define __KSYM_\(.*\) 1/\1/p' | tr "A-Z_" "a-z/" |
+while read sympath; do
+	if [ -z "$sympath" ]; then continue; fi
+	depfile="include/config/ksym/${sympath}.h"
+	mkdir -p "$(dirname "$depfile")"
+	touch "$depfile"
+	echo $((count += 1))
+done | tail -1 )
+changed=${changed:-0}
+
+if [ $changed -gt 0 ]; then
+	# Replace the old list with the new one
+	old=$(grep -c "^#define __KSYM_" "$cur_ksyms_file" || true)
+	new=$(grep -c "^#define __KSYM_" "$new_ksyms_file" || true)
+	info "KSYMS" "symbols: before=$old, after=$new, changed=$changed"
+	info "UPD" "$cur_ksyms_file"
+	mv -f "$new_ksyms_file" "$cur_ksyms_file"
+	# Then trigger a rebuild of affected source files
+	exec $@
+else
+	rm -f "$new_ksyms_file"
+fi
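
For illustration, the symbol-to-path mapping performed by the sed/tr pipeline above (and, in C, by fixdep's print_config() further down) amounts to lower-casing the symbol name and turning underscores into directory separators under include/config/ksym/. A minimal Python sketch of that convention, using a made-up symbol name:

    # Illustrative only: mirrors the tr "A-Z_" "a-z/" step above.
    def ksym_dep_path(sym):
        return "include/config/ksym/" + sym.lower().replace("_", "/") + ".h"

    print(ksym_dep_path("PAGE_OFFSET"))  # -> include/config/ksym/page/offset.h
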
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index caef815..746ec1e 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -120,13 +120,15 @@
 #define INT_NFIG ntohl(0x4e464947)
 #define INT_FIG_ ntohl(0x4649475f)
 
+int insert_extra_deps;
 char *target;
 char *depfile;
 char *cmdline;
 
 static void usage(void)
 {
-	fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
+	fprintf(stderr, "Usage: fixdep [-e] <depfile> <target> <cmdline>\n");
+	fprintf(stderr, " -e  insert extra dependencies given on stdin\n");
 	exit(1);
 }
 
@@ -138,6 +140,40 @@
 	printf("cmd_%s := %s\n\n", target, cmdline);
 }
 
+/*
+ * Print out a dependency path from a symbol name
+ */
+static void print_config(const char *m, int slen)
+{
+	int c, i;
+
+	printf("    $(wildcard include/config/");
+	for (i = 0; i < slen; i++) {
+		c = m[i];
+		if (c == '_')
+			c = '/';
+		else
+			c = tolower(c);
+		putchar(c);
+	}
+	printf(".h) \\\n");
+}
+
+static void do_extra_deps(void)
+{
+	if (insert_extra_deps) {
+		char buf[80];
+		while(fgets(buf, sizeof(buf), stdin)) {
+			int len = strlen(buf);
+			if (len < 2 || buf[len-1] != '\n') {
+				fprintf(stderr, "fixdep: bad data on stdin\n");
+				exit(1);
+			}
+			print_config(buf, len-1);
+		}
+	}
+}
+
 struct item {
 	struct item	*next;
 	unsigned int	len;
@@ -197,23 +233,12 @@
 static void use_config(const char *m, int slen)
 {
 	unsigned int hash = strhash(m, slen);
-	int c, i;
 
 	if (is_defined_config(m, slen, hash))
 	    return;
 
 	define_config(m, slen, hash);
-
-	printf("    $(wildcard include/config/");
-	for (i = 0; i < slen; i++) {
-		c = m[i];
-		if (c == '_')
-			c = '/';
-		else
-			c = tolower(c);
-		putchar(c);
-	}
-	printf(".h) \\\n");
+	print_config(m, slen);
 }
 
 static void parse_config_file(const char *map, size_t len)
@@ -250,7 +275,7 @@
 	}
 }
 
-/* test is s ends in sub */
+/* test if s ends in sub */
 static int strrcmp(const char *s, const char *sub)
 {
 	int slen = strlen(s);
@@ -333,6 +358,7 @@
 
 			/* Ignore certain dependencies */
 			if (strrcmp(s, "include/generated/autoconf.h") &&
+			    strrcmp(s, "include/generated/autoksyms.h") &&
 			    strrcmp(s, "arch/um/include/uml-config.h") &&
 			    strrcmp(s, "include/linux/kconfig.h") &&
 			    strrcmp(s, ".ver")) {
@@ -378,6 +404,8 @@
 		exit(1);
 	}
 
+	do_extra_deps();
+
 	printf("\n%s: $(deps_%s)\n\n", target, target);
 	printf("$(deps_%s):\n", target);
 }
@@ -434,7 +462,10 @@
 {
 	traps();
 
-	if (argc != 4)
+	if (argc == 5 && !strcmp(argv[1], "-e")) {
+		insert_extra_deps = 1;
+		argv++;
+	} else if (argc != 4)
 		usage();
 
 	depfile = argv[1];
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6750595..4904ced 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2454,6 +2454,7 @@
 
 # Check for git id commit length and improperly formed commit descriptions
 		if ($in_commit_log && !$commit_log_possible_stack_dump &&
+		    $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
 		    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
 		     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
 		      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
diff --git a/scripts/coccicheck b/scripts/coccicheck
index b2d7581..dd85a45 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -98,7 +98,7 @@
 }
 
 kill_running() {
-	for i in $(seq $(( NPROC - 1 )) ); do
+	for i in $(seq 0 $(( NPROC - 1 )) ); do
 		if [ $VERBOSE -eq 2 ] ; then
 			echo "Killing ${SPATCH_PID[$i]}"
 		fi
diff --git a/scripts/coccinelle/api/setup_timer.cocci b/scripts/coccinelle/api/setup_timer.cocci
index 8ee0ac3..eb6bd9e 100644
--- a/scripts/coccinelle/api/setup_timer.cocci
+++ b/scripts/coccinelle/api/setup_timer.cocci
@@ -106,7 +106,7 @@
 @match_function_and_data_after_init_timer_context
 depends on !patch &&
 !match_immediate_function_data_after_init_timer_context &&
-(context || org || report)@
+ (context || org || report)@
 expression a, b, e1, e2, e3, e4, e5;
 position j0, j1, j2;
 @@
@@ -127,7 +127,7 @@
 @r3_context depends on !patch &&
 !match_immediate_function_data_after_init_timer_context &&
 !match_function_and_data_after_init_timer_context &&
-(context || org || report)@
+ (context || org || report)@
 expression c, e6, e7;
 position r1.p;
 position j0, j1;
diff --git a/scripts/coccinelle/misc/compare_const_fl.cocci b/scripts/coccinelle/misc/compare_const_fl.cocci
deleted file mode 100644
index b5d4bab..0000000
--- a/scripts/coccinelle/misc/compare_const_fl.cocci
+++ /dev/null
@@ -1,171 +0,0 @@
-/// Move constants to the right of binary operators.
-//# Depends on personal taste in some cases.
-///
-// Confidence: Moderate
-// Copyright: (C) 2015 Copyright: (C) 2015 Julia Lawall, Inria. GPLv2.
-// URL: http://coccinelle.lip6.fr/
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual context
-virtual org
-virtual report
-
-@r1 depends on patch && !context && !org && !report
- disable bitor_comm, neg_if_exp@
-constant c,c1;
-local idexpression i;
-expression e,e1,e2;
-binary operator b = {==,!=,&,|};
-type t;
-@@
-
-(
-c b (c1)
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
-i b e1
-|
-c | e1 | e2 | ...
-|
-c | (e ? e1 : e2)
-|
-- c
-+ e
-b
-- e
-+ c
-)
-
-@r2 depends on patch && !context && !org && !report
- disable gtr_lss, gtr_lss_eq, not_int2@
-constant c,c1;
-expression e,e1,e2;
-binary operator b;
-binary operator b1 = {<,<=},b2 = {<,<=};
-binary operator b3 = {>,>=},b4 = {>,>=};
-local idexpression i;
-type t;
-@@
-
-(
-c b c1
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
- (e1 b1 e) && (e b2 e2)
-|
- (e1 b3 e) && (e b4 e2)
-|
-i b e
-|
-- c < e
-+ e > c
-|
-- c <= e
-+ e >= c
-|
-- c > e
-+ e < c
-|
-- c >= e
-+ e <= c
-)
-
-// ----------------------------------------------------------------------------
-
-@r1_context depends on !patch && (context || org || report)
- disable bitor_comm, neg_if_exp exists@
-type t;
-binary operator b = {==,!=,&,|};
-constant c, c1;
-expression e, e1, e2;
-local idexpression i;
-position j0;
-@@
-
-(
-c b (c1)
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
-i b e1
-|
-c | e1 | e2 | ...
-|
-c | (e ? e1 : e2)
-|
-* c@j0 b e
-)
-
-@r2_context depends on !patch && (context || org || report)
- disable gtr_lss, gtr_lss_eq, not_int2 exists@
-type t;
-binary operator b, b1 = {<,<=}, b2 = {<,<=}, b3 = {>,>=}, b4 = {>,>=};
-constant c, c1;
-expression e, e1, e2;
-local idexpression i;
-position j0;
-@@
-
-(
-c b c1
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
- (e1 b1 e) && (e b2 e2)
-|
- (e1 b3 e) && (e b4 e2)
-|
-i b e
-|
-* c@j0 < e
-|
-* c@j0 <= e
-|
-* c@j0 > e
-|
-* c@j0 >= e
-)
-
-// ----------------------------------------------------------------------------
-
-@script:python r1_org depends on org@
-j0 << r1_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.org.print_todo(j0[0], msg)
-
-@script:python r2_org depends on org@
-j0 << r2_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.org.print_todo(j0[0], msg)
-
-// ----------------------------------------------------------------------------
-
-@script:python r1_report depends on report@
-j0 << r1_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.report.print_report(j0[0], msg)
-
-@script:python r2_report depends on report@
-j0 << r2_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.report.print_report(j0[0], msg)
-
diff --git a/scripts/gdb/linux/Makefile b/scripts/gdb/linux/Makefile
index 6cf1ecf..cd129e6 100644
--- a/scripts/gdb/linux/Makefile
+++ b/scripts/gdb/linux/Makefile
@@ -8,4 +8,14 @@
 endif
 	@:
 
-clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py)
+quiet_cmd_gen_constants_py = GEN     $@
+      cmd_gen_constants_py = \
+	$(CPP) -E -x c -P $(c_flags) $< > $@ ;\
+	sed -i '1,/<!-- end-c-headers -->/d;' $@
+
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
+	$(call if_changed,gen_constants_py)
+
+build_constants_py: $(obj)/constants.py
+
+clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
new file mode 100644
index 0000000..07e6c2b
--- /dev/null
+++ b/scripts/gdb/linux/constants.py.in
@@ -0,0 +1,59 @@
+/*
+ * gdb helper commands and functions for Linux kernel debugging
+ *
+ *  Kernel constants derived from include files.
+ *
+ * Copyright (c) 2016 Linaro Ltd
+ *
+ * Authors:
+ *  Kieran Bingham <kieran.bingham@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL version 2.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/radix-tree.h>
+
+/* We need to stringify expanded macros so that they can be parsed */
+
+#define STRING(x) #x
+#define XSTRING(x) STRING(x)
+
+#define LX_VALUE(x) LX_##x = x
+#define LX_GDBPARSED(x) LX_##x = gdb.parse_and_eval(XSTRING(x))
+
+/*
+ * IS_ENABLED generates (a || b) which is not compatible with python
+ * We can only switch on configuration items we know are available
+ * Therefore - IS_BUILTIN() is more appropriate
+ */
+#define LX_CONFIG(x) LX_##x = IS_BUILTIN(x)
+
+/* The build system will take care of deleting everything above this marker */
+<!-- end-c-headers -->
+
+import gdb
+
+/* linux/fs.h */
+LX_VALUE(MS_RDONLY)
+LX_VALUE(MS_SYNCHRONOUS)
+LX_VALUE(MS_MANDLOCK)
+LX_VALUE(MS_DIRSYNC)
+LX_VALUE(MS_NOATIME)
+LX_VALUE(MS_NODIRATIME)
+
+/* linux/mount.h */
+LX_VALUE(MNT_NOSUID)
+LX_VALUE(MNT_NODEV)
+LX_VALUE(MNT_NOEXEC)
+LX_VALUE(MNT_NOATIME)
+LX_VALUE(MNT_NODIRATIME)
+LX_VALUE(MNT_RELATIME)
+
+/* linux/radix-tree.h */
+LX_VALUE(RADIX_TREE_INDIRECT_PTR)
+LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
+LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
+LX_GDBPARSED(RADIX_TREE_MAP_MASK)
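
Once the gdb Makefile rule above has run CPP over this file and the sed step has removed everything up to the end-c-headers marker, what remains is an ordinary Python module of assignments. Roughly, and with purely illustrative values (the real numbers come from the headers and .config of the kernel being built):

    import gdb

    LX_MS_RDONLY = 1
    LX_MS_SYNCHRONOUS = 16
    LX_MNT_NOSUID = 0x01
    LX_RADIX_TREE_INDIRECT_PTR = 1
    LX_RADIX_TREE_MAP_SHIFT = gdb.parse_and_eval("(0 ? 4 : 6)")
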
diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
index 4297b83..ca11e8d 100644
--- a/scripts/gdb/linux/cpus.py
+++ b/scripts/gdb/linux/cpus.py
@@ -97,9 +97,47 @@
         bits >>= 1
         bit += 1
 
+        yield int(cpu)
+
+
+def each_online_cpu():
+    for cpu in cpu_list("__cpu_online_mask"):
         yield cpu
 
 
+def each_present_cpu():
+    for cpu in cpu_list("__cpu_present_mask"):
+        yield cpu
+
+
+def each_possible_cpu():
+    for cpu in cpu_list("__cpu_possible_mask"):
+        yield cpu
+
+
+def each_active_cpu():
+    for cpu in cpu_list("__cpu_active_mask"):
+        yield cpu
+
+
+class LxCpus(gdb.Command):
+    """List CPU status arrays
+
+Displays the known state of each CPU based on the kernel masks
+and can help identify the state of hotplugged CPUs"""
+
+    def __init__(self):
+        super(LxCpus, self).__init__("lx-cpus", gdb.COMMAND_DATA)
+
+    def invoke(self, arg, from_tty):
+        gdb.write("Possible CPUs : {}\n".format(list(each_possible_cpu())))
+        gdb.write("Present CPUs  : {}\n".format(list(each_present_cpu())))
+        gdb.write("Online CPUs   : {}\n".format(list(each_online_cpu())))
+        gdb.write("Active CPUs   : {}\n".format(list(each_active_cpu())))
+
+LxCpus()
+
+
 class PerCpu(gdb.Function):
     """Return per-cpu variable.
 
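
The new iterators also compose from other helpers or the gdb prompt; a hedged example that lists CPUs which are present but not currently online, built only on the generators added above:

    import gdb
    from linux import cpus

    offline = sorted(set(cpus.each_present_cpu()) - set(cpus.each_online_cpu()))
    gdb.write("present but offline CPUs: {}\n".format(offline))
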
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index 927d0d2..f9b92ec 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -33,11 +33,12 @@
         if log_first_idx < log_next_idx:
             log_buf_2nd_half = -1
             length = log_next_idx - log_first_idx
-            log_buf = inf.read_memory(start, length)
+            log_buf = utils.read_memoryview(inf, start, length).tobytes()
         else:
             log_buf_2nd_half = log_buf_len - log_first_idx
-            log_buf = inf.read_memory(start, log_buf_2nd_half) + \
-                inf.read_memory(log_buf_addr, log_next_idx)
+            a = utils.read_memoryview(inf, start, log_buf_2nd_half)
+            b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
+            log_buf = a.tobytes() + b.tobytes()
 
         pos = 0
         while pos < log_buf.__len__():
@@ -50,10 +51,10 @@
                 continue
 
             text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
-            text = log_buf[pos + 16:pos + 16 + text_len]
+            text = log_buf[pos + 16:pos + 16 + text_len].decode()
             time_stamp = utils.read_u64(log_buf[pos:pos + 8])
 
-            for line in memoryview(text).tobytes().splitlines():
+            for line in text.splitlines():
                 gdb.write("[{time:12.6f}] {line}\n".format(
                     time=time_stamp / 1000000000.0,
                     line=line))
diff --git a/scripts/gdb/linux/lists.py b/scripts/gdb/linux/lists.py
index 3a3775b..2f335fb 100644
--- a/scripts/gdb/linux/lists.py
+++ b/scripts/gdb/linux/lists.py
@@ -18,6 +18,27 @@
 list_head = utils.CachedType("struct list_head")
 
 
+def list_for_each(head):
+    if head.type == list_head.get_type().pointer():
+        head = head.dereference()
+    elif head.type != list_head.get_type():
+        raise gdb.GdbError("Must be struct list_head not {}"
+                           .format(head.type))
+
+    node = head['next'].dereference()
+    while node.address != head.address:
+        yield node.address
+        node = node['next'].dereference()
+
+
+def list_for_each_entry(head, gdbtype, member):
+    for node in list_for_each(head):
+        if node.type != list_head.get_type().pointer():
+            raise TypeError("Type {} found. Expected struct list_head *."
+                            .format(node.type))
+        yield utils.container_of(node, gdbtype, member)
+
+
 def list_check(head):
     nb = 0
     if (head.type == list_head.get_type().pointer()):
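
list_for_each_entry() mirrors the kernel macro of the same name and is what module_list() below is rewritten on top of. A hedged interactive sketch of using it directly; the 'workqueues' list head and the 'list' member of struct workqueue_struct are assumptions for illustration only:

    import gdb
    from linux import lists, utils

    wq_ptr = utils.CachedType("struct workqueue_struct").get_type().pointer()
    head = gdb.parse_and_eval("workqueues")          # assumed global list_head
    for wq in lists.list_for_each_entry(head, wq_ptr, "list"):
        gdb.write(wq['name'].string() + "\n")
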
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
index 0a35d6d..441b2323 100644
--- a/scripts/gdb/linux/modules.py
+++ b/scripts/gdb/linux/modules.py
@@ -13,7 +13,7 @@
 
 import gdb
 
-from linux import cpus, utils
+from linux import cpus, utils, lists
 
 
 module_type = utils.CachedType("struct module")
@@ -21,14 +21,14 @@
 
 def module_list():
     global module_type
-    module_ptr_type = module_type.get_type().pointer()
-    modules = gdb.parse_and_eval("modules")
-    entry = modules['next']
-    end_of_list = modules.address
+    modules = utils.gdb_eval_or_none("modules")
+    if modules is None:
+        return
 
-    while entry != end_of_list:
-        yield utils.container_of(entry, module_ptr_type, "list")
-        entry = entry['next']
+    module_ptr_type = module_type.get_type().pointer()
+
+    for module in lists.list_for_each_entry(modules, module_ptr_type, "list"):
+        yield module
 
 
 def find_module_by_name(name):
@@ -78,19 +78,17 @@
                 address=str(layout['base']).split()[0],
                 name=module['name'].string(),
                 size=str(layout['size']),
-                ref=str(module['refcnt']['counter'])))
+                ref=str(module['refcnt']['counter'] - 1)))
 
-            source_list = module['source_list']
             t = self._module_use_type.get_type().pointer()
-            entry = source_list['next']
             first = True
-            while entry != source_list.address:
-                use = utils.container_of(entry, t, "source_list")
+            sources = module['source_list']
+            for use in lists.list_for_each_entry(sources, t, "source_list"):
                 gdb.write("{separator}{name}".format(
                     separator=" " if first else ",",
                     name=use['source']['name'].string()))
                 first = False
-                entry = entry['next']
+
             gdb.write("\n")
 
 
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 6e6709c..38b1f09 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -12,6 +12,10 @@
 #
 
 import gdb
+from linux import constants
+from linux import utils
+from linux import tasks
+from linux import lists
 
 
 class LxCmdLine(gdb.Command):
@@ -39,3 +43,155 @@
         gdb.write(gdb.parse_and_eval("linux_banner").string())
 
 LxVersion()
+
+
+# Resource Structure Printers
+#  /proc/iomem
+#  /proc/ioports
+
+def get_resources(resource, depth):
+    while resource:
+        yield resource, depth
+
+        child = resource['child']
+        if child:
+            for res, deep in get_resources(child, depth + 1):
+                yield res, deep
+
+        resource = resource['sibling']
+
+
+def show_lx_resources(resource_str):
+        resource = gdb.parse_and_eval(resource_str)
+        width = 4 if resource['end'] < 0x10000 else 8
+        # Iterate straight to the first child
+        for res, depth in get_resources(resource['child'], 0):
+            start = int(res['start'])
+            end = int(res['end'])
+            gdb.write(" " * depth * 2 +
+                      "{0:0{1}x}-".format(start, width) +
+                      "{0:0{1}x} : ".format(end, width) +
+                      res['name'].string() + "\n")
+
+
+class LxIOMem(gdb.Command):
+    """Identify the IO memory resource locations defined by the kernel
+
+Equivalent to cat /proc/iomem on a running target"""
+
+    def __init__(self):
+        super(LxIOMem, self).__init__("lx-iomem", gdb.COMMAND_DATA)
+
+    def invoke(self, arg, from_tty):
+        return show_lx_resources("iomem_resource")
+
+LxIOMem()
+
+
+class LxIOPorts(gdb.Command):
+    """Identify the IO port resource locations defined by the kernel
+
+Equivalent to cat /proc/ioports on a running target"""
+
+    def __init__(self):
+        super(LxIOPorts, self).__init__("lx-ioports", gdb.COMMAND_DATA)
+
+    def invoke(self, arg, from_tty):
+        return show_lx_resources("ioport_resource")
+
+LxIOPorts()
+
+
+# Mount namespace viewer
+#  /proc/mounts
+
+def info_opts(lst, opt):
+    opts = ""
+    for key, string in lst.items():
+        if opt & key:
+            opts += string
+    return opts
+
+
+FS_INFO = {constants.LX_MS_SYNCHRONOUS: ",sync",
+           constants.LX_MS_MANDLOCK: ",mand",
+           constants.LX_MS_DIRSYNC: ",dirsync",
+           constants.LX_MS_NOATIME: ",noatime",
+           constants.LX_MS_NODIRATIME: ",nodiratime"}
+
+MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid",
+            constants.LX_MNT_NODEV: ",nodev",
+            constants.LX_MNT_NOEXEC: ",noexec",
+            constants.LX_MNT_NOATIME: ",noatime",
+            constants.LX_MNT_NODIRATIME: ",nodiratime",
+            constants.LX_MNT_RELATIME: ",relatime"}
+
+mount_type = utils.CachedType("struct mount")
+mount_ptr_type = mount_type.get_type().pointer()
+
+
+class LxMounts(gdb.Command):
+    """Report the VFS mounts of the current process namespace.
+
+Equivalent to cat /proc/mounts on a running target
+An integer value can be supplied to display the mount
+values of that process namespace"""
+
+    def __init__(self):
+        super(LxMounts, self).__init__("lx-mounts", gdb.COMMAND_DATA)
+
+    # Equivalent to proc_namespace.c:show_vfsmnt
+    # However, that has the ability to call into s_op functions
+    # whereas we cannot and must make do with the information we can obtain.
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        if len(argv) >= 1:
+            try:
+                pid = int(argv[0])
+            except:
+                raise gdb.GdbError("Provide a PID as integer value")
+        else:
+            pid = 1
+
+        task = tasks.get_task_by_pid(pid)
+        if not task:
+            raise gdb.GdbError("Couldn't find a process with PID {}"
+                               .format(pid))
+
+        namespace = task['nsproxy']['mnt_ns']
+        if not namespace:
+            raise gdb.GdbError("No namespace for current process")
+
+        for vfs in lists.list_for_each_entry(namespace['list'],
+                                             mount_ptr_type, "mnt_list"):
+            devname = vfs['mnt_devname'].string()
+            devname = devname if devname else "none"
+
+            pathname = ""
+            parent = vfs
+            while True:
+                mntpoint = parent['mnt_mountpoint']
+                pathname = utils.dentry_name(mntpoint) + pathname
+                if (parent == parent['mnt_parent']):
+                    break
+                parent = parent['mnt_parent']
+
+            if (pathname == ""):
+                pathname = "/"
+
+            superblock = vfs['mnt']['mnt_sb']
+            fstype = superblock['s_type']['name'].string()
+            s_flags = int(superblock['s_flags'])
+            m_flags = int(vfs['mnt']['mnt_flags'])
+            rd = "ro" if (s_flags & constants.LX_MS_RDONLY) else "rw"
+
+            gdb.write(
+                "{} {} {} {}{}{} 0 0\n"
+                .format(devname,
+                        pathname,
+                        fstype,
+                        rd,
+                        info_opts(FS_INFO, s_flags),
+                        info_opts(MNT_INFO, m_flags)))
+
+LxMounts()
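
get_resources() above walks the resource tree depth first, recursing on 'child' and iterating over 'sibling'. A self-contained sketch of the same traversal over plain dictionaries (illustrative data only, not kernel structures):

    def walk(resource, depth=0):
        # Yield each node with its nesting depth, children before siblings.
        while resource:
            yield resource, depth
            if resource.get('child'):
                for res, d in walk(resource['child'], depth + 1):
                    yield res, d
            resource = resource.get('sibling')

    root = {'name': 'PCI Bus 0000:00',
            'child': {'name': 'BAR 0', 'child': None, 'sibling': None},
            'sibling': None}
    for res, depth in walk(root):
        print("  " * depth + res['name'])
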
diff --git a/scripts/gdb/linux/radixtree.py b/scripts/gdb/linux/radixtree.py
new file mode 100644
index 0000000..0fdef4e
--- /dev/null
+++ b/scripts/gdb/linux/radixtree.py
@@ -0,0 +1,97 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+#  Radix Tree Parser
+#
+# Copyright (c) 2016 Linaro Ltd
+#
+# Authors:
+#  Kieran Bingham <kieran.bingham@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import utils
+from linux import constants
+
+radix_tree_root_type = utils.CachedType("struct radix_tree_root")
+radix_tree_node_type = utils.CachedType("struct radix_tree_node")
+
+
+def is_indirect_ptr(node):
+    long_type = utils.get_long_type()
+    return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
+
+
+def indirect_to_ptr(node):
+    long_type = utils.get_long_type()
+    node_type = node.type
+    indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
+    return indirect_ptr.cast(node_type)
+
+
+def maxindex(height):
+    height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
+    return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
+
+
+def lookup(root, index):
+    if root.type == radix_tree_root_type.get_type().pointer():
+        root = root.dereference()
+    elif root.type != radix_tree_root_type.get_type():
+        raise gdb.GdbError("Must be struct radix_tree_root not {}"
+                           .format(root.type))
+
+    node = root['rnode']
+    if node == 0:
+        return None
+
+    if not (is_indirect_ptr(node)):
+        if (index > 0):
+            return None
+        return node
+
+    node = indirect_to_ptr(node)
+
+    height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
+    if (index > maxindex(height)):
+        return None
+
+    shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
+
+    while True:
+        new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
+        slot = node['slots'][new_index]
+
+        node = slot.cast(node.type.pointer()).dereference()
+        if node == 0:
+            return None
+
+        shift -= constants.LX_RADIX_TREE_MAP_SHIFT
+        height -= 1
+
+        if (height <= 0):
+            break
+
+    return node
+
+
+class LxRadixTree(gdb.Function):
+    """ Lookup and return a node from a RadixTree.
+
+$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
+If index is omitted, the root node is dereferenced and returned."""
+
+    def __init__(self):
+        super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
+
+    def invoke(self, root, index=0):
+        result = lookup(root, index)
+        if result is None:
+            raise gdb.GdbError("No entry in tree at index {}".format(index))
+
+        return result
+
+LxRadixTree()
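
is_indirect_ptr() and indirect_to_ptr() above rely on the low bit of the node pointer acting as a tag for internal nodes. A standalone sketch of that tagging scheme; the literal tag value 1 is only for illustration, the helpers take the real value from the generated constants.py:

    RADIX_TREE_INDIRECT_PTR = 1   # illustrative; constants.py provides the real value

    def is_indirect(addr):
        return addr & RADIX_TREE_INDIRECT_PTR

    def to_node(addr):
        return addr & ~RADIX_TREE_INDIRECT_PTR

    tagged = 0xffff880012345000 | RADIX_TREE_INDIRECT_PTR
    assert is_indirect(tagged)
    assert to_node(tagged) == 0xffff880012345000
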
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index 862a4ae..1bf949c 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -114,3 +114,22 @@
 
 
 LxThreadInfoFunc()
+
+
+class LxThreadInfoByPidFunc (gdb.Function):
+    """Calculate Linux thread_info from task variable found by pid
+
+$lx_thread_info_by_pid(PID): Given PID, return the corresponding thread_info
+variable."""
+
+    def __init__(self):
+        super(LxThreadInfoByPidFunc, self).__init__("lx_thread_info_by_pid")
+
+    def invoke(self, pid):
+        task = get_task_by_pid(pid)
+        if task:
+            return get_thread_info(task.dereference())
+        else:
+            raise gdb.GdbError("No task with PID " + str(pid))
+
+LxThreadInfoByPidFunc()
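
The same lookup can also be driven from Python rather than through the convenience function; a hedged sketch, assuming the inferior has a task with PID 1:

    import gdb
    from linux import tasks

    task = tasks.get_task_by_pid(1)                  # struct task_struct *
    if task:
        ti = tasks.get_thread_info(task.dereference())
        gdb.write(str(ti) + "\n")
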
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index 0893b32..5080587 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -87,11 +87,24 @@
     return target_endianness
 
 
+def read_memoryview(inf, start, length):
+    return memoryview(inf.read_memory(start, length))
+
+
 def read_u16(buffer):
-    if get_target_endianness() == LITTLE_ENDIAN:
-        return ord(buffer[0]) + (ord(buffer[1]) << 8)
+    value = [0, 0]
+
+    if type(buffer[0]) is str:
+        value[0] = ord(buffer[0])
+        value[1] = ord(buffer[1])
     else:
-        return ord(buffer[1]) + (ord(buffer[0]) << 8)
+        value[0] = buffer[0]
+        value[1] = buffer[1]
+
+    if get_target_endianness() == LITTLE_ENDIAN:
+        return value[0] + (value[1] << 8)
+    else:
+        return value[1] + (value[0] << 8)
 
 
 def read_u32(buffer):
@@ -154,3 +167,18 @@
         if gdbserver_type is not None and hasattr(gdb, 'events'):
             gdb.events.exited.connect(exit_handler)
     return gdbserver_type
+
+
+def gdb_eval_or_none(expression):
+    try:
+        return gdb.parse_and_eval(expression)
+    except:
+        return None
+
+
+def dentry_name(d):
+    parent = d['d_parent']
+    if parent == d or parent == 0:
+        return ""
+    p = dentry_name(d['d_parent']) + "/"
+    return p + d['d_iname'].string()
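
The reworked read_u16() copes with both Python 2 buffer slices (indexing yields one-character strings) and Python 3 slices (indexing yields ints). A quick standalone check of the little-endian path, mirroring the logic above:

    def read_u16_le(buf):
        lo, hi = buf[0], buf[1]
        if isinstance(lo, str):    # Python 2: indexing bytes yields str
            lo, hi = ord(lo), ord(hi)
        return lo + (hi << 8)

    assert read_u16_le(b"\x34\x12") == 0x1234
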
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index d5943ec..3a80ad6 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -30,3 +30,5 @@
     import linux.cpus
     import linux.lists
     import linux.proc
+    import linux.constants
+    import linux.radixtree
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index dafaf96..06121ce 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -873,5 +873,8 @@
 			(double)nsyms / (double)HASH_BUCKETS);
 	}
 
+	if (dumpfile)
+		fclose(dumpfile);
+
 	return errors != 0;
 }
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl
index 62320f9..8b2da05 100755
--- a/scripts/headers_check.pl
+++ b/scripts/headers_check.pl
@@ -69,6 +69,10 @@
 	if ($line =~ m/^void seqbuf_dump\(void\);/) {
 		return;
 	}
+	# drm headers are being C++ friendly
+	if ($line =~ m/^extern "C"/) {
+		return;
+	}
 	if ($line =~ m/^(\s*extern|unsigned|char|short|int|long|void)\b/) {
 		printf STDERR "$filename:$lineno: " .
 			      "userspace cannot reference function or " .
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index dd243d2..297b079 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -375,7 +375,9 @@
 				continue;
 		} else {
 			if (line[0] != '\r' && line[0] != '\n')
-				conf_warning("unexpected data");
+				conf_warning("unexpected data: %.*s",
+					     (int)strcspn(line, "\r\n"), line);
+
 			continue;
 		}
 setsym:
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 25cf0c2..2432298 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -209,12 +209,26 @@
 static void sym_calc_visibility(struct symbol *sym)
 {
 	struct property *prop;
+	struct symbol *choice_sym = NULL;
 	tristate tri;
 
 	/* any prompt visible? */
 	tri = no;
+
+	if (sym_is_choice_value(sym))
+		choice_sym = prop_get_symbol(sym_get_choice_prop(sym));
+
 	for_all_prompts(sym, prop) {
 		prop->visible.tri = expr_calc_value(prop->visible.expr);
+		/*
+		 * Tristate choice_values with visibility 'mod' are
+		 * not visible if the corresponding choice's value is
+		 * 'yes'.
+		 */
+		if (choice_sym && sym->type == S_TRISTATE &&
+		    prop->visible.tri == mod && choice_sym->curr.tri == yes)
+			prop->visible.tri = no;
+
 		tri = EXPR_OR(tri, prop->visible.tri);
 	}
 	if (tri == mod && (sym->type != S_TRISTATE || modules_val == no))
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index c2c7389..71b4a8a 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -52,7 +52,7 @@
 	$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
 	$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
 	mv -f $(objtree)/.tmp_version $(objtree)/.version
-	rpmbuild --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
+	rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
 	rm $(KERNELPATH).tar.gz kernel.spec
 
 # binrpm-pkg
@@ -63,7 +63,7 @@
 	$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
 	mv -f $(objtree)/.tmp_version $(objtree)/.version
 
-	rpmbuild --define "_builddir $(objtree)" --target \
+	rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
 		$(UTS_MACHINE) -bb $(objtree)/binkernel.spec
 	rm binkernel.spec
 
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 6c3b038..86e56fe 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -322,7 +322,10 @@
 
 # Build kernel header package
 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find arch/$SRCARCH/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
+if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
+	(cd $srctree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrsrcfiles"
+fi
+(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index b6de63c..57673ba 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -143,6 +143,11 @@
 echo "new-kernel-pkg --remove $KERNELRELEASE --rminitrd --initrdfile=/boot/initramfs-$KERNELRELEASE.img"
 echo "fi"
 echo ""
+echo "%postun"
+echo "if [ -x /sbin/update-bootloader ]; then"
+echo "/sbin/update-bootloader --remove $KERNELRELEASE"
+echo "fi"
+echo ""
 echo "%files"
 echo '%defattr (-, root, root)'
 echo "/lib/modules/$KERNELRELEASE"
diff --git a/security/keys/compat.c b/security/keys/compat.c
index c8783b3..36c80bf 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -134,7 +134,7 @@
 
 	case KEYCTL_DH_COMPUTE:
 		return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
-					 arg4);
+					 arg4, compat_ptr(arg5));
 
 	default:
 		return -EOPNOTSUPP;
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 880505a..531ed2e 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -78,7 +78,8 @@
 }
 
 long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-		       char __user *buffer, size_t buflen)
+		       char __user *buffer, size_t buflen,
+		       void __user *reserved)
 {
 	long ret;
 	MPI base, private, prime, result;
@@ -97,6 +98,11 @@
 		goto out;
 	}
 
+	if (reserved) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	keylen = mpi_from_key(pcopy.prime, buflen, &prime);
 	if (keylen < 0 || !prime) {
 		/* buflen == 0 may be used to query the required buffer size,
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 8ec7a52..a705a7d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -260,10 +260,11 @@
 
 #ifdef CONFIG_KEY_DH_OPERATIONS
 extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
-			      size_t);
+			      size_t, void __user *);
 #else
 static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-				     char __user *buffer, size_t buflen)
+				     char __user *buffer, size_t buflen,
+				     void __user *reserved)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 3b135a0..d580ad0 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1688,8 +1688,8 @@
 
 	case KEYCTL_DH_COMPUTE:
 		return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
-					 (char __user *) arg3,
-					 (size_t) arg4);
+					 (char __user *) arg3, (size_t) arg4,
+					 (void __user *) arg5);
 
 	default:
 		return -EOPNOTSUPP;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ff2b8c3..6777295 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -3514,7 +3514,7 @@
 			 */
 			if (isp->smk_flags & SMK_INODE_CHANGED) {
 				isp->smk_flags &= ~SMK_INODE_CHANGED;
-				rc = inode->i_op->setxattr(dp,
+				rc = inode->i_op->setxattr(dp, inode,
 					XATTR_NAME_SMACKTRANSMUTE,
 					TRANS_TRUE, TRANS_TRUE_SIZE,
 					0);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9b756b1..0309f21 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -19,6 +19,9 @@
 #include <linux/ratelimit.h>
 #include <linux/workqueue.h>
 #include <linux/string_helpers.h>
+#include <linux/task_work.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
 
 #define YAMA_SCOPE_DISABLED	0
 #define YAMA_SCOPE_RELATIONAL	1
@@ -42,20 +45,71 @@
 static void yama_relation_cleanup(struct work_struct *work);
 static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
 
-static void report_access(const char *access, struct task_struct *target,
-			  struct task_struct *agent)
+struct access_report_info {
+	struct callback_head work;
+	const char *access;
+	struct task_struct *target;
+	struct task_struct *agent;
+};
+
+static void __report_access(struct callback_head *work)
 {
+	struct access_report_info *info =
+		container_of(work, struct access_report_info, work);
 	char *target_cmd, *agent_cmd;
 
-	target_cmd = kstrdup_quotable_cmdline(target, GFP_ATOMIC);
-	agent_cmd = kstrdup_quotable_cmdline(agent, GFP_ATOMIC);
+	target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL);
+	agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL);
 
 	pr_notice_ratelimited(
 		"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
-		access, target_cmd, target->pid, agent_cmd, agent->pid);
+		info->access, target_cmd, info->target->pid, agent_cmd,
+		info->agent->pid);
 
 	kfree(agent_cmd);
 	kfree(target_cmd);
+
+	put_task_struct(info->agent);
+	put_task_struct(info->target);
+	kfree(info);
+}
+
+/* defers execution because cmdline access can sleep */
+static void report_access(const char *access, struct task_struct *target,
+				struct task_struct *agent)
+{
+	struct access_report_info *info;
+	char agent_comm[sizeof(agent->comm)];
+
+	assert_spin_locked(&target->alloc_lock); /* for target->comm */
+
+	if (current->flags & PF_KTHREAD) {
+		/* I don't think kthreads call task_work_run() before exiting.
+		 * Imagine angry ranting about procfs here.
+		 */
+		pr_notice_ratelimited(
+		    "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
+		    access, target->comm, target->pid,
+		    get_task_comm(agent_comm, agent), agent->pid);
+		return;
+	}
+
+	info = kmalloc(sizeof(*info), GFP_ATOMIC);
+	if (!info)
+		return;
+	init_task_work(&info->work, __report_access);
+	get_task_struct(target);
+	get_task_struct(agent);
+	info->access = access;
+	info->target = target;
+	info->agent = agent;
+	if (task_work_add(current, &info->work, true) == 0)
+		return; /* success */
+
+	WARN(1, "report_access called from exiting task");
+	put_task_struct(target);
+	put_task_struct(agent);
+	kfree(info);
 }
 
 /**
@@ -351,8 +405,11 @@
 		break;
 	}
 
-	if (rc)
+	if (rc) {
+		task_lock(current);
 		report_access("traceme", current, parent);
+		task_unlock(current);
+	}
 
 	return rc;
 }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 9a0d144..94089fc 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -365,8 +365,11 @@
 
 #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+			IS_KBL(pci) || IS_KBL_LP(pci))
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2181,6 +2184,12 @@
 	/* Sunrise Point-LP */
 	{ PCI_DEVICE(0x8086, 0x9d70),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Kabylake */
+	{ PCI_DEVICE(0x8086, 0xa171),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Kabylake-LP */
+	{ PCI_DEVICE(0x8086, 0x9d71),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 002f153..0fe18ed 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -335,6 +335,7 @@
 	case 0x10ec0283:
 	case 0x10ec0286:
 	case 0x10ec0288:
+	case 0x10ec0295:
 	case 0x10ec0298:
 		alc_update_coef_idx(codec, 0x10, 1<<9, 0);
 		break;
@@ -345,6 +346,9 @@
 	case 0x10ec0234:
 	case 0x10ec0274:
 	case 0x10ec0294:
+	case 0x10ec0700:
+	case 0x10ec0701:
+	case 0x10ec0703:
 		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
 		break;
 	case 0x10ec0662:
@@ -907,6 +911,7 @@
 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
 	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
+	{ 0x10ec0295, 0x1028, 0, "ALC3254" },
 	{ 0x10ec0670, 0x1025, 0, "ALC669X" },
 	{ 0x10ec0676, 0x1025, 0, "ALC679X" },
 	{ 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -2653,6 +2658,7 @@
 	ALC269_TYPE_ALC256,
 	ALC269_TYPE_ALC225,
 	ALC269_TYPE_ALC294,
+	ALC269_TYPE_ALC700,
 };
 
 /*
@@ -2684,6 +2690,7 @@
 	case ALC269_TYPE_ALC256:
 	case ALC269_TYPE_ALC225:
 	case ALC269_TYPE_ALC294:
+	case ALC269_TYPE_ALC700:
 		ssids = alc269_ssids;
 		break;
 	default:
@@ -3616,13 +3623,20 @@
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
 	static struct coef_fw coef0255[] = {
-		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
 		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
 		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
 		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
 		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
 		{}
 	};
+	static struct coef_fw coef0255_1[] = {
+		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+		{}
+	};
+	static struct coef_fw coef0256[] = {
+		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+		{}
+	};
 	static struct coef_fw coef0233[] = {
 		WRITE_COEF(0x1b, 0x0c0b),
 		WRITE_COEF(0x45, 0xc429),
@@ -3675,7 +3689,11 @@
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
+		alc_process_coef_fw(codec, coef0255_1);
+		alc_process_coef_fw(codec, coef0255);
+		break;
 	case 0x10ec0256:
+		alc_process_coef_fw(codec, coef0256);
 		alc_process_coef_fw(codec, coef0255);
 		break;
 	case 0x10ec0233:
@@ -3697,6 +3715,7 @@
 		alc_process_coef_fw(codec, coef0668);
 		break;
 	case 0x10ec0225:
+	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
 	}
@@ -3797,6 +3816,7 @@
 		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
 		break;
 	case 0x10ec0225:
+	case 0x10ec0295:
 		alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
 		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
 		alc_process_coef_fw(codec, coef0225);
@@ -3854,6 +3874,7 @@
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0225:
+	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
 	case 0x10ec0255:
@@ -3891,6 +3912,12 @@
 		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
 		{}
 	};
+	static struct coef_fw coef0256[] = {
+		WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+		WRITE_COEF(0x1b, 0x0c6b),
+		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+		{}
+	};
 	static struct coef_fw coef0233[] = {
 		WRITE_COEF(0x45, 0xd429),
 		WRITE_COEF(0x1b, 0x0c2b),
@@ -3931,9 +3958,11 @@
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
-	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0256:
+		alc_process_coef_fw(codec, coef0256);
+		break;
 	case 0x10ec0233:
 	case 0x10ec0283:
 		alc_process_coef_fw(codec, coef0233);
@@ -3957,6 +3986,7 @@
 		alc_process_coef_fw(codec, coef0688);
 		break;
 	case 0x10ec0225:
+	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
 	}
@@ -3972,6 +4002,12 @@
 		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
 		{}
 	};
+	static struct coef_fw coef0256[] = {
+		WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+		WRITE_COEF(0x1b, 0x0c6b),
+		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+		{}
+	};
 	static struct coef_fw coef0233[] = {
 		WRITE_COEF(0x45, 0xe429),
 		WRITE_COEF(0x1b, 0x0c2b),
@@ -4012,9 +4048,11 @@
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
-	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0256:
+		alc_process_coef_fw(codec, coef0256);
+		break;
 	case 0x10ec0233:
 	case 0x10ec0283:
 		alc_process_coef_fw(codec, coef0233);
@@ -4038,6 +4076,7 @@
 		alc_process_coef_fw(codec, coef0688);
 		break;
 	case 0x10ec0225:
+	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
 	}
@@ -4121,6 +4160,7 @@
 		is_ctia = (val & 0x1c02) == 0x1c02;
 		break;
 	case 0x10ec0225:
+	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		msleep(800);
 		val = alc_read_coef_idx(codec, 0x46);
@@ -4258,7 +4298,7 @@
 static void alc255_set_default_jack_type(struct hda_codec *codec)
 {
 	/* Set to iphone type */
-	static struct coef_fw fw[] = {
+	static struct coef_fw alc255fw[] = {
 		WRITE_COEF(0x1b, 0x880b),
 		WRITE_COEF(0x45, 0xd089),
 		WRITE_COEF(0x1b, 0x080b),
@@ -4266,7 +4306,22 @@
 		WRITE_COEF(0x1b, 0x0c0b),
 		{}
 	};
-	alc_process_coef_fw(codec, fw);
+	static struct coef_fw alc256fw[] = {
+		WRITE_COEF(0x1b, 0x884b),
+		WRITE_COEF(0x45, 0xd089),
+		WRITE_COEF(0x1b, 0x084b),
+		WRITE_COEF(0x46, 0x0004),
+		WRITE_COEF(0x1b, 0x0c4b),
+		{}
+	};
+	switch (codec->core.vendor_id) {
+	case 0x10ec0255:
+		alc_process_coef_fw(codec, alc255fw);
+		break;
+	case 0x10ec0256:
+		alc_process_coef_fw(codec, alc256fw);
+		break;
+	}
 	msleep(30);
 }
 
@@ -5466,8 +5521,9 @@
 	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
-	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5578,6 +5634,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -5711,6 +5768,9 @@
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x14, 0x90170130},
+		{0x21, 0x02211040}),
+	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60140},
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
@@ -5763,6 +5823,10 @@
 		{0x12, 0x90a60180},
 		{0x14, 0x90170130},
 		{0x21, 0x02211040}),
+	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60180},
+		{0x14, 0x90170120},
+		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
@@ -6033,6 +6097,7 @@
 		alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
 		break;
 	case 0x10ec0225:
+	case 0x10ec0295:
 		spec->codec_variant = ALC269_TYPE_ALC225;
 		break;
 	case 0x10ec0234:
@@ -6040,6 +6105,14 @@
 	case 0x10ec0294:
 		spec->codec_variant = ALC269_TYPE_ALC294;
 		break;
+	case 0x10ec0700:
+	case 0x10ec0701:
+	case 0x10ec0703:
+		spec->codec_variant = ALC269_TYPE_ALC700;
+		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+		alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+		break;
+
 	}
 
 	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6979,6 +7052,7 @@
 	HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
 	HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
 	HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
@@ -6994,6 +7068,9 @@
 	HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
 	HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
 	HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
+	HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
 	HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
 	HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
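
For context on the coefficient tables added above (coef0255_1, coef0256, alc256fw and friends): alc_process_coef_fw() walks each array until it reaches the empty {} terminator, which is why every table ends with one. A hedged user-space analogue of that sentinel-terminated walk is shown below; the real driver entries also carry a nid and mask, omitted here, and process_coef_table() is an invented name.

	#include <stdio.h>

	/* Simplified stand-in for the driver's coef_fw entries. */
	struct coef_entry {
		unsigned short idx;	/* coefficient index */
		unsigned short val;	/* value to write    */
	};

	/* Walk the table until the all-zero terminator, like the {} sentinel above. */
	static void process_coef_table(const struct coef_entry *fw)
	{
		for (; fw->idx || fw->val; fw++)
			printf("write coef 0x%02x = 0x%04x\n", fw->idx, fw->val);
	}

	int main(void)
	{
		static const struct coef_entry coef_demo[] = {
			{ 0x1b, 0x0c4b },	/* LDO and MISC control (from the hunk) */
			{ 0x45, 0xd089 },
			{ }			/* terminator */
		};

		process_coef_table(coef_demo);
		return 0;
	}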
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index b3afae9..4d82a58 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -43,6 +43,7 @@
 	select SND_SOC_AK5386
 	select SND_SOC_ALC5623 if I2C
 	select SND_SOC_ALC5632 if I2C
+	select SND_SOC_BT_SCO
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS35L32 if I2C
 	select SND_SOC_CS42L51_I2C if I2C
@@ -64,7 +65,6 @@
 	select SND_SOC_DA732X if I2C
 	select SND_SOC_DA9055 if I2C
 	select SND_SOC_DMIC
-	select SND_SOC_BT_SCO
 	select SND_SOC_ES8328_SPI if SPI_MASTER
 	select SND_SOC_ES8328_I2C if I2C
 	select SND_SOC_GTM601
@@ -79,6 +79,7 @@
 	select SND_SOC_MAX98090 if I2C
 	select SND_SOC_MAX98095 if I2C
 	select SND_SOC_MAX98357A if GPIOLIB
+	select SND_SOC_MAX98371 if I2C
 	select SND_SOC_MAX9867 if I2C
 	select SND_SOC_MAX98925 if I2C
 	select SND_SOC_MAX98926 if I2C
@@ -126,12 +127,14 @@
 	select SND_SOC_TAS2552 if I2C
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TAS571X if I2C
+	select SND_SOC_TAS5720 if I2C
 	select SND_SOC_TFA9879 if I2C
 	select SND_SOC_TLV320AIC23_I2C if I2C
 	select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
 	select SND_SOC_TLV320AIC26 if SPI_MASTER
 	select SND_SOC_TLV320AIC31XX if I2C
-	select SND_SOC_TLV320AIC32X4 if I2C
+	select SND_SOC_TLV320AIC32X4_I2C if I2C
+	select SND_SOC_TLV320AIC32X4_SPI if SPI_MASTER
 	select SND_SOC_TLV320AIC3X if I2C
 	select SND_SOC_TPA6130A2 if I2C
 	select SND_SOC_TLV320DAC33 if I2C
@@ -367,6 +370,9 @@
 config SND_SOC_ALC5632
 	tristate
 
+config SND_SOC_BT_SCO
+	tristate
+
 config SND_SOC_CQ0093VC
 	tristate
 
@@ -473,9 +479,6 @@
 config SND_SOC_DA9055
 	tristate
 
-config SND_SOC_BT_SCO
-	tristate
-
 config SND_SOC_DMIC
 	tristate
 
@@ -529,6 +532,9 @@
 config SND_SOC_MAX98357A
        tristate
 
+config SND_SOC_MAX98371
+       tristate
+
 config SND_SOC_MAX9867
 	tristate
 
@@ -748,9 +754,16 @@
 	depends on I2C
 
 config SND_SOC_TAS571X
-	tristate "Texas Instruments TAS5711/TAS5717/TAS5719 power amplifiers"
+	tristate "Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 power amplifiers"
 	depends on I2C
 
+config SND_SOC_TAS5720
+	tristate "Texas Instruments TAS5720 Mono Audio amplifier"
+	depends on I2C
+	help
+	  Enable support for Texas Instruments TAS5720L/M high-efficiency mono
+	  Class-D audio power amplifiers.
+
 config SND_SOC_TFA9879
 	tristate "NXP Semiconductors TFA9879 amplifier"
 	depends on I2C
@@ -780,6 +793,16 @@
 config SND_SOC_TLV320AIC32X4
 	tristate
 
+config SND_SOC_TLV320AIC32X4_I2C
+	tristate
+	depends on I2C
+	select SND_SOC_TLV320AIC32X4
+
+config SND_SOC_TLV320AIC32X4_SPI
+	tristate
+	depends on SPI_MASTER
+	select SND_SOC_TLV320AIC32X4
+
 config SND_SOC_TLV320AIC3X
 	tristate "Texas Instruments TLV320AIC3x CODECs"
 	depends on I2C
@@ -920,7 +943,8 @@
 	tristate
 
 config SND_SOC_WM8960
-	tristate
+	tristate "Wolfson Microelectronics WM8960 CODEC"
+	depends on I2C
 
 config SND_SOC_WM8961
 	tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index b7b9941..0f548fd3 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -32,6 +32,7 @@
 snd-soc-ak4671-objs := ak4671.o
 snd-soc-ak5386-objs := ak5386.o
 snd-soc-arizona-objs := arizona.o
+snd-soc-bt-sco-objs := bt-sco.o
 snd-soc-cq93vc-objs := cq93vc.o
 snd-soc-cs35l32-objs := cs35l32.o
 snd-soc-cs42l51-objs := cs42l51.o
@@ -55,7 +56,6 @@
 snd-soc-da7219-objs := da7219.o da7219-aad.o
 snd-soc-da732x-objs := da732x.o
 snd-soc-da9055-objs := da9055.o
-snd-soc-bt-sco-objs := bt-sco.o
 snd-soc-dmic-objs := dmic.o
 snd-soc-es8328-objs := es8328.o
 snd-soc-es8328-i2c-objs := es8328-i2c.o
@@ -74,6 +74,7 @@
 snd-soc-max98090-objs := max98090.o
 snd-soc-max98095-objs := max98095.o
 snd-soc-max98357a-objs := max98357a.o
+snd-soc-max98371-objs := max98371.o
 snd-soc-max9867-objs := max9867.o
 snd-soc-max98925-objs := max98925.o
 snd-soc-max98926-objs := max98926.o
@@ -131,6 +132,7 @@
 snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
+snd-soc-tas5720-objs := tas5720.o
 snd-soc-tfa9879-objs := tfa9879.o
 snd-soc-tlv320aic23-objs := tlv320aic23.o
 snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -138,6 +140,8 @@
 snd-soc-tlv320aic26-objs := tlv320aic26.o
 snd-soc-tlv320aic31xx-objs := tlv320aic31xx.o
 snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
+snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
+snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
 snd-soc-tlv320aic3x-objs := tlv320aic3x.o
 snd-soc-tlv320dac33-objs := tlv320dac33.o
 snd-soc-ts3a227e-objs := ts3a227e.o
@@ -243,6 +247,7 @@
 obj-$(CONFIG_SND_SOC_ALC5623)    += snd-soc-alc5623.o
 obj-$(CONFIG_SND_SOC_ALC5632)	+= snd-soc-alc5632.o
 obj-$(CONFIG_SND_SOC_ARIZONA)	+= snd-soc-arizona.o
+obj-$(CONFIG_SND_SOC_BT_SCO)	+= snd-soc-bt-sco.o
 obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
 obj-$(CONFIG_SND_SOC_CS35L32)	+= snd-soc-cs35l32.o
 obj-$(CONFIG_SND_SOC_CS42L51)	+= snd-soc-cs42l51.o
@@ -266,7 +271,6 @@
 obj-$(CONFIG_SND_SOC_DA7219)	+= snd-soc-da7219.o
 obj-$(CONFIG_SND_SOC_DA732X)	+= snd-soc-da732x.o
 obj-$(CONFIG_SND_SOC_DA9055)	+= snd-soc-da9055.o
-obj-$(CONFIG_SND_SOC_BT_SCO)	+= snd-soc-bt-sco.o
 obj-$(CONFIG_SND_SOC_DMIC)	+= snd-soc-dmic.o
 obj-$(CONFIG_SND_SOC_ES8328)	+= snd-soc-es8328.o
 obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
@@ -339,6 +343,7 @@
 obj-$(CONFIG_SND_SOC_TAS2552)	+= snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)	+= snd-soc-tas571x.o
+obj-$(CONFIG_SND_SOC_TAS5720)	+= snd-soc-tas5720.o
 obj-$(CONFIG_SND_SOC_TFA9879)	+= snd-soc-tfa9879.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23)	+= snd-soc-tlv320aic23.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C)	+= snd-soc-tlv320aic23-i2c.o
@@ -346,6 +351,8 @@
 obj-$(CONFIG_SND_SOC_TLV320AIC26)	+= snd-soc-tlv320aic26.o
 obj-$(CONFIG_SND_SOC_TLV320AIC31XX)     += snd-soc-tlv320aic31xx.o
 obj-$(CONFIG_SND_SOC_TLV320AIC32X4)     += snd-soc-tlv320aic32x4.o
+obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C)	+= snd-soc-tlv320aic32x4-i2c.o
+obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI)	+= snd-soc-tlv320aic32x4-spi.o
 obj-$(CONFIG_SND_SOC_TLV320AIC3X)	+= snd-soc-tlv320aic3x.o
 obj-$(CONFIG_SND_SOC_TLV320DAC33)	+= snd-soc-tlv320dac33.o
 obj-$(CONFIG_SND_SOC_TS3A227E)	+= snd-soc-ts3a227e.o
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 1ee8506..4d8b9e4 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -560,6 +560,7 @@
 	.max_register		= FIL1_3,
 	.reg_defaults		= ak4642_reg,
 	.num_reg_defaults	= NUM_AK4642_REG_DEFAULTS,
+	.cache_type		= REGCACHE_RBTREE,
 };
 
 static const struct regmap_config ak4643_regmap = {
@@ -568,6 +569,7 @@
 	.max_register		= SPK_MS,
 	.reg_defaults		= ak4643_reg,
 	.num_reg_defaults	= ARRAY_SIZE(ak4643_reg),
+	.cache_type		= REGCACHE_RBTREE,
 };
 
 static const struct regmap_config ak4648_regmap = {
@@ -576,6 +578,7 @@
 	.max_register		= EQ_FBEQE,
 	.reg_defaults		= ak4648_reg,
 	.num_reg_defaults	= ARRAY_SIZE(ak4648_reg),
+	.cache_type		= REGCACHE_RBTREE,
 };
 
 static const struct ak4642_drvdata ak4642_drvdata = {
diff --git a/sound/soc/codecs/max98371.c b/sound/soc/codecs/max98371.c
new file mode 100644
index 0000000..cf0a39b
--- /dev/null
+++ b/sound/soc/codecs/max98371.c
@@ -0,0 +1,442 @@
+/*
+ * max98371.c -- ALSA SoC Stereo MAX98371 driver
+ *
+ * Copyright 2015-16 Maxim Integrated Products
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "max98371.h"
+
+static const char *const monomix_text[] = {
+	"Left", "Right", "LeftRightDiv2",
+};
+
+static const char *const hpf_cutoff_txt[] = {
+	"Disable", "DC Block", "50Hz",
+	"100Hz", "200Hz", "400Hz", "800Hz",
+};
+
+static SOC_ENUM_SINGLE_DECL(max98371_monomix, MAX98371_MONOMIX_CFG, 0,
+		monomix_text);
+
+static SOC_ENUM_SINGLE_DECL(max98371_hpf_cutoff, MAX98371_HPF, 0,
+		hpf_cutoff_txt);
+
+static const DECLARE_TLV_DB_RANGE(max98371_dht_min_gain,
+	0, 1, TLV_DB_SCALE_ITEM(537, 66, 0),
+	2, 3, TLV_DB_SCALE_ITEM(677, 82, 0),
+	4, 5, TLV_DB_SCALE_ITEM(852, 104, 0),
+	6, 7, TLV_DB_SCALE_ITEM(1072, 131, 0),
+	8, 9, TLV_DB_SCALE_ITEM(1350, 165, 0),
+	10, 11, TLV_DB_SCALE_ITEM(1699, 101, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98371_dht_max_gain,
+	0, 1, TLV_DB_SCALE_ITEM(537, 66, 0),
+	2, 3, TLV_DB_SCALE_ITEM(677, 82, 0),
+	4, 5, TLV_DB_SCALE_ITEM(852, 104, 0),
+	6, 7, TLV_DB_SCALE_ITEM(1072, 131, 0),
+	8, 9, TLV_DB_SCALE_ITEM(1350, 165, 0),
+	10, 11, TLV_DB_SCALE_ITEM(1699, 208, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98371_dht_rot_gain,
+	0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
+	2, 6, TLV_DB_SCALE_ITEM(-100, -100, 0),
+	7, 8, TLV_DB_SCALE_ITEM(-800, -200, 0),
+	9, 11, TLV_DB_SCALE_ITEM(-1200, -300, 0),
+	12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
+	14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+);
+
+static const struct reg_default max98371_reg[] = {
+	{ 0x01, 0x00 },
+	{ 0x02, 0x00 },
+	{ 0x03, 0x00 },
+	{ 0x04, 0x00 },
+	{ 0x05, 0x00 },
+	{ 0x06, 0x00 },
+	{ 0x07, 0x00 },
+	{ 0x08, 0x00 },
+	{ 0x09, 0x00 },
+	{ 0x0A, 0x00 },
+	{ 0x10, 0x06 },
+	{ 0x11, 0x08 },
+	{ 0x14, 0x80 },
+	{ 0x15, 0x00 },
+	{ 0x16, 0x00 },
+	{ 0x18, 0x00 },
+	{ 0x19, 0x00 },
+	{ 0x1C, 0x00 },
+	{ 0x1D, 0x00 },
+	{ 0x1E, 0x00 },
+	{ 0x1F, 0x00 },
+	{ 0x20, 0x00 },
+	{ 0x21, 0x00 },
+	{ 0x22, 0x00 },
+	{ 0x23, 0x00 },
+	{ 0x24, 0x00 },
+	{ 0x25, 0x00 },
+	{ 0x26, 0x00 },
+	{ 0x27, 0x00 },
+	{ 0x28, 0x00 },
+	{ 0x29, 0x00 },
+	{ 0x2A, 0x00 },
+	{ 0x2B, 0x00 },
+	{ 0x2C, 0x00 },
+	{ 0x2D, 0x00 },
+	{ 0x2E, 0x0B },
+	{ 0x31, 0x00 },
+	{ 0x32, 0x18 },
+	{ 0x33, 0x00 },
+	{ 0x34, 0x00 },
+	{ 0x36, 0x00 },
+	{ 0x37, 0x00 },
+	{ 0x38, 0x00 },
+	{ 0x39, 0x00 },
+	{ 0x3A, 0x00 },
+	{ 0x3B, 0x00 },
+	{ 0x3C, 0x00 },
+	{ 0x3D, 0x00 },
+	{ 0x3E, 0x00 },
+	{ 0x3F, 0x00 },
+	{ 0x40, 0x00 },
+	{ 0x41, 0x00 },
+	{ 0x42, 0x00 },
+	{ 0x43, 0x00 },
+	{ 0x4A, 0x00 },
+	{ 0x4B, 0x00 },
+	{ 0x4C, 0x00 },
+	{ 0x4D, 0x00 },
+	{ 0x4E, 0x00 },
+	{ 0x50, 0x00 },
+	{ 0x51, 0x00 },
+	{ 0x55, 0x00 },
+	{ 0x58, 0x00 },
+	{ 0x59, 0x00 },
+	{ 0x5C, 0x00 },
+	{ 0xFF, 0x43 },
+};
+
+static bool max98371_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case MAX98371_IRQ_CLEAR1:
+	case MAX98371_IRQ_CLEAR2:
+	case MAX98371_IRQ_CLEAR3:
+	case MAX98371_VERSION:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98371_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case MAX98371_SOFT_RESET:
+		return false;
+	default:
+		return true;
+	}
+}
+
+static const DECLARE_TLV_DB_RANGE(max98371_gain_tlv,
+	0, 7, TLV_DB_SCALE_ITEM(0, 50, 0),
+	8, 10, TLV_DB_SCALE_ITEM(400, 100, 0)
+);
+
+static const DECLARE_TLV_DB_RANGE(max98371_noload_gain_tlv,
+	0, 11, TLV_DB_SCALE_ITEM(950, 100, 0),
+);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -6300, 50, 1);
+
+static const struct snd_kcontrol_new max98371_snd_controls[] = {
+	SOC_SINGLE_TLV("Speaker Volume", MAX98371_GAIN,
+			MAX98371_GAIN_SHIFT, (1<<MAX98371_GAIN_WIDTH)-1, 0,
+			max98371_gain_tlv),
+	SOC_SINGLE_TLV("Digital Volume", MAX98371_DIGITAL_GAIN, 0,
+			(1<<MAX98371_DIGITAL_GAIN_WIDTH)-1, 1, digital_tlv),
+	SOC_SINGLE_TLV("Speaker DHT Max Volume", MAX98371_GAIN,
+			0, (1<<MAX98371_DHT_MAX_WIDTH)-1, 0,
+			max98371_dht_max_gain),
+	SOC_SINGLE_TLV("Speaker DHT Min Volume", MAX98371_DHT_GAIN,
+			0, (1<<MAX98371_DHT_GAIN_WIDTH)-1, 0,
+			max98371_dht_min_gain),
+	SOC_SINGLE_TLV("Speaker DHT Rotation Volume", MAX98371_DHT_GAIN,
+			0, (1<<MAX98371_DHT_ROT_WIDTH)-1, 0,
+			max98371_dht_rot_gain),
+	SOC_SINGLE("DHT Attack Step", MAX98371_DHT, MAX98371_DHT_STEP, 3, 0),
+	SOC_SINGLE("DHT Attack Rate", MAX98371_DHT, 0, 7, 0),
+	SOC_ENUM("Monomix Select", max98371_monomix),
+	SOC_ENUM("HPF Cutoff", max98371_hpf_cutoff),
+};
+
+static int max98371_dai_set_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct max98371_priv *max98371 = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		dev_err(codec->dev, "DAI clock mode unsupported");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		val |= 0;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		val |= MAX98371_DAI_RIGHT;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		val |= MAX98371_DAI_LEFT;
+		break;
+	default:
+		dev_err(codec->dev, "DAI wrong mode unsupported");
+		return -EINVAL;
+	}
+	regmap_update_bits(max98371->regmap, MAX98371_FMT,
+			MAX98371_FMT_MODE_MASK, val);
+	return 0;
+}
+
+static int max98371_dai_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct max98371_priv *max98371 = snd_soc_codec_get_drvdata(codec);
+	int blr_clk_ratio, ch_size, channels = params_channels(params);
+	int rate = params_rate(params);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		regmap_update_bits(max98371->regmap, MAX98371_FMT,
+				MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_16);
+		ch_size = 8;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		regmap_update_bits(max98371->regmap, MAX98371_FMT,
+				MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_16);
+		ch_size = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		regmap_update_bits(max98371->regmap, MAX98371_FMT,
+				MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_32);
+		ch_size = 24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		regmap_update_bits(max98371->regmap, MAX98371_FMT,
+				MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_32);
+		ch_size = 32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* BCLK/LRCLK ratio calculation */
+	blr_clk_ratio = channels * ch_size;
+	switch (blr_clk_ratio) {
+	case 32:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_DAI_CLK,
+			MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_32);
+		break;
+	case 48:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_DAI_CLK,
+			MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_48);
+		break;
+	case 64:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_DAI_CLK,
+			MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_64);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (rate) {
+	case 32000:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_SPK_SR,
+			MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_32);
+		break;
+	case 44100:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_SPK_SR,
+			MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_44);
+		break;
+	case 48000:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_SPK_SR,
+			MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_48);
+		break;
+	case 88200:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_SPK_SR,
+			MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_88);
+		break;
+	case 96000:
+		regmap_update_bits(max98371->regmap,
+			MAX98371_SPK_SR,
+			MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_96);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* enabling both the RX channels*/
+	regmap_update_bits(max98371->regmap, MAX98371_MONOMIX_SRC,
+			MAX98371_MONOMIX_SRC_MASK, MONOMIX_RX_0_1);
+	regmap_update_bits(max98371->regmap, MAX98371_DAI_CHANNEL,
+			MAX98371_CHANNEL_MASK, MAX98371_CHANNEL_MASK);
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget max98371_dapm_widgets[] = {
+	SND_SOC_DAPM_DAC("DAC", NULL, MAX98371_SPK_ENABLE, 0, 0),
+	SND_SOC_DAPM_SUPPLY("Global Enable", MAX98371_GLOBAL_ENABLE,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+};
+
+static const struct snd_soc_dapm_route max98371_audio_map[] = {
+	{"DAC", NULL, "HiFi Playback"},
+	{"SPK_OUT", NULL, "DAC"},
+	{"SPK_OUT", NULL, "Global Enable"},
+};
+
+#define MAX98371_RATES SNDRV_PCM_RATE_8000_48000
+#define MAX98371_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
+		SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98371_dai_ops = {
+	.set_fmt = max98371_dai_set_fmt,
+	.hw_params = max98371_dai_hw_params,
+};
+
+static struct snd_soc_dai_driver max98371_dai[] = {
+	{
+		.name = "max98371-aif1",
+		.playback = {
+			.stream_name = "HiFi Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = MAX98371_FORMATS,
+		},
+		.ops = &max98371_dai_ops,
+	}
+};
+
+static const struct snd_soc_codec_driver max98371_codec = {
+	.controls = max98371_snd_controls,
+	.num_controls = ARRAY_SIZE(max98371_snd_controls),
+	.dapm_routes = max98371_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(max98371_audio_map),
+	.dapm_widgets = max98371_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(max98371_dapm_widgets),
+};
+
+static const struct regmap_config max98371_regmap = {
+	.reg_bits         = 8,
+	.val_bits         = 8,
+	.max_register     = MAX98371_VERSION,
+	.reg_defaults     = max98371_reg,
+	.num_reg_defaults = ARRAY_SIZE(max98371_reg),
+	.volatile_reg     = max98371_volatile_register,
+	.readable_reg     = max98371_readable_register,
+	.cache_type       = REGCACHE_RBTREE,
+};
+
+static int max98371_i2c_probe(struct i2c_client *i2c,
+		const struct i2c_device_id *id)
+{
+	struct max98371_priv *max98371;
+	int ret, reg;
+
+	max98371 = devm_kzalloc(&i2c->dev,
+			sizeof(*max98371), GFP_KERNEL);
+	if (!max98371)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, max98371);
+	max98371->regmap = devm_regmap_init_i2c(i2c, &max98371_regmap);
+	if (IS_ERR(max98371->regmap)) {
+		ret = PTR_ERR(max98371->regmap);
+		dev_err(&i2c->dev,
+				"Failed to allocate regmap: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_read(max98371->regmap, MAX98371_VERSION, &reg);
+	if (ret < 0) {
+		dev_info(&i2c->dev, "device error %d\n", ret);
+		return ret;
+	}
+	dev_info(&i2c->dev, "device version %x\n", reg);
+
+	ret = snd_soc_register_codec(&i2c->dev, &max98371_codec,
+			max98371_dai, ARRAY_SIZE(max98371_dai));
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+		return ret;
+	}
+	return ret;
+}
+
+static int max98371_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	return 0;
+}
+
+static const struct i2c_device_id max98371_i2c_id[] = {
+	{ "max98371", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
+
+static const struct of_device_id max98371_of_match[] = {
+	{ .compatible = "maxim,max98371", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, max98371_of_match);
+
+static struct i2c_driver max98371_i2c_driver = {
+	.driver = {
+		.name = "max98371",
+		.owner = THIS_MODULE,
+		.pm = NULL,
+		.of_match_table = of_match_ptr(max98371_of_match),
+	},
+	.probe  = max98371_i2c_probe,
+	.remove = max98371_i2c_remove,
+	.id_table = max98371_i2c_id,
+};
+
+module_i2c_driver(max98371_i2c_driver);
+
+MODULE_AUTHOR("anish kumar <yesanishhere@gmail.com>");
+MODULE_DESCRIPTION("ALSA SoC MAX98371 driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98371.h b/sound/soc/codecs/max98371.h
new file mode 100644
index 0000000..9f63309
--- /dev/null
+++ b/sound/soc/codecs/max98371.h
@@ -0,0 +1,67 @@
+/*
+ * max98371.h -- MAX98371 ALSA SoC Audio driver
+ *
+ * Copyright 2011-2012 Maxim Integrated Products
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MAX98371_H
+#define _MAX98371_H
+
+#define MAX98371_IRQ_CLEAR1			0x01
+#define MAX98371_IRQ_CLEAR2			0x02
+#define MAX98371_IRQ_CLEAR3			0x03
+#define MAX98371_DAI_CLK			0x10
+#define MAX98371_DAI_BSEL_MASK			0xF
+#define MAX98371_DAI_BSEL_32			2
+#define MAX98371_DAI_BSEL_48			3
+#define MAX98371_DAI_BSEL_64			4
+#define MAX98371_SPK_SR				0x11
+#define MAX98371_SPK_SR_MASK			0xF
+#define MAX98371_SPK_SR_32			6
+#define MAX98371_SPK_SR_44			7
+#define MAX98371_SPK_SR_48			8
+#define MAX98371_SPK_SR_88			10
+#define MAX98371_SPK_SR_96			11
+#define MAX98371_DAI_CHANNEL			0x15
+#define MAX98371_CHANNEL_MASK			0x3
+#define MAX98371_MONOMIX_SRC			0x18
+#define MAX98371_MONOMIX_CFG			0x19
+#define MAX98371_HPF				0x1C
+#define MAX98371_MONOMIX_SRC_MASK		0xFF
+#define MONOMIX_RX_0_1				((0x1)<<(4))
+#define M98371_DAI_CHANNEL_I2S			0x3
+#define MAX98371_DIGITAL_GAIN			0x2D
+#define MAX98371_DIGITAL_GAIN_WIDTH		0x7
+#define MAX98371_GAIN				0x2E
+#define MAX98371_GAIN_SHIFT			0x4
+#define MAX98371_GAIN_WIDTH			0x4
+#define MAX98371_DHT_MAX_WIDTH			4
+#define MAX98371_FMT				0x14
+#define MAX98371_CHANSZ_WIDTH			6
+#define MAX98371_FMT_MASK		        ((0x3)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_FMT_MODE_MASK		        ((0x7)<<(3))
+#define MAX98371_DAI_LEFT		        ((0x1)<<(3))
+#define MAX98371_DAI_RIGHT		        ((0x2)<<(3))
+#define MAX98371_DAI_CHANSZ_16                  ((1)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_DAI_CHANSZ_24                  ((2)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_DAI_CHANSZ_32                  ((3)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_DHT  0x32
+#define MAX98371_DHT_STEP			0x3
+#define MAX98371_DHT_GAIN			0x31
+#define MAX98371_DHT_GAIN_WIDTH			0x4
+#define MAX98371_DHT_ROT_WIDTH			0x4
+#define MAX98371_SPK_ENABLE			0x4A
+#define MAX98371_GLOBAL_ENABLE			0x50
+#define MAX98371_SOFT_RESET			0x51
+#define MAX98371_VERSION			0xFF
+
+
+struct max98371_priv {
+	struct regmap *regmap;
+	struct snd_soc_codec *codec;
+};
+#endif
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index a1aaffc..f80cfe4 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -276,6 +276,8 @@
 		} else {
 			*mic = false;
 			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20);
+			regmap_update_bits(rt298->regmap,
+				RT298_CBJ_CTRL1, 0x0400, 0x0000);
 		}
 	} else {
 		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
@@ -482,6 +484,26 @@
 		snd_soc_update_bits(codec,
 			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
 			0x7080, 0x7000);
+		/* If MCLK doesn't exist, reset AD filter */
+		if (!(snd_soc_read(codec, RT298_VAD_CTRL) & 0x200)) {
+			dev_dbg(codec->dev, "no MCLK, resetting AD filter\n");
+			switch (nid) {
+			case RT298_ADC_IN1:
+				snd_soc_update_bits(codec,
+					RT298_D_FILTER_CTRL, 0x2, 0x2);
+				mdelay(10);
+				snd_soc_update_bits(codec,
+					RT298_D_FILTER_CTRL, 0x2, 0x0);
+				break;
+			case RT298_ADC_IN2:
+				snd_soc_update_bits(codec,
+					RT298_D_FILTER_CTRL, 0x4, 0x4);
+				mdelay(10);
+				snd_soc_update_bits(codec,
+					RT298_D_FILTER_CTRL, 0x4, 0x0);
+				break;
+			}
+		}
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
 		snd_soc_update_bits(codec,
@@ -520,30 +542,12 @@
 	return 0;
 }
 
-static int rt298_vref_event(struct snd_soc_dapm_widget *w,
-			     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		snd_soc_update_bits(codec,
-			RT298_CBJ_CTRL1, 0x0400, 0x0000);
-		mdelay(50);
-		break;
-	default:
-		return 0;
-	}
-
-	return 0;
-}
-
 static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = {
 
 	SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1,
 		12, 1, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1,
-		0, 1, rt298_vref_event, SND_SOC_DAPM_PRE_PMU),
+		0, 1, NULL, 0),
 	SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2,
 		1, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2,
@@ -934,18 +938,9 @@
 		}
 		break;
 
-	case SND_SOC_BIAS_ON:
-		mdelay(30);
-		snd_soc_update_bits(codec,
-			RT298_CBJ_CTRL1, 0x0400, 0x0400);
-
-		break;
-
 	case SND_SOC_BIAS_STANDBY:
 		snd_soc_write(codec,
 			RT298_SET_AUDIO_POWER, AC_PWRST_D3);
-		snd_soc_update_bits(codec,
-			RT298_CBJ_CTRL1, 0x0400, 0x0000);
 		break;
 
 	default:
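
The rt298 hunk above recovers the AD filter when MCLK is absent by pulsing the per-ADC reset bit in RT298_D_FILTER_CTRL: set the bit, wait about 10 ms, then clear it. Below is a user-space sketch of that assert/delay/de-assert pattern; a plain variable stands in for the register, the delay is only noted in a comment, and the helper names are invented.

	#include <stdio.h>

	static unsigned int d_filter_ctrl;	/* stand-in for RT298_D_FILTER_CTRL */

	static void update_bits(unsigned int *reg, unsigned int mask, unsigned int val)
	{
		*reg = (*reg & ~mask) | (val & mask);
	}

	/* Pulse a reset bit: assert, (sleep ~10 ms in the driver), de-assert. */
	static void pulse_reset(unsigned int mask)
	{
		update_bits(&d_filter_ctrl, mask, mask);
		/* mdelay(10) in the driver */
		update_bits(&d_filter_ctrl, mask, 0);
	}

	int main(void)
	{
		pulse_reset(0x2);	/* ADC IN1 filter bit, per the hunk above */
		printf("D_FILTER_CTRL = 0x%x\n", d_filter_ctrl);
		return 0;
	}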
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
index d66f884..3638f3d 100644
--- a/sound/soc/codecs/rt298.h
+++ b/sound/soc/codecs/rt298.h
@@ -137,6 +137,7 @@
 #define RT298_A_BIAS_CTRL2	0x02
 #define RT298_POWER_CTRL1	0x03
 #define RT298_A_BIAS_CTRL3	0x04
+#define RT298_D_FILTER_CTRL	0x05
 #define RT298_POWER_CTRL2	0x08
 #define RT298_I2S_CTRL1		0x09
 #define RT298_I2S_CTRL2		0x0a
@@ -148,6 +149,7 @@
 #define RT298_IRQ_CTRL		0x33
 #define RT298_WIND_FILTER_CTRL	0x46
 #define RT298_PLL_CTRL1		0x49
+#define RT298_VAD_CTRL		0x4e
 #define RT298_CBJ_CTRL1		0x4f
 #define RT298_CBJ_CTRL2		0x50
 #define RT298_PLL_CTRL		0x63
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 6021226..da9483c 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -1241,60 +1241,46 @@
 		regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
 		asrc_setting = (asrc_setting & RT5677_AD_STO1_CLK_SEL_MASK) >>
 				RT5677_AD_STO1_CLK_SEL_SFT;
-		if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
-			asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
-			return 1;
 		break;
 
 	case 10:
 		regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
 		asrc_setting = (asrc_setting & RT5677_AD_STO2_CLK_SEL_MASK) >>
 				RT5677_AD_STO2_CLK_SEL_SFT;
-		if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
-			asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
-			return 1;
 		break;
 
 	case 9:
 		regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
 		asrc_setting = (asrc_setting & RT5677_AD_STO3_CLK_SEL_MASK) >>
 				RT5677_AD_STO3_CLK_SEL_SFT;
-		if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
-			asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
-			return 1;
 		break;
 
 	case 8:
 		regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
 		asrc_setting = (asrc_setting & RT5677_AD_STO4_CLK_SEL_MASK) >>
 			RT5677_AD_STO4_CLK_SEL_SFT;
-		if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
-			asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
-			return 1;
 		break;
 
 	case 7:
 		regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting);
 		asrc_setting = (asrc_setting & RT5677_AD_MONOL_CLK_SEL_MASK) >>
 			RT5677_AD_MONOL_CLK_SEL_SFT;
-		if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
-			asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
-			return 1;
 		break;
 
 	case 6:
 		regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting);
 		asrc_setting = (asrc_setting & RT5677_AD_MONOR_CLK_SEL_MASK) >>
 			RT5677_AD_MONOR_CLK_SEL_SFT;
-		if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
-			asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
-			return 1;
 		break;
 
 	default:
-		break;
+		return 0;
 	}
 
+	if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
+	    asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
+		return 1;
+
 	return 0;
 }
 
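
The rt5677 change above factors the shared I2S1..I2S6 range test out of every case: each case now only extracts the per-source clock-select field, and a single comparison after the switch produces the result. A reduced sketch of the same shape is below; the table of example settings and the function names are made up, only the range-check pattern mirrors the driver.

	#include <stdio.h>

	enum { CLK_SEL_I2S1_ASRC = 1, CLK_SEL_I2S6_ASRC = 6 };

	/* Stand-in for the per-case regmap read + mask/shift; values are made up. */
	static unsigned int read_clk_sel(int source)
	{
		static const unsigned int settings[] = { 0, 3, 7, 0 };

		return settings[source & 3];
	}

	static int uses_asrc_clock(int source)
	{
		unsigned int sel = read_clk_sel(source);

		/* Single shared range check, as after the switch in the hunk above. */
		return sel >= CLK_SEL_I2S1_ASRC && sel <= CLK_SEL_I2S6_ASRC;
	}

	int main(void)
	{
		printf("source 1 uses ASRC clock: %d\n", uses_asrc_clock(1));
		printf("source 2 uses ASRC clock: %d\n", uses_asrc_clock(2));
		return 0;
	}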
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 39307ad..b8d19b7 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -4,6 +4,9 @@
  * Copyright (C) 2015 Google, Inc.
  * Copyright (c) 2013 Daniel Mack <zonque@gmail.com>
  *
+ * TAS5721 support:
+ * Copyright (C) 2016 Petr Kulhavy, Barix AG <petr@barix.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -57,6 +60,10 @@
 	case TAS571X_CH1_VOL_REG:
 	case TAS571X_CH2_VOL_REG:
 		return priv->chip->vol_reg_size;
+	case TAS571X_INPUT_MUX_REG:
+	case TAS571X_CH4_SRC_SELECT_REG:
+	case TAS571X_PWM_MUX_REG:
+		return 4;
 	default:
 		return 1;
 	}
@@ -167,6 +174,23 @@
 				  TAS571X_SDI_FMT_MASK, val);
 }
 
+static int tas571x_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	u8 sysctl2;
+	int ret;
+
+	sysctl2 = mute ? TAS571X_SYS_CTRL_2_SDN_MASK : 0;
+
+	ret = snd_soc_update_bits(codec,
+				  TAS571X_SYS_CTRL_2_REG,
+				  TAS571X_SYS_CTRL_2_SDN_MASK,
+				  sysctl2);
+	usleep_range(1000, 2000);
+
+	return ret;
+}
+
 static int tas571x_set_bias_level(struct snd_soc_codec *codec,
 				  enum snd_soc_bias_level level)
 {
@@ -214,6 +238,7 @@
 static const struct snd_soc_dai_ops tas571x_dai_ops = {
 	.set_fmt	= tas571x_set_dai_fmt,
 	.hw_params	= tas571x_hw_params,
+	.digital_mute	= tas571x_mute,
 };
 
 static const char *const tas5711_supply_names[] = {
@@ -241,6 +266,26 @@
 		   1, 1),
 };
 
+static const struct regmap_range tas571x_readonly_regs_range[] = {
+	regmap_reg_range(TAS571X_CLK_CTRL_REG,  TAS571X_DEV_ID_REG),
+};
+
+static const struct regmap_range tas571x_volatile_regs_range[] = {
+	regmap_reg_range(TAS571X_CLK_CTRL_REG,  TAS571X_ERR_STATUS_REG),
+	regmap_reg_range(TAS571X_OSC_TRIM_REG,  TAS571X_OSC_TRIM_REG),
+};
+
+static const struct regmap_access_table tas571x_write_regs = {
+	.no_ranges =	tas571x_readonly_regs_range,
+	.n_no_ranges =	ARRAY_SIZE(tas571x_readonly_regs_range),
+};
+
+static const struct regmap_access_table tas571x_volatile_regs = {
+	.yes_ranges =	tas571x_volatile_regs_range,
+	.n_yes_ranges =	ARRAY_SIZE(tas571x_volatile_regs_range),
+
+};
+
 static const struct reg_default tas5711_reg_defaults[] = {
 	{ 0x04, 0x05 },
 	{ 0x05, 0x40 },
@@ -260,6 +305,8 @@
 	.reg_defaults			= tas5711_reg_defaults,
 	.num_reg_defaults		= ARRAY_SIZE(tas5711_reg_defaults),
 	.cache_type			= REGCACHE_RBTREE,
+	.wr_table			= &tas571x_write_regs,
+	.volatile_table			= &tas571x_volatile_regs,
 };
 
 static const struct tas571x_chip tas5711_chip = {
@@ -314,6 +361,8 @@
 	.reg_defaults			= tas5717_reg_defaults,
 	.num_reg_defaults		= ARRAY_SIZE(tas5717_reg_defaults),
 	.cache_type			= REGCACHE_RBTREE,
+	.wr_table			= &tas571x_write_regs,
+	.volatile_table			= &tas571x_volatile_regs,
 };
 
 /* This entry is reused for tas5719 as the software interface is identical. */
@@ -326,6 +375,77 @@
 	.vol_reg_size			= 2,
 };
 
+static const char *const tas5721_supply_names[] = {
+	"AVDD",
+	"DVDD",
+	"DRVDD",
+	"PVDD",
+};
+
+static const struct snd_kcontrol_new tas5721_controls[] = {
+	SOC_SINGLE_TLV("Master Volume",
+		       TAS571X_MVOL_REG,
+		       0, 0xff, 1, tas5711_volume_tlv),
+	SOC_DOUBLE_R_TLV("Speaker Volume",
+			 TAS571X_CH1_VOL_REG,
+			 TAS571X_CH2_VOL_REG,
+			 0, 0xff, 1, tas5711_volume_tlv),
+	SOC_DOUBLE("Speaker Switch",
+		   TAS571X_SOFT_MUTE_REG,
+		   TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
+		   1, 1),
+};
+
+static const struct reg_default tas5721_reg_defaults[] = {
+	{TAS571X_CLK_CTRL_REG,		0x6c},
+	{TAS571X_DEV_ID_REG,		0x00},
+	{TAS571X_ERR_STATUS_REG,	0x00},
+	{TAS571X_SYS_CTRL_1_REG,	0xa0},
+	{TAS571X_SDI_REG,		0x05},
+	{TAS571X_SYS_CTRL_2_REG,	0x40},
+	{TAS571X_SOFT_MUTE_REG,		0x00},
+	{TAS571X_MVOL_REG,		0xff},
+	{TAS571X_CH1_VOL_REG,		0x30},
+	{TAS571X_CH2_VOL_REG,		0x30},
+	{TAS571X_CH3_VOL_REG,		0x30},
+	{TAS571X_VOL_CFG_REG,		0x91},
+	{TAS571X_MODULATION_LIMIT_REG,	0x02},
+	{TAS571X_IC_DELAY_CH1_REG,	0xac},
+	{TAS571X_IC_DELAY_CH2_REG,	0x54},
+	{TAS571X_IC_DELAY_CH3_REG,	0xac},
+	{TAS571X_IC_DELAY_CH4_REG,	0x54},
+	{TAS571X_PWM_CH_SDN_GROUP_REG,	0x30},
+	{TAS571X_START_STOP_PERIOD_REG,	0x0f},
+	{TAS571X_OSC_TRIM_REG,		0x82},
+	{TAS571X_BKND_ERR_REG,		0x02},
+	{TAS571X_INPUT_MUX_REG,		0x17772},
+	{TAS571X_CH4_SRC_SELECT_REG,	0x4303},
+	{TAS571X_PWM_MUX_REG,		0x1021345},
+};
+
+static const struct regmap_config tas5721_regmap_config = {
+	.reg_bits			= 8,
+	.val_bits			= 32,
+	.max_register			= 0xff,
+	.reg_read			= tas571x_reg_read,
+	.reg_write			= tas571x_reg_write,
+	.reg_defaults			= tas5721_reg_defaults,
+	.num_reg_defaults		= ARRAY_SIZE(tas5721_reg_defaults),
+	.cache_type			= REGCACHE_RBTREE,
+	.wr_table			= &tas571x_write_regs,
+	.volatile_table			= &tas571x_volatile_regs,
+};
+
+
+static const struct tas571x_chip tas5721_chip = {
+	.supply_names			= tas5721_supply_names,
+	.num_supply_names		= ARRAY_SIZE(tas5721_supply_names),
+	.controls			= tas5711_controls,
+	.num_controls			= ARRAY_SIZE(tas5711_controls),
+	.regmap_config			= &tas5721_regmap_config,
+	.vol_reg_size			= 1,
+};
+
 static const struct snd_soc_dapm_widget tas571x_dapm_widgets[] = {
 	SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0),
@@ -386,11 +506,10 @@
 	i2c_set_clientdata(client, priv);
 
 	of_id = of_match_device(tas571x_of_match, dev);
-	if (!of_id) {
-		dev_err(dev, "Unknown device type\n");
-		return -EINVAL;
-	}
-	priv->chip = of_id->data;
+	if (of_id)
+		priv->chip = of_id->data;
+	else
+		priv->chip = (void *) id->driver_data;
 
 	priv->mclk = devm_clk_get(dev, "mclk");
 	if (IS_ERR(priv->mclk) && PTR_ERR(priv->mclk) != -ENOENT) {
@@ -445,10 +564,6 @@
 	if (ret)
 		return ret;
 
-	ret = regmap_update_bits(priv->regmap, TAS571X_SYS_CTRL_2_REG,
-				 TAS571X_SYS_CTRL_2_SDN_MASK, 0);
-	if (ret)
-		return ret;
 
 	memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
 	priv->codec_driver.controls = priv->chip->controls;
@@ -486,14 +601,16 @@
 	{ .compatible = "ti,tas5711", .data = &tas5711_chip, },
 	{ .compatible = "ti,tas5717", .data = &tas5717_chip, },
 	{ .compatible = "ti,tas5719", .data = &tas5717_chip, },
+	{ .compatible = "ti,tas5721", .data = &tas5721_chip, },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, tas571x_of_match);
 
 static const struct i2c_device_id tas571x_i2c_id[] = {
-	{ "tas5711", 0 },
-	{ "tas5717", 0 },
-	{ "tas5719", 0 },
+	{ "tas5711", (kernel_ulong_t) &tas5711_chip },
+	{ "tas5717", (kernel_ulong_t) &tas5717_chip },
+	{ "tas5719", (kernel_ulong_t) &tas5717_chip },
+	{ "tas5721", (kernel_ulong_t) &tas5721_chip },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, tas571x_i2c_id);
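
With the probe change above, the chip description can come either from the OF match data or, when the device is instantiated by I2C name alone, from the id table's driver_data, which now holds pointers to the tas57xx chip structs. A minimal sketch of that dual-source lookup follows; the struct and function names here are stand-ins, not the driver's own types.

	#include <stdio.h>

	struct chip_desc { const char *name; int vol_reg_size; };

	static const struct chip_desc tas5711_desc = { "tas5711", 1 };
	static const struct chip_desc tas5721_desc = { "tas5721", 1 };

	/* Minimal stand-ins for of_device_id / i2c_device_id driver_data. */
	struct of_match { const void *data; };
	struct i2c_id   { unsigned long driver_data; };

	static const struct chip_desc *pick_chip(const struct of_match *of_id,
						 const struct i2c_id *id)
	{
		/* Prefer OF match data; fall back to the I2C id table. */
		if (of_id)
			return of_id->data;
		return (const struct chip_desc *)id->driver_data;
	}

	int main(void)
	{
		struct of_match of = { &tas5711_desc };
		struct i2c_id id   = { (unsigned long)&tas5721_desc };

		printf("via OF:  %s\n", pick_chip(&of, &id)->name);
		printf("via I2C: %s\n", pick_chip(NULL, &id)->name);
		return 0;
	}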
diff --git a/sound/soc/codecs/tas571x.h b/sound/soc/codecs/tas571x.h
index 0aee471..cf800c3 100644
--- a/sound/soc/codecs/tas571x.h
+++ b/sound/soc/codecs/tas571x.h
@@ -13,6 +13,10 @@
 #define _TAS571X_H
 
 /* device registers */
+#define TAS571X_CLK_CTRL_REG		0x00
+#define TAS571X_DEV_ID_REG		0x01
+#define TAS571X_ERR_STATUS_REG		0x02
+#define TAS571X_SYS_CTRL_1_REG		0x03
 #define TAS571X_SDI_REG			0x04
 #define TAS571X_SDI_FMT_MASK		0x0f
 
@@ -27,7 +31,25 @@
 #define TAS571X_MVOL_REG		0x07
 #define TAS571X_CH1_VOL_REG		0x08
 #define TAS571X_CH2_VOL_REG		0x09
+#define TAS571X_CH3_VOL_REG		0x0a
+#define TAS571X_VOL_CFG_REG		0x0e
+#define TAS571X_MODULATION_LIMIT_REG	0x10
+#define TAS571X_IC_DELAY_CH1_REG	0x11
+#define TAS571X_IC_DELAY_CH2_REG	0x12
+#define TAS571X_IC_DELAY_CH3_REG	0x13
+#define TAS571X_IC_DELAY_CH4_REG	0x14
 
+#define TAS571X_PWM_CH_SDN_GROUP_REG	0x19	/* N/A on TAS5717, TAS5719 */
+#define TAS571X_PWM_CH1_SDN_MASK	(1<<0)
+#define TAS571X_PWM_CH2_SDN_MASK	(1<<1)
+#define TAS571X_PWM_CH3_SDN_MASK	(1<<2)
+#define TAS571X_PWM_CH4_SDN_MASK	(1<<3)
+
+#define TAS571X_START_STOP_PERIOD_REG	0x1a
 #define TAS571X_OSC_TRIM_REG		0x1b
+#define TAS571X_BKND_ERR_REG		0x1c
+#define TAS571X_INPUT_MUX_REG		0x20
+#define TAS571X_CH4_SRC_SELECT_REG	0x21
+#define TAS571X_PWM_MUX_REG		0x25
 
 #endif /* _TAS571X_H */
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
new file mode 100644
index 0000000..f54fb46
--- /dev/null
+++ b/sound/soc/codecs/tas5720.c
@@ -0,0 +1,620 @@
+/*
+ * tas5720.c - ALSA SoC Texas Instruments TAS5720 Mono Audio Amplifier
+ *
+ * Copyright (C)2015-2016 Texas Instruments Incorporated -  http://www.ti.com
+ *
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas5720.h"
+
+/* Define how often to check (and clear) the fault status register (in ms) */
+#define TAS5720_FAULT_CHECK_INTERVAL		200
+
+static const char * const tas5720_supply_names[] = {
+	"dvdd",		/* Digital power supply. Connect to 3.3-V supply. */
+	"pvdd",		/* Class-D amp and analog power supply (connected). */
+};
+
+#define TAS5720_NUM_SUPPLIES	ARRAY_SIZE(tas5720_supply_names)
+
+struct tas5720_data {
+	struct snd_soc_codec *codec;
+	struct regmap *regmap;
+	struct i2c_client *tas5720_client;
+	struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
+	struct delayed_work fault_check_work;
+	unsigned int last_fault;
+};
+
+static int tas5720_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params,
+			     struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int rate = params_rate(params);
+	bool ssz_ds;
+	int ret;
+
+	switch (rate) {
+	case 44100:
+	case 48000:
+		ssz_ds = false;
+		break;
+	case 88200:
+	case 96000:
+		ssz_ds = true;
+		break;
+	default:
+		dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
+		return -EINVAL;
+	}
+
+	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
+				  TAS5720_SSZ_DS, ssz_ds);
+	if (ret < 0) {
+		dev_err(codec->dev, "error setting sample rate: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tas5720_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	u8 serial_format;
+	int ret;
+
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_vdbg(codec->dev, "DAI Format master is not found\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK |
+		       SND_SOC_DAIFMT_INV_MASK)) {
+	case (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF):
+		/* 1st data bit occurs one BCLK cycle after the frame sync */
+		serial_format = TAS5720_SAIF_I2S;
+		break;
+	case (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF):
+		/*
+		 * Note that although the TAS5720 does not have a dedicated DSP
+		 * mode, it does not care about the LRCLK duty cycle during TDM
+		 * operation. Therefore we can use the device's I2S mode with
+		 * its delaying of the 1st data bit to receive DSP_A formatted
+		 * data. See device datasheet for additional details.
+		 */
+		serial_format = TAS5720_SAIF_I2S;
+		break;
+	case (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF):
+		/*
+		 * Similar to DSP_A, we can use the fact that the TAS5720 does
+		 * not care about the LRCLK duty cycle during TDM to receive
+		 * DSP_B formatted data in LEFTJ mode (no delaying of the 1st
+		 * data bit).
+		 */
+		serial_format = TAS5720_SAIF_LEFTJ;
+		break;
+	case (SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF):
+		/* No delay after the frame sync */
+		serial_format = TAS5720_SAIF_LEFTJ;
+		break;
+	default:
+		dev_vdbg(codec->dev, "DAI Format is not found\n");
+		return -EINVAL;
+	}
+
+	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
+				  TAS5720_SAIF_FORMAT_MASK,
+				  serial_format);
+	if (ret < 0) {
+		dev_err(codec->dev, "error setting SAIF format: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
+				    unsigned int tx_mask, unsigned int rx_mask,
+				    int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int first_slot;
+	int ret;
+
+	if (!tx_mask) {
+		dev_err(codec->dev, "tx masks must not be 0\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Determine the first slot that is being requested. We will only
+	 * use the first slot that is found since the TAS5720 is a mono
+	 * amplifier.
+	 */
+	first_slot = __ffs(tx_mask);
+
+	if (first_slot > 7) {
+		dev_err(codec->dev, "slot selection out of bounds (%u)\n",
+			first_slot);
+		return -EINVAL;
+	}
+
+	/* Enable manual TDM slot selection (instead of I2C ID based) */
+	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
+				  TAS5720_TDM_CFG_SRC, TAS5720_TDM_CFG_SRC);
+	if (ret < 0)
+		goto error_snd_soc_update_bits;
+
+	/* Configure the TDM slot to process audio from */
+	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
+				  TAS5720_TDM_SLOT_SEL_MASK, first_slot);
+	if (ret < 0)
+		goto error_snd_soc_update_bits;
+
+	return 0;
+
+error_snd_soc_update_bits:
+	dev_err(codec->dev, "error configuring TDM mode: %d\n", ret);
+	return ret;
+}
+
+static int tas5720_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int ret;
+
+	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
+				  TAS5720_MUTE, mute ? TAS5720_MUTE : 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "error (un-)muting device: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void tas5720_fault_check_work(struct work_struct *work)
+{
+	struct tas5720_data *tas5720 = container_of(work, struct tas5720_data,
+			fault_check_work.work);
+	struct device *dev = tas5720->codec->dev;
+	unsigned int curr_fault;
+	int ret;
+
+	ret = regmap_read(tas5720->regmap, TAS5720_FAULT_REG, &curr_fault);
+	if (ret < 0) {
+		dev_err(dev, "failed to read FAULT register: %d\n", ret);
+		goto out;
+	}
+
+	/* Check/handle all errors except SAIF clock errors */
+	curr_fault &= TAS5720_OCE | TAS5720_DCE | TAS5720_OTE;
+
+	/*
+	 * Only flag errors once for a given occurrence. This is needed as
+	 * the TAS5720 will take time clearing the fault condition internally
+	 * during which we don't want to bombard the system with the same
+	 * error message over and over.
+	 */
+	if ((curr_fault & TAS5720_OCE) && !(tas5720->last_fault & TAS5720_OCE))
+		dev_crit(dev, "experienced an over current hardware fault\n");
+
+	if ((curr_fault & TAS5720_DCE) && !(tas5720->last_fault & TAS5720_DCE))
+		dev_crit(dev, "experienced a DC detection fault\n");
+
+	if ((curr_fault & TAS5720_OTE) && !(tas5720->last_fault & TAS5720_OTE))
+		dev_crit(dev, "experienced an over temperature fault\n");
+
+	/* Store current fault value so we can detect any changes next time */
+	tas5720->last_fault = curr_fault;
+
+	if (!curr_fault)
+		goto out;
+
+	/*
+	 * Periodically toggle SDZ (shutdown bit) H->L->H to clear any latching
+	 * faults as long as a fault condition persists. Always go through
+	 * the full sequence regardless of the first return value to minimize
+	 * the chance of the device ending up in shutdown mode.
+	 */
+	ret = regmap_write_bits(tas5720->regmap, TAS5720_POWER_CTRL_REG,
+				TAS5720_SDZ, 0);
+	if (ret < 0)
+		dev_err(dev, "failed to write POWER_CTRL register: %d\n", ret);
+
+	ret = regmap_write_bits(tas5720->regmap, TAS5720_POWER_CTRL_REG,
+				TAS5720_SDZ, TAS5720_SDZ);
+	if (ret < 0)
+		dev_err(dev, "failed to write POWER_CTRL register: %d\n", ret);
+
+out:
+	/* Schedule the next fault check at the specified interval */
+	schedule_delayed_work(&tas5720->fault_check_work,
+			      msecs_to_jiffies(TAS5720_FAULT_CHECK_INTERVAL));
+}
+
+static int tas5720_codec_probe(struct snd_soc_codec *codec)
+{
+	struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+	unsigned int device_id;
+	int ret;
+
+	tas5720->codec = codec;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(tas5720->supplies),
+				    tas5720->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
+	if (ret < 0) {
+		dev_err(codec->dev, "failed to read device ID register: %d\n",
+			ret);
+		goto probe_fail;
+	}
+
+	if (device_id != TAS5720_DEVICE_ID) {
+		dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
+			TAS5720_DEVICE_ID, device_id);
+		ret = -ENODEV;
+		goto probe_fail;
+	}
+
+	/* Set device to mute */
+	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
+				  TAS5720_MUTE, TAS5720_MUTE);
+	if (ret < 0)
+		goto error_snd_soc_update_bits;
+
+	/*
+	 * Enter shutdown mode - our default when not playing audio - to
+	 * minimize current consumption. On the TAS5720 there is no real
+	 * downside to doing so, as all device registers are preserved and
+	 * waking the codec (done from a DAPM widget) is quick.
+	 */
+	ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
+				  TAS5720_SDZ, 0);
+	if (ret < 0)
+		goto error_snd_soc_update_bits;
+
+	INIT_DELAYED_WORK(&tas5720->fault_check_work, tas5720_fault_check_work);
+
+	return 0;
+
+error_snd_soc_update_bits:
+	dev_err(codec->dev, "error configuring device registers: %d\n", ret);
+
+probe_fail:
+	regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
+			       tas5720->supplies);
+	return ret;
+}
+
+static int tas5720_codec_remove(struct snd_soc_codec *codec)
+{
+	struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	cancel_delayed_work_sync(&tas5720->fault_check_work);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
+				     tas5720->supplies);
+	if (ret < 0)
+		dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+
+	return ret;
+};
+
+static int tas5720_dac_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	if (event & SND_SOC_DAPM_POST_PMU) {
+		/* Take TAS5720 out of shutdown mode */
+		ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
+					  TAS5720_SDZ, TAS5720_SDZ);
+		if (ret < 0) {
+			dev_err(codec->dev, "error waking codec: %d\n", ret);
+			return ret;
+		}
+
+		/*
+		 * Observe codec shutdown-to-active time. The datasheet only
+		 * lists a nominal value however just use-it as-is without
+		 * additional padding to minimize the delay introduced in
+		 * starting to play audio (actually there is other setup done
+		 * by the ASoC framework that will provide additional delays,
+		 * so we should always be safe).
+		 */
+		msleep(25);
+
+		/* Turn on TAS5720 periodic fault checking/handling */
+		tas5720->last_fault = 0;
+		schedule_delayed_work(&tas5720->fault_check_work,
+				msecs_to_jiffies(TAS5720_FAULT_CHECK_INTERVAL));
+	} else if (event & SND_SOC_DAPM_PRE_PMD) {
+		/* Disable TAS5720 periodic fault checking/handling */
+		cancel_delayed_work_sync(&tas5720->fault_check_work);
+
+		/* Place TAS5720 in shutdown mode to minimize current draw */
+		ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
+					  TAS5720_SDZ, 0);
+		if (ret < 0) {
+			dev_err(codec->dev, "error shutting down codec: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int tas5720_suspend(struct snd_soc_codec *codec)
+{
+	struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	regcache_cache_only(tas5720->regmap, true);
+	regcache_mark_dirty(tas5720->regmap);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
+				     tas5720->supplies);
+	if (ret < 0)
+		dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+
+	return ret;
+}
+
+static int tas5720_resume(struct snd_soc_codec *codec)
+{
+	struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(tas5720->supplies),
+				    tas5720->supplies);
+	if (ret < 0) {
+		dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	regcache_cache_only(tas5720->regmap, false);
+
+	ret = regcache_sync(tas5720->regmap);
+	if (ret < 0) {
+		dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+#else
+#define tas5720_suspend NULL
+#define tas5720_resume NULL
+#endif
+
+static bool tas5720_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case TAS5720_DEVICE_ID_REG:
+	case TAS5720_FAULT_REG:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config tas5720_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.max_register = TAS5720_MAX_REG,
+	.cache_type = REGCACHE_RBTREE,
+	.volatile_reg = tas5720_is_volatile_reg,
+};
+
+/*
+ * DAC analog gain. There are four discrete values to select from, ranging
+ * from 19.2 dB to 26.3dB.
+ */
+static const DECLARE_TLV_DB_RANGE(dac_analog_tlv,
+	0x0, 0x0, TLV_DB_SCALE_ITEM(1920, 0, 0),
+	0x1, 0x1, TLV_DB_SCALE_ITEM(2070, 0, 0),
+	0x2, 0x2, TLV_DB_SCALE_ITEM(2350, 0, 0),
+	0x3, 0x3, TLV_DB_SCALE_ITEM(2630, 0, 0),
+);
+
+/*
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
+ * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
+ * as per device datasheet.
+ */
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+
+static const struct snd_kcontrol_new tas5720_snd_controls[] = {
+	SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+		       TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, dac_tlv),
+	SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
+		       TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
+};
+
+static const struct snd_soc_dapm_widget tas5720_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas5720_dac_event,
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas5720_audio_map[] = {
+	{ "DAC", NULL, "DAC IN" },
+	{ "OUT", NULL, "DAC" },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_tas5720 = {
+	.probe = tas5720_codec_probe,
+	.remove = tas5720_codec_remove,
+	.suspend = tas5720_suspend,
+	.resume = tas5720_resume,
+
+	.controls = tas5720_snd_controls,
+	.num_controls = ARRAY_SIZE(tas5720_snd_controls),
+	.dapm_widgets = tas5720_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
+	.dapm_routes = tas5720_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
+};
+
+/* PCM rates supported by the TAS5720 driver */
+#define TAS5720_RATES	(SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+			 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+/* Formats supported by TAS5720 driver */
+#define TAS5720_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
+			 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops tas5720_speaker_dai_ops = {
+	.hw_params	= tas5720_hw_params,
+	.set_fmt	= tas5720_set_dai_fmt,
+	.set_tdm_slot	= tas5720_set_dai_tdm_slot,
+	.digital_mute	= tas5720_mute,
+};
+
+/*
+ * TAS5720 DAI structure
+ *
+ * Note that we are advertising .playback.channels_max = 2 despite this being
+ * a mono amplifier. The reason is that some serial ports such as TI's McASP
+ * module have a minimum number of channels (2) that they can output.
+ * Advertising more channels than we have allows us to interface with such a
+ * serial port without any real negative side effects, as the TAS5720 simply
+ * ignores any extra channel(s) aside from the one channel that is configured
+ * to be played back.
+ */
+static struct snd_soc_dai_driver tas5720_dai[] = {
+	{
+		.name = "tas5720-amplifier",
+		.playback = {
+			.stream_name = "Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = TAS5720_RATES,
+			.formats = TAS5720_FORMATS,
+		},
+		.ops = &tas5720_speaker_dai_ops,
+	},
+};
+
+static int tas5720_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct tas5720_data *data;
+	int ret;
+	int i;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->tas5720_client = client;
+	data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		ret = PTR_ERR(data->regmap);
+		dev_err(dev, "failed to allocate register map: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
+		data->supplies[i].supply = tas5720_supply_names[i];
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
+				      data->supplies);
+	if (ret != 0) {
+		dev_err(dev, "failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	dev_set_drvdata(dev, data);
+
+	ret = snd_soc_register_codec(&client->dev,
+				     &soc_codec_dev_tas5720,
+				     tas5720_dai, ARRAY_SIZE(tas5720_dai));
+	if (ret < 0) {
+		dev_err(dev, "failed to register codec: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tas5720_remove(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+
+	snd_soc_unregister_codec(dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id tas5720_id[] = {
+	{ "tas5720", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tas5720_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tas5720_of_match[] = {
+	{ .compatible = "ti,tas5720", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tas5720_of_match);
+#endif
+
+static struct i2c_driver tas5720_i2c_driver = {
+	.driver = {
+		.name = "tas5720",
+		.of_match_table = of_match_ptr(tas5720_of_match),
+	},
+	.probe = tas5720_probe,
+	.remove = tas5720_remove,
+	.id_table = tas5720_id,
+};
+
+module_i2c_driver(tas5720_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_DESCRIPTION("TAS5720 Audio amplifier driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas5720.h b/sound/soc/codecs/tas5720.h
new file mode 100644
index 0000000..3d077c7
--- /dev/null
+++ b/sound/soc/codecs/tas5720.h
@@ -0,0 +1,90 @@
+/*
+ * tas5720.h - ALSA SoC Texas Instruments TAS5720 Mono Audio Amplifier
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __TAS5720_H__
+#define __TAS5720_H__
+
+/* Register Address Map */
+#define TAS5720_DEVICE_ID_REG		0x00
+#define TAS5720_POWER_CTRL_REG		0x01
+#define TAS5720_DIGITAL_CTRL1_REG	0x02
+#define TAS5720_DIGITAL_CTRL2_REG	0x03
+#define TAS5720_VOLUME_CTRL_REG		0x04
+#define TAS5720_ANALOG_CTRL_REG		0x06
+#define TAS5720_FAULT_REG		0x08
+#define TAS5720_DIGITAL_CLIP2_REG	0x10
+#define TAS5720_DIGITAL_CLIP1_REG	0x11
+#define TAS5720_MAX_REG			TAS5720_DIGITAL_CLIP1_REG
+
+/* TAS5720_DEVICE_ID_REG */
+#define TAS5720_DEVICE_ID		0x01
+
+/* TAS5720_POWER_CTRL_REG */
+#define TAS5720_DIG_CLIP_MASK		GENMASK(7, 2)
+#define TAS5720_SLEEP			BIT(1)
+#define TAS5720_SDZ			BIT(0)
+
+/* TAS5720_DIGITAL_CTRL1_REG */
+#define TAS5720_HPF_BYPASS		BIT(7)
+#define TAS5720_TDM_CFG_SRC		BIT(6)
+#define TAS5720_SSZ_DS			BIT(3)
+#define TAS5720_SAIF_RIGHTJ_24BIT	(0x0)
+#define TAS5720_SAIF_RIGHTJ_20BIT	(0x1)
+#define TAS5720_SAIF_RIGHTJ_18BIT	(0x2)
+#define TAS5720_SAIF_RIGHTJ_16BIT	(0x3)
+#define TAS5720_SAIF_I2S		(0x4)
+#define TAS5720_SAIF_LEFTJ		(0x5)
+#define TAS5720_SAIF_FORMAT_MASK	GENMASK(2, 0)
+
+/* TAS5720_DIGITAL_CTRL2_REG */
+#define TAS5720_MUTE			BIT(4)
+#define TAS5720_TDM_SLOT_SEL_MASK	GENMASK(2, 0)
+
+/* TAS5720_ANALOG_CTRL_REG */
+#define TAS5720_PWM_RATE_6_3_FSYNC	(0x0 << 4)
+#define TAS5720_PWM_RATE_8_4_FSYNC	(0x1 << 4)
+#define TAS5720_PWM_RATE_10_5_FSYNC	(0x2 << 4)
+#define TAS5720_PWM_RATE_12_6_FSYNC	(0x3 << 4)
+#define TAS5720_PWM_RATE_14_7_FSYNC	(0x4 << 4)
+#define TAS5720_PWM_RATE_16_8_FSYNC	(0x5 << 4)
+#define TAS5720_PWM_RATE_20_10_FSYNC	(0x6 << 4)
+#define TAS5720_PWM_RATE_24_12_FSYNC	(0x7 << 4)
+#define TAS5720_PWM_RATE_MASK		GENMASK(6, 4)
+#define TAS5720_ANALOG_GAIN_19_2DBV	(0x0 << 2)
+#define TAS5720_ANALOG_GAIN_20_7DBV	(0x1 << 2)
+#define TAS5720_ANALOG_GAIN_23_5DBV	(0x2 << 2)
+#define TAS5720_ANALOG_GAIN_26_3DBV	(0x3 << 2)
+#define TAS5720_ANALOG_GAIN_MASK	GENMASK(3, 2)
+#define TAS5720_ANALOG_GAIN_SHIFT	(0x2)
+
+/* TAS5720_FAULT_REG */
+#define TAS5720_OC_THRESH_100PCT	(0x0 << 4)
+#define TAS5720_OC_THRESH_75PCT		(0x1 << 4)
+#define TAS5720_OC_THRESH_50PCT		(0x2 << 4)
+#define TAS5720_OC_THRESH_25PCT		(0x3 << 4)
+#define TAS5720_OC_THRESH_MASK		GENMASK(5, 4)
+#define TAS5720_CLKE			BIT(3)
+#define TAS5720_OCE			BIT(2)
+#define TAS5720_DCE			BIT(1)
+#define TAS5720_OTE			BIT(0)
+#define TAS5720_FAULT_MASK		GENMASK(3, 0)
+
+/* TAS5720_DIGITAL_CLIP1_REG */
+#define TAS5720_CLIP1_MASK		GENMASK(7, 2)
+#define TAS5720_CLIP1_SHIFT		(0x2)
+
+#endif /* __TAS5720_H__ */
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index ee4def4..3c5e1df 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -28,6 +28,7 @@
 #include <linux/i2c.h>
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
+#include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
@@ -1280,10 +1281,19 @@
 };
 MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aic31xx_acpi_match[] = {
+	{ "10TI3100", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
+#endif
+
 static struct i2c_driver aic31xx_i2c_driver = {
 	.driver = {
 		.name	= "tlv320aic31xx-codec",
 		.of_match_table = of_match_ptr(tlv320aic31xx_of_match),
+		.acpi_match_table = ACPI_PTR(aic31xx_acpi_match),
 	},
 	.probe		= aic31xx_i2c_probe,
 	.remove		= aic31xx_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic32x4-i2c.c b/sound/soc/codecs/tlv320aic32x4-i2c.c
new file mode 100644
index 0000000..59606cf
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic32x4-i2c.c
@@ -0,0 +1,74 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic32x4-i2c.c
+ *
+ * Copyright 2011 NW Digital Radio
+ *
+ * Author: Jeremy McDermond <nh6z@nh6z.net>
+ *
+ * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "tlv320aic32x4.h"
+
+static int aic32x4_i2c_probe(struct i2c_client *i2c,
+			     const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+	struct regmap_config config;
+
+	config = aic32x4_regmap_config;
+	config.reg_bits = 8;
+	config.val_bits = 8;
+
+	regmap = devm_regmap_init_i2c(i2c, &config);
+	return aic32x4_probe(&i2c->dev, regmap);
+}
+
+static int aic32x4_i2c_remove(struct i2c_client *i2c)
+{
+	return aic32x4_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id aic32x4_i2c_id[] = {
+	{ "tlv320aic32x4", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
+
+static const struct of_device_id aic32x4_of_id[] = {
+	{ .compatible = "ti,tlv320aic32x4", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, aic32x4_of_id);
+
+static struct i2c_driver aic32x4_i2c_driver = {
+	.driver = {
+		.name = "tlv320aic32x4",
+		.of_match_table = aic32x4_of_id,
+	},
+	.probe =    aic32x4_i2c_probe,
+	.remove =   aic32x4_i2c_remove,
+	.id_table = aic32x4_i2c_id,
+};
+
+module_i2c_driver(aic32x4_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
+MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c
new file mode 100644
index 0000000..724fcdd4
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic32x4-spi.c
@@ -0,0 +1,76 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic32x4-spi.c
+ *
+ * Copyright 2011 NW Digital Radio
+ *
+ * Author: Jeremy McDermond <nh6z@nh6z.net>
+ *
+ * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "tlv320aic32x4.h"
+
+static int aic32x4_spi_probe(struct spi_device *spi)
+{
+	struct regmap *regmap;
+	struct regmap_config config;
+
+	config = aic32x4_regmap_config;
+	config.reg_bits = 7;
+	config.pad_bits = 1;
+	config.val_bits = 8;
+	config.read_flag_mask = 0x01;
+
+	regmap = devm_regmap_init_spi(spi, &config);
+	return aic32x4_probe(&spi->dev, regmap);
+}
+
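The 7-bit register width plus one pad bit configured above matches the AIC32x4's SPI command byte layout, with the register address in the upper seven bits and the R/W flag in bit 0, which regmap sets through read_flag_mask. With this configuration a read of register 0x12, for example, goes on the wire as (0x12 << 1) | 0x01 = 0x25, while the corresponding write byte is 0x24.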
+static int aic32x4_spi_remove(struct spi_device *spi)
+{
+	return aic32x4_remove(&spi->dev);
+}
+
+static const struct spi_device_id aic32x4_spi_id[] = {
+	{ "tlv320aic32x4", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, aic32x4_spi_id);
+
+static const struct of_device_id aic32x4_of_id[] = {
+	{ .compatible = "ti,tlv320aic32x4", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, aic32x4_of_id);
+
+static struct spi_driver aic32x4_spi_driver = {
+	.driver = {
+		.name = "tlv320aic32x4",
+		.owner = THIS_MODULE,
+		.of_match_table = aic32x4_of_id,
+	},
+	.probe =    aic32x4_spi_probe,
+	.remove =   aic32x4_spi_remove,
+	.id_table = aic32x4_spi_id,
+};
+
+module_spi_driver(aic32x4_spi_driver);
+
+MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
+MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index f2d3191..85d4978 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -30,7 +30,6 @@
 #include <linux/pm.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
-#include <linux/i2c.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
@@ -160,7 +159,10 @@
 	/* 48k rate */
 	{AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
 	{AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
-	{AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4}
+	{AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
+
+	/* 96k rate */
+	{AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
 };
 
 static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -181,16 +183,71 @@
 	SOC_DAPM_SINGLE("R_DAC Switch", AIC32X4_LORROUTE, 3, 1, 0),
 };
 
-static const struct snd_kcontrol_new left_input_mixer_controls[] = {
-	SOC_DAPM_SINGLE("IN1_L P Switch", AIC32X4_LMICPGAPIN, 6, 1, 0),
-	SOC_DAPM_SINGLE("IN2_L P Switch", AIC32X4_LMICPGAPIN, 4, 1, 0),
-	SOC_DAPM_SINGLE("IN3_L P Switch", AIC32X4_LMICPGAPIN, 2, 1, 0),
+static const char * const resistor_text[] = {
+	"Off", "10 kOhm", "20 kOhm", "40 kOhm",
 };
 
-static const struct snd_kcontrol_new right_input_mixer_controls[] = {
-	SOC_DAPM_SINGLE("IN1_R P Switch", AIC32X4_RMICPGAPIN, 6, 1, 0),
-	SOC_DAPM_SINGLE("IN2_R P Switch", AIC32X4_RMICPGAPIN, 4, 1, 0),
-	SOC_DAPM_SINGLE("IN3_R P Switch", AIC32X4_RMICPGAPIN, 2, 1, 0),
+/* Left mixer pins */
+static SOC_ENUM_SINGLE_DECL(in1l_lpga_p_enum, AIC32X4_LMICPGAPIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2l_lpga_p_enum, AIC32X4_LMICPGAPIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3l_lpga_p_enum, AIC32X4_LMICPGAPIN, 2, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in1r_lpga_p_enum, AIC32X4_LMICPGAPIN, 0, resistor_text);
+
+static SOC_ENUM_SINGLE_DECL(cml_lpga_n_enum, AIC32X4_LMICPGANIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2r_lpga_n_enum, AIC32X4_LMICPGANIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3r_lpga_n_enum, AIC32X4_LMICPGANIN, 2, resistor_text);
+
+static const struct snd_kcontrol_new in1l_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("IN1_L L+ Switch", in1l_lpga_p_enum),
+};
+static const struct snd_kcontrol_new in2l_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("IN2_L L+ Switch", in2l_lpga_p_enum),
+};
+static const struct snd_kcontrol_new in3l_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("IN3_L L+ Switch", in3l_lpga_p_enum),
+};
+static const struct snd_kcontrol_new in1r_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("IN1_R L+ Switch", in1r_lpga_p_enum),
+};
+static const struct snd_kcontrol_new cml_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("CM_L L- Switch", cml_lpga_n_enum),
+};
+static const struct snd_kcontrol_new in2r_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("IN2_R L- Switch", in2r_lpga_n_enum),
+};
+static const struct snd_kcontrol_new in3r_to_lmixer_controls[] = {
+	SOC_DAPM_ENUM("IN3_R L- Switch", in3r_lpga_n_enum),
+};
+
+/*  Right mixer pins */
+static SOC_ENUM_SINGLE_DECL(in1r_rpga_p_enum, AIC32X4_RMICPGAPIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2r_rpga_p_enum, AIC32X4_RMICPGAPIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3r_rpga_p_enum, AIC32X4_RMICPGAPIN, 2, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2l_rpga_p_enum, AIC32X4_RMICPGAPIN, 0, resistor_text);
+static SOC_ENUM_SINGLE_DECL(cmr_rpga_n_enum, AIC32X4_RMICPGANIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in1l_rpga_n_enum, AIC32X4_RMICPGANIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3l_rpga_n_enum, AIC32X4_RMICPGANIN, 2, resistor_text);
+
+static const struct snd_kcontrol_new in1r_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("IN1_R R+ Switch", in1r_rpga_p_enum),
+};
+static const struct snd_kcontrol_new in2r_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("IN2_R R+ Switch", in2r_rpga_p_enum),
+};
+static const struct snd_kcontrol_new in3r_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("IN3_R R+ Switch", in3r_rpga_p_enum),
+};
+static const struct snd_kcontrol_new in2l_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("IN2_L R+ Switch", in2l_rpga_p_enum),
+};
+static const struct snd_kcontrol_new cmr_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("CM_R R- Switch", cmr_rpga_n_enum),
+};
+static const struct snd_kcontrol_new in1l_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("IN1_L R- Switch", in1l_rpga_n_enum),
+};
+static const struct snd_kcontrol_new in3l_to_rmixer_controls[] = {
+	SOC_DAPM_ENUM("IN3_L R- Switch", in3l_rpga_n_enum),
 };
 
 static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
@@ -214,14 +271,39 @@
 			   &lor_output_mixer_controls[0],
 			   ARRAY_SIZE(lor_output_mixer_controls)),
 	SND_SOC_DAPM_PGA("LOR Power", AIC32X4_OUTPWRCTL, 2, 0, NULL, 0),
-	SND_SOC_DAPM_MIXER("Left Input Mixer", SND_SOC_NOPM, 0, 0,
-			   &left_input_mixer_controls[0],
-			   ARRAY_SIZE(left_input_mixer_controls)),
-	SND_SOC_DAPM_MIXER("Right Input Mixer", SND_SOC_NOPM, 0, 0,
-			   &right_input_mixer_controls[0],
-			   ARRAY_SIZE(right_input_mixer_controls)),
-	SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
+
 	SND_SOC_DAPM_ADC("Right ADC", "Right Capture", AIC32X4_ADCSETUP, 6, 0),
+	SND_SOC_DAPM_MUX("IN1_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in1r_to_rmixer_controls),
+	SND_SOC_DAPM_MUX("IN2_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in2r_to_rmixer_controls),
+	SND_SOC_DAPM_MUX("IN3_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in3r_to_rmixer_controls),
+	SND_SOC_DAPM_MUX("IN2_L to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in2l_to_rmixer_controls),
+	SND_SOC_DAPM_MUX("CM_R to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+			cmr_to_rmixer_controls),
+	SND_SOC_DAPM_MUX("IN1_L to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+			in1l_to_rmixer_controls),
+	SND_SOC_DAPM_MUX("IN3_L to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+			in3l_to_rmixer_controls),
+
+	SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
+	SND_SOC_DAPM_MUX("IN1_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in1l_to_lmixer_controls),
+	SND_SOC_DAPM_MUX("IN2_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in2l_to_lmixer_controls),
+	SND_SOC_DAPM_MUX("IN3_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in3l_to_lmixer_controls),
+	SND_SOC_DAPM_MUX("IN1_R to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+			in1r_to_lmixer_controls),
+	SND_SOC_DAPM_MUX("CM_L to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+			cml_to_lmixer_controls),
+	SND_SOC_DAPM_MUX("IN2_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+			in2r_to_lmixer_controls),
+	SND_SOC_DAPM_MUX("IN3_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+			in3r_to_lmixer_controls),
+
 	SND_SOC_DAPM_MICBIAS("Mic Bias", AIC32X4_MICBIAS, 6, 0),
 
 	SND_SOC_DAPM_OUTPUT("HPL"),
@@ -261,19 +343,77 @@
 	{"LOR Power", NULL, "LOR Output Mixer"},
 	{"LOR", NULL, "LOR Power"},
 
-	/* Left input */
-	{"Left Input Mixer", "IN1_L P Switch", "IN1_L"},
-	{"Left Input Mixer", "IN2_L P Switch", "IN2_L"},
-	{"Left Input Mixer", "IN3_L P Switch", "IN3_L"},
-
-	{"Left ADC", NULL, "Left Input Mixer"},
-
 	/* Right Input */
-	{"Right Input Mixer", "IN1_R P Switch", "IN1_R"},
-	{"Right Input Mixer", "IN2_R P Switch", "IN2_R"},
-	{"Right Input Mixer", "IN3_R P Switch", "IN3_R"},
+	{"Right ADC", NULL, "IN1_R to Right Mixer Positive Resistor"},
+	{"IN1_R to Right Mixer Positive Resistor", "10 kOhm", "IN1_R"},
+	{"IN1_R to Right Mixer Positive Resistor", "20 kOhm", "IN1_R"},
+	{"IN1_R to Right Mixer Positive Resistor", "40 kOhm", "IN1_R"},
 
-	{"Right ADC", NULL, "Right Input Mixer"},
+	{"Right ADC", NULL, "IN2_R to Right Mixer Positive Resistor"},
+	{"IN2_R to Right Mixer Positive Resistor", "10 kOhm", "IN2_R"},
+	{"IN2_R to Right Mixer Positive Resistor", "20 kOhm", "IN2_R"},
+	{"IN2_R to Right Mixer Positive Resistor", "40 kOhm", "IN2_R"},
+
+	{"Right ADC", NULL, "IN3_R to Right Mixer Positive Resistor"},
+	{"IN3_R to Right Mixer Positive Resistor", "10 kOhm", "IN3_R"},
+	{"IN3_R to Right Mixer Positive Resistor", "20 kOhm", "IN3_R"},
+	{"IN3_R to Right Mixer Positive Resistor", "40 kOhm", "IN3_R"},
+
+	{"Right ADC", NULL, "IN2_L to Right Mixer Positive Resistor"},
+	{"IN2_L to Right Mixer Positive Resistor", "10 kOhm", "IN2_L"},
+	{"IN2_L to Right Mixer Positive Resistor", "20 kOhm", "IN2_L"},
+	{"IN2_L to Right Mixer Positive Resistor", "40 kOhm", "IN2_L"},
+
+	{"Right ADC", NULL, "CM_R to Right Mixer Negative Resistor"},
+	{"CM_R to Right Mixer Negative Resistor", "10 kOhm", "CM_R"},
+	{"CM_R to Right Mixer Negative Resistor", "20 kOhm", "CM_R"},
+	{"CM_R to Right Mixer Negative Resistor", "40 kOhm", "CM_R"},
+
+	{"Right ADC", NULL, "IN1_L to Right Mixer Negative Resistor"},
+	{"IN1_L to Right Mixer Negative Resistor", "10 kOhm", "IN1_L"},
+	{"IN1_L to Right Mixer Negative Resistor", "20 kOhm", "IN1_L"},
+	{"IN1_L to Right Mixer Negative Resistor", "40 kOhm", "IN1_L"},
+
+	{"Right ADC", NULL, "IN3_L to Right Mixer Negative Resistor"},
+	{"IN3_L to Right Mixer Negative Resistor", "10 kOhm", "IN3_L"},
+	{"IN3_L to Right Mixer Negative Resistor", "20 kOhm", "IN3_L"},
+	{"IN3_L to Right Mixer Negative Resistor", "40 kOhm", "IN3_L"},
+
+	/* Left Input */
+	{"Left ADC", NULL, "IN1_L to Left Mixer Positive Resistor"},
+	{"IN1_L to Left Mixer Positive Resistor", "10 kOhm", "IN1_L"},
+	{"IN1_L to Left Mixer Positive Resistor", "20 kOhm", "IN1_L"},
+	{"IN1_L to Left Mixer Positive Resistor", "40 kOhm", "IN1_L"},
+
+	{"Left ADC", NULL, "IN2_L to Left Mixer Positive Resistor"},
+	{"IN2_L to Left Mixer Positive Resistor", "10 kOhm", "IN2_L"},
+	{"IN2_L to Left Mixer Positive Resistor", "20 kOhm", "IN2_L"},
+	{"IN2_L to Left Mixer Positive Resistor", "40 kOhm", "IN2_L"},
+
+	{"Left ADC", NULL, "IN3_L to Left Mixer Positive Resistor"},
+	{"IN3_L to Left Mixer Positive Resistor", "10 kOhm", "IN3_L"},
+	{"IN3_L to Left Mixer Positive Resistor", "20 kOhm", "IN3_L"},
+	{"IN3_L to Left Mixer Positive Resistor", "40 kOhm", "IN3_L"},
+
+	{"Left ADC", NULL, "IN1_R to Left Mixer Positive Resistor"},
+	{"IN1_R to Left Mixer Positive Resistor", "10 kOhm", "IN1_R"},
+	{"IN1_R to Left Mixer Positive Resistor", "20 kOhm", "IN1_R"},
+	{"IN1_R to Left Mixer Positive Resistor", "40 kOhm", "IN1_R"},
+
+	{"Left ADC", NULL, "CM_L to Left Mixer Negative Resistor"},
+	{"CM_L to Left Mixer Negative Resistor", "10 kOhm", "CM_L"},
+	{"CM_L to Left Mixer Negative Resistor", "20 kOhm", "CM_L"},
+	{"CM_L to Left Mixer Negative Resistor", "40 kOhm", "CM_L"},
+
+	{"Left ADC", NULL, "IN2_R to Left Mixer Negative Resistor"},
+	{"IN2_R to Left Mixer Negative Resistor", "10 kOhm", "IN2_R"},
+	{"IN2_R to Left Mixer Negative Resistor", "20 kOhm", "IN2_R"},
+	{"IN2_R to Left Mixer Negative Resistor", "40 kOhm", "IN2_R"},
+
+	{"Left ADC", NULL, "IN3_R to Left Mixer Negative Resistor"},
+	{"IN3_R to Left Mixer Negative Resistor", "10 kOhm", "IN3_R"},
+	{"IN3_R to Left Mixer Negative Resistor", "20 kOhm", "IN3_R"},
+	{"IN3_R to Left Mixer Negative Resistor", "40 kOhm", "IN3_R"},
 };
 
 static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
@@ -287,14 +427,12 @@
 	},
 };
 
-static const struct regmap_config aic32x4_regmap = {
-	.reg_bits = 8,
-	.val_bits = 8,
-
+const struct regmap_config aic32x4_regmap_config = {
 	.max_register = AIC32X4_RMICPGAVOL,
 	.ranges = aic32x4_regmap_pages,
 	.num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
 };
+EXPORT_SYMBOL(aic32x4_regmap_config);
 
 static inline int aic32x4_get_divs(int mclk, int rate)
 {
@@ -567,7 +705,7 @@
 	return 0;
 }
 
-#define AIC32X4_RATES	SNDRV_PCM_RATE_8000_48000
+#define AIC32X4_RATES	SNDRV_PCM_RATE_8000_96000
 #define AIC32X4_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
 			 | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
 
@@ -596,7 +734,7 @@
 	.symmetric_rates = 1,
 };
 
-static int aic32x4_probe(struct snd_soc_codec *codec)
+static int aic32x4_codec_probe(struct snd_soc_codec *codec)
 {
 	struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
 	u32 tmp_reg;
@@ -655,7 +793,7 @@
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
-	.probe = aic32x4_probe,
+	.probe = aic32x4_codec_probe,
 	.set_bias_level = aic32x4_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -777,24 +915,22 @@
 	return ret;
 }
 
-static int aic32x4_i2c_probe(struct i2c_client *i2c,
-			     const struct i2c_device_id *id)
+int aic32x4_probe(struct device *dev, struct regmap *regmap)
 {
-	struct aic32x4_pdata *pdata = i2c->dev.platform_data;
 	struct aic32x4_priv *aic32x4;
-	struct device_node *np = i2c->dev.of_node;
+	struct aic32x4_pdata *pdata = dev->platform_data;
+	struct device_node *np = dev->of_node;
 	int ret;
 
-	aic32x4 = devm_kzalloc(&i2c->dev, sizeof(struct aic32x4_priv),
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	aic32x4 = devm_kzalloc(dev, sizeof(struct aic32x4_priv),
 			       GFP_KERNEL);
 	if (aic32x4 == NULL)
 		return -ENOMEM;
 
-	aic32x4->regmap = devm_regmap_init_i2c(i2c, &aic32x4_regmap);
-	if (IS_ERR(aic32x4->regmap))
-		return PTR_ERR(aic32x4->regmap);
-
-	i2c_set_clientdata(i2c, aic32x4);
+	dev_set_drvdata(dev, aic32x4);
 
 	if (pdata) {
 		aic32x4->power_cfg = pdata->power_cfg;
@@ -804,7 +940,7 @@
 	} else if (np) {
 		ret = aic32x4_parse_dt(aic32x4, np);
 		if (ret) {
-			dev_err(&i2c->dev, "Failed to parse DT node\n");
+			dev_err(dev, "Failed to parse DT node\n");
 			return ret;
 		}
 	} else {
@@ -814,71 +950,48 @@
 		aic32x4->rstn_gpio = -1;
 	}
 
-	aic32x4->mclk = devm_clk_get(&i2c->dev, "mclk");
+	aic32x4->mclk = devm_clk_get(dev, "mclk");
 	if (IS_ERR(aic32x4->mclk)) {
-		dev_err(&i2c->dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n");
+		dev_err(dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n");
 		return PTR_ERR(aic32x4->mclk);
 	}
 
 	if (gpio_is_valid(aic32x4->rstn_gpio)) {
-		ret = devm_gpio_request_one(&i2c->dev, aic32x4->rstn_gpio,
+		ret = devm_gpio_request_one(dev, aic32x4->rstn_gpio,
 				GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
 		if (ret != 0)
 			return ret;
 	}
 
-	ret = aic32x4_setup_regulators(&i2c->dev, aic32x4);
+	ret = aic32x4_setup_regulators(dev, aic32x4);
 	if (ret) {
-		dev_err(&i2c->dev, "Failed to setup regulators\n");
+		dev_err(dev, "Failed to setup regulators\n");
 		return ret;
 	}
 
-	ret = snd_soc_register_codec(&i2c->dev,
+	ret = snd_soc_register_codec(dev,
 			&soc_codec_dev_aic32x4, &aic32x4_dai, 1);
 	if (ret) {
-		dev_err(&i2c->dev, "Failed to register codec\n");
+		dev_err(dev, "Failed to register codec\n");
 		aic32x4_disable_regulators(aic32x4);
 		return ret;
 	}
 
-	i2c_set_clientdata(i2c, aic32x4);
-
 	return 0;
 }
+EXPORT_SYMBOL(aic32x4_probe);
 
-static int aic32x4_i2c_remove(struct i2c_client *client)
+int aic32x4_remove(struct device *dev)
 {
-	struct aic32x4_priv *aic32x4 = i2c_get_clientdata(client);
+	struct aic32x4_priv *aic32x4 = dev_get_drvdata(dev);
 
 	aic32x4_disable_regulators(aic32x4);
 
-	snd_soc_unregister_codec(&client->dev);
+	snd_soc_unregister_codec(dev);
+
 	return 0;
 }
-
-static const struct i2c_device_id aic32x4_i2c_id[] = {
-	{ "tlv320aic32x4", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
-
-static const struct of_device_id aic32x4_of_id[] = {
-	{ .compatible = "ti,tlv320aic32x4", },
-	{ /* senitel */ }
-};
-MODULE_DEVICE_TABLE(of, aic32x4_of_id);
-
-static struct i2c_driver aic32x4_i2c_driver = {
-	.driver = {
-		.name = "tlv320aic32x4",
-		.of_match_table = aic32x4_of_id,
-	},
-	.probe =    aic32x4_i2c_probe,
-	.remove =   aic32x4_i2c_remove,
-	.id_table = aic32x4_i2c_id,
-};
-
-module_i2c_driver(aic32x4_i2c_driver);
+EXPORT_SYMBOL(aic32x4_remove);
 
 MODULE_DESCRIPTION("ASoC tlv320aic32x4 codec driver");
 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index 995f033..a197dd5 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -10,6 +10,13 @@
 #ifndef _TLV320AIC32X4_H
 #define _TLV320AIC32X4_H
 
+struct device;
+struct regmap_config;
+
+extern const struct regmap_config aic32x4_regmap_config;
+int aic32x4_probe(struct device *dev, struct regmap *regmap);
+int aic32x4_remove(struct device *dev);
+
 /* tlv320aic32x4 register space (in decimal to match datasheet) */
 
 #define AIC32X4_PAGE1		128
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index bc3de2e..1f70810 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -824,7 +824,7 @@
 {
 	struct twl6040 *twl6040 = codec->control_data;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
-	int ret;
+	int ret = 0;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
@@ -832,12 +832,16 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (priv->codec_powered)
+		if (priv->codec_powered) {
+			/* Select low power PLL in standby */
+			ret = twl6040_set_pll(twl6040, TWL6040_SYSCLK_SEL_LPPLL,
+					      32768, 19200000);
 			break;
+		}
 
 		ret = twl6040_power(twl6040, 1);
 		if (ret)
-			return ret;
+			break;
 
 		priv->codec_powered = 1;
 
@@ -853,7 +857,7 @@
 		break;
 	}
 
-	return 0;
+	return ret;
 }
 
 static int twl6040_startup(struct snd_pcm_substream *substream,
@@ -983,9 +987,9 @@
 		if (mute) {
 			/* Power down drivers and DACs */
 			hflctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA |
-				    TWL6040_HFDRVENA);
+				    TWL6040_HFDRVENA | TWL6040_HFSWENA);
 			hfrctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA |
-				    TWL6040_HFDRVENA);
+				    TWL6040_HFDRVENA | TWL6040_HFSWENA);
 		}
 
 		twl6040_reg_write(twl6040, TWL6040_REG_HFLCTL, hflctl);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index fc164d6..f3109da 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3793,9 +3793,8 @@
 	ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
 				    wm8962->supplies);
 	if (ret != 0) {
-		dev_err(dev,
-			"Failed to enable supplies: %d\n", ret);
-		return ret;
+		dev_err(dev, "Failed to enable supplies: %d\n", ret);
+		goto disable_clock;
 	}
 
 	regcache_cache_only(wm8962->regmap, false);
@@ -3833,6 +3832,10 @@
 	msleep(5);
 
 	return 0;
+
+disable_clock:
+	clk_disable_unprepare(wm8962->pdata.mclk);
+	return ret;
 }
 
 static int wm8962_runtime_suspend(struct device *dev)
diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
index 910aafd..e63a318 100644
--- a/sound/soc/codecs/wm8962.h
+++ b/sound/soc/codecs/wm8962.h
@@ -16,9 +16,9 @@
 #include <asm/types.h>
 #include <sound/soc.h>
 
-#define WM8962_SYSCLK_MCLK 1
-#define WM8962_SYSCLK_FLL  2
-#define WM8962_SYSCLK_PLL3 3
+#define WM8962_SYSCLK_MCLK 0
+#define WM8962_SYSCLK_FLL  1
+#define WM8962_SYSCLK_PLL3 2
 
 #define WM8962_FLL  1
 
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2389ab4..466492b 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -643,6 +643,7 @@
 static struct platform_driver asoc_simple_card = {
 	.driver = {
 		.name = "asoc-simple-card",
+		.pm = &snd_soc_pm_ops,
 		.of_match_table = asoc_simple_of_match,
 	},
 	.probe = asoc_simple_card_probe,
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 132bb83..bc3c7b5 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -1,6 +1,7 @@
 config SND_KIRKWOOD_SOC
 	tristate "SoC Audio for the Marvell Kirkwood and Dove chips"
 	depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST
+	depends on HAS_DMA
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Kirkwood I2S interface. You will also need to select the
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index f7e789e..3abf51c 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -43,6 +43,7 @@
 	depends on SND_SOC_MEDIATEK && I2C
 	select SND_SOC_RT5645
 	select SND_SOC_RT5677
+	select SND_SOC_HDMI_CODEC
 	help
 	  This adds ASoC driver for Mediatek MT8173 boards
 	  with the RT5650 and RT5676 codecs.
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 5c4c58c..bb59392 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -134,7 +134,9 @@
 enum {
 	DAI_LINK_PLAYBACK,
 	DAI_LINK_CAPTURE,
+	DAI_LINK_HDMI,
 	DAI_LINK_CODEC_I2S,
+	DAI_LINK_HDMI_I2S,
 	DAI_LINK_INTERCODEC
 };
 
@@ -161,6 +163,16 @@
 		.dynamic = 1,
 		.dpcm_capture = 1,
 	},
+	[DAI_LINK_HDMI] = {
+		.name = "HDMI",
+		.stream_name = "HDMI PCM",
+		.cpu_dai_name = "HDMI",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+		.dynamic = 1,
+		.dpcm_playback = 1,
+	},
 
 	/* Back End DAI links */
 	[DAI_LINK_CODEC_I2S] = {
@@ -177,6 +189,13 @@
 		.dpcm_playback = 1,
 		.dpcm_capture = 1,
 	},
+	[DAI_LINK_HDMI_I2S] = {
+		.name = "HDMI BE",
+		.cpu_dai_name = "HDMIO",
+		.no_pcm = 1,
+		.codec_dai_name = "i2s-hifi",
+		.dpcm_playback = 1,
+	},
 	/* rt5676 <-> rt5650 intercodec link: Sets rt5676 I2S2 as master */
 	[DAI_LINK_INTERCODEC] = {
 		.name = "rt5650_rt5676 intercodec",
@@ -251,6 +270,14 @@
 	mt8173_rt5650_rt5676_dais[DAI_LINK_INTERCODEC].codec_of_node =
 		mt8173_rt5650_rt5676_codecs[1].of_node;
 
+	mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node =
+		of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 2);
+	if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
 	card->dev = &pdev->dev;
 	platform_set_drvdata(pdev, card);
 
diff --git a/sound/soc/mediatek/mt8173-rt5650.c b/sound/soc/mediatek/mt8173-rt5650.c
index bb09bb1..a27a667 100644
--- a/sound/soc/mediatek/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173-rt5650.c
@@ -85,12 +85,29 @@
 {
 	struct snd_soc_card *card = runtime->card;
 	struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
+	const char *codec_capture_dai = runtime->codec_dais[1]->name;
 	int ret;
 
 	rt5645_sel_asrc_clk_src(codec,
-				RT5645_DA_STEREO_FILTER |
-				RT5645_AD_STEREO_FILTER,
+				RT5645_DA_STEREO_FILTER,
 				RT5645_CLK_SEL_I2S1_ASRC);
+
+	if (!strcmp(codec_capture_dai, "rt5645-aif1")) {
+		rt5645_sel_asrc_clk_src(codec,
+					RT5645_AD_STEREO_FILTER,
+					RT5645_CLK_SEL_I2S1_ASRC);
+	} else if (!strcmp(codec_capture_dai, "rt5645-aif2")) {
+		rt5645_sel_asrc_clk_src(codec,
+					RT5645_AD_STEREO_FILTER,
+					RT5645_CLK_SEL_I2S2_ASRC);
+	} else {
+		dev_warn(card->dev,
+			 "Only one dai codec found in DTS, enabled rt5645 AD filter\n");
+		rt5645_sel_asrc_clk_src(codec,
+					RT5645_AD_STEREO_FILTER,
+					RT5645_CLK_SEL_I2S1_ASRC);
+	}
+
 	/* enable jack detection */
 	ret = snd_soc_card_jack_new(card, "Headset Jack",
 				    SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
@@ -110,6 +127,11 @@
 
 static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = {
 	{
+		/* Playback */
+		.dai_name = "rt5645-aif1",
+	},
+	{
+		/* Capture */
 		.dai_name = "rt5645-aif1",
 	},
 };
@@ -149,7 +171,7 @@
 		.cpu_dai_name = "I2S",
 		.no_pcm = 1,
 		.codecs = mt8173_rt5650_codecs,
-		.num_codecs = 1,
+		.num_codecs = 2,
 		.init = mt8173_rt5650_init,
 		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
 			   SND_SOC_DAIFMT_CBS_CFS,
@@ -177,6 +199,8 @@
 {
 	struct snd_soc_card *card = &mt8173_rt5650_card;
 	struct device_node *platform_node;
+	struct device_node *np;
+	const char *codec_capture_dai;
 	int i, ret;
 
 	platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -199,6 +223,26 @@
 			"Property 'audio-codec' missing or invalid\n");
 		return -EINVAL;
 	}
+	mt8173_rt5650_codecs[1].of_node = mt8173_rt5650_codecs[0].of_node;
+
+	if (of_find_node_by_name(platform_node, "codec-capture")) {
+		np = of_get_child_by_name(pdev->dev.of_node, "codec-capture");
+		if (!np) {
+			dev_err(&pdev->dev,
+				"%s: Can't find codec-capture DT node\n",
+				__func__);
+			return -EINVAL;
+		}
+		ret = snd_soc_of_get_dai_name(np, &codec_capture_dai);
+		if (ret < 0) {
+			dev_err(&pdev->dev,
+				"%s codec_capture_dai name fail %d\n",
+				__func__, ret);
+			return ret;
+		}
+		mt8173_rt5650_codecs[1].dai_name = codec_capture_dai;
+	}
+
 	card->dev = &pdev->dev;
 	platform_set_drvdata(pdev, card);
 
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index f1c58a2..2b5df2e 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -123,6 +123,7 @@
 #define AFE_TDM_CON1_WLEN_32BIT		(0x2 << 8)
 #define AFE_TDM_CON1_MSB_ALIGNED	(0x1 << 4)
 #define AFE_TDM_CON1_1_BCK_DELAY	(0x1 << 3)
+#define AFE_TDM_CON1_LRCK_INV		(0x1 << 2)
 #define AFE_TDM_CON1_BCK_INV		(0x1 << 1)
 #define AFE_TDM_CON1_EN			(0x1 << 0)
 
@@ -449,6 +450,7 @@
 			      runtime->rate * runtime->channels * 32);
 
 	val = AFE_TDM_CON1_BCK_INV |
+	      AFE_TDM_CON1_LRCK_INV |
 	      AFE_TDM_CON1_1_BCK_DELAY |
 	      AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */
 	      AFE_TDM_CON1_WLEN_32BIT |
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index c7563e2..4a16e77 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -260,6 +260,10 @@
 	if (mcbsp->pdata->enable_st_clock)
 		mcbsp->pdata->enable_st_clock(mcbsp->id, 1);
 
+	/* Disable Sidetone clock auto-gating for normal operation */
+	w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
+	MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
+
 	/* Enable McBSP Sidetone */
 	w = MCBSP_READ(mcbsp, SSELCR);
 	MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
@@ -279,6 +283,10 @@
 	w = MCBSP_READ(mcbsp, SSELCR);
 	MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
 
+	/* Enable Sidetone clock auto-gating to reduce power consumption */
+	w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
+	MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
+
 	if (mcbsp->pdata->enable_st_clock)
 		mcbsp->pdata->enable_st_clock(mcbsp->id, 0);
 }
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 99381a2..a84f677 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -82,6 +82,8 @@
 	struct dma_chan *chan;
 	int err = 0;
 
+	memset(&config, 0x00, sizeof(config));
+
 	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
 
 	/* return if this is a bufferless transfer e.g.
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index ec522e9..b6cb995 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -133,3 +133,4 @@
 MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
 MODULE_DESCRIPTION("ALSA SoC Brownstone");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:brownstone-audio");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 5c8f9db..d1661fa 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -207,3 +207,4 @@
 MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)");
 MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mioa701-wm9713");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 51e790d..96df9b2 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -248,3 +248,4 @@
 MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
 MODULE_DESCRIPTION("MMP Soc Audio DMA module");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mmp-pcm-audio");
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index eca60c2..ca8b23f 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -482,3 +482,4 @@
 MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
 MODULE_DESCRIPTION("MMP SSPA SoC Interface");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mmp-sspa-dai");
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 4e74d95..bcc81e9 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -161,3 +161,4 @@
 MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
 MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:palm27x-asoc");
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index da03fad..3cad990 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -833,3 +833,4 @@
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa-ssp-dai");
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f3de615..9615e6d 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -287,3 +287,4 @@
 MODULE_AUTHOR("Nicolas Pitre");
 MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-ac97");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 9f39039..410d48b 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -117,3 +117,4 @@
 MODULE_AUTHOR("Nicolas Pitre");
 MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa-pcm-audio");
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 6e86654..db000c6 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -474,7 +474,7 @@
 	struct lpass_data *drvdata =
 		snd_soc_platform_get_drvdata(soc_runtime->platform);
 	struct lpass_variant *v = drvdata->variant;
-	int ret;
+	int ret = -EINVAL;
 	struct lpass_pcm_data *data;
 	size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
 
@@ -491,7 +491,7 @@
 			data->rdma_ch = v->alloc_dma_channel(drvdata,
 						SNDRV_PCM_STREAM_PLAYBACK);
 
-		if (IS_ERR_VALUE(data->rdma_ch))
+		if (data->rdma_ch < 0)
 			return data->rdma_ch;
 
 		drvdata->substream[data->rdma_ch] = psubstream;
@@ -518,8 +518,10 @@
 			data->wrdma_ch = v->alloc_dma_channel(drvdata,
 						SNDRV_PCM_STREAM_CAPTURE);
 
-		if (IS_ERR_VALUE(data->wrdma_ch))
+		if (data->wrdma_ch < 0) {
+			ret = data->wrdma_ch;
 			goto capture_alloc_err;
+		}
 
 		drvdata->substream[data->wrdma_ch] = csubstream;
 
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 606399d..49354d1 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -492,9 +492,7 @@
 	 */
 	if (!count) {
 		clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT],
-					      parent_clk_name,
-					      (parent_clk_name) ?
-					      0 : CLK_IS_ROOT, req_rate);
+					      parent_clk_name, 0, req_rate);
 		if (!IS_ERR(clk)) {
 			adg->clkout[CLKOUT] = clk;
 			of_clk_add_provider(np, of_clk_src_simple_get, clk);
@@ -506,9 +504,7 @@
 	else {
 		for (i = 0; i < CLKOUTMAX; i++) {
 			clk = clk_register_fixed_rate(dev, clkout_name[i],
-						      parent_clk_name,
-						      (parent_clk_name) ?
-						      0 : CLK_IS_ROOT,
+						      parent_clk_name, 0,
 						      req_rate);
 			if (!IS_ERR(clk)) {
 				adg->onecell.clks	= adg->clkout;
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 7658e8f..6bc93cb 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -316,11 +316,15 @@
 		size = ARRAY_SIZE(gen2_id_table_cmd);
 	}
 
-	if (!entry)
-		return 0xFF;
+	if ((!entry) || (size <= id)) {
+		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
 
-	if (size <= id)
-		return 0xFF;
+		dev_err(dev, "unknown connection (%s[%d])\n",
+			rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+		/* use non-prohibited SRS number as error */
+		return 0x00; /* SSI00 */
+	}
 
 	return entry[id];
 }
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index fc89a67..a8f61d7 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -276,8 +276,9 @@
 /*
  * status
  *
- * 0xH0000CB0
+ * 0xH0000CBA
  *
+ * A	0: probe	1: remove
  * B	0: init		1: quit
  * C	0: start	1: stop
  *
@@ -287,19 +288,19 @@
  * H	0: fallback
  * H	0: hw_params
  */
+#define __rsnd_mod_shift_probe		0
+#define __rsnd_mod_shift_remove		0
 #define __rsnd_mod_shift_init		4
 #define __rsnd_mod_shift_quit		4
 #define __rsnd_mod_shift_start		8
 #define __rsnd_mod_shift_stop		8
-#define __rsnd_mod_shift_probe		28 /* always called */
-#define __rsnd_mod_shift_remove		28 /* always called */
 #define __rsnd_mod_shift_irq		28 /* always called */
 #define __rsnd_mod_shift_pcm_new	28 /* always called */
 #define __rsnd_mod_shift_fallback	28 /* always called */
 #define __rsnd_mod_shift_hw_params	28 /* always called */
 
-#define __rsnd_mod_add_probe		0
-#define __rsnd_mod_add_remove		0
+#define __rsnd_mod_add_probe		 1
+#define __rsnd_mod_add_remove		-1
 #define __rsnd_mod_add_init		 1
 #define __rsnd_mod_add_quit		-1
 #define __rsnd_mod_add_start		 1
@@ -310,7 +311,7 @@
 #define __rsnd_mod_add_hw_params	0
 
 #define __rsnd_mod_call_probe		0
-#define __rsnd_mod_call_remove		0
+#define __rsnd_mod_call_remove		1
 #define __rsnd_mod_call_init		0
 #define __rsnd_mod_call_quit		1
 #define __rsnd_mod_call_start		0
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 15d6ffe..e39f916 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -572,6 +572,9 @@
 
 	i = 0;
 	for_each_child_of_node(node, np) {
+		if (!of_device_is_available(np))
+			goto skip;
+
 		src = rsnd_src_get(priv, i);
 
 		snprintf(name, RSND_SRC_NAME_SIZE, "%s.%d",
@@ -595,6 +598,7 @@
 		if (ret)
 			goto rsnd_src_probe_done;
 
+skip:
 		i++;
 	}
 
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 1cf94d7..ee7f15a 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1023,6 +1023,11 @@
 
 		control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
 
+		if (control_hdr->size != sizeof(*control_hdr)) {
+			dev_err(tplg->dev, "ASoC: invalid control size\n");
+			return -EINVAL;
+		}
+
 		switch (control_hdr->ops.info) {
 		case SND_SOC_TPLG_CTL_VOLSW:
 		case SND_SOC_TPLG_CTL_STROBE:
@@ -1476,6 +1481,8 @@
 	widget->dobj.type = SND_SOC_DOBJ_WIDGET;
 	widget->dobj.ops = tplg->ops;
 	widget->dobj.index = tplg->index;
+	kfree(template.sname);
+	kfree(template.name);
 	list_add(&widget->dobj.list, &tplg->comp->dobj_list);
 	return 0;
 
@@ -1499,10 +1506,17 @@
 
 	for (i = 0; i < count; i++) {
 		widget = (struct snd_soc_tplg_dapm_widget *) tplg->pos;
+		if (widget->size != sizeof(*widget)) {
+			dev_err(tplg->dev, "ASoC: invalid widget size\n");
+			return -EINVAL;
+		}
+
 		ret = soc_tplg_dapm_widget_create(tplg, widget);
-		if (ret < 0)
+		if (ret < 0) {
 			dev_err(tplg->dev, "ASoC: failed to load widget %s\n",
 				widget->name);
+			return ret;
+		}
 	}
 
 	return 0;
@@ -1586,6 +1600,7 @@
 	return snd_soc_register_dai(tplg->comp, dai_drv);
 }
 
+/* create the FE DAI link */
 static int soc_tplg_link_create(struct soc_tplg *tplg,
 	struct snd_soc_tplg_pcm *pcm)
 {
@@ -1598,6 +1613,16 @@
 
 	link->name = pcm->pcm_name;
 	link->stream_name = pcm->pcm_name;
+	link->id = pcm->pcm_id;
+
+	link->cpu_dai_name = pcm->dai_name;
+	link->codec_name = "snd-soc-dummy";
+	link->codec_dai_name = "snd-soc-dummy-dai";
+
+	/* enable DPCM */
+	link->dynamic = 1;
+	link->dpcm_playback = pcm->playback;
+	link->dpcm_capture = pcm->capture;
 
 	/* pass control to component driver for optional further init */
 	ret = soc_tplg_dai_link_load(tplg, link);
@@ -1639,8 +1664,6 @@
 	if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
 		return 0;
 
-	pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
-
 	if (soc_tplg_check_elem_count(tplg,
 		sizeof(struct snd_soc_tplg_pcm), count,
 		hdr->payload_size, "PCM DAI")) {
@@ -1650,7 +1673,13 @@
 	}
 
 	/* create the FE DAIs and DAI links */
+	pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
 	for (i = 0; i < count; i++) {
+		if (pcm->size != sizeof(*pcm)) {
+			dev_err(tplg->dev, "ASoC: invalid pcm size\n");
+			return -EINVAL;
+		}
+
 		soc_tplg_pcm_create(tplg, pcm);
 		pcm++;
 	}
@@ -1670,6 +1699,11 @@
 		return 0;
 
 	manifest = (struct snd_soc_tplg_manifest *)tplg->pos;
+	if (manifest->size != sizeof(*manifest)) {
+		dev_err(tplg->dev, "ASoC: invalid manifest size\n");
+		return -EINVAL;
+	}
+
 	tplg->pos += sizeof(struct snd_soc_tplg_manifest);
 
 	if (tplg->comp && tplg->ops && tplg->ops->manifest)
@@ -1686,6 +1720,14 @@
 	if (soc_tplg_get_hdr_offset(tplg) >= tplg->fw->size)
 		return 0;
 
+	if (hdr->size != sizeof(*hdr)) {
+		dev_err(tplg->dev,
+			"ASoC: invalid header size for type %d at offset 0x%lx size 0x%zx.\n",
+			hdr->type, soc_tplg_get_hdr_offset(tplg),
+			tplg->fw->size);
+		return -EINVAL;
+	}
+
 	/* big endian firmware objects not supported atm */
 	if (hdr->magic == cpu_to_be32(SND_SOC_TPLG_MAGIC)) {
 		dev_err(tplg->dev,
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 39bcefe..488ef4e 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -11,6 +11,142 @@
 #include "uniperif.h"
 
 /*
+ * The user frame size shall be 2, 4, 6 or 8 32-bit words
+ * (i.e. 8, 16, 24 or 32 bytes).
+ * This constraint comes from the allowed values of the
+ * UNIPERIF_I2S_FMT_NUM_CH register.
+ */
+#define UNIPERIF_MAX_FRAME_SZ 0x20
+#define UNIPERIF_ALLOWED_FRAME_SZ (0x08 | 0x10 | 0x18 | UNIPERIF_MAX_FRAME_SZ)
+
+int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+			       unsigned int rx_mask, int slots,
+			       int slot_width)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int i, frame_size, avail_slots;
+
+	if (!UNIPERIF_TYPE_IS_TDM(uni)) {
+		dev_err(uni->dev, "cpu dai not in tdm mode\n");
+		return -EINVAL;
+	}
+
+	/* store info in unip context */
+	uni->tdm_slot.slots = slots;
+	uni->tdm_slot.slot_width = slot_width;
+	/* unip is unidirectional */
+	uni->tdm_slot.mask = (tx_mask != 0) ? tx_mask : rx_mask;
+
+	/* number of available timeslots */
+	for (i = 0, avail_slots = 0; i < uni->tdm_slot.slots; i++) {
+		if ((uni->tdm_slot.mask >> i) & 0x01)
+			avail_slots++;
+	}
+	uni->tdm_slot.avail_slots = avail_slots;
+
+	/* frame size in bytes */
+	frame_size = uni->tdm_slot.avail_slots * uni->tdm_slot.slot_width / 8;
+
+	/* check frame size is allowed */
+	if ((frame_size > UNIPERIF_MAX_FRAME_SZ) ||
+	    (frame_size & ~(int)UNIPERIF_ALLOWED_FRAME_SZ)) {
+		dev_err(uni->dev, "frame size not allowed: %d bytes\n",
+			frame_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
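To make the frame-size check concrete: with slots = 4, slot_width = 32 and tx_mask = 0xf, avail_slots is 4 and frame_size is 4 * 32 / 8 = 16 bytes, one of the allowed 8/16/24/32-byte values. Dropping one slot (tx_mask = 0x7) gives 12 bytes; since 12 & ~0x38 (UNIPERIF_ALLOWED_FRAME_SZ evaluates to 0x38) is non-zero, the function rejects it with -EINVAL.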
+int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
+			       struct snd_pcm_hw_rule *rule)
+{
+	struct uniperif *uni = rule->private;
+	struct snd_interval t;
+
+	t.min = uni->tdm_slot.avail_slots;
+	t.max = uni->tdm_slot.avail_slots;
+	t.openmin = 0;
+	t.openmax = 0;
+	t.integer = 0;
+
+	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
+}
+
+int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
+				 struct snd_pcm_hw_rule *rule)
+{
+	struct uniperif *uni = rule->private;
+	struct snd_mask *maskp = hw_param_mask(params, rule->var);
+	u64 format;
+
+	switch (uni->tdm_slot.slot_width) {
+	case 16:
+		format = SNDRV_PCM_FMTBIT_S16_LE;
+		break;
+	case 32:
+		format = SNDRV_PCM_FMTBIT_S32_LE;
+		break;
+	default:
+		dev_err(uni->dev, "format not supported: %d bits\n",
+			uni->tdm_slot.slot_width);
+		return -EINVAL;
+	}
+
+	maskp->bits[0] &= (u_int32_t)format;
+	maskp->bits[1] &= (u_int32_t)(format >> 32);
+	/* clear remaining indexes */
+	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX - 64) / 8);
+
+	if (!maskp->bits[0] && !maskp->bits[1])
+		return -EINVAL;
+
+	return 0;
+}
+
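Both helpers above follow the snd_pcm hw_rule callback signature and receive the uniperif instance through rule->private; they only take effect once registered on a substream's runtime. A minimal sketch of how a DAI startup hook could register them (the function name and its placement are illustrative, not taken from this patch):

/* Illustration only: registering the TDM constraints from a DAI startup hook */
static int sti_uniperiph_tdm_startup(struct snd_pcm_substream *substream,
				     struct snd_soc_dai *dai)
{
	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
	struct uniperif *uni = priv->dai_data.uni;
	int ret;

	/* Channel count must equal the number of active TDM slots */
	ret = snd_pcm_hw_rule_add(substream->runtime, 0,
				  SNDRV_PCM_HW_PARAM_CHANNELS,
				  sti_uniperiph_fix_tdm_chan, uni,
				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (ret < 0)
		return ret;

	/* Sample format must match the configured slot width (S16_LE/S32_LE) */
	return snd_pcm_hw_rule_add(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_FORMAT,
				   sti_uniperiph_fix_tdm_format, uni,
				   SNDRV_PCM_HW_PARAM_FORMAT, -1);
}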
+int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
+				   unsigned int *word_pos)
+{
+	int slot_width = uni->tdm_slot.slot_width / 8;
+	int slots_num = uni->tdm_slot.slots;
+	unsigned int slots_mask = uni->tdm_slot.mask;
+	int i, j, k;
+	unsigned int word16_pos[4];
+
+	/* word16_pos:
+	 * word16_pos[0] = WORDX_LSB
+	 * word16_pos[1] = WORDX_MSB
+	 * word16_pos[2] = WORDX+1_LSB
+	 * word16_pos[3] = WORDX+1_MSB
+	 */
+
+	/* set unip word position */
+	for (i = 0, j = 0, k = 0; (i < slots_num) && (k < WORD_MAX); i++) {
+		if ((slots_mask >> i) & 0x01) {
+			word16_pos[j] = i * slot_width;
+
+			if (slot_width == 4) {
+				word16_pos[j + 1] = word16_pos[j] + 2;
+				j++;
+			}
+			j++;
+
+			if (j > 3) {
+				word_pos[k] = word16_pos[1] |
+					      (word16_pos[0] << 8) |
+					      (word16_pos[3] << 16) |
+					      (word16_pos[2] << 24);
+				j = 0;
+				k++;
+			}
+		}
+	}
+
+	return 0;
+}
+
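As a worked example of the packing above: with slots = 4, slot_width = 16 and mask = 0xf, the active slots start at byte offsets 0, 2, 4 and 6, so the function writes word_pos[0] = 0x04060002, that is the first word's MSB offset (2) in bits 7..0, its LSB offset (0) in bits 15..8, the second word's MSB offset (6) in bits 23..16 and its LSB offset (4) in bits 31..24, matching the word16_pos layout documented in the function.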
+/*
  * sti_uniperiph_dai_create_ctrl
  * This function is used to create Ctrl associated to DAI but also pcm device.
  * Request is done by front end to associate ctrl with pcm device id
@@ -45,10 +181,16 @@
 				struct snd_pcm_hw_params *params,
 				struct snd_soc_dai *dai)
 {
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
 	struct snd_dmaengine_dai_dma_data *dma_data;
 	int transfer_size;
 
-	transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
+	if (uni->info->type == SND_ST_UNIPERIF_TYPE_TDM)
+		/* transfer size = user frame size (in 32-bit FIFO cells) */
+		transfer_size = snd_soc_params_to_frame_size(params) / 32;
+	else
+		transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
 
 	dma_data = snd_soc_dai_get_dma_data(dai, substream);
 	dma_data->maxburst = transfer_size;
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
index f0fd5a9..eb9933c 100644
--- a/sound/soc/sti/uniperif.h
+++ b/sound/soc/sti/uniperif.h
@@ -25,7 +25,7 @@
 	writel_relaxed((((value) & mask) << shift), ip->base + offset)
 
 /*
- * AUD_UNIPERIF_SOFT_RST reg
+ * UNIPERIF_SOFT_RST reg
  */
 
 #define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
@@ -50,7 +50,7 @@
 		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
 
 /*
- * AUD_UNIPERIF_FIFO_DATA reg
+ * UNIPERIF_FIFO_DATA reg
  */
 
 #define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
@@ -58,7 +58,7 @@
 	writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
 
 /*
- * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ * UNIPERIF_CHANNEL_STA_REGN reg
  */
 
 #define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
@@ -105,7 +105,7 @@
 	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
 
 /*
- *  AUD_UNIPERIF_ITS reg
+ *  UNIPERIF_ITS reg
  */
 
 #define UNIPERIF_ITS_OFFSET(ip) 0x000C
@@ -143,7 +143,7 @@
 		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
 
 /*
- *  AUD_UNIPERIF_ITS_BCLR reg
+ *  UNIPERIF_ITS_BCLR reg
  */
 
 /* FIFO_ERROR */
@@ -160,7 +160,7 @@
 	writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
 
 /*
- *  AUD_UNIPERIF_ITM reg
+ *  UNIPERIF_ITM reg
  */
 
 #define UNIPERIF_ITM_OFFSET(ip) 0x0018
@@ -188,7 +188,7 @@
 		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
 
 /*
- *  AUD_UNIPERIF_ITM_BCLR reg
+ *  UNIPERIF_ITM_BCLR reg
  */
 
 #define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
@@ -213,7 +213,7 @@
 		UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
 
 /*
- *  AUD_UNIPERIF_ITM_BSET reg
+ *  UNIPERIF_ITM_BSET reg
  */
 
 #define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
@@ -767,7 +767,7 @@
 	SET_UNIPERIF_REG(ip, \
 		UNIPERIF_CTRL_OFFSET(ip), \
 		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
-		CORAUD_UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
 
 /* UNDERFLOW_REC_WINDOW */
 #define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
@@ -1046,7 +1046,7 @@
 		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
 
 /*
- * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ * UNIPERIF_CHANNEL_STA_REGN reg
  */
 
 #define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
@@ -1057,7 +1057,7 @@
 			UNIPERIF_CHANNEL_STA_REGN(ip, n))
 
 /*
- * AUD_UNIPERIF_USER_VALIDITY reg
+ * UNIPERIF_USER_VALIDITY reg
  */
 
 #define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
@@ -1101,12 +1101,136 @@
 		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
 
 /*
+ * UNIPERIF_TDM_ENABLE
+ */
+#define UNIPERIF_TDM_ENABLE_OFFSET(ip) 0x0118
+#define GET_UNIPERIF_TDM_ENABLE(ip) \
+	readl_relaxed(ip->base + UNIPERIF_TDM_ENABLE_OFFSET(ip))
+#define SET_UNIPERIF_TDM_ENABLE(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_TDM_ENABLE_OFFSET(ip))
+
+/* TDM_ENABLE */
+#define UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip) 0x0
+#define UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip) 0x1
+#define GET_UNIPERIF_TDM_ENABLE_EN_TDM(ip) \
+		GET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_ENABLE_OFFSET(ip), \
+		UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
+		UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip))
+#define SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(ip) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_ENABLE_OFFSET(ip), \
+		UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
+		UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip), 1)
+#define SET_UNIPERIF_TDM_ENABLE_TDM_DISABLE(ip) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_ENABLE_OFFSET(ip), \
+		UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
+		UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip), 0)
+
+/*
+ * UNIPERIF_TDM_FS_REF_FREQ
+ */
+#define UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip) 0x011c
+#define GET_UNIPERIF_TDM_FS_REF_FREQ(ip) \
+	readl_relaxed(ip->base + UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ(ip, value) \
+	writel_relaxed(value, ip->base + \
+			UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip))
+
+/* REF_FREQ */
+#define UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip) 0x0
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip) 0
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip) 1
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip) 2
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip) 3
+#define UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip) 0x3
+#define GET_UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ(ip) \
+		GET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+		VALUE_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+		VALUE_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+		VALUE_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+		VALUE_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip))
+
+/*
+ * UNIPERIF_TDM_FS_REF_DIV
+ */
+#define UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip) 0x0120
+#define GET_UNIPERIF_TDM_FS_REF_DIV(ip) \
+	readl_relaxed(ip->base + UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip))
+#define SET_UNIPERIF_TDM_FS_REF_DIV(ip, value) \
+		writel_relaxed(value, ip->base + \
+			UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip))
+
+/* NUM_TIMESLOT */
+#define UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip) 0x0
+#define UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip) 0xff
+#define GET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(ip) \
+		GET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip))
+#define SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(ip, value) \
+		SET_UNIPERIF_REG(ip, \
+		UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip), \
+		UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip), \
+		UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip), value)
+
+/*
+ * UNIPERIF_TDM_WORD_POS_X_Y
+ * All 32 bits of a UNIPERIF_TDM_WORD_POS_X_Y register must be written in one shot
+ */
+#define UNIPERIF_TDM_WORD_POS_1_2_OFFSET(ip) 0x013c
+#define UNIPERIF_TDM_WORD_POS_3_4_OFFSET(ip) 0x0140
+#define UNIPERIF_TDM_WORD_POS_5_6_OFFSET(ip) 0x0144
+#define UNIPERIF_TDM_WORD_POS_7_8_OFFSET(ip) 0x0148
+#define GET_UNIPERIF_TDM_WORD_POS(ip, words) \
+	readl_relaxed(ip->base + UNIPERIF_TDM_WORD_POS_##words##_OFFSET(ip))
+#define SET_UNIPERIF_TDM_WORD_POS(ip, words, value) \
+		writel_relaxed(value, ip->base + \
+		UNIPERIF_TDM_WORD_POS_##words##_OFFSET(ip))
+/*
  * uniperipheral IP capabilities
  */
 
 #define UNIPERIF_FIFO_SIZE		70 /* FIFO is 70 cells deep */
 #define UNIPERIF_FIFO_FRAMES		4  /* FDMA trigger limit in frames */
 
+#define UNIPERIF_TYPE_IS_HDMI(p) \
+	((p)->info->type == SND_ST_UNIPERIF_TYPE_HDMI)
+#define UNIPERIF_TYPE_IS_PCM(p) \
+	((p)->info->type == SND_ST_UNIPERIF_TYPE_PCM)
+#define UNIPERIF_TYPE_IS_SPDIF(p) \
+	((p)->info->type == SND_ST_UNIPERIF_TYPE_SPDIF)
+#define UNIPERIF_TYPE_IS_IEC958(p) \
+	(UNIPERIF_TYPE_IS_HDMI(p) || \
+		UNIPERIF_TYPE_IS_SPDIF(p))
+#define UNIPERIF_TYPE_IS_TDM(p) \
+	((p)->info->type == SND_ST_UNIPERIF_TYPE_TDM)
+
 /*
  * Uniperipheral IP revisions
  */
@@ -1125,10 +1249,11 @@
 };
 
 enum uniperif_type {
-	SND_ST_UNIPERIF_PLAYER_TYPE_NONE,
-	SND_ST_UNIPERIF_PLAYER_TYPE_HDMI,
-	SND_ST_UNIPERIF_PLAYER_TYPE_PCM,
-	SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF
+	SND_ST_UNIPERIF_TYPE_NONE,
+	SND_ST_UNIPERIF_TYPE_HDMI,
+	SND_ST_UNIPERIF_TYPE_PCM,
+	SND_ST_UNIPERIF_TYPE_SPDIF,
+	SND_ST_UNIPERIF_TYPE_TDM
 };
 
 enum uniperif_state {
@@ -1145,9 +1270,17 @@
 	UNIPERIF_IEC958_ENCODING_MODE_ENCODED
 };
 
+enum uniperif_word_pos {
+	WORD_1_2,
+	WORD_3_4,
+	WORD_5_6,
+	WORD_7_8,
+	WORD_MAX
+};
+
 struct uniperif_info {
 	int id; /* instance value of the uniperipheral IP */
-	enum uniperif_type player_type;
+	enum uniperif_type type;
 	int underflow_enabled;		/* Underflow recovery mode */
 };
 
@@ -1156,12 +1289,20 @@
 	struct snd_aes_iec958 iec958;
 };
 
+struct dai_tdm_slot {
+	unsigned int mask;
+	int slots;
+	int slot_width;
+	unsigned int avail_slots;
+};
+
 struct uniperif {
 	/* System information */
 	struct uniperif_info *info;
 	struct device *dev;
 	int ver; /* IP version, used by register access macros */
 	struct regmap_field *clk_sel;
+	struct regmap_field *valid_sel;
 
 	/* capabilities */
 	const struct snd_pcm_hardware *hw;
@@ -1192,6 +1333,7 @@
 
 	/* dai properties */
 	unsigned int daifmt;
+	struct dai_tdm_slot tdm_slot;
 
 	/* DAI callbacks */
 	const struct snd_soc_dai_ops *dai_ops;
@@ -1209,6 +1351,28 @@
 	struct sti_uniperiph_dai dai_data;
 };
 
+static const struct snd_pcm_hardware uni_tdm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 48000,
+
+	.channels_min = 1,
+	.channels_max = 32,
+
+	.periods_min = 2,
+	.periods_max = 10,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
 /* uniperiph player*/
 int uni_player_init(struct platform_device *pdev,
 		    struct uniperif *uni_player);
@@ -1226,4 +1390,28 @@
 				struct snd_pcm_hw_params *params,
 				struct snd_soc_dai *dai);
 
+static inline int sti_uniperiph_get_user_frame_size(
+	struct snd_pcm_runtime *runtime)
+{
+	return (runtime->channels * snd_pcm_format_width(runtime->format) / 8);
+}
+
+static inline int sti_uniperiph_get_unip_tdm_frame_size(struct uniperif *uni)
+{
+	return (uni->tdm_slot.slots * uni->tdm_slot.slot_width / 8);
+}
+
+int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+			       unsigned int rx_mask, int slots,
+			       int slot_width);
+
+int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
+				   unsigned int *word_pos);
+
+int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
+			       struct snd_pcm_hw_rule *rule);
+
+int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
+				 struct snd_pcm_hw_rule *rule);
+
 #endif
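
The inline frame-size helpers added to uniperif.h above feed the TDM branch of sti_uniperiph_dai_hw_params(). A minimal standalone sketch in plain C (the channel count and sample format are assumptions for illustration, not values taken from the patch) of the arithmetic they imply:

	/* Standalone illustration of the TDM transfer-size math; values assumed. */
	#include <stdio.h>

	int main(void)
	{
		int channels = 8;	/* assumed number of TDM slots in use */
		int sample_bits = 32;	/* S32_LE, as allowed by uni_tdm_hw */

		/* sti_uniperiph_get_user_frame_size(): user frame size in bytes */
		int user_frame_size = channels * sample_bits / 8;	/* 32 bytes */

		/* In TDM mode the FDMA transfer size is that frame size counted
		 * in 32-bit FIFO cells, not channels * UNIPERIF_FIFO_FRAMES. */
		int transfer_size = user_frame_size / 4;		/* 8 cells */

		printf("user frame = %d bytes, transfer size = %d cells\n",
		       user_frame_size, transfer_size);
		return 0;
	}

In PCM mode the driver keeps the old channels * UNIPERIF_FIFO_FRAMES trigger, so only the TDM path depends on the user frame size.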
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 7aca6b9..ee1c7c2 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -21,23 +21,14 @@
 
 /* sys config registers definitions */
 #define SYS_CFG_AUDIO_GLUE 0xA4
-#define SYS_CFG_AUDI0_GLUE_PCM_CLKX 8
 
 /*
  * Driver specific types.
  */
-#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
-	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
-#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
-	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
-#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
-	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
-#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
-	(UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
-		UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
 
 #define UNIPERIF_PLAYER_CLK_ADJ_MIN  -999999
 #define UNIPERIF_PLAYER_CLK_ADJ_MAX  1000000
+#define UNIPERIF_PLAYER_I2S_OUT 1 /* player id connected to I2S/TDM TX bus */
 
 /*
  * Note: snd_pcm_hardware is linked to DMA controller but is declared here to
@@ -444,18 +435,11 @@
 
 	/* Force slot width to 32 in I2S mode (HW constraint) */
 	if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
-		SND_SOC_DAIFMT_I2S) {
+		SND_SOC_DAIFMT_I2S)
 		slot_width = 32;
-	} else {
-		switch (runtime->format) {
-		case SNDRV_PCM_FORMAT_S16_LE:
-			slot_width = 16;
-			break;
-		default:
-			slot_width = 32;
-			break;
-		}
-	}
+	else
+		slot_width = snd_pcm_format_width(runtime->format);
+
 	output_frame_size = slot_width * runtime->channels;
 
 	clk_div = player->mclk / runtime->rate;
@@ -530,7 +514,6 @@
 	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
 
 	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
-	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
 
 	/* No iec958 formatting as outputting to DAC  */
 	SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
@@ -538,6 +521,55 @@
 	return 0;
 }
 
+static int uni_player_prepare_tdm(struct uniperif *player,
+				  struct snd_pcm_runtime *runtime)
+{
+	int tdm_frame_size; /* unip tdm frame size in bytes */
+	int user_frame_size; /* user tdm frame size in bytes */
+	/* default unip TDM_WORD_POS_X_Y */
+	unsigned int word_pos[4] = {
+		0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
+	int freq, ret;
+
+	tdm_frame_size =
+		sti_uniperiph_get_unip_tdm_frame_size(player);
+	user_frame_size =
+		sti_uniperiph_get_user_frame_size(runtime);
+
+	/* fix 16/0 format */
+	SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+	SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
+
+	/* number of words inserted on the TDM line */
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, user_frame_size / 4 / 2);
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+	SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+
+	/* Enable the tdm functionality */
+	SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(player);
+
+	/* number of 8-bit timeslots available in the unip tdm frame */
+	SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(player, tdm_frame_size);
+
+	/* set the timeslot allocation for words in FIFO */
+	sti_uniperiph_get_tdm_word_pos(player, word_pos);
+	SET_UNIPERIF_TDM_WORD_POS(player, 1_2, word_pos[WORD_1_2]);
+	SET_UNIPERIF_TDM_WORD_POS(player, 3_4, word_pos[WORD_3_4]);
+	SET_UNIPERIF_TDM_WORD_POS(player, 5_6, word_pos[WORD_5_6]);
+	SET_UNIPERIF_TDM_WORD_POS(player, 7_8, word_pos[WORD_7_8]);
+
+	/* set unip clk rate (not done via set_sysclk ops) */
+	freq = runtime->rate * tdm_frame_size * 8;
+	mutex_lock(&player->ctrl_lock);
+	ret = uni_player_clk_set_rate(player, freq);
+	if (!ret)
+		player->mclk = freq;
+	mutex_unlock(&player->ctrl_lock);
+
+	return 0;
+}
+
 /*
  * ALSA uniperipheral iec958 controls
  */
@@ -668,11 +700,29 @@
 {
 	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
 	struct uniperif *player = priv->dai_data.uni;
+	int ret;
+
 	player->substream = substream;
 
 	player->clk_adj = 0;
 
-	return 0;
+	if (!UNIPERIF_TYPE_IS_TDM(player))
+		return 0;
+
+	/* refine hw constraints in tdm mode */
+	ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+				  SNDRV_PCM_HW_PARAM_CHANNELS,
+				  sti_uniperiph_fix_tdm_chan,
+				  player, SNDRV_PCM_HW_PARAM_CHANNELS,
+				  -1);
+	if (ret < 0)
+		return ret;
+
+	return snd_pcm_hw_rule_add(substream->runtime, 0,
+				   SNDRV_PCM_HW_PARAM_FORMAT,
+				   sti_uniperiph_fix_tdm_format,
+				   player, SNDRV_PCM_HW_PARAM_FORMAT,
+				   -1);
 }
 
 static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
@@ -682,7 +732,7 @@
 	struct uniperif *player = priv->dai_data.uni;
 	int ret;
 
-	if (dir == SND_SOC_CLOCK_IN)
+	if (UNIPERIF_TYPE_IS_TDM(player) || (dir == SND_SOC_CLOCK_IN))
 		return 0;
 
 	if (clk_id != 0)
@@ -714,7 +764,13 @@
 	}
 
 	/* Calculate transfer size (in fifo cells and bytes) for frame count */
-	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+	if (player->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
+		/* transfer size = user frame size (in 32-bit FIFO cells) */
+		transfer_size =
+			sti_uniperiph_get_user_frame_size(runtime) / 4;
+	} else {
+		transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+	}
 
 	/* Calculate number of empty cells available before asserting DREQ */
 	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
@@ -738,16 +794,19 @@
 	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
 
 	/* Uniperipheral setup depends on player type */
-	switch (player->info->player_type) {
-	case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI:
+	switch (player->info->type) {
+	case SND_ST_UNIPERIF_TYPE_HDMI:
 		ret = uni_player_prepare_iec958(player, runtime);
 		break;
-	case SND_ST_UNIPERIF_PLAYER_TYPE_PCM:
+	case SND_ST_UNIPERIF_TYPE_PCM:
 		ret = uni_player_prepare_pcm(player, runtime);
 		break;
-	case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF:
+	case SND_ST_UNIPERIF_TYPE_SPDIF:
 		ret = uni_player_prepare_iec958(player, runtime);
 		break;
+	case SND_ST_UNIPERIF_TYPE_TDM:
+		ret = uni_player_prepare_tdm(player, runtime);
+		break;
 	default:
 		dev_err(player->dev, "invalid player type");
 		return -EINVAL;
@@ -852,8 +911,8 @@
 	 * will not take affect and hang the player.
 	 */
 	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
-		if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player))
-				SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+		if (UNIPERIF_TYPE_IS_IEC958(player))
+			SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
 
 	/* Force channel status update (no update if clk disable) */
 	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
@@ -954,27 +1013,30 @@
 	player->substream = NULL;
 }
 
-static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
-					struct uniperif *player)
+static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
+					  struct uniperif *player)
 {
-	int bit_offset;
 	struct device_node *node = pdev->dev.of_node;
 	struct regmap *regmap;
-
-	bit_offset = SYS_CFG_AUDI0_GLUE_PCM_CLKX + player->info->id;
+	struct reg_field regfield[2] = {
+		/* PCM_CLK_SEL */
+		REG_FIELD(SYS_CFG_AUDIO_GLUE,
+			  8 + player->info->id,
+			  8 + player->info->id),
+		/* PCMP_VALID_SEL */
+		REG_FIELD(SYS_CFG_AUDIO_GLUE, 0, 1)
+	};
 
 	regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
 
-	if (regmap) {
-		struct reg_field regfield =
-			REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
-
-		player->clk_sel = regmap_field_alloc(regmap, regfield);
-	} else {
+	if (!regmap) {
 		dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
 		return -EINVAL;
 	}
 
+	player->clk_sel = regmap_field_alloc(regmap, regfield[0]);
+	player->valid_sel = regmap_field_alloc(regmap, regfield[1]);
+
 	return 0;
 }
 
@@ -1012,19 +1074,21 @@
 	}
 
 	if (strcasecmp(mode, "hdmi") == 0)
-		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
+		info->type = SND_ST_UNIPERIF_TYPE_HDMI;
 	else if (strcasecmp(mode, "pcm") == 0)
-		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM;
+		info->type = SND_ST_UNIPERIF_TYPE_PCM;
 	else if (strcasecmp(mode, "spdif") == 0)
-		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF;
+		info->type = SND_ST_UNIPERIF_TYPE_SPDIF;
+	else if (strcasecmp(mode, "tdm") == 0)
+		info->type = SND_ST_UNIPERIF_TYPE_TDM;
 	else
-		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE;
+		info->type = SND_ST_UNIPERIF_TYPE_NONE;
 
 	/* Save the info structure */
 	player->info = info;
 
-	/* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */
-	if (uni_player_parse_dt_clk_glue(pdev, player))
+	/* Get PCM_CLK_SEL & PCMP_VALID_SEL from audio-glue-ctrl SoC reg */
+	if (uni_player_parse_dt_audio_glue(pdev, player))
 		return -EINVAL;
 
 	return 0;
@@ -1037,7 +1101,8 @@
 		.trigger = uni_player_trigger,
 		.hw_params = sti_uniperiph_dai_hw_params,
 		.set_fmt = sti_uniperiph_dai_set_fmt,
-		.set_sysclk = uni_player_set_sysclk
+		.set_sysclk = uni_player_set_sysclk,
+		.set_tdm_slot = sti_uniperiph_set_tdm_slot
 };
 
 int uni_player_init(struct platform_device *pdev,
@@ -1047,7 +1112,6 @@
 
 	player->dev = &pdev->dev;
 	player->state = UNIPERIF_STATE_STOPPED;
-	player->hw = &uni_player_pcm_hw;
 	player->dai_ops = &uni_player_dai_ops;
 
 	ret = uni_player_parse_dt(pdev, player);
@@ -1057,6 +1121,11 @@
 		return ret;
 	}
 
+	if (UNIPERIF_TYPE_IS_TDM(player))
+		player->hw = &uni_tdm_hw;
+	else
+		player->hw = &uni_player_pcm_hw;
+
 	/* Get uniperif resource */
 	player->clk = of_clk_get(pdev->dev.of_node, 0);
 	if (IS_ERR(player->clk))
@@ -1073,6 +1142,17 @@
 		}
 	}
 
+	/* connect to I2S/TDM TX bus */
+	if (player->valid_sel &&
+	    (player->info->id == UNIPERIF_PLAYER_I2S_OUT)) {
+		ret = regmap_field_write(player->valid_sel, player->info->id);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: unable to connect to tdm bus", __func__);
+			return ret;
+		}
+	}
+
 	ret = devm_request_irq(&pdev->dev, player->irq,
 			       uni_player_irq_handler, IRQF_SHARED,
 			       dev_name(&pdev->dev), player);
@@ -1087,7 +1167,7 @@
 	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
 	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
 
-	if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) {
+	if (UNIPERIF_TYPE_IS_IEC958(player)) {
 		/* Set default iec958 status bits  */
 
 		/* Consumer, PCM, copyright, 2ch, mode 0 */
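
uni_player_prepare_tdm() above derives the uniperipheral clock directly from the TDM frame geometry instead of going through the set_sysclk callback (which now returns early for TDM). A hedged standalone sketch, with the slot count and slot width assumed rather than taken from the patch:

	/* Standalone sketch of the TDM bit-clock computation; slot layout assumed. */
	#include <stdio.h>

	int main(void)
	{
		int rate = 48000;	/* runtime->rate */
		int slots = 16;		/* tdm_slot.slots (assumed) */
		int slot_width = 8;	/* tdm_slot.slot_width in bits (assumed) */

		/* sti_uniperiph_get_unip_tdm_frame_size(): frame size in bytes */
		int tdm_frame_size = slots * slot_width / 8;	/* 16 bytes */

		/* uni_player_prepare_tdm(): freq = rate * frame bytes * 8 bits */
		int freq = rate * tdm_frame_size * 8;		/* 6144000 Hz */

		printf("requested uniperipheral clock: %d Hz\n", freq);
		return 0;
	}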
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 8a0eb20..eb74a32 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -73,55 +73,10 @@
 	return ret;
 }
 
-static int uni_reader_prepare(struct snd_pcm_substream *substream,
-			      struct snd_soc_dai *dai)
+static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
+				  struct uniperif *reader)
 {
-	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
-	struct uniperif *reader = priv->dai_data.uni;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	int transfer_size, trigger_limit;
 	int slot_width;
-	int count = 10;
-
-	/* The reader should be stopped */
-	if (reader->state != UNIPERIF_STATE_STOPPED) {
-		dev_err(reader->dev, "%s: invalid reader state %d", __func__,
-			reader->state);
-		return -EINVAL;
-	}
-
-	/* Calculate transfer size (in fifo cells and bytes) for frame count */
-	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
-
-	/* Calculate number of empty cells available before asserting DREQ */
-	if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
-		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
-	else
-		/*
-		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
-		 * FDMA_TRIGGER_LIMIT also controls when the state switches
-		 * from OFF or STANDBY to AUDIO DATA.
-		 */
-		trigger_limit = transfer_size;
-
-	/* Trigger limit must be an even number */
-	if ((!trigger_limit % 2) ||
-	    (trigger_limit != 1 && transfer_size % 2) ||
-	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
-		dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
-		return -EINVAL;
-	}
-
-	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
-
-	switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
-	case SND_SOC_DAIFMT_IB_IF:
-	case SND_SOC_DAIFMT_NB_IF:
-		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
-		break;
-	default:
-		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
-	}
 
 	/* Force slot width to 32 in I2S mode */
 	if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
@@ -173,6 +128,109 @@
 		return -EINVAL;
 	}
 
+	/* Number of channels must be even */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(reader->dev, "%s: invalid nb of channels", __func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+
+	return 0;
+}
+
+static int uni_reader_prepare_tdm(struct snd_pcm_runtime *runtime,
+				  struct uniperif *reader)
+{
+	int frame_size; /* user tdm frame size in bytes */
+	/* default unip TDM_WORD_POS_X_Y */
+	unsigned int word_pos[4] = {
+		0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
+
+	frame_size = sti_uniperiph_get_user_frame_size(runtime);
+
+	/* fix 16/0 format */
+	SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
+	SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
+
+	/* number of words inserted on the TDM line */
+	SET_UNIPERIF_I2S_FMT_NUM_CH(reader, frame_size / 4 / 2);
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+	SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+	SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(reader);
+
+	/*
+	 * Set the timeslot allocation for words in the FIFO.
+	 *
+	 * HW bug: a configuration with (LSB word < MSB word) is not
+	 *         possible, so if (LSB word < MSB word) is wanted it
+	 *         must be handled by the user.
+	 */
+	sti_uniperiph_get_tdm_word_pos(reader, word_pos);
+	SET_UNIPERIF_TDM_WORD_POS(reader, 1_2, word_pos[WORD_1_2]);
+	SET_UNIPERIF_TDM_WORD_POS(reader, 3_4, word_pos[WORD_3_4]);
+	SET_UNIPERIF_TDM_WORD_POS(reader, 5_6, word_pos[WORD_5_6]);
+	SET_UNIPERIF_TDM_WORD_POS(reader, 7_8, word_pos[WORD_7_8]);
+
+	return 0;
+}
+
+static int uni_reader_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit, ret;
+	int count = 10;
+
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+			reader->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	if (reader->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
+		/* transfer size = unip frame size (in 32-bit FIFO cells) */
+		transfer_size =
+			sti_uniperiph_get_user_frame_size(runtime) / 4;
+	} else {
+		transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+	}
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	else
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+
+	/* Trigger limit must be an even number */
+	if ((!trigger_limit % 2) ||
+	    (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
+		dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
+
+	if (UNIPERIF_TYPE_IS_TDM(reader))
+		ret = uni_reader_prepare_tdm(runtime, reader);
+	else
+		ret = uni_reader_prepare_pcm(runtime, reader);
+	if (ret)
+		return ret;
+
 	switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
 		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
@@ -191,21 +249,26 @@
 		return -EINVAL;
 	}
 
-	SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
-
-	/* Data clocking (changing) on the rising edge */
-	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
-
-	/* Number of channels must be even */
-
-	if ((runtime->channels % 2) || (runtime->channels < 2) ||
-	    (runtime->channels > 10)) {
-		dev_err(reader->dev, "%s: invalid nb of channels", __func__);
-		return -EINVAL;
+	/* Data clocking (changing) on the rising/falling edge */
+	switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
+		break;
 	}
 
-	SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
-
 	/* Clear any pending interrupts */
 	SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
 
@@ -293,6 +356,32 @@
 	}
 }
 
+static int uni_reader_startup(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+	int ret;
+
+	if (!UNIPERIF_TYPE_IS_TDM(reader))
+		return 0;
+
+	/* refine hw constraints in tdm mode */
+	ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+				  SNDRV_PCM_HW_PARAM_CHANNELS,
+				  sti_uniperiph_fix_tdm_chan,
+				  reader, SNDRV_PCM_HW_PARAM_CHANNELS,
+				  -1);
+	if (ret < 0)
+		return ret;
+
+	return snd_pcm_hw_rule_add(substream->runtime, 0,
+				   SNDRV_PCM_HW_PARAM_FORMAT,
+				   sti_uniperiph_fix_tdm_format,
+				   reader, SNDRV_PCM_HW_PARAM_FORMAT,
+				   -1);
+}
+
 static void uni_reader_shutdown(struct snd_pcm_substream *substream,
 				struct snd_soc_dai *dai)
 {
@@ -310,6 +399,7 @@
 {
 	struct uniperif_info *info;
 	struct device_node *node = pdev->dev.of_node;
+	const char *mode;
 
 	/* Allocate memory for the info structure */
 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -322,6 +412,17 @@
 		return -EINVAL;
 	}
 
+	/* Read the device mode property */
+	if (of_property_read_string(node, "st,mode", &mode)) {
+		dev_err(&pdev->dev, "uniperipheral mode not defined");
+		return -EINVAL;
+	}
+
+	if (strcasecmp(mode, "tdm") == 0)
+		info->type = SND_ST_UNIPERIF_TYPE_TDM;
+	else
+		info->type = SND_ST_UNIPERIF_TYPE_PCM;
+
 	/* Save the info structure */
 	reader->info = info;
 
@@ -329,11 +430,13 @@
 }
 
 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+		.startup = uni_reader_startup,
 		.shutdown = uni_reader_shutdown,
 		.prepare = uni_reader_prepare,
 		.trigger = uni_reader_trigger,
 		.hw_params = sti_uniperiph_dai_hw_params,
 		.set_fmt = sti_uniperiph_dai_set_fmt,
+		.set_tdm_slot = sti_uniperiph_set_tdm_slot
 };
 
 int uni_reader_init(struct platform_device *pdev,
@@ -343,7 +446,6 @@
 
 	reader->dev = &pdev->dev;
 	reader->state = UNIPERIF_STATE_STOPPED;
-	reader->hw = &uni_reader_pcm_hw;
 	reader->dai_ops = &uni_reader_dai_ops;
 
 	ret = uni_reader_parse_dt(pdev, reader);
@@ -352,6 +454,11 @@
 		return ret;
 	}
 
+	if (UNIPERIF_TYPE_IS_TDM(reader))
+		reader->hw = &uni_tdm_hw;
+	else
+		reader->hw = &uni_reader_pcm_hw;
+
 	ret = devm_request_irq(&pdev->dev, reader->irq,
 			       uni_reader_irq_handler, IRQF_SHARED,
 			       dev_name(&pdev->dev), reader);
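
The reader keeps the existing DREQ trigger-limit rule and only changes how the transfer size is computed in TDM mode. A standalone sketch of how the two quantities relate on the PCM path (IP revision and channel count are assumed for illustration):

	/* Standalone sketch of the FDMA trigger-limit selection; values assumed. */
	#include <stdio.h>

	#define UNIPERIF_FIFO_SIZE	70	/* FIFO is 70 cells deep */
	#define UNIPERIF_FIFO_FRAMES	4	/* FDMA trigger limit in frames */

	int main(void)
	{
		int channels = 2;	/* assumed PCM capture setup */
		int old_ip = 1;		/* assume a pre UNI_PLR_TOP_1_0 revision */
		int transfer_size = channels * UNIPERIF_FIFO_FRAMES;	/* 8 cells */
		int trigger_limit;

		if (old_ip)
			/* empty cells left in the FIFO before DREQ is asserted */
			trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;	/* 62 */
		else
			/* newer IP: the limit also gates the OFF/STANDBY switch */
			trigger_limit = transfer_size;				/* 8 */

		printf("transfer %d cells, trigger limit %d cells\n",
		       transfer_size, trigger_limit);
		return 0;
	}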
diff --git a/tools/Makefile b/tools/Makefile
index 6bf68fe..f10b64d8 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -16,6 +16,7 @@
 	@echo '  gpio                   - GPIO tools'
 	@echo '  hv                     - tools used when in Hyper-V clients'
 	@echo '  iio                    - IIO tools'
+	@echo '  kvm_stat               - top-like utility for displaying kvm statistics'
 	@echo '  lguest                 - a minimal 32-bit x86 hypervisor'
 	@echo '  net                    - misc networking tools'
 	@echo '  perf                   - Linux performance measurement and analysis tool'
@@ -110,10 +111,13 @@
 freefall_install:
 	$(call descend,laptop/$(@:_install=),install)
 
+kvm_stat_install:
+	$(call descend,kvm/$(@:_install=),install)
+
 install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
 		perf_install selftests_install turbostat_install usb_install \
 		virtio_install vm_install net_install x86_energy_perf_policy_install \
-		tmon_install freefall_install objtool_install
+		tmon_install freefall_install objtool_install kvm_stat_install
 
 acpi_clean:
 	$(call descend,power/acpi,clean)
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index ee566e8..27f3583 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -58,8 +58,8 @@
 quiet_cmd_cc_o_c = CC       $@
       cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
 
-quiet_cmd_cc_i_c = CPP      $@
-      cmd_cc_i_c = $(CC) $(c_flags) -E -o $@ $<
+quiet_cmd_cpp_i_c = CPP      $@
+      cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
 
 quiet_cmd_cc_s_c = AS       $@
       cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
@@ -83,11 +83,11 @@
 
 $(OUTPUT)%.i: %.c FORCE
 	$(call rule_mkdir)
-	$(call if_changed_dep,cc_i_c)
+	$(call if_changed_dep,cpp_i_c)
 
 $(OUTPUT)%.s: %.S FORCE
 	$(call rule_mkdir)
-	$(call if_changed_dep,cc_i_c)
+	$(call if_changed_dep,cpp_i_c)
 
 $(OUTPUT)%.s: %.c FORCE
 	$(call rule_mkdir)
diff --git a/tools/kvm/kvm_stat/Makefile b/tools/kvm/kvm_stat/Makefile
new file mode 100644
index 0000000..5b1cba5
--- /dev/null
+++ b/tools/kvm/kvm_stat/Makefile
@@ -0,0 +1,41 @@
+include ../../scripts/Makefile.include
+include ../../scripts/utilities.mak
+BINDIR=usr/bin
+MANDIR=usr/share/man
+MAN1DIR=$(MANDIR)/man1
+
+MAN1=kvm_stat.1
+
+A2X=a2x
+a2x_path := $(call get-executable,$(A2X))
+
+all: man
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+  ifneq ($(V),1)
+     QUIET_A2X = @echo '  A2X     '$@;
+  endif
+endif
+
+%.1: %.txt
+ifeq ($(a2x_path),)
+	$(error "You need to install asciidoc for man pages")
+else
+	$(QUIET_A2X)$(A2X) --doctype manpage --format manpage $<
+endif
+
+clean:
+	rm -f $(MAN1)
+
+man: $(MAN1)
+
+install-man: man
+	install -d -m 755 $(INSTALL_ROOT)/$(MAN1DIR)
+	install -m 644 kvm_stat.1 $(INSTALL_ROOT)/$(MAN1DIR)
+
+install-tools:
+	install -d -m 755 $(INSTALL_ROOT)/$(BINDIR)
+	install -m 755 -p "kvm_stat" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
+
+install: install-tools install-man
+.PHONY: all clean man install-tools install-man install
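
The kvm_stat script added below drives tracepoint counters through the raw perf_event_open() syscall from Python. For reference, a hedged C sketch of the same minimal event setup (the tracepoint id is an assumption here; the script reads it from debugfs):

	/* Standalone sketch: open one tracepoint event for all pids on CPU 0,
	 * roughly what the Python script below does via ctypes. Error handling
	 * is minimal and the tracepoint id (config) is assumed. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_TRACEPOINT;
		attr.size = sizeof(attr);
		attr.config = 0;	/* tracepoint id from debugfs, assumed */
		attr.read_format = PERF_FORMAT_GROUP;

		/* pid == -1, cpu == 0: measure every process on CPU 0 */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			perror("perf_event_open");
		else
			printf("perf event fd: %ld\n", fd);
		return 0;
	}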
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
new file mode 100755
index 0000000..581278c
--- /dev/null
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -0,0 +1,1127 @@
+#!/usr/bin/python
+#
+# top-like utility for displaying kvm statistics
+#
+# Copyright 2006-2008 Qumranet Technologies
+# Copyright 2008-2011 Red Hat, Inc.
+#
+# Authors:
+#  Avi Kivity <avi@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.  See
+# the COPYING file in the top-level directory.
+"""The kvm_stat module outputs statistics about running KVM VMs
+
+Three different ways of output formatting are available:
+- as a top-like text ui
+- in a key -> value format
+- in an all keys, all values format
+
+The data is sampled from the KVM's debugfs entries and its perf events.
+"""
+
+import curses
+import sys
+import os
+import time
+import optparse
+import ctypes
+import fcntl
+import resource
+import struct
+import re
+from collections import defaultdict
+from time import sleep
+
+VMX_EXIT_REASONS = {
+    'EXCEPTION_NMI':        0,
+    'EXTERNAL_INTERRUPT':   1,
+    'TRIPLE_FAULT':         2,
+    'PENDING_INTERRUPT':    7,
+    'NMI_WINDOW':           8,
+    'TASK_SWITCH':          9,
+    'CPUID':                10,
+    'HLT':                  12,
+    'INVLPG':               14,
+    'RDPMC':                15,
+    'RDTSC':                16,
+    'VMCALL':               18,
+    'VMCLEAR':              19,
+    'VMLAUNCH':             20,
+    'VMPTRLD':              21,
+    'VMPTRST':              22,
+    'VMREAD':               23,
+    'VMRESUME':             24,
+    'VMWRITE':              25,
+    'VMOFF':                26,
+    'VMON':                 27,
+    'CR_ACCESS':            28,
+    'DR_ACCESS':            29,
+    'IO_INSTRUCTION':       30,
+    'MSR_READ':             31,
+    'MSR_WRITE':            32,
+    'INVALID_STATE':        33,
+    'MWAIT_INSTRUCTION':    36,
+    'MONITOR_INSTRUCTION':  39,
+    'PAUSE_INSTRUCTION':    40,
+    'MCE_DURING_VMENTRY':   41,
+    'TPR_BELOW_THRESHOLD':  43,
+    'APIC_ACCESS':          44,
+    'EPT_VIOLATION':        48,
+    'EPT_MISCONFIG':        49,
+    'WBINVD':               54,
+    'XSETBV':               55,
+    'APIC_WRITE':           56,
+    'INVPCID':              58,
+}
+
+SVM_EXIT_REASONS = {
+    'READ_CR0':       0x000,
+    'READ_CR3':       0x003,
+    'READ_CR4':       0x004,
+    'READ_CR8':       0x008,
+    'WRITE_CR0':      0x010,
+    'WRITE_CR3':      0x013,
+    'WRITE_CR4':      0x014,
+    'WRITE_CR8':      0x018,
+    'READ_DR0':       0x020,
+    'READ_DR1':       0x021,
+    'READ_DR2':       0x022,
+    'READ_DR3':       0x023,
+    'READ_DR4':       0x024,
+    'READ_DR5':       0x025,
+    'READ_DR6':       0x026,
+    'READ_DR7':       0x027,
+    'WRITE_DR0':      0x030,
+    'WRITE_DR1':      0x031,
+    'WRITE_DR2':      0x032,
+    'WRITE_DR3':      0x033,
+    'WRITE_DR4':      0x034,
+    'WRITE_DR5':      0x035,
+    'WRITE_DR6':      0x036,
+    'WRITE_DR7':      0x037,
+    'EXCP_BASE':      0x040,
+    'INTR':           0x060,
+    'NMI':            0x061,
+    'SMI':            0x062,
+    'INIT':           0x063,
+    'VINTR':          0x064,
+    'CR0_SEL_WRITE':  0x065,
+    'IDTR_READ':      0x066,
+    'GDTR_READ':      0x067,
+    'LDTR_READ':      0x068,
+    'TR_READ':        0x069,
+    'IDTR_WRITE':     0x06a,
+    'GDTR_WRITE':     0x06b,
+    'LDTR_WRITE':     0x06c,
+    'TR_WRITE':       0x06d,
+    'RDTSC':          0x06e,
+    'RDPMC':          0x06f,
+    'PUSHF':          0x070,
+    'POPF':           0x071,
+    'CPUID':          0x072,
+    'RSM':            0x073,
+    'IRET':           0x074,
+    'SWINT':          0x075,
+    'INVD':           0x076,
+    'PAUSE':          0x077,
+    'HLT':            0x078,
+    'INVLPG':         0x079,
+    'INVLPGA':        0x07a,
+    'IOIO':           0x07b,
+    'MSR':            0x07c,
+    'TASK_SWITCH':    0x07d,
+    'FERR_FREEZE':    0x07e,
+    'SHUTDOWN':       0x07f,
+    'VMRUN':          0x080,
+    'VMMCALL':        0x081,
+    'VMLOAD':         0x082,
+    'VMSAVE':         0x083,
+    'STGI':           0x084,
+    'CLGI':           0x085,
+    'SKINIT':         0x086,
+    'RDTSCP':         0x087,
+    'ICEBP':          0x088,
+    'WBINVD':         0x089,
+    'MONITOR':        0x08a,
+    'MWAIT':          0x08b,
+    'MWAIT_COND':     0x08c,
+    'XSETBV':         0x08d,
+    'NPF':            0x400,
+}
+
+# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h)
+AARCH64_EXIT_REASONS = {
+    'UNKNOWN':      0x00,
+    'WFI':          0x01,
+    'CP15_32':      0x03,
+    'CP15_64':      0x04,
+    'CP14_MR':      0x05,
+    'CP14_LS':      0x06,
+    'FP_ASIMD':     0x07,
+    'CP10_ID':      0x08,
+    'CP14_64':      0x0C,
+    'ILL_ISS':      0x0E,
+    'SVC32':        0x11,
+    'HVC32':        0x12,
+    'SMC32':        0x13,
+    'SVC64':        0x15,
+    'HVC64':        0x16,
+    'SMC64':        0x17,
+    'SYS64':        0x18,
+    'IABT':         0x20,
+    'IABT_HYP':     0x21,
+    'PC_ALIGN':     0x22,
+    'DABT':         0x24,
+    'DABT_HYP':     0x25,
+    'SP_ALIGN':     0x26,
+    'FP_EXC32':     0x28,
+    'FP_EXC64':     0x2C,
+    'SERROR':       0x2F,
+    'BREAKPT':      0x30,
+    'BREAKPT_HYP':  0x31,
+    'SOFTSTP':      0x32,
+    'SOFTSTP_HYP':  0x33,
+    'WATCHPT':      0x34,
+    'WATCHPT_HYP':  0x35,
+    'BKPT32':       0x38,
+    'VECTOR32':     0x3A,
+    'BRK64':        0x3C,
+}
+
+# From include/uapi/linux/kvm.h, KVM_EXIT_xxx
+USERSPACE_EXIT_REASONS = {
+    'UNKNOWN':          0,
+    'EXCEPTION':        1,
+    'IO':               2,
+    'HYPERCALL':        3,
+    'DEBUG':            4,
+    'HLT':              5,
+    'MMIO':             6,
+    'IRQ_WINDOW_OPEN':  7,
+    'SHUTDOWN':         8,
+    'FAIL_ENTRY':       9,
+    'INTR':             10,
+    'SET_TPR':          11,
+    'TPR_ACCESS':       12,
+    'S390_SIEIC':       13,
+    'S390_RESET':       14,
+    'DCR':              15,
+    'NMI':              16,
+    'INTERNAL_ERROR':   17,
+    'OSI':              18,
+    'PAPR_HCALL':       19,
+    'S390_UCONTROL':    20,
+    'WATCHDOG':         21,
+    'S390_TSCH':        22,
+    'EPR':              23,
+    'SYSTEM_EVENT':     24,
+}
+
+IOCTL_NUMBERS = {
+    'SET_FILTER':  0x40082406,
+    'ENABLE':      0x00002400,
+    'DISABLE':     0x00002401,
+    'RESET':       0x00002403,
+}
+
+class Arch(object):
+    """Encapsulates global architecture specific data.
+
+    Contains the performance event open syscall and ioctl numbers, as
+    well as the VM exit reasons for the architecture it runs on.
+
+    """
+    @staticmethod
+    def get_arch():
+        machine = os.uname()[4]
+
+        if machine.startswith('ppc'):
+            return ArchPPC()
+        elif machine.startswith('aarch64'):
+            return ArchA64()
+        elif machine.startswith('s390'):
+            return ArchS390()
+        else:
+            # X86_64
+            for line in open('/proc/cpuinfo'):
+                if not line.startswith('flags'):
+                    continue
+
+                flags = line.split()
+                if 'vmx' in flags:
+                    return ArchX86(VMX_EXIT_REASONS)
+                if 'svm' in flags:
+                    return ArchX86(SVM_EXIT_REASONS)
+                return
+
+class ArchX86(Arch):
+    def __init__(self, exit_reasons):
+        self.sc_perf_evt_open = 298
+        self.ioctl_numbers = IOCTL_NUMBERS
+        self.exit_reasons = exit_reasons
+
+class ArchPPC(Arch):
+    def __init__(self):
+        self.sc_perf_evt_open = 319
+        self.ioctl_numbers = IOCTL_NUMBERS
+        self.ioctl_numbers['ENABLE'] = 0x20002400
+        self.ioctl_numbers['DISABLE'] = 0x20002401
+        self.ioctl_numbers['RESET'] = 0x20002403
+
+        # PPC comes in 32 and 64 bit and some generated ioctl
+        # numbers depend on the wordsize.
+        char_ptr_size = ctypes.sizeof(ctypes.c_char_p)
+        self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
+        self.exit_reasons = {}
+
+class ArchA64(Arch):
+    def __init__(self):
+        self.sc_perf_evt_open = 241
+        self.ioctl_numbers = IOCTL_NUMBERS
+        self.exit_reasons = AARCH64_EXIT_REASONS
+
+class ArchS390(Arch):
+    def __init__(self):
+        self.sc_perf_evt_open = 331
+        self.ioctl_numbers = IOCTL_NUMBERS
+        self.exit_reasons = None
+
+ARCH = Arch.get_arch()
+
+
+def walkdir(path):
+    """Returns os.walk() data for specified directory.
+
+    As it is only a wrapper it returns the same 3-tuple of (dirpath,
+    dirnames, filenames).
+    """
+    return next(os.walk(path))
+
+
+def parse_int_list(list_string):
+    """Returns an int list from a string of comma separated integers and
+    integer ranges."""
+    integers = []
+    members = list_string.split(',')
+
+    for member in members:
+        if '-' not in member:
+            integers.append(int(member))
+        else:
+            int_range = member.split('-')
+            integers.extend(range(int(int_range[0]),
+                                  int(int_range[1]) + 1))
+
+    return integers
+
+
+def get_online_cpus():
+    """Returns a list of cpu id integers."""
+    with open('/sys/devices/system/cpu/online') as cpu_list:
+        cpu_string = cpu_list.readline()
+        return parse_int_list(cpu_string)
+
+
+def get_filters():
+    """Returns a dict of trace events, their filter ids and
+    the values that can be filtered.
+
+    Trace events can be filtered for special values by setting a
+    filter string via an ioctl. The string normally has the format
+    identifier==value. For each filter a new event is created so that
+    the events can be distinguished.
+
+    """
+    filters = {}
+    filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS)
+    if ARCH.exit_reasons:
+        filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
+    return filters
+
+libc = ctypes.CDLL('libc.so.6', use_errno=True)
+syscall = libc.syscall
+
+class perf_event_attr(ctypes.Structure):
+    """Struct that holds the necessary data to set up a trace event.
+
+    For an extensive explanation see perf_event_open(2) and
+    include/uapi/linux/perf_event.h, struct perf_event_attr
+
+    All fields that are not initialized in the constructor are 0.
+
+    """
+    _fields_ = [('type', ctypes.c_uint32),
+                ('size', ctypes.c_uint32),
+                ('config', ctypes.c_uint64),
+                ('sample_freq', ctypes.c_uint64),
+                ('sample_type', ctypes.c_uint64),
+                ('read_format', ctypes.c_uint64),
+                ('flags', ctypes.c_uint64),
+                ('wakeup_events', ctypes.c_uint32),
+                ('bp_type', ctypes.c_uint32),
+                ('bp_addr', ctypes.c_uint64),
+                ('bp_len', ctypes.c_uint64),
+                ]
+
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.type = PERF_TYPE_TRACEPOINT
+        self.size = ctypes.sizeof(self)
+        self.read_format = PERF_FORMAT_GROUP
+
+def perf_event_open(attr, pid, cpu, group_fd, flags):
+    """Wrapper for the sys_perf_evt_open() syscall.
+
+    Used to set up performance events, returns a file descriptor or -1
+    on error.
+
+    Attributes are:
+    - syscall number
+    - struct perf_event_attr *
+    - pid or -1 to monitor all pids
+    - cpu number or -1 to monitor all cpus
+    - The file descriptor of the group leader or -1 to create a group.
+    - flags
+
+    """
+    return syscall(ARCH.sc_perf_evt_open, ctypes.pointer(attr),
+                   ctypes.c_int(pid), ctypes.c_int(cpu),
+                   ctypes.c_int(group_fd), ctypes.c_long(flags))
+
+PERF_TYPE_TRACEPOINT = 2
+PERF_FORMAT_GROUP = 1 << 3
+
+PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing'
+PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm'
+
+class Group(object):
+    """Represents a perf event group."""
+
+    def __init__(self):
+        self.events = []
+
+    def add_event(self, event):
+        self.events.append(event)
+
+    def read(self):
+        """Returns a dict with 'event name: value' for all events in the
+        group.
+
+        Values are read by reading from the file descriptor of the
+        event that is the group leader. See perf_event_open(2) for
+        details.
+
+        Read format for the used event configuration is:
+        struct read_format {
+            u64 nr; /* The number of events */
+            struct {
+                u64 value; /* The value of the event */
+            } values[nr];
+        };
+
+        """
+        length = 8 * (1 + len(self.events))
+        read_format = 'xxxxxxxx' + 'Q' * len(self.events)
+        return dict(zip([event.name for event in self.events],
+                        struct.unpack(read_format,
+                                      os.read(self.events[0].fd, length))))
+
+class Event(object):
+    """Represents a performance event and manages its life cycle."""
+    def __init__(self, name, group, trace_cpu, trace_pid, trace_point,
+                 trace_filter, trace_set='kvm'):
+        self.name = name
+        self.fd = None
+        self.setup_event(group, trace_cpu, trace_pid, trace_point,
+                         trace_filter, trace_set)
+
+    def __del__(self):
+        """Closes the event's file descriptor.
+
+        As no python file object was created for the file descriptor,
+        python will not reference count the descriptor and will not
+        close it itself automatically, so we do it.
+
+        """
+        if self.fd:
+            os.close(self.fd)
+
+    def setup_event_attribute(self, trace_set, trace_point):
+        """Returns an initialized ctype perf_event_attr struct."""
+
+        id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set,
+                               trace_point, 'id')
+
+        event_attr = perf_event_attr()
+        event_attr.config = int(open(id_path).read())
+        return event_attr
+
+    def setup_event(self, group, trace_cpu, trace_pid, trace_point,
+                    trace_filter, trace_set):
+        """Sets up the perf event in Linux.
+
+        Issues the syscall to register the event in the kernel and
+        then sets the optional filter.
+
+        """
+
+        event_attr = self.setup_event_attribute(trace_set, trace_point)
+
+        # First event will be group leader.
+        group_leader = -1
+
+        # All others have to pass the leader's descriptor instead.
+        if group.events:
+            group_leader = group.events[0].fd
+
+        fd = perf_event_open(event_attr, trace_pid,
+                             trace_cpu, group_leader, 0)
+        if fd == -1:
+            err = ctypes.get_errno()
+            raise OSError(err, os.strerror(err),
+                          'while calling sys_perf_event_open().')
+
+        if trace_filter:
+            fcntl.ioctl(fd, ARCH.ioctl_numbers['SET_FILTER'],
+                        trace_filter)
+
+        self.fd = fd
+
+    def enable(self):
+        """Enables the trace event in the kernel.
+
+        Enabling the group leader makes reading counters from it and the
+        events under it possible.
+
+        """
+        fcntl.ioctl(self.fd, ARCH.ioctl_numbers['ENABLE'], 0)
+
+    def disable(self):
+        """Disables the trace event in the kernel.
+
+        Disabling the group leader makes reading all counters under it
+        impossible.
+
+        """
+        fcntl.ioctl(self.fd, ARCH.ioctl_numbers['DISABLE'], 0)
+
+    def reset(self):
+        """Resets the count of the trace event in the kernel."""
+        fcntl.ioctl(self.fd, ARCH.ioctl_numbers['RESET'], 0)
+
+class TracepointProvider(object):
+    """Data provider for the stats class.
+
+    Manages the events/groups from which it acquires its data.
+
+    """
+    def __init__(self):
+        self.group_leaders = []
+        self.filters = get_filters()
+        self._fields = self.get_available_fields()
+        self._pid = 0
+
+    def get_available_fields(self):
+        """Returns a list of available event's of format 'event name(filter
+        name)'.
+
+        All available events have directories under
+        /sys/kernel/debug/tracing/events/ which export information
+        about the specific event. Therefore, listing the dirs gives us
+        a list of all available events.
+
+        Some events like the vm exit reasons can be filtered for
+        specific values. To account for that, the routine below
+        creates special fields with the following format:
+        event name(filter name)
+
+        """
+        path = os.path.join(PATH_DEBUGFS_TRACING, 'events', 'kvm')
+        fields = walkdir(path)[1]
+        extra = []
+        for field in fields:
+            if field in self.filters:
+                filter_name_, filter_dicts = self.filters[field]
+                for name in filter_dicts:
+                    extra.append(field + '(' + name + ')')
+        fields += extra
+        return fields
+
+    def setup_traces(self):
+        """Creates all event and group objects needed to be able to retrieve
+        data."""
+        if self._pid > 0:
+            # Fetch list of all threads of the monitored pid, as qemu
+            # starts a thread for each vcpu.
+            path = os.path.join('/proc', str(self._pid), 'task')
+            groupids = walkdir(path)[1]
+        else:
+            groupids = get_online_cpus()
+
+        # The constant is needed as a buffer for python libs, std
+        # streams and other files that the script opens.
+        newlim = len(groupids) * len(self._fields) + 50
+        try:
+            softlim_, hardlim = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+            if hardlim < newlim:
+                # Now we need CAP_SYS_RESOURCE, to increase the hard limit.
+                resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, newlim))
+            else:
+                # Raising the soft limit is sufficient.
+                resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, hardlim))
+
+        except ValueError:
+            sys.exit("NOFILE rlimit could not be raised to {0}".format(newlim))
+
+        for groupid in groupids:
+            group = Group()
+            for name in self._fields:
+                tracepoint = name
+                tracefilter = None
+                match = re.match(r'(.*)\((.*)\)', name)
+                if match:
+                    tracepoint, sub = match.groups()
+                    tracefilter = ('%s==%d\0' %
+                                   (self.filters[tracepoint][0],
+                                    self.filters[tracepoint][1][sub]))
+
+                # From perf_event_open(2):
+                # pid > 0 and cpu == -1
+                # This measures the specified process/thread on any CPU.
+                #
+                # pid == -1 and cpu >= 0
+                # This measures all processes/threads on the specified CPU.
+                trace_cpu = groupid if self._pid == 0 else -1
+                trace_pid = int(groupid) if self._pid != 0 else -1
+
+                group.add_event(Event(name=name,
+                                      group=group,
+                                      trace_cpu=trace_cpu,
+                                      trace_pid=trace_pid,
+                                      trace_point=tracepoint,
+                                      trace_filter=tracefilter))
+
+            self.group_leaders.append(group)
+
+    def available_fields(self):
+        return self.get_available_fields()
+
+    @property
+    def fields(self):
+        return self._fields
+
+    @fields.setter
+    def fields(self, fields):
+        """Enables/disables the (un)wanted events"""
+        self._fields = fields
+        for group in self.group_leaders:
+            for index, event in enumerate(group.events):
+                if event.name in fields:
+                    event.reset()
+                    event.enable()
+                else:
+                    # Do not disable the group leader.
+                    # It would disable all of its events.
+                    if index != 0:
+                        event.disable()
+
+    @property
+    def pid(self):
+        return self._pid
+
+    @pid.setter
+    def pid(self, pid):
+        """Changes the monitored pid by setting new traces."""
+        self._pid = pid
+        # The garbage collector will get rid of all Event/Group
+        # objects and open files after removing the references.
+        self.group_leaders = []
+        self.setup_traces()
+        self.fields = self._fields
+
+    def read(self):
+        """Returns 'event name: current value' for all enabled events."""
+        ret = defaultdict(int)
+        for group in self.group_leaders:
+            for name, val in group.read().iteritems():
+                if name in self._fields:
+                    ret[name] += val
+        return ret
+
+class DebugfsProvider(object):
+    """Provides data from the files that KVM creates in the kvm debugfs
+    folder."""
+    def __init__(self):
+        self._fields = self.get_available_fields()
+        self._pid = 0
+        self.do_read = True
+
+    def get_available_fields(self):
+        """"Returns a list of available fields.
+
+        The fields are all available KVM debugfs files
+
+        """
+        return walkdir(PATH_DEBUGFS_KVM)[2]
+
+    @property
+    def fields(self):
+        return self._fields
+
+    @fields.setter
+    def fields(self, fields):
+        self._fields = fields
+
+    @property
+    def pid(self):
+        return self._pid
+
+    @pid.setter
+    def pid(self, pid):
+        if pid != 0:
+            self._pid = pid
+
+            vms = walkdir(PATH_DEBUGFS_KVM)[1]
+            if len(vms) == 0:
+                self.do_read = False
+
+            self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+
+        else:
+            self.paths = ['']
+            self.do_read = True
+
+    def read(self):
+        """Returns a dict with format:'file name / field -> current value'."""
+        results = {}
+
+        # If no debugfs filtering support is available, then don't read.
+        if not self.do_read:
+            return results
+
+        for path in self.paths:
+            for field in self._fields:
+                results[field] = results.get(field, 0) \
+                                 + self.read_field(field, path)
+
+        return results
+
+    def read_field(self, field, path):
+        """Returns the value of a single field from a specific VM."""
+        try:
+            return int(open(os.path.join(PATH_DEBUGFS_KVM,
+                                         path,
+                                         field))
+                       .read())
+        except IOError:
+            return 0
+
+class Stats(object):
+    """Manages the data providers and the data they provide.
+
+    It is used to set filters on the provider's data and collect all
+    provider data.
+
+    """
+    def __init__(self, providers, pid, fields=None):
+        self.providers = providers
+        self._pid_filter = pid
+        self._fields_filter = fields
+        self.values = {}
+        self.update_provider_pid()
+        self.update_provider_filters()
+
+    def update_provider_filters(self):
+        """Propagates fields filters to providers."""
+        def wanted(key):
+            if not self._fields_filter:
+                return True
+            return re.match(self._fields_filter, key) is not None
+
+        # As we reset the counters when updating the fields we can
+        # also clear the cache of old values.
+        self.values = {}
+        for provider in self.providers:
+            provider_fields = [key for key in provider.get_available_fields()
+                               if wanted(key)]
+            provider.fields = provider_fields
+
+    def update_provider_pid(self):
+        """Propagates pid filters to providers."""
+        for provider in self.providers:
+            provider.pid = self._pid_filter
+
+    @property
+    def fields_filter(self):
+        return self._fields_filter
+
+    @fields_filter.setter
+    def fields_filter(self, fields_filter):
+        self._fields_filter = fields_filter
+        self.update_provider_filters()
+
+    @property
+    def pid_filter(self):
+        return self._pid_filter
+
+    @pid_filter.setter
+    def pid_filter(self, pid):
+        self._pid_filter = pid
+        self.values = {}
+        self.update_provider_pid()
+
+    def get(self):
+        """Returns a dict with field -> (value, delta to last value) of all
+        provider data."""
+        for provider in self.providers:
+            new = provider.read()
+            for key in provider.fields:
+                oldval = self.values.get(key, (0, 0))
+                newval = new.get(key, 0)
+                newdelta = None
+                if oldval is not None:
+                    newdelta = newval - oldval[0]
+                self.values[key] = (newval, newdelta)
+        return self.values
+
+LABEL_WIDTH = 40
+NUMBER_WIDTH = 10
+
+class Tui(object):
+    """Instruments curses to draw a nice text ui."""
+    def __init__(self, stats):
+        self.stats = stats
+        self.screen = None
+        self.drilldown = False
+        self.update_drilldown()
+
+    def __enter__(self):
+        """Initialises curses for later use.  Based on curses.wrapper
+           implementation from the Python standard library."""
+        self.screen = curses.initscr()
+        curses.noecho()
+        curses.cbreak()
+
+        # The try/catch works around a minor bit of
+        # over-conscientiousness in the curses module, the error
+        # return from C start_color() is ignorable.
+        try:
+            curses.start_color()
+        except:
+            pass
+
+        curses.use_default_colors()
+        return self
+
+    def __exit__(self, *exception):
+        """Resets the terminal to its normal state.  Based on curses.wrappre
+           implementation from the Python standard library."""
+        if self.screen:
+            self.screen.keypad(0)
+            curses.echo()
+            curses.nocbreak()
+            curses.endwin()
+
+    def update_drilldown(self):
+        """Sets or removes a filter that only allows fields without braces."""
+        if not self.stats.fields_filter:
+            self.stats.fields_filter = r'^[^\(]*$'
+
+        elif self.stats.fields_filter == r'^[^\(]*$':
+            self.stats.fields_filter = None
+
+    def update_pid(self, pid):
+        """Propagates pid selection to stats object."""
+        self.stats.pid_filter = pid
+
+    def refresh(self, sleeptime):
+        """Refreshes on-screen data."""
+        self.screen.erase()
+        if self.stats.pid_filter > 0:
+            self.screen.addstr(0, 0, 'kvm statistics - pid {0}'
+                               .format(self.stats.pid_filter),
+                               curses.A_BOLD)
+        else:
+            self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+        self.screen.addstr(2, 1, 'Event')
+        self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH -
+                           len('Total'), 'Total')
+        self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 8 -
+                           len('Current'), 'Current')
+        row = 3
+        stats = self.stats.get()
+        def sortkey(x):
+            if stats[x][1]:
+                return (-stats[x][1], -stats[x][0])
+            else:
+                return (0, -stats[x][0])
+        for key in sorted(stats.keys(), key=sortkey):
+
+            if row >= self.screen.getmaxyx()[0]:
+                break
+            values = stats[key]
+            if not values[0] and not values[1]:
+                break
+            col = 1
+            self.screen.addstr(row, col, key)
+            col += LABEL_WIDTH
+            self.screen.addstr(row, col, '%10d' % (values[0],))
+            col += NUMBER_WIDTH
+            if values[1] is not None:
+                self.screen.addstr(row, col, '%8d' % (values[1] / sleeptime,))
+            row += 1
+        self.screen.refresh()
+
+    def show_filter_selection(self):
+        """Draws filter selection mask.
+
+        Asks for a valid regex and sets the fields filter accordingly.
+
+        """
+        while True:
+            self.screen.erase()
+            self.screen.addstr(0, 0,
+                               "Show statistics for events matching a regex.",
+                               curses.A_BOLD)
+            self.screen.addstr(2, 0,
+                               "Current regex: {0}"
+                               .format(self.stats.fields_filter))
+            self.screen.addstr(3, 0, "New regex: ")
+            curses.echo()
+            regex = self.screen.getstr()
+            curses.noecho()
+            if len(regex) == 0:
+                return
+            try:
+                re.compile(regex)
+                self.stats.fields_filter = regex
+                return
+            except re.error:
+                continue
+
+    def show_vm_selection(self):
+        """Draws PID selection mask.
+
+        Asks for a pid until a valid pid or 0 has been entered.
+
+        """
+        while True:
+            self.screen.erase()
+            self.screen.addstr(0, 0,
+                               'Show statistics for a specific pid.',
+                               curses.A_BOLD)
+            self.screen.addstr(1, 0,
+                               'This might limit the shown data to the trace '
+                               'statistics.')
+
+            curses.echo()
+            self.screen.addstr(3, 0, "Pid [0 or pid]: ")
+            pid = self.screen.getstr()
+            curses.noecho()
+
+            try:
+                pid = int(pid)
+
+                if pid == 0:
+                    self.update_pid(pid)
+                    break
+                else:
+                    if not os.path.isdir(os.path.join('/proc/', str(pid))):
+                        continue
+                    else:
+                        self.update_pid(pid)
+                        break
+
+            except ValueError:
+                continue
+
+    def show_stats(self):
+        """Refreshes the screen and processes user input."""
+        sleeptime = 0.25
+        while True:
+            self.refresh(sleeptime)
+            curses.halfdelay(int(sleeptime * 10))
+            sleeptime = 3
+            try:
+                char = self.screen.getkey()
+                if char == 'x':
+                    self.drilldown = not self.drilldown
+                    self.update_drilldown()
+                if char == 'q':
+                    break
+                if char == 'f':
+                    self.show_filter_selection()
+                if char == 'p':
+                    self.show_vm_selection()
+            except KeyboardInterrupt:
+                break
+            except curses.error:
+                continue
+
+def batch(stats):
+    """Prints statistics in a key, value format."""
+    s = stats.get()
+    time.sleep(1)
+    s = stats.get()
+    for key in sorted(s.keys()):
+        values = s[key]
+        print '%-42s%10d%10d' % (key, values[0], values[1])
+
+def log(stats):
+    """Prints statistics as reiterating key block, multiple value blocks."""
+    keys = sorted(stats.get().iterkeys())
+    def banner():
+        for k in keys:
+            print '%s' % k,
+        print
+    def statline():
+        s = stats.get()
+        for k in keys:
+            print ' %9d' % s[k][1],
+        print
+    line = 0
+    banner_repeat = 20
+    while True:
+        time.sleep(1)
+        if line % banner_repeat == 0:
+            banner()
+        statline()
+        line += 1
+
+def get_options():
+    """Returns processed program arguments."""
+    description_text = """
+This script displays various statistics about VMs running under KVM.
+The statistics are gathered from the KVM debugfs entries and / or the
+currently available perf traces.
+
+The monitoring takes additional cpu cycles and might affect the VM's
+performance.
+
+Requirements:
+- Access to:
+    /sys/kernel/debug/kvm
+    /sys/kernel/debug/trace/events/*
+    /proc/pid/task
+- /proc/sys/kernel/perf_event_paranoid < 1 if user has no
+  CAP_SYS_ADMIN and perf events are used.
+- CAP_SYS_RESOURCE if the hard limit is not high enough to allow
+  the large number of files that are possibly opened.
+"""
+
+    class PlainHelpFormatter(optparse.IndentedHelpFormatter):
+        def format_description(self, description):
+            if description:
+                return description + "\n"
+            else:
+                return ""
+
+    optparser = optparse.OptionParser(description=description_text,
+                                      formatter=PlainHelpFormatter())
+    optparser.add_option('-1', '--once', '--batch',
+                         action='store_true',
+                         default=False,
+                         dest='once',
+                         help='run in batch mode for one second',
+                         )
+    optparser.add_option('-l', '--log',
+                         action='store_true',
+                         default=False,
+                         dest='log',
+                         help='run in logging mode (like vmstat)',
+                         )
+    optparser.add_option('-t', '--tracepoints',
+                         action='store_true',
+                         default=False,
+                         dest='tracepoints',
+                         help='retrieve statistics from tracepoints',
+                         )
+    optparser.add_option('-d', '--debugfs',
+                         action='store_true',
+                         default=False,
+                         dest='debugfs',
+                         help='retrieve statistics from debugfs',
+                         )
+    optparser.add_option('-f', '--fields',
+                         action='store',
+                         default=None,
+                         dest='fields',
+                         help='fields to display (regex)',
+                         )
+    optparser.add_option('-p', '--pid',
+                        action='store',
+                        default=0,
+                        type=int,
+                        dest='pid',
+                        help='restrict statistics to pid',
+                        )
+    (options, _) = optparser.parse_args(sys.argv)
+    return options
+
+def get_providers(options):
+    """Returns a list of data providers depending on the passed options."""
+    providers = []
+
+    if options.tracepoints:
+        providers.append(TracepointProvider())
+    if options.debugfs:
+        providers.append(DebugfsProvider())
+    if len(providers) == 0:
+        providers.append(TracepointProvider())
+
+    return providers
+
+def check_access(options):
+    """Exits if the current user can't access all needed directories."""
+    if not os.path.exists('/sys/kernel/debug'):
+        sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.')
+        sys.exit(1)
+
+    if not os.path.exists(PATH_DEBUGFS_KVM):
+        sys.stderr.write("Please make sure, that debugfs is mounted and "
+                         "readable by the current user:\n"
+                         "('mount -t debugfs debugfs /sys/kernel/debug')\n"
+                         "Also ensure, that the kvm modules are loaded.\n")
+        sys.exit(1)
+
+    if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints
+                                                     or not options.debugfs):
+        sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
+                         "when using the option -t (default).\n"
+                         "If it is enabled, make {0} readable by the "
+                         "current user.\n"
+                         .format(PATH_DEBUGFS_TRACING))
+        if options.tracepoints:
+            sys.exit(1)
+
+        sys.stderr.write("Falling back to debugfs statistics!\n")
+        options.debugfs = True
+        time.sleep(5)
+
+    return options
+
+def main():
+    options = get_options()
+    options = check_access(options)
+
+    if (options.pid > 0 and
+        not os.path.isdir(os.path.join('/proc/',
+                                       str(options.pid)))):
+        sys.stderr.write('Did you use an (unsupported) tid instead of a pid?\n')
+        sys.exit('Specified pid does not exist.')
+
+    providers = get_providers(options)
+    stats = Stats(providers, options.pid, fields=options.fields)
+
+    if options.log:
+        log(stats)
+    elif not options.once:
+        with Tui(stats) as tui:
+            tui.show_stats()
+    else:
+        batch(stats)
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
new file mode 100644
index 0000000..b92a153
--- /dev/null
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -0,0 +1,63 @@
+kvm_stat(1)
+===========
+
+NAME
+----
+kvm_stat - Report KVM kernel module event counters
+
+SYNOPSIS
+--------
+[verse]
+'kvm_stat' [OPTION]...
+
+DESCRIPTION
+-----------
+kvm_stat prints counts of KVM kernel module trace events.  These events signify
+state transitions such as guest mode entry and exit.
+
+This tool is useful for observing guest behavior from the host perspective.
+Often conclusions about performance or buggy behavior can be drawn from the
+output.
+
+The set of KVM kernel module trace events may be specific to the kernel version
+or architecture.  It is best to check the KVM kernel module source code for the
+meaning of events.
+
+OPTIONS
+-------
+-1::
+--once::
+--batch::
+	run in batch mode for one second
+
+-l::
+--log::
+	run in logging mode (like vmstat)
+
+-t::
+--tracepoints::
+	retrieve statistics from tracepoints
+
+-d::
+--debugfs::
+	retrieve statistics from debugfs
+
+-p<pid>::
+--pid=<pid>::
+	limit statistics to one virtual machine (pid)
+
+-f<fields>::
+--fields=<fields>::
+	fields to display (regex)
+
+-h::
+--help::
+	show help message
+
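+EXAMPLES
+--------
+The invocations below are illustrative only; the exact set of event and
+field names depends on the running kernel and architecture.
+
+  # kvm_stat -1                 # print one batch of counters, then exit
+  # kvm_stat -l -f '.*exit.*'   # log mode, limited to fields matching the regex
+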
+SEE ALSO
+--------
+'perf'(1), 'trace-cmd'(1)
+
+AUTHOR
+------
+Stefan Hajnoczi <stefanha@redhat.com>
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 6765c7e..f094f3c 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -30,6 +30,10 @@
 CFLAGS   += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
 
+# Allow old libelf to be used:
+elfshdr := $(shell echo '\#include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
+CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
+
 AWK = awk
 export srctree OUTPUT CFLAGS ARCH AWK
 include $(srctree)/tools/build/Makefile.include
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 7f3e00a..aa1ff65 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -23,6 +23,11 @@
 #include <linux/list.h>
 #include <linux/hashtable.h>
 
+#ifdef LIBELF_USE_DEPRECATED
+# define elf_getshdrnum    elf_getshnum
+# define elf_getshdrstrndx elf_getshstrndx
+#endif
+
 struct section {
 	struct list_head list;
 	GElf_Shdr sh;
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index ebaf849..9cbddc2 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -103,12 +103,13 @@
 
 	If --branch-stack option is used, following sort keys are also
 	available:
-	dso_from, dso_to, symbol_from, symbol_to, mispredict.
 
 	- dso_from: name of library or module branched from
 	- dso_to: name of library or module branched to
 	- symbol_from: name of function branched from
 	- symbol_to: name of function branched to
+	- srcline_from: source file and line branched from
+	- srcline_to: source file and line branched to
 	- mispredict: "N" for predicted branch, "Y" for mispredicted branch
 	- in_tx: branch in TSX transaction
 	- abort: TSX transaction abort.
@@ -248,7 +249,7 @@
 	Note that when using the --itrace option the synthesized callchain size
 	will override this value if the synthesized callchain size is bigger.
 
-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: 127
 
 -G::
 --inverted::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index a856a10..4fc44c7 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -267,7 +267,7 @@
         Note that when using the --itrace option the synthesized callchain size
         will override this value if the synthesized callchain size is bigger.
 
-        Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+        Default: 127
 
 --ns::
 	Use 9 decimal places when displaying time (i.e. show the nanoseconds)
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 6afe201..1ab0782 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -143,7 +143,8 @@
         Implies '--call-graph dwarf' when --call-graph not present on the
         command line, on systems where DWARF unwinding was built in.
 
-        Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+        Default: /proc/sys/kernel/perf_event_max_stack when present for
+                 live sessions (without --input/-i), 127 otherwise.
 
 --min-stack::
         Set the stack depth limit when parsing the callchain, anything
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 8141583..25c8173 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -324,8 +324,9 @@
 	OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
 		    "Skip symbols that cannot be annotated"),
 	OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		   "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
 		    "Interleave source code with assembly code (default)"),
 	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 632efc6..d75bded 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -119,8 +119,8 @@
 	if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
 		return -1;
 
-	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
-		  buildid_dir, sbuildid);
+	scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
+		  buildid_dir, DSO__NAME_KCORE, sbuildid);
 
 	if (!force &&
 	    !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
@@ -131,8 +131,8 @@
 	if (build_id_cache__kcore_dir(dir, sizeof(dir)))
 		return -1;
 
-	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
-		  buildid_dir, sbuildid, dir);
+	scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
+		  buildid_dir, DSO__NAME_KCORE, sbuildid, dir);
 
 	if (mkdir_p(to_dir, 0755))
 		return -1;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9ce354f..f7645a4 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -812,8 +812,9 @@
 	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
 		   "separator for columns, no spaces will be added between "
 		   "columns '.' is reserved."),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		    "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
 	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
 		     "How to display percentage of filtered entries", parse_filter_percentage),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3679c4..dc3fcb5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -40,6 +40,7 @@
 #include <unistd.h>
 #include <sched.h>
 #include <sys/mman.h>
+#include <asm/bug.h>
 
 
 struct record {
@@ -82,27 +83,87 @@
 	return record__write(rec, event, event->header.size);
 }
 
+static int
+backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+{
+	struct perf_event_header *pheader;
+	u64 evt_head = head;
+	int size = mask + 1;
+
+	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+	pheader = (struct perf_event_header *)(buf + (head & mask));
+	*start = head;
+	while (true) {
+		if (evt_head - head >= (unsigned int)size) {
+			pr_debug("Finished reading backward ring buffer: rewind\n");
+			if (evt_head - head > (unsigned int)size)
+				evt_head -= pheader->size;
+			*end = evt_head;
+			return 0;
+		}
+
+		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+		if (pheader->size == 0) {
+			pr_debug("Finished reading backward ring buffer: get start\n");
+			*end = evt_head;
+			return 0;
+		}
+
+		evt_head += pheader->size;
+		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+	}
+	WARN_ONCE(1, "Shouldn't get here\n");
+	return -1;
+}
+
+static int
+rb_find_range(struct perf_evlist *evlist,
+	      void *data, int mask, u64 head, u64 old,
+	      u64 *start, u64 *end)
+{
+	if (!evlist->backward) {
+		*start = old;
+		*end = head;
+		return 0;
+	}
+
+	return backward_rb_find_range(data, mask, head, start, end);
+}
+
 static int record__mmap_read(struct record *rec, int idx)
 {
 	struct perf_mmap *md = &rec->evlist->mmap[idx];
 	u64 head = perf_mmap__read_head(md);
 	u64 old = md->prev;
+	u64 end = head, start = old;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
 	void *buf;
 	int rc = 0;
 
-	if (old == head)
+	if (rb_find_range(rec->evlist, data, md->mask, head,
+			  old, &start, &end))
+		return -1;
+
+	if (start == end)
 		return 0;
 
 	rec->samples++;
 
-	size = head - old;
+	size = end - start;
+	if (size > (unsigned long)(md->mask) + 1) {
+		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
-	if ((old & md->mask) + size != (head & md->mask)) {
-		buf = &data[old & md->mask];
-		size = md->mask + 1 - (old & md->mask);
-		old += size;
+		md->prev = head;
+		perf_evlist__mmap_consume(rec->evlist, idx);
+		return 0;
+	}
+
+	if ((start & md->mask) + size != (end & md->mask)) {
+		buf = &data[start & md->mask];
+		size = md->mask + 1 - (start & md->mask);
+		start += size;
 
 		if (record__write(rec, buf, size) < 0) {
 			rc = -1;
@@ -110,16 +171,16 @@
 		}
 	}
 
-	buf = &data[old & md->mask];
-	size = head - old;
-	old += size;
+	buf = &data[start & md->mask];
+	size = end - start;
+	start += size;
 
 	if (record__write(rec, buf, size) < 0) {
 		rc = -1;
 		goto out;
 	}
 
-	md->prev = old;
+	md->prev = head;
 	perf_evlist__mmap_consume(rec->evlist, idx);
 out:
 	return rc;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 87d40e3..a87cb33 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -691,7 +691,7 @@
 			.ordered_events	 = true,
 			.ordering_requires_timestamps = true,
 		},
-		.max_stack		 = sysctl_perf_event_max_stack,
+		.max_stack		 = PERF_MAX_STACK_DEPTH,
 		.pretty_printing_style	 = "normal",
 		.socket_filter		 = -1,
 	};
@@ -770,8 +770,9 @@
 		   "columns '.' is reserved."),
 	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
 		    "Only display entries resolved to a symbol"),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		    "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
 		   "list of cpus to profile"),
 	OPT_BOOLEAN('I', "show-info", &report.show_full_info,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index efca816..e3ce2f3 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2010,8 +2010,9 @@
 		   "file", "kallsyms pathname"),
 	OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
 		    "When printing symbols do not display call chain"),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		    "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_CALLBACK('F', "fields", NULL, "str",
 		     "comma separated output fields prepend with 'type:'. "
 		     "Valid types: hw,sw,trace,raw. "
@@ -2067,8 +2068,6 @@
 		NULL
 	};
 
-	scripting_max_stack = sysctl_perf_event_max_stack;
-
 	setup_scripting();
 
 	argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e459b68..ee7ada7 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -66,6 +66,7 @@
 #include <stdlib.h>
 #include <sys/prctl.h>
 #include <locale.h>
+#include <math.h>
 
 #define DEFAULT_SEPARATOR	" "
 #define CNTR_NOT_SUPPORTED	"<not supported>"
@@ -991,12 +992,12 @@
 	const char *fmt;
 
 	if (csv_output) {
-		fmt = sc != 1.0 ?  "%.2f%s" : "%.0f%s";
+		fmt = floor(sc) != sc ?  "%.2f%s" : "%.0f%s";
 	} else {
 		if (big_num)
-			fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
+			fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
 		else
-			fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
+			fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
 	}
 
 	aggr_printout(evsel, id, nr);
@@ -1909,6 +1910,9 @@
 	}
 
 	if (!evsel_list->nr_entries) {
+		if (target__has_cpu(&target))
+			default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+
 		if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
 			return -1;
 		if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
@@ -2000,7 +2004,7 @@
 				    union perf_event *event,
 				    struct perf_session *session)
 {
-	struct stat_round_event *round = &event->stat_round;
+	struct stat_round_event *stat_round = &event->stat_round;
 	struct perf_evsel *counter;
 	struct timespec tsh, *ts = NULL;
 	const char **argv = session->header.env.cmdline_argv;
@@ -2009,12 +2013,12 @@
 	evlist__for_each(evsel_list, counter)
 		perf_stat_process_counter(&stat_config, counter);
 
-	if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
-		update_stats(&walltime_nsecs_stats, round->time);
+	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
+		update_stats(&walltime_nsecs_stats, stat_round->time);
 
-	if (stat_config.interval && round->time) {
-		tsh.tv_sec  = round->time / NSECS_PER_SEC;
-		tsh.tv_nsec = round->time % NSECS_PER_SEC;
+	if (stat_config.interval && stat_round->time) {
+		tsh.tv_sec  = stat_round->time / NSECS_PER_SEC;
+		tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
 		ts = &tsh;
 	}
 
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 40cc9bb..733a554 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1945,8 +1945,9 @@
 	OPT_CALLBACK('p', "process", NULL, "process",
 		      "process selector. Pass a pid or process name.",
 		       parse_process),
-	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
-		    "Look for files with symbols relative to this directory"),
+	OPT_CALLBACK(0, "symfs", NULL, "directory",
+		     "Look for files with symbols relative to this directory",
+		     symbol__config_symfs),
 	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
 		    "min. number of tasks to print"),
 	OPT_BOOLEAN('t', "topology", &tchart.topology,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1793da5..2a6cc25 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -732,7 +732,7 @@
 	if (machine__resolve(machine, &al, sample) < 0)
 		return;
 
-	if (!top->kptr_restrict_warned &&
+	if (!machine->kptr_restrict_warned &&
 	    symbol_conf.kptr_restrict &&
 	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
 		ui__warning(
@@ -743,7 +743,7 @@
 			  " modules" : "");
 		if (use_browser <= 0)
 			sleep(5);
-		top->kptr_restrict_warned = true;
+		machine->kptr_restrict_warned = true;
 	}
 
 	if (al.sym == NULL) {
@@ -759,7 +759,7 @@
 		 * --hide-kernel-symbols, even if the user specifies an
 		 * invalid --vmlinux ;-)
 		 */
-		if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
+		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
 		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
 		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
 			if (symbol_conf.vmlinux_name) {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6e5c325..5c50fe7 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -576,84 +576,54 @@
 	bool	   hexret;
 } syscall_fmts[] = {
 	{ .name	    = "access",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
-			     [1] = SCA_ACCMODE,  /* mode */ }, },
+	  .arg_scnprintf = { [1] = SCA_ACCMODE,  /* mode */ }, },
 	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
 	{ .name	    = "bpf",	    .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
 	{ .name	    = "brk",	    .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
-	{ .name	    = "chdir",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "chmod",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "chroot",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "chdir",	    .errmsg = true, },
+	{ .name	    = "chmod",	    .errmsg = true, },
+	{ .name	    = "chroot",	    .errmsg = true, },
 	{ .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
 	{ .name	    = "clone",	    .errpid = true, },
 	{ .name	    = "close",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
 	{ .name	    = "connect",    .errmsg = true, },
-	{ .name	    = "creat",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "dup",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "dup2",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "dup3",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "creat",	    .errmsg = true, },
+	{ .name	    = "dup",	    .errmsg = true, },
+	{ .name	    = "dup2",	    .errmsg = true, },
+	{ .name	    = "dup3",	    .errmsg = true, },
 	{ .name	    = "epoll_ctl",  .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
 	{ .name	    = "eventfd2",   .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
-	{ .name	    = "faccessat",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "fadvise64",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "fallocate",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "fchdir",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "fchmod",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "faccessat",  .errmsg = true, },
+	{ .name	    = "fadvise64",  .errmsg = true, },
+	{ .name	    = "fallocate",  .errmsg = true, },
+	{ .name	    = "fchdir",	    .errmsg = true, },
+	{ .name	    = "fchmod",	    .errmsg = true, },
 	{ .name	    = "fchmodat",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "fchown",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name	    = "fchown",	    .errmsg = true, },
 	{ .name	    = "fchownat",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
 	{ .name	    = "fcntl",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [1] = SCA_STRARRAY, /* cmd */ },
+	  .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
 	  .arg_parm	 = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
-	{ .name	    = "fdatasync",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "fdatasync",  .errmsg = true, },
 	{ .name	    = "flock",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [1] = SCA_FLOCK, /* cmd */ }, },
-	{ .name	    = "fsetxattr",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat",
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "fstatfs",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "fsync",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "ftruncate", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
+	{ .name	    = "fsetxattr",  .errmsg = true, },
+	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat", },
+	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat", },
+	{ .name	    = "fstatfs",    .errmsg = true, },
+	{ .name	    = "fsync",    .errmsg = true, },
+	{ .name	    = "ftruncate", .errmsg = true, },
 	{ .name	    = "futex",	    .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
 	{ .name	    = "futimesat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "getdents",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "getdents64", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name	    = "getdents",   .errmsg = true, },
+	{ .name	    = "getdents64", .errmsg = true, },
 	{ .name	    = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
 	{ .name	    = "getpid",	    .errpid = true, },
 	{ .name	    = "getpgid",    .errpid = true, },
@@ -661,12 +631,10 @@
 	{ .name	    = "getrandom",  .errmsg = true,
 	  .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
 	{ .name	    = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
-	{ .name	    = "getxattr",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "inotify_add_watch",	    .errmsg = true,
-	  .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "getxattr",   .errmsg = true, },
+	{ .name	    = "inotify_add_watch",	    .errmsg = true, },
 	{ .name	    = "ioctl",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+	  .arg_scnprintf = {
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * FIXME: Make this available to all arches.
@@ -680,41 +648,28 @@
 	{ .name	    = "keyctl",	    .errmsg = true, STRARRAY(0, option, keyctl_options), },
 	{ .name	    = "kill",	    .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
-	{ .name	    = "lchown",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "lgetxattr",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "lchown",    .errmsg = true, },
+	{ .name	    = "lgetxattr",  .errmsg = true, },
 	{ .name	    = "linkat",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
-	{ .name	    = "listxattr",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "llistxattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "lremovexattr",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "listxattr",  .errmsg = true, },
+	{ .name	    = "llistxattr", .errmsg = true, },
+	{ .name	    = "lremovexattr",  .errmsg = true, },
 	{ .name	    = "lseek",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [2] = SCA_STRARRAY, /* whence */ },
+	  .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
 	  .arg_parm	 = { [2] = &strarray__whences, /* whence */ }, },
-	{ .name	    = "lsetxattr",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat",
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "lsxattr",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "lsetxattr",  .errmsg = true, },
+	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat", },
+	{ .name	    = "lsxattr",    .errmsg = true, },
 	{ .name     = "madvise",    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX,	 /* start */
 			     [2] = SCA_MADV_BHV, /* behavior */ }, },
-	{ .name	    = "mkdir",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "mkdir",    .errmsg = true, },
 	{ .name	    = "mkdirat",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "mknod",      .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name	    = "mknod",      .errmsg = true, },
 	{ .name	    = "mknodat",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
 	{ .name	    = "mlock",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
 	{ .name	    = "mlockall",   .errmsg = true,
@@ -722,8 +677,7 @@
 	{ .name	    = "mmap",	    .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
 			     [2] = SCA_MMAP_PROT, /* prot */
-			     [3] = SCA_MMAP_FLAGS, /* flags */
-			     [4] = SCA_FD, 	  /* fd */ }, },
+			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
 	{ .name	    = "mprotect",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
 			     [2] = SCA_MMAP_PROT, /* prot */ }, },
@@ -740,17 +694,14 @@
 	{ .name	    = "name_to_handle_at", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
 	{ .name	    = "newfstatat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
 	{ .name	    = "open",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME,   /* filename */
-			     [1] = SCA_OPEN_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "open_by_handle_at", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "openat",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* filename */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "perf_event_open", .errmsg = true,
 	  .arg_scnprintf = { [2] = SCA_INT, /* cpu */
@@ -760,39 +711,26 @@
 	  .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
 	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
 	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
-	{ .name	    = "pread",	    .errmsg = true, .alias = "pread64",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "preadv",	    .errmsg = true, .alias = "pread",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "pread",	    .errmsg = true, .alias = "pread64", },
+	{ .name	    = "preadv",	    .errmsg = true, .alias = "pread", },
 	{ .name	    = "prlimit64",  .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
-	{ .name	    = "pwrite",	    .errmsg = true, .alias = "pwrite64",
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "pwritev",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "read",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "readlink",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
+	{ .name	    = "pwrite",	    .errmsg = true, .alias = "pwrite64", },
+	{ .name	    = "pwritev",    .errmsg = true, },
+	{ .name	    = "read",	    .errmsg = true, },
+	{ .name	    = "readlink",   .errmsg = true, },
 	{ .name	    = "readlinkat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "readv",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	{ .name	    = "readv",	    .errmsg = true, },
 	{ .name	    = "recvfrom",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "recvmmsg",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "recvmsg",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
-	{ .name	    = "removexattr", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	{ .name	    = "removexattr", .errmsg = true, },
 	{ .name	    = "renameat",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
-	{ .name	    = "rmdir",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "rmdir",    .errmsg = true, },
 	{ .name	    = "rt_sigaction", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
 	{ .name	    = "rt_sigprocmask",  .errmsg = true, STRARRAY(0, how, sighow), },
@@ -807,22 +745,17 @@
 			     [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
 	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
 	{ .name	    = "sendmmsg",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "sendmsg",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "sendto",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
-			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "set_tid_address", .errpid = true, },
 	{ .name	    = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
 	{ .name	    = "setpgid",    .errmsg = true, },
 	{ .name	    = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
-	{ .name	    = "setxattr",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "shutdown",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "setxattr",   .errmsg = true, },
+	{ .name	    = "shutdown",   .errmsg = true, },
 	{ .name	    = "socket",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
 			     [1] = SCA_SK_TYPE, /* type */ },
@@ -831,10 +764,8 @@
 	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
 			     [1] = SCA_SK_TYPE, /* type */ },
 	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
-	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat",
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "statfs",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
+	{ .name	    = "statfs",	    .errmsg = true, },
 	{ .name	    = "swapoff",    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
 	{ .name	    = "swapon",	    .errmsg = true,
@@ -845,29 +776,21 @@
 	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
 	{ .name	    = "tkill",	    .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
-	{ .name	    = "truncate",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
+	{ .name	    = "truncate",   .errmsg = true, },
 	{ .name	    = "uname",	    .errmsg = true, .alias = "newuname", },
 	{ .name	    = "unlinkat",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
-			     [1] = SCA_FILENAME, /* pathname */ }, },
-	{ .name	    = "utime",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	{ .name	    = "utime",  .errmsg = true, },
 	{ .name	    = "utimensat",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
-			     [1] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "utimes",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
-	{ .name	    = "vmsplice",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
+	{ .name	    = "utimes",  .errmsg = true, },
+	{ .name	    = "vmsplice",  .errmsg = true, },
 	{ .name	    = "wait4",	    .errpid = true,
 	  .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
 	{ .name	    = "waitid",	    .errpid = true,
 	  .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
-	{ .name	    = "write",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
-	{ .name	    = "writev",	    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "write",	    .errmsg = true, },
+	{ .name	    = "writev",	    .errmsg = true, },
 };
 
 static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1160,6 +1083,24 @@
 	return trace__process_event(trace, machine, event, sample);
 }
 
+static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
+{
+	struct machine *machine = vmachine;
+
+	if (machine->kptr_restrict_warned)
+		return NULL;
+
+	if (symbol_conf.kptr_restrict) {
+		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
+			   "Check /proc/sys/kernel/kptr_restrict.\n\n"
+			   "Kernel samples will not be resolved.\n");
+		machine->kptr_restrict_warned = true;
+		return NULL;
+	}
+
+	return machine__resolve_kernel_addr(vmachine, addrp, modp);
+}
+
 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
 {
 	int err = symbol__init(NULL);
@@ -1171,7 +1112,7 @@
 	if (trace->host == NULL)
 		return -ENOMEM;
 
-	if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
+	if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
 		return -errno;
 
 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
@@ -1186,7 +1127,7 @@
 static int syscall__set_arg_fmts(struct syscall *sc)
 {
 	struct format_field *field;
-	int idx = 0;
+	int idx = 0, len;
 
 	sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
 	if (sc->arg_scnprintf == NULL)
@@ -1198,12 +1139,31 @@
 	for (field = sc->args; field; field = field->next) {
 		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
 			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
+		else if (strcmp(field->type, "const char *") == 0 &&
+			 (strcmp(field->name, "filename") == 0 ||
+			  strcmp(field->name, "path") == 0 ||
+			  strcmp(field->name, "pathname") == 0))
+			sc->arg_scnprintf[idx] = SCA_FILENAME;
 		else if (field->flags & FIELD_IS_POINTER)
 			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
 		else if (strcmp(field->type, "pid_t") == 0)
 			sc->arg_scnprintf[idx] = SCA_PID;
 		else if (strcmp(field->type, "umode_t") == 0)
 			sc->arg_scnprintf[idx] = SCA_MODE_T;
+		else if ((strcmp(field->type, "int") == 0 ||
+			  strcmp(field->type, "unsigned int") == 0 ||
+			  strcmp(field->type, "long") == 0) &&
+			 (len = strlen(field->name)) >= 2 &&
+			 strcmp(field->name + len - 2, "fd") == 0) {
+			/*
+			 * /sys/kernel/tracing/events/syscalls/sys_enter*
+			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
+			 * 65 int
+			 * 23 unsigned int
+			 * 7 unsigned long
+			 */
+			sc->arg_scnprintf[idx] = SCA_FD;
+		}
 		++idx;
 	}
 
@@ -1534,7 +1494,7 @@
 	if (sc->is_exit) {
 		if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
-			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
+			fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
 		}
 	} else {
 		ttrace->entry_pending = true;
@@ -2887,12 +2847,12 @@
 		mmap_pages_user_set = false;
 
 	if (trace.max_stack == UINT_MAX) {
-		trace.max_stack = sysctl_perf_event_max_stack;
+		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
 		max_stack_user_set = false;
 	}
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
-	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
+	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
 #endif
 
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 7970008..15982ce 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -549,6 +549,9 @@
 	if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
 		sysctl_perf_event_max_stack = value;
 
+	if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
+		sysctl_perf_event_max_contexts_per_stack = value;
+
 	cmd = extract_argv0_path(argv[0]);
 	if (!cmd)
 		cmd = "perf-help";
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 4db73d5..7e5a1e8 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -354,9 +354,6 @@
 	.scnprintf = nop__scnprintf,
 };
 
-/*
- * Must be sorted by name!
- */
 static struct ins instructions[] = {
 	{ .name = "add",   .ops  = &mov_ops, },
 	{ .name = "addl",  .ops  = &mov_ops, },
@@ -372,8 +369,8 @@
 	{ .name = "bgt",   .ops  = &jump_ops, },
 	{ .name = "bhi",   .ops  = &jump_ops, },
 	{ .name = "bl",    .ops  = &call_ops, },
-	{ .name = "blt",   .ops  = &jump_ops, },
 	{ .name = "bls",   .ops  = &jump_ops, },
+	{ .name = "blt",   .ops  = &jump_ops, },
 	{ .name = "blx",   .ops  = &call_ops, },
 	{ .name = "bne",   .ops  = &jump_ops, },
 #endif
@@ -449,18 +446,39 @@
 	{ .name = "xbeginq", .ops  = &jump_ops, },
 };
 
-static int ins__cmp(const void *name, const void *insp)
+static int ins__key_cmp(const void *name, const void *insp)
 {
 	const struct ins *ins = insp;
 
 	return strcmp(name, ins->name);
 }
 
-static struct ins *ins__find(const char *name)
+static int ins__cmp(const void *a, const void *b)
+{
+	const struct ins *ia = a;
+	const struct ins *ib = b;
+
+	return strcmp(ia->name, ib->name);
+}
+
+static void ins__sort(void)
 {
 	const int nmemb = ARRAY_SIZE(instructions);
 
-	return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
+	qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+static struct ins *ins__find(const char *name)
+{
+	const int nmemb = ARRAY_SIZE(instructions);
+	static bool sorted;
+
+	if (!sorted) {
+		ins__sort();
+		sorted = true;
+	}
+
+	return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
 }
 
 int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
@@ -1122,7 +1140,7 @@
 	} else if (dso__is_kcore(dso)) {
 		goto fallback;
 	} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
-		   strstr(command, "[kernel.kallsyms]") ||
+		   strstr(command, DSO__NAME_KALLSYMS) ||
 		   access(symfs_filename, R_OK)) {
 		free(filename);
 fallback:
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff425e..67e5966 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -256,7 +256,7 @@
 		size_t name_len;
 		bool in_kernel = false;
 
-		if (!pos->hit)
+		if (!pos->hit && !dso__is_vdso(pos))
 			continue;
 
 		if (dso__is_vdso(pos)) {
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 8d96c80..c9a6dc1 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -298,8 +298,7 @@
 	 */
 	callchain_param.order = ORDER_CALLER;
 	err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
-					sample, NULL, NULL,
-					sysctl_perf_event_max_stack);
+					sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
 	if (err) {
 		callchain_param.order = saved_order;
 		return NULL;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 3357479..5d286f5 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,6 +7,7 @@
 #include "auxtrace.h"
 #include "util.h"
 #include "debug.h"
+#include "vdso.h"
 
 char dso__symtab_origin(const struct dso *dso)
 {
@@ -62,9 +63,7 @@
 		}
 		break;
 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
-		/* skip the locally configured cache if a symfs is given */
-		if (symbol_conf.symfs[0] ||
-		    (dso__build_id_filename(dso, filename, size) == NULL))
+		if (dso__build_id_filename(dso, filename, size) == NULL)
 			ret = -1;
 		break;
 
@@ -1169,7 +1168,7 @@
 	struct dso *pos;
 
 	list_for_each_entry(pos, head, node) {
-		if (with_hits && !pos->hit)
+		if (with_hits && !pos->hit && !dso__is_vdso(pos))
 			continue;
 		if (pos->has_build_id) {
 			have_build_id = true;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c4bfe11..e82ba90 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -44,6 +44,7 @@
 	perf_evlist__set_maps(evlist, cpus, threads);
 	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
+	evlist->backward = false;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -679,6 +680,33 @@
 	return NULL;
 }
 
+static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+{
+	int i;
+
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		int fd = evlist->mmap[i].fd;
+		int err;
+
+		if (fd < 0)
+			continue;
+		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int perf_evlist__pause(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, true);
+}
+
+int perf_evlist__resume(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, false);
+}
+
 /* When check_messup is true, 'end' must point to a good entry */
 static union perf_event *
 perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
@@ -881,6 +909,7 @@
 	if (evlist->mmap[idx].base != NULL) {
 		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].fd = -1;
 		atomic_set(&evlist->mmap[idx].refcnt, 0);
 	}
 	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
@@ -901,10 +930,14 @@
 
 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
+	int i;
+
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__empty(evlist->cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	for (i = 0; i < evlist->nr_mmaps; i++)
+		evlist->mmap[i].fd = -1;
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
@@ -941,6 +974,7 @@
 		evlist->mmap[idx].base = NULL;
 		return -1;
 	}
+	evlist->mmap[idx].fd = fd;
 
 	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
 				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 85d1b598..d740fb8 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -28,6 +28,7 @@
 struct perf_mmap {
 	void		 *base;
 	int		 mask;
+	int		 fd;
 	atomic_t	 refcnt;
 	u64		 prev;
 	struct auxtrace_mmap auxtrace_mmap;
@@ -43,6 +44,7 @@
 	bool		 overwrite;
 	bool		 enabled;
 	bool		 has_user_cpus;
+	bool		 backward;
 	size_t		 mmap_len;
 	int		 id_pos;
 	int		 is_pos;
@@ -135,6 +137,8 @@
 
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
+int perf_evlist__pause(struct perf_evlist *evlist);
+int perf_evlist__resume(struct perf_evlist *evlist);
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 52c7d88..5d7037e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -37,6 +37,7 @@
 	bool clockid;
 	bool clockid_wrong;
 	bool lbr_flags;
+	bool write_backward;
 } perf_missing_features;
 
 static clockid_t clockid;
@@ -1376,6 +1377,8 @@
 	if (perf_missing_features.lbr_flags)
 		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
 				     PERF_SAMPLE_BRANCH_NO_CYCLES);
+	if (perf_missing_features.write_backward)
+		evsel->attr.write_backward = false;
 retry_sample_id:
 	if (perf_missing_features.sample_id_all)
 		evsel->attr.sample_id_all = 0;
@@ -1438,6 +1441,12 @@
 				err = -EINVAL;
 				goto out_close;
 			}
+
+			if (evsel->overwrite &&
+			    perf_missing_features.write_backward) {
+				err = -EINVAL;
+				goto out_close;
+			}
 		}
 	}
 
@@ -1500,6 +1509,10 @@
 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
 		perf_missing_features.lbr_flags = true;
 		goto fallback_missing_features;
+	} else if (!perf_missing_features.write_backward &&
+			evsel->attr.write_backward) {
+		perf_missing_features.write_backward = true;
+		goto fallback_missing_features;
 	}
 
 out_close:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 8a644fe..c1f1015 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -112,6 +112,7 @@
 	bool			tracking;
 	bool			per_pkg;
 	bool			precise_max;
+	bool			overwrite;
 	/* parse modifier helper */
 	int			exclude_GH;
 	int			nr_members;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index cfab531..d1f19e0 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -117,6 +117,13 @@
 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
 		}
+
+		if (h->branch_info->srcline_from)
+			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
+					strlen(h->branch_info->srcline_from));
+		if (h->branch_info->srcline_to)
+			hists__new_col_len(hists, HISTC_SRCLINE_TO,
+					strlen(h->branch_info->srcline_to));
 	}
 
 	if (h->mem_info) {
@@ -1042,6 +1049,8 @@
 	if (he->branch_info) {
 		map__zput(he->branch_info->from.map);
 		map__zput(he->branch_info->to.map);
+		free_srcline(he->branch_info->srcline_from);
+		free_srcline(he->branch_info->srcline_to);
 		zfree(&he->branch_info);
 	}
 
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 0f84bfb..7b54ccf 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -52,6 +52,8 @@
 	HISTC_MEM_IADDR_SYMBOL,
 	HISTC_TRANSACTION,
 	HISTC_CYCLES,
+	HISTC_SRCLINE_FROM,
+	HISTC_SRCLINE_TO,
 	HISTC_TRACE,
 	HISTC_NR_COLS, /* Last entry */
 };
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f9644f7..b177218 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -43,6 +43,7 @@
 
 	machine->symbol_filter = NULL;
 	machine->id_hdr_size = 0;
+	machine->kptr_restrict_warned = false;
 	machine->comm_exec = false;
 	machine->kernel_start = 0;
 
@@ -709,7 +710,7 @@
 	if (machine__is_host(machine)) {
 		vmlinux_name = symbol_conf.vmlinux_name;
 		if (!vmlinux_name)
-			vmlinux_name = "[kernel.kallsyms]";
+			vmlinux_name = DSO__NAME_KALLSYMS;
 
 		kernel = machine__findnew_kernel(machine, vmlinux_name,
 						 "[kernel]", DSO_TYPE_KERNEL);
@@ -1135,10 +1136,10 @@
 {
 	struct dso *kernel = machine__get_kernel(machine);
 	const char *name;
-	u64 addr = machine__get_running_kernel_start(machine, &name);
+	u64 addr;
 	int ret;
 
-	if (!addr || kernel == NULL)
+	if (kernel == NULL)
 		return -1;
 
 	ret = __machine__create_kernel_maps(machine, kernel);
@@ -1160,8 +1161,9 @@
 	 */
 	map_groups__fixup_end(&machine->kmaps);
 
-	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
-					     addr)) {
+	addr = machine__get_running_kernel_start(machine, &name);
+	if (!addr) {
+	} else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
 		machine__destroy_kernel_maps(machine);
 		return -1;
 	}
@@ -1769,11 +1771,6 @@
 		 */
 		int mix_chain_nr = i + 1 + lbr_nr + 1;
 
-		if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
-			pr_warning("corrupted callchain. skipping...\n");
-			return 0;
-		}
-
 		for (j = 0; j < mix_chain_nr; j++) {
 			if (callchain_param.order == ORDER_CALLEE) {
 				if (j < i + 1)
@@ -1811,9 +1808,9 @@
 {
 	struct branch_stack *branch = sample->branch_stack;
 	struct ip_callchain *chain = sample->callchain;
-	int chain_nr = min(max_stack, (int)chain->nr);
+	int chain_nr = chain->nr;
 	u8 cpumode = PERF_RECORD_MISC_USER;
-	int i, j, err;
+	int i, j, err, nr_entries;
 	int skip_idx = -1;
 	int first_call = 0;
 
@@ -1828,8 +1825,7 @@
 	 * Based on DWARF debug information, some architectures skip
 	 * a callchain entry saved by the kernel.
 	 */
-	if (chain->nr < sysctl_perf_event_max_stack)
-		skip_idx = arch_skip_callchain_idx(thread, chain);
+	skip_idx = arch_skip_callchain_idx(thread, chain);
 
 	/*
 	 * Add branches to call stack for easier browsing. This gives
@@ -1889,12 +1885,8 @@
 	}
 
 check_calls:
-	if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
-		pr_warning("corrupted callchain. skipping...\n");
-		return 0;
-	}
-
-	for (i = first_call; i < chain_nr; i++) {
+	for (i = first_call, nr_entries = 0;
+	     i < chain_nr && nr_entries < max_stack; i++) {
 		u64 ip;
 
 		if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1900,9 @@
 #endif
 		ip = chain->ips[j];
 
+		if (ip < PERF_CONTEXT_MAX)
+			++nr_entries;
+
 		err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
 
 		if (err)
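
For reference, a minimal standalone sketch of the accounting introduced above (toy constants and array; the PERF_CONTEXT_* values mirror the perf UAPI markers): only real addresses below PERF_CONTEXT_MAX consume the max_stack budget, while context markers are walked but not counted.

#include <stdint.h>
#include <stdio.h>

#define PERF_CONTEXT_MAX  ((uint64_t)-4095)	/* anything >= this is a marker */
#define PERF_CONTEXT_USER ((uint64_t)-512)	/* example context marker */

/* Count how many callchain entries are real addresses, stopping once
 * max_stack of them have been seen; context markers do not count. */
static int count_real_entries(const uint64_t *ips, int nr, int max_stack)
{
	int i, nr_entries = 0;

	for (i = 0; i < nr && nr_entries < max_stack; i++)
		if (ips[i] < PERF_CONTEXT_MAX)
			nr_entries++;

	return nr_entries;
}

int main(void)
{
	uint64_t ips[] = { PERF_CONTEXT_USER, 0x1000, 0x2000, 0x3000 };

	printf("%d real entries\n", count_real_entries(ips, 4, 127)); /* 3 */
	return 0;
}
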
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 83f4679..41ac9cf 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -28,6 +28,7 @@
 	pid_t		  pid;
 	u16		  id_hdr_size;
 	bool		  comm_exec;
+	bool		  kptr_restrict_warned;
 	char		  *root_dir;
 	struct rb_root	  threads;
 	pthread_rwlock_t  threads_lock;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 62c7f69..5d1eb1c 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -264,8 +264,7 @@
 		goto exit;
 
 	if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
-				      sample, NULL, NULL,
-				      sysctl_perf_event_max_stack) != 0) {
+				      sample, NULL, NULL, scripting_max_stack) != 0) {
 		pr_err("Failed to resolve callchain. Skipping\n");
 		goto exit;
 	}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 20e69ed..c4e9bd7 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -353,6 +353,88 @@
 	.se_width_idx	= HISTC_SRCLINE,
 };
 
+/* --sort srcline_from */
+
+static int64_t
+sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->branch_info->srcline_from) {
+		struct map *map = left->branch_info->from.map;
+		if (!map)
+			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
+		else
+			left->branch_info->srcline_from = get_srcline(map->dso,
+					   map__rip_2objdump(map,
+							     left->branch_info->from.al_addr),
+							 left->branch_info->from.sym, true);
+	}
+	if (!right->branch_info->srcline_from) {
+		struct map *map = right->branch_info->from.map;
+		if (!map)
+			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
+		else
+			right->branch_info->srcline_from = get_srcline(map->dso,
+					     map__rip_2objdump(map,
+							       right->branch_info->from.al_addr),
+						     right->branch_info->from.sym, true);
+	}
+	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
+}
+
+static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
+}
+
+struct sort_entry sort_srcline_from = {
+	.se_header	= "From Source:Line",
+	.se_cmp		= sort__srcline_from_cmp,
+	.se_snprintf	= hist_entry__srcline_from_snprintf,
+	.se_width_idx	= HISTC_SRCLINE_FROM,
+};
+
+/* --sort srcline_to */
+
+static int64_t
+sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->branch_info->srcline_to) {
+		struct map *map = left->branch_info->to.map;
+		if (!map)
+			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
+		else
+			left->branch_info->srcline_to = get_srcline(map->dso,
+					   map__rip_2objdump(map,
+							     left->branch_info->to.al_addr),
+							 left->branch_info->from.sym, true);
+	}
+	if (!right->branch_info->srcline_to) {
+		struct map *map = right->branch_info->to.map;
+		if (!map)
+			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
+		else
+			right->branch_info->srcline_to = get_srcline(map->dso,
+					     map__rip_2objdump(map,
+							       right->branch_info->to.al_addr),
+						     right->branch_info->to.sym, true);
+	}
+	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
+}
+
+static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
+}
+
+struct sort_entry sort_srcline_to = {
+	.se_header	= "To Source:Line",
+	.se_cmp		= sort__srcline_to_cmp,
+	.se_snprintf	= hist_entry__srcline_to_snprintf,
+	.se_width_idx	= HISTC_SRCLINE_TO,
+};
+
 /* --sort srcfile */
 
 static char no_srcfile[1];
@@ -1347,6 +1429,8 @@
 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
 	DIM(SORT_ABORT, "abort", sort_abort),
 	DIM(SORT_CYCLES, "cycles", sort_cycles),
+	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
+	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
 };
 
 #undef DIM
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 42927f4..ebb59ca 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -215,6 +215,8 @@
 	SORT_ABORT,
 	SORT_IN_TX,
 	SORT_CYCLES,
+	SORT_SRCLINE_FROM,
+	SORT_SRCLINE_TO,
 
 	/* memory mode specific sort keys */
 	__SORT_MEMORY_MODE,
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index fdb7196..aa9efe0 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -94,7 +94,8 @@
 {
 	int ctx = evsel_context(counter);
 
-	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
 		update_stats(&runtime_nsecs_stats[cpu], count[0]);
 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -188,7 +189,7 @@
 
 	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
 
-	out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
+	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
 }
 
 static void print_branch_misses(int cpu,
@@ -444,7 +445,8 @@
 			ratio = total / avg;
 
 		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
+	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
+		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
 		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
 			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
 				     avg / ratio);
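
The metric the hunk above extends to cpu-clock is easy to show with toy numbers (illustrative values only, not from a real run): "CPUs utilized" is the accumulated clock-event nanoseconds divided by wall-clock nanoseconds.

#include <stdio.h>

int main(void)
{
	/* Hypothetical counts: 4.0s of cpu-clock (or task-clock) time
	 * accumulated while 1.0s of wall-clock time elapsed. */
	double clock_nsecs    = 4.0e9;
	double walltime_nsecs = 1.0e9;

	printf("%8.3f CPUs utilized\n", clock_nsecs / walltime_nsecs); /* 4.000 */
	return 0;
}
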
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7fb3330..20f9cb3 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1662,8 +1662,8 @@
 
 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
 
-	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
-		  sbuild_id);
+	scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
+		  DSO__NAME_KCORE, sbuild_id);
 
 	/* Use /proc/kallsyms if possible */
 	if (is_host) {
@@ -1699,8 +1699,8 @@
 	if (!find_matching_kcore(map, path, sizeof(path)))
 		return strdup(path);
 
-	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
-		  buildid_dir, sbuild_id);
+	scnprintf(path, sizeof(path), "%s/%s/%s",
+		  buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
 
 	if (access(path, F_OK)) {
 		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@
 
 	if (err > 0 && !dso__is_kcore(dso)) {
 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
-		dso__set_long_name(dso, "[kernel.kallsyms]", false);
+		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
 		map__fixup_start(map);
 		map__fixup_end(map);
 	}
@@ -2033,3 +2033,26 @@
 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
 	symbol_conf.initialized = false;
 }
+
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+			 const char *dir, int unset __maybe_unused)
+{
+	char *bf = NULL;
+	int ret;
+
+	symbol_conf.symfs = strdup(dir);
+	if (symbol_conf.symfs == NULL)
+		return -ENOMEM;
+
+	/* skip the locally configured cache if a symfs is given, and
+	 * config buildid dir to symfs/.debug
+	 */
+	ret = asprintf(&bf, "%s/%s", dir, ".debug");
+	if (ret < 0)
+		return -ENOMEM;
+
+	set_buildid_dir(bf);
+
+	free(bf);
+	return 0;
+}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 2b5e4ed..b10d558 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -44,6 +44,9 @@
 #define DMGL_ANSI        (1 << 1)       /* Include const, volatile, etc */
 #endif
 
+#define DSO__NAME_KALLSYMS	"[kernel.kallsyms]"
+#define DSO__NAME_KCORE		"[kernel.kcore]"
+
 /** struct symbol - symtab entry
  *
  * @ignore - resolvable but tools ignore it (e.g. idle routines)
@@ -183,6 +186,8 @@
 	struct addr_map_symbol from;
 	struct addr_map_symbol to;
 	struct branch_flags flags;
+	char			*srcline_from;
+	char			*srcline_to;
 };
 
 struct mem_info {
@@ -287,6 +292,8 @@
 bool symbol__restricted_filename(const char *filename,
 				 const char *restricted_filename);
 bool symbol__is_idle(struct symbol *sym);
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+			 const char *dir, int unset __maybe_unused);
 
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 		  struct symsrc *runtime_ss, symbol_filter_t filter,
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index f92c37a..b2940c8 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -27,7 +27,6 @@
 	int		   max_stack;
 	bool		   hide_kernel_symbols, hide_user_symbols, zero;
 	bool		   use_tui, use_stdio;
-	bool		   kptr_restrict_warned;
 	bool		   vmlinux_warned;
 	bool		   dump_symtab;
 	struct hist_entry  *sym_filter_entry;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index eab077a..23504ad 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -33,7 +33,8 @@
 unsigned int page_size;
 int cacheline_size;
 
-unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
 
 bool test_attr__enabled;
 
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 7651633..1e8c316 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -261,7 +261,8 @@
 
 extern unsigned int page_size;
 extern int cacheline_size;
-extern unsigned int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
 
 struct parse_tag {
 	char tag;
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index a34bfd0..7859856 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -7,6 +7,7 @@
 ldflags-y += --wrap=iounmap
 ldflags-y += --wrap=memunmap
 ldflags-y += --wrap=__devm_request_region
+ldflags-y += --wrap=__devm_release_region
 ldflags-y += --wrap=__request_region
 ldflags-y += --wrap=__release_region
 ldflags-y += --wrap=devm_memremap_pages
@@ -15,6 +16,7 @@
 DRIVERS := ../../../drivers
 NVDIMM_SRC := $(DRIVERS)/nvdimm
 ACPI_SRC := $(DRIVERS)/acpi
+DAX_SRC := $(DRIVERS)/dax
 
 obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
 obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
@@ -22,6 +24,8 @@
 obj-$(CONFIG_ND_BLK) += nd_blk.o
 obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
 obj-$(CONFIG_ACPI_NFIT) += nfit.o
+obj-$(CONFIG_DEV_DAX) += dax.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
 
 nfit-y := $(ACPI_SRC)/nfit.o
 nfit-y += config_check.o
@@ -38,6 +42,12 @@
 nd_e820-y := $(NVDIMM_SRC)/e820.o
 nd_e820-y += config_check.o
 
+dax-y := $(DAX_SRC)/dax.o
+dax-y += config_check.o
+
+dax_pmem-y := $(DAX_SRC)/pmem.o
+dax_pmem-y += config_check.o
+
 libnvdimm-y := $(NVDIMM_SRC)/core.o
 libnvdimm-y += $(NVDIMM_SRC)/bus.o
 libnvdimm-y += $(NVDIMM_SRC)/dimm_devs.o
@@ -49,6 +59,7 @@
 libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o
 libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
 libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
+libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
 libnvdimm-y += config_check.o
 
 obj-m += test/
diff --git a/tools/testing/nvdimm/config_check.c b/tools/testing/nvdimm/config_check.c
index f2c7615..adf18bf 100644
--- a/tools/testing/nvdimm/config_check.c
+++ b/tools/testing/nvdimm/config_check.c
@@ -12,4 +12,6 @@
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
 	BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
+	BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
+	BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX_PMEM));
 }
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 0c1a7e6..c842095 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -239,13 +239,11 @@
 }
 EXPORT_SYMBOL(__wrap___devm_request_region);
 
-void __wrap___release_region(struct resource *parent, resource_size_t start,
-				resource_size_t n)
+static bool nfit_test_release_region(struct resource *parent,
+		resource_size_t start, resource_size_t n)
 {
-	struct nfit_test_resource *nfit_res;
-
 	if (parent == &iomem_resource) {
-		nfit_res = get_nfit_res(start);
+		struct nfit_test_resource *nfit_res = get_nfit_res(start);
 		if (nfit_res) {
 			struct resource *res = nfit_res->res + 1;
 
@@ -254,11 +252,26 @@
 						__func__, start, n, res);
 			else
 				memset(res, 0, sizeof(*res));
-			return;
+			return true;
 		}
 	}
-	__release_region(parent, start, n);
+	return false;
+}
+
+void __wrap___release_region(struct resource *parent, resource_size_t start,
+		resource_size_t n)
+{
+	if (!nfit_test_release_region(parent, start, n))
+		__release_region(parent, start, n);
 }
 EXPORT_SYMBOL(__wrap___release_region);
 
+void __wrap___devm_release_region(struct device *dev, struct resource *parent,
+		resource_size_t start, resource_size_t n)
+{
+	if (!nfit_test_release_region(parent, start, n))
+		__devm_release_region(dev, parent, start, n);
+}
+EXPORT_SYMBOL(__wrap___devm_release_region);
+
 MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 3187322..c919866 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -330,12 +330,49 @@
 	return 0;
 }
 
+static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
+{
+	static const struct nd_smart_payload smart_data = {
+		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
+			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
+			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
+		.health = ND_SMART_NON_CRITICAL_HEALTH,
+		.temperature = 23 * 16,
+		.spares = 75,
+		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
+		.life_used = 5,
+		.shutdown_state = 0,
+		.vendor_size = 0,
+	};
+
+	if (buf_len < sizeof(*smart))
+		return -EINVAL;
+	memcpy(smart->data, &smart_data, sizeof(smart_data));
+	return 0;
+}
+
+static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
+		unsigned int buf_len)
+{
+	static const struct nd_smart_threshold_payload smart_t_data = {
+		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
+		.temperature = 40 * 16,
+		.spares = 5,
+	};
+
+	if (buf_len < sizeof(*smart_t))
+		return -EINVAL;
+	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
+	return 0;
+}
+
 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 		unsigned int buf_len, int *cmd_rc)
 {
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
+	unsigned int func = cmd;
 	int i, rc = 0, __cmd_rc;
 
 	if (!cmd_rc)
@@ -344,8 +381,23 @@
 
 	if (nvdimm) {
 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
 
-		if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
+		if (!nfit_mem)
+			return -ENOTTY;
+
+		if (cmd == ND_CMD_CALL) {
+			struct nd_cmd_pkg *call_pkg = buf;
+
+			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
+			buf = (void *) call_pkg->nd_payload;
+			func = call_pkg->nd_command;
+			if (call_pkg->nd_family != nfit_mem->family)
+				return -ENOTTY;
+		}
+
+		if (!test_bit(cmd, &cmd_mask)
+				|| !test_bit(func, &nfit_mem->dsm_mask))
 			return -ENOTTY;
 
 		/* lookup label space for the given dimm */
@@ -356,7 +408,7 @@
 		if (i >= ARRAY_SIZE(handle))
 			return -ENXIO;
 
-		switch (cmd) {
+		switch (func) {
 		case ND_CMD_GET_CONFIG_SIZE:
 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
 			break;
@@ -368,16 +420,22 @@
 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
 				t->label[i]);
 			break;
+		case ND_CMD_SMART:
+			rc = nfit_test_cmd_smart(buf, buf_len);
+			break;
+		case ND_CMD_SMART_THRESHOLD:
+			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
+			break;
 		default:
 			return -ENOTTY;
 		}
 	} else {
 		struct ars_state *ars_state = &t->ars_state;
 
-		if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
+		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
 			return -ENOTTY;
 
-		switch (cmd) {
+		switch (func) {
 		case ND_CMD_ARS_CAP:
 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
 			break;
@@ -1251,13 +1309,15 @@
 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
 
 	acpi_desc = &t->acpi_desc;
-	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
-	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
-	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
-	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
-	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
-	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
-	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_dsm_force_en);
+	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
+	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
+	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
+	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
+	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
 }
 
 static void nfit_test1_setup(struct nfit_test *t)
@@ -1315,10 +1375,10 @@
 	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
 
 	acpi_desc = &t->acpi_desc;
-	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
-	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
-	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
-	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_dsm_force_en);
+	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
+	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
 }
 
 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index da48812..4c6a0bf 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -88,7 +88,12 @@
 
 # Parameters
 DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' ' | head -1`
-TRACING_DIR=$DEBUGFS_DIR/tracing
+if [ -z "$DEBUGFS_DIR" ]; then
+    TRACING_DIR=`grep tracefs /proc/mounts | cut -f2 -d' ' | head -1`
+else
+    TRACING_DIR=$DEBUGFS_DIR/tracing
+fi
+
 TOP_DIR=`absdir $0`
 TEST_DIR=$TOP_DIR/test.d
 TEST_CASES=`find_testcases $TEST_DIR`
@@ -102,7 +107,7 @@
 [ $DEBUG -ne 0 ] && set -x
 
 # Verify parameters
-if [ -z "$DEBUGFS_DIR" -o ! -d "$TRACING_DIR" ]; then
+if [ -z "$TRACING_DIR" -o ! -d "$TRACING_DIR" ]; then
   errexit "No ftrace directory found"
 fi
 
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc
new file mode 100644
index 0000000..d4ab27b
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc
@@ -0,0 +1,72 @@
+#!/bin/sh
+# description: event tracing - restricts events based on pid
+
+do_reset() {
+    echo > set_event
+    echo > set_event_pid
+    echo 0 > options/event-fork
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+yield() {
+    ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f set_event_pid ]; then
+    echo "event pid filtering is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo 1 > events/sched/sched_switch/enable
+
+yield
+
+count=`cat trace | grep sched_switch | wc -l`
+if [ $count -eq 0 ]; then
+    fail "sched_switch events are not recorded"
+fi
+
+do_reset
+
+read mypid rest < /proc/self/stat
+
+echo $mypid > set_event_pid
+echo 'sched:sched_switch' > set_event
+
+yield
+
+count=`cat trace | grep sched_switch | grep -v "pid=$mypid" | wc -l`
+if [ $count -ne 0 ]; then
+    fail "sched_switch events from other tasks are recorded"
+fi
+
+do_reset
+
+echo $mypid > set_event_pid
+echo 1 > options/event-fork
+echo 1 > events/sched/sched_switch/enable
+
+yield
+
+count=`cat trace | grep sched_switch | grep -v "pid=$mypid" | wc -l`
+if [ $count -eq 0 ]; then
+    fail "sched_switch events from other tasks are not recorded"
+fi
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index 5f2abd0..4c5a061 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -92,28 +92,23 @@
 }
 
 instance_slam &
-x=`jobs -l`
-p1=`echo $x | cut -d' ' -f2`
+p1=$!
 echo $p1
 
 instance_slam &
-x=`jobs -l | tail -1`
-p2=`echo $x | cut -d' ' -f2`
+p2=$!
 echo $p2
 
 instance_slam &
-x=`jobs -l | tail -1`
-p3=`echo $x | cut -d' ' -f2`
+p3=$!
 echo $p3
 
 instance_slam &
-x=`jobs -l | tail -1`
-p4=`echo $x | cut -d' ' -f2`
+p4=$!
 echo $p4
 
 instance_slam &
-x=`jobs -l | tail -1`
-p5=`echo $x | cut -d' ' -f2`
+p5=$!
 echo $p5
 
 ls -lR >/dev/null
diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh
index bdaf37e..7868c10 100755
--- a/tools/testing/selftests/intel_pstate/run.sh
+++ b/tools/testing/selftests/intel_pstate/run.sh
@@ -32,7 +32,7 @@
 max_cpus=$(($(nproc)-1))
 
 # compile programs
-gcc -o aperf aperf.c -lm
+gcc aperf.c -Wall -D_GNU_SOURCE -o aperf  -lm
 [ $? -ne 0 ] && echo "Problem compiling aperf.c." && exit 1
 gcc -o msr msr.c -lm
 [ $? -ne 0 ] && echo "Problem compiling msr.c." && exit 1
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 96ba386..4a82174 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -111,9 +111,9 @@
 	memset(&attr, 0, sizeof(attr));
 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 	attr.insn_cnt = ARRAY_SIZE(prog);
-	attr.insns = (uint64_t)prog;
-	attr.license = (uint64_t)bpf_license;
-	attr.log_buf = (uint64_t)bpf_log_buf;
+	attr.insns = (unsigned long) &prog;
+	attr.license = (unsigned long) &bpf_license;
+	attr.log_buf = (unsigned long) &bpf_log_buf;
 	attr.log_size = sizeof(bpf_log_buf);
 	attr.log_level = 1;
 	attr.kern_version = 0;
@@ -351,8 +351,8 @@
 	memset(&eprog, 0, sizeof(eprog));
 	eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 	eprog.insn_cnt = ARRAY_SIZE(ecode);
-	eprog.insns = (uint64_t)ecode;
-	eprog.license = (uint64_t)bpf_license;
+	eprog.insns = (unsigned long) &ecode;
+	eprog.license = (unsigned long) &bpf_license;
 	eprog.kern_version = 0;
 
 	memset(&cprog, 0, sizeof(cprog));
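
A small illustration of the cast idiom the hunks above switch to (hypothetical struct and helper, not part of the selftest): the BPF attribute fields are 64-bit in the ABI, so pointers are converted through unsigned long, the pointer-sized integer type on both 32-bit and 64-bit userspace, which avoids cast warnings.

#include <stdint.h>

/* Toy stand-in for a kernel ABI structure with a 64-bit pointer field. */
struct toy_attr {
	uint64_t insns;
};

static void set_insns(struct toy_attr *attr, const void *prog)
{
	/* pointer -> unsigned long (pointer-sized) -> u64: warning-free on
	 * 32-bit builds, and a plain widening on 64-bit builds. */
	attr->insns = (unsigned long)prog;
}

int main(void)
{
	struct toy_attr attr;
	static const unsigned char prog[] = { 0x00 };

	set_insns(&attr, prog);
	return attr.insns == (unsigned long)prog ? 0 : 1;
}
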
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 7947e56..2e58549 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1234,6 +1234,10 @@
 # define ARCH_REGS	struct user_pt_regs
 # define SYSCALL_NUM	regs[8]
 # define SYSCALL_RET	regs[0]
+#elif defined(__hppa__)
+# define ARCH_REGS	struct user_regs_struct
+# define SYSCALL_NUM	gr[20]
+# define SYSCALL_RET	gr[28]
 #elif defined(__powerpc__)
 # define ARCH_REGS	struct pt_regs
 # define SYSCALL_NUM	gpr[0]
@@ -1303,7 +1307,7 @@
 	EXPECT_EQ(0, ret);
 
 #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
-    defined(__s390__)
+    defined(__s390__) || defined(__hppa__)
 	{
 		regs.SYSCALL_NUM = syscall;
 	}
@@ -1505,6 +1509,8 @@
 #  define __NR_seccomp 383
 # elif defined(__aarch64__)
 #  define __NR_seccomp 277
+# elif defined(__hppa__)
+#  define __NR_seccomp 338
 # elif defined(__powerpc__)
 #  define __NR_seccomp 358
 # elif defined(__s390__)
diff --git a/tools/testing/selftests/vm/thuge-gen.c b/tools/testing/selftests/vm/thuge-gen.c
index c879572..0bc737a 100644
--- a/tools/testing/selftests/vm/thuge-gen.c
+++ b/tools/testing/selftests/vm/thuge-gen.c
@@ -30,7 +30,9 @@
 #define MAP_HUGE_1GB    (30 << MAP_HUGE_SHIFT)
 #define MAP_HUGE_SHIFT  26
 #define MAP_HUGE_MASK   0x3f
+#if !defined(MAP_HUGETLB)
 #define MAP_HUGETLB	0x40000
+#endif
 
 #define SHM_HUGETLB     04000   /* segment will use huge TLB pages */
 #define SHM_HUGE_SHIFT  26
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index feaa64a..6ba7455 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -1,6 +1,6 @@
 all:
 
-all: ring virtio_ring_0_9 virtio_ring_poll
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
 
 CFLAGS += -Wall
 CFLAGS += -pthread -O2 -ggdb
@@ -10,13 +10,16 @@
 ring.o: ring.c main.h
 virtio_ring_0_9.o: virtio_ring_0_9.c main.h
 virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h
+virtio_ring_inorder.o: virtio_ring_inorder.c virtio_ring_0_9.c main.h
 ring: ring.o main.o
 virtio_ring_0_9: virtio_ring_0_9.o main.o
 virtio_ring_poll: virtio_ring_poll.o main.o
+virtio_ring_inorder: virtio_ring_inorder.o main.o
 clean:
 	-rm main.o
 	-rm ring.o ring
 	-rm virtio_ring_0_9.o virtio_ring_0_9
 	-rm virtio_ring_poll.o virtio_ring_poll
+	-rm virtio_ring_inorder.o virtio_ring_inorder
 
 .PHONY: all clean
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 3a5ff43..147abb4 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -115,7 +115,7 @@
 		do {
 			if (started < bufs &&
 			    started - completed < max_outstanding) {
-				r = add_inbuf(0, NULL, "Hello, world!");
+				r = add_inbuf(0, "Buffer\n", "Hello, world!");
 				if (__builtin_expect(r == 0, true)) {
 					++started;
 					if (!--tokick) {
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index 47c9a1a..7618662 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -26,6 +26,14 @@
  * high bits of ring id ^ 0x8000).
  */
 /* #ifdef RING_POLL */
+/* enabling the below activates experimental in-order code
+ * (which skips ring updates, and instead reads and writes len in the descriptor).
+ */
+/* #ifdef INORDER */
+
+#if defined(RING_POLL) && defined(INORDER)
+#error "RING_POLL and INORDER are mutually exclusive"
+#endif
 
 /* how much padding is needed to avoid false cache sharing */
 #define HOST_GUEST_PADDING 0x80
@@ -35,7 +43,11 @@
 	unsigned short last_used_idx;
 	unsigned short num_free;
 	unsigned short kicked_avail_idx;
+#ifndef INORDER
 	unsigned short free_head;
+#else
+	unsigned short reserved_free_head;
+#endif
 	unsigned char reserved[HOST_GUEST_PADDING - 10];
 } guest;
 
@@ -66,8 +78,10 @@
 	guest.avail_idx = 0;
 	guest.kicked_avail_idx = -1;
 	guest.last_used_idx = 0;
+#ifndef INORDER
 	/* Put everything in free lists. */
 	guest.free_head = 0;
+#endif
 	for (i = 0; i < ring_size - 1; i++)
 		ring.desc[i].next = i + 1;
 	host.used_idx = 0;
@@ -84,13 +98,20 @@
 /* guest side */
 int add_inbuf(unsigned len, void *buf, void *datap)
 {
-	unsigned head, avail;
+	unsigned head;
+#ifndef INORDER
+	unsigned avail;
+#endif
 	struct vring_desc *desc;
 
 	if (!guest.num_free)
 		return -1;
 
+#ifdef INORDER
+	head = (ring_size - 1) & (guest.avail_idx++);
+#else
 	head = guest.free_head;
+#endif
 	guest.num_free--;
 
 	desc = ring.desc;
@@ -102,7 +123,9 @@
 	 * descriptors.
 	 */
 	desc[head].flags &= ~VRING_DESC_F_NEXT;
+#ifndef INORDER
 	guest.free_head = desc[head].next;
+#endif
 
 	data[head].data = datap;
 
@@ -113,8 +136,12 @@
 	ring.avail->ring[avail & (ring_size - 1)] =
 		(head | (avail & ~(ring_size - 1))) ^ 0x8000;
 #else
+#ifndef INORDER
+	/* Barrier A (for pairing) */
+	smp_release();
 	avail = (ring_size - 1) & (guest.avail_idx++);
 	ring.avail->ring[avail] = head;
+#endif
 	/* Barrier A (for pairing) */
 	smp_release();
 #endif
@@ -141,15 +168,27 @@
 		return NULL;
 	/* Barrier B (for pairing) */
 	smp_acquire();
+#ifdef INORDER
+	head = (ring_size - 1) & guest.last_used_idx;
+	index = head;
+#else
 	head = (ring_size - 1) & guest.last_used_idx;
 	index = ring.used->ring[head].id;
 #endif
+
+#endif
+#ifdef INORDER
+	*lenp = ring.desc[index].len;
+#else
 	*lenp = ring.used->ring[head].len;
+#endif
 	datap = data[index].data;
 	*bufp = (void*)(unsigned long)ring.desc[index].addr;
 	data[index].data = NULL;
+#ifndef INORDER
 	ring.desc[index].next = guest.free_head;
 	guest.free_head = index;
+#endif
 	guest.num_free++;
 	guest.last_used_idx++;
 	return datap;
@@ -283,16 +322,24 @@
 	smp_acquire();
 
 	used_idx &= ring_size - 1;
+#ifdef INORDER
+	head = used_idx;
+#else
 	head = ring.avail->ring[used_idx];
+#endif
 	desc = &ring.desc[head];
 #endif
 
 	*lenp = desc->len;
 	*bufp = (void *)(unsigned long)desc->addr;
 
+#ifdef INORDER
+	desc->len = desc->len - 1;
+#else
 	/* now update used ring */
 	ring.used->ring[used_idx].id = head;
 	ring.used->ring[used_idx].len = desc->len - 1;
+#endif
 	/* Barrier B (for pairing) */
 	smp_release();
 	host.used_idx++;
diff --git a/tools/virtio/ringtest/virtio_ring_inorder.c b/tools/virtio/ringtest/virtio_ring_inorder.c
new file mode 100644
index 0000000..2438ca5
--- /dev/null
+++ b/tools/virtio/ringtest/virtio_ring_inorder.c
@@ -0,0 +1,2 @@
+#define INORDER 1
+#include "virtio_ring_0_9.c"
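
As a self-contained sketch of the INORDER indexing used above (toy ring size, not part of the harness): with in-order completion the next descriptor head is just the low bits of a monotonically increasing avail index, which is why the free_head list can be compiled out.

#include <stdio.h>

#define RING_SIZE 256	/* must be a power of two, as in the test harness */

/* Next descriptor head for in-order operation: no free list, buffers
 * are used and completed strictly in submission order. */
static unsigned int next_head(unsigned int *avail_idx)
{
	return (RING_SIZE - 1) & (*avail_idx)++;
}

int main(void)
{
	unsigned int avail_idx = 0;
	int i;

	for (i = 0; i < 4; i++)
		printf("head %u\n", next_head(&avail_idx));	/* 0 1 2 3 */
	return 0;
}
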
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 409db33..e2d5b6f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -20,6 +20,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
@@ -174,10 +175,10 @@
 
 	timer->active_cleared_last = false;
 	timer->irq.level = new_level;
-	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
+	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
 				   timer->irq.level);
 	ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
-					 timer->map,
+					 timer->irq.irq,
 					 timer->irq.level);
 	WARN_ON(ret);
 }
@@ -196,7 +197,7 @@
 	 * because the guest would never see the interrupt.  Instead wait
 	 * until we call this function from kvm_timer_flush_hwstate.
 	 */
-	if (!vgic_initialized(vcpu->kvm))
+	if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
 		return -ENODEV;
 
 	if (kvm_timer_should_fire(vcpu) != timer->irq.level)
@@ -274,10 +275,8 @@
 	* to ensure that hardware interrupts from the timer triggers a guest
 	* exit.
 	*/
-	if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
-		phys_active = true;
-	else
-		phys_active = false;
+	phys_active = timer->irq.level ||
+			kvm_vgic_map_is_active(vcpu, timer->irq.irq);
 
 	/*
 	 * We want to avoid hitting the (re)distributor as much as
@@ -302,7 +301,7 @@
 	if (timer->active_cleared_last && !phys_active)
 		return;
 
-	ret = irq_set_irqchip_state(timer->map->irq,
+	ret = irq_set_irqchip_state(host_vtimer_irq,
 				    IRQCHIP_STATE_ACTIVE,
 				    phys_active);
 	WARN_ON(ret);
@@ -334,7 +333,6 @@
 			 const struct kvm_irq_level *irq)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	struct irq_phys_map *map;
 
 	/*
 	 * The vcpu timer irq number cannot be determined in
@@ -353,15 +351,6 @@
 	timer->cntv_ctl = 0;
 	kvm_timer_update_state(vcpu);
 
-	/*
-	 * Tell the VGIC that the virtual interrupt is tied to a
-	 * physical interrupt. We do that once per VCPU.
-	 */
-	map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
-	if (WARN_ON(IS_ERR(map)))
-		return PTR_ERR(map);
-
-	timer->map = map;
 	return 0;
 }
 
@@ -487,14 +476,43 @@
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
 	timer_disarm(timer);
-	if (timer->map)
-		kvm_vgic_unmap_phys_irq(vcpu, timer->map);
+	kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
 }
 
-void kvm_timer_enable(struct kvm *kvm)
+int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
-	if (kvm->arch.timer.enabled)
-		return;
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct irq_desc *desc;
+	struct irq_data *data;
+	int phys_irq;
+	int ret;
+
+	if (timer->enabled)
+		return 0;
+
+	/*
+	 * Find the physical IRQ number corresponding to the host_vtimer_irq
+	 */
+	desc = irq_to_desc(host_vtimer_irq);
+	if (!desc) {
+		kvm_err("%s: no interrupt descriptor\n", __func__);
+		return -EINVAL;
+	}
+
+	data = irq_desc_get_irq_data(desc);
+	while (data->parent_data)
+		data = data->parent_data;
+
+	phys_irq = data->hwirq;
+
+	/*
+	 * Tell the VGIC that the virtual interrupt is tied to a
+	 * physical interrupt. We do that once per VCPU.
+	 */
+	ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+	if (ret)
+		return ret;
+
 
 	/*
 	 * There is a potential race here between VCPUs starting for the first
@@ -505,7 +523,9 @@
 	 * the arch timers are enabled.
 	 */
 	if (timecounter && wqueue)
-		kvm->arch.timer.enabled = 1;
+		timer->enabled = 1;
+
+	return 0;
 }
 
 void kvm_timer_init(struct kvm *kvm)
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index ea00d69..798866a 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -24,11 +24,10 @@
 /* vcpu is already in the HYP VA space */
 void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	u64 val;
 
-	if (kvm->arch.timer.enabled) {
+	if (timer->enabled) {
 		timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
 		timer->cntv_cval = read_sysreg_el0(cntv_cval);
 	}
@@ -60,7 +59,7 @@
 	val |= CNTHCTL_EL1PCTEN;
 	write_sysreg(val, cnthctl_el2);
 
-	if (kvm->arch.timer.enabled) {
+	if (timer->enabled) {
 		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
 		write_sysreg_el0(timer->cntv_cval, cntv_cval);
 		isb();
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index 674bdf8..3a3a699 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -21,11 +21,18 @@
 
 #include <asm/kvm_hyp.h>
 
+#ifdef CONFIG_KVM_NEW_VGIC
+extern struct vgic_global kvm_vgic_global_state;
+#define vgic_v2_params kvm_vgic_global_state
+#else
+extern struct vgic_params vgic_v2_params;
+#endif
+
 static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
 					    void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
 	u32 eisr0, eisr1;
 	int i;
 	bool expect_mi;
@@ -67,7 +74,7 @@
 static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
 	u32 elrsr0, elrsr1;
 
 	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
@@ -86,19 +93,18 @@
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
 	int i;
 
 	for (i = 0; i < nr_lr; i++) {
 		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 			continue;
 
-		if (cpu_if->vgic_elrsr & (1UL << i)) {
+		if (cpu_if->vgic_elrsr & (1UL << i))
 			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-			continue;
-		}
+		else
+			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 		writel_relaxed(0, base + GICH_LR0 + (i * 4));
 	}
 }
@@ -141,13 +147,13 @@
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-	int i, nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+	int i;
 	u64 live_lrs = 0;
 
 	if (!base)
 		return;
 
-	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
 
 	for (i = 0; i < nr_lr; i++)
 		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 575c7aa..a027569 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -436,7 +436,14 @@
 	return 0;
 }
 
-static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
+#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
+
+/*
+ * For one VM the interrupt type must be the same for each vcpu.
+ * As a PPI, the interrupt number is the same for all vcpus,
+ * while as an SPI it must be a separate number per vcpu.
+ */
+static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
@@ -445,7 +452,7 @@
 		if (!kvm_arm_pmu_irq_initialized(vcpu))
 			continue;
 
-		if (is_ppi) {
+		if (irq_is_ppi(irq)) {
 			if (vcpu->arch.pmu.irq_num != irq)
 				return false;
 		} else {
@@ -457,7 +464,6 @@
 	return true;
 }
 
-
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
 	switch (attr->attr) {
@@ -471,14 +477,11 @@
 		if (get_user(irq, uaddr))
 			return -EFAULT;
 
-		/*
-		 * The PMU overflow interrupt could be a PPI or SPI, but for one
-		 * VM the interrupt type must be same for each vcpu. As a PPI,
-		 * the interrupt number is the same for all vcpus, while as an
-		 * SPI it must be a separate number per vcpu.
-		 */
-		if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs ||
-		    !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS))
+		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
+		if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
+			return -EINVAL;
+
+		if (!pmu_irq_is_valid(vcpu->kvm, irq))
 			return -EINVAL;
 
 		if (kvm_arm_pmu_irq_initialized(vcpu))
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 7e826c9..334cd7a 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -171,7 +171,7 @@
 	.enable			= vgic_v2_enable,
 };
 
-static struct vgic_params vgic_v2_params;
+struct vgic_params __section(.hyp.text) vgic_v2_params;
 
 static void vgic_cpu_init_lrs(void *params)
 {
@@ -201,6 +201,8 @@
 	const struct resource *vctrl_res = &gic_kvm_info->vctrl;
 	const struct resource *vcpu_res = &gic_kvm_info->vcpu;
 
+	memset(vgic, 0, sizeof(*vgic));
+
 	if (!gic_kvm_info->maint_irq) {
 		kvm_err("error getting vgic maintenance irq\n");
 		ret = -ENXIO;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index c02a1b1..75b02fa 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -29,12 +29,6 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 
-/* These are for GICv2 emulation only */
-#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
-#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
-#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_LR_VIRTUALID_MASK		(BIT_ULL(32) - 1)
-
 static u32 ich_vtr_el2;
 
 static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -43,7 +37,7 @@
 	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
 
 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-		lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
+		lr_desc.irq = val & ICH_LR_VIRTUAL_ID_MASK;
 	else
 		lr_desc.irq = val & GICH_LR_VIRTUALID;
 
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 60668a7..c3bfbb9 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -690,12 +690,11 @@
  */
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	u64 elrsr = vgic_get_elrsr(vcpu);
 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	int i;
 
-	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
+	for_each_clear_bit(i, elrsr_ptr, vgic->nr_lr) {
 		struct vgic_lr lr = vgic_get_lr(vcpu, i);
 
 		/*
@@ -820,7 +819,6 @@
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_io_device *iodev = container_of(this,
 						    struct vgic_io_device, dev);
-	struct kvm_run *run = vcpu->run;
 	const struct vgic_io_range *range;
 	struct kvm_exit_mmio mmio;
 	bool updated_state;
@@ -849,12 +847,6 @@
 		updated_state = false;
 	}
 	spin_unlock(&dist->lock);
-	run->mmio.is_write	= is_write;
-	run->mmio.len		= len;
-	run->mmio.phys_addr	= addr;
-	memcpy(run->mmio.data, val, len);
-
-	kvm_handle_mmio_return(vcpu, run);
 
 	if (updated_state)
 		vgic_kick_vcpus(vcpu->kvm);
@@ -1102,18 +1094,18 @@
 	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
 }
 
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
 	int i;
 
-	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+	for (i = 0; i < vgic->nr_lr; i++) {
 		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
 
-		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+		if (vlr.irq == virt_irq && vlr.state & LR_STATE_ACTIVE)
 			return true;
 	}
 
-	return vgic_irq_is_active(vcpu, map->virt_irq);
+	return vgic_irq_is_active(vcpu, virt_irq);
 }
 
 /*
@@ -1521,7 +1513,6 @@
 }
 
 static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
-				   struct irq_phys_map *map,
 				   unsigned int irq_num, bool level)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1660,14 +1651,14 @@
 	if (map)
 		return -EINVAL;
 
-	return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
+	return vgic_update_irq_pending(kvm, cpuid, irq_num, level);
 }
 
 /**
  * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
  * @kvm:     The VM structure pointer
  * @cpuid:   The CPU for PPIs
- * @map:     Pointer to a irq_phys_map structure describing the mapping
+ * @virt_irq: The virtual IRQ to be injected
  * @level:   Edge-triggered:  true:  to trigger the interrupt
  *			      false: to ignore the call
  *	     Level-sensitive  true:  raise the input signal
@@ -1678,7 +1669,7 @@
  * being HIGH and 0 being LOW and all devices being active-HIGH.
  */
 int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
-			       struct irq_phys_map *map, bool level)
+			       unsigned int virt_irq, bool level)
 {
 	int ret;
 
@@ -1686,7 +1677,7 @@
 	if (ret)
 		return ret;
 
-	return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
+	return vgic_update_irq_pending(kvm, cpuid, virt_irq, level);
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1712,43 +1703,28 @@
 /**
  * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
  * @vcpu: The VCPU pointer
- * @virt_irq: The virtual irq number
- * @irq: The Linux IRQ number
+ * @virt_irq: The virtual IRQ number for the guest
+ * @phys_irq: The hardware IRQ number of the host
  *
  * Establish a mapping between a guest visible irq (@virt_irq) and a
- * Linux irq (@irq). On injection, @virt_irq will be associated with
- * the physical interrupt represented by @irq. This mapping can be
+ * hardware irq (@phys_irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @phys_irq. This mapping can be
  * established multiple times as long as the parameters are the same.
  *
- * Returns a valid pointer on success, and an error pointer otherwise
+ * Returns 0 on success or an error value otherwise.
  */
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
-					   int virt_irq, int irq)
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
 	struct irq_phys_map *map;
 	struct irq_phys_map_entry *entry;
-	struct irq_desc *desc;
-	struct irq_data *data;
-	int phys_irq;
-
-	desc = irq_to_desc(irq);
-	if (!desc) {
-		kvm_err("%s: no interrupt descriptor\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	data = irq_desc_get_irq_data(desc);
-	while (data->parent_data)
-		data = data->parent_data;
-
-	phys_irq = data->hwirq;
+	int ret = 0;
 
 	/* Create a new mapping */
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	spin_lock(&dist->irq_phys_map_lock);
 
@@ -1756,9 +1732,8 @@
 	map = vgic_irq_map_search(vcpu, virt_irq);
 	if (map) {
 		/* Make sure this mapping matches */
-		if (map->phys_irq != phys_irq	||
-		    map->irq      != irq)
-			map = ERR_PTR(-EINVAL);
+		if (map->phys_irq != phys_irq)
+			ret = -EINVAL;
 
 		/* Found an existing, valid mapping */
 		goto out;
@@ -1767,7 +1742,6 @@
 	map           = &entry->map;
 	map->virt_irq = virt_irq;
 	map->phys_irq = phys_irq;
-	map->irq      = irq;
 
 	list_add_tail_rcu(&entry->entry, root);
 
@@ -1775,9 +1749,9 @@
 	spin_unlock(&dist->irq_phys_map_lock);
 	/* If we've found a hit in the existing list, free the useless
 	 * entry */
-	if (IS_ERR(map) || map != &entry->map)
+	if (ret || map != &entry->map)
 		kfree(entry);
-	return map;
+	return ret;
 }
 
 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
@@ -1813,25 +1787,22 @@
 /**
  * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
  * @vcpu: The VCPU pointer
- * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ * @virt_irq: The virtual IRQ number to be unmapped
  *
  * Remove an existing mapping between virtual and physical interrupts.
  */
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct irq_phys_map_entry *entry;
 	struct list_head *root;
 
-	if (!map)
-		return -EINVAL;
-
-	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+	root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
 
 	spin_lock(&dist->irq_phys_map_lock);
 
 	list_for_each_entry(entry, root, entry) {
-		if (&entry->map == map) {
+		if (entry->map.virt_irq == virt_irq) {
 			list_del_rcu(&entry->entry);
 			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
 			break;
@@ -1887,13 +1858,6 @@
 		return -ENOMEM;
 	}
 
-	/*
-	 * Store the number of LRs per vcpu, so we don't have to go
-	 * all the way to the distributor structure to find out. Only
-	 * assembly code should use this one.
-	 */
-	vgic_cpu->nr_lr = vgic->nr_lr;
-
 	return 0;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
new file mode 100644
index 0000000..a1442f7
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <asm/kvm_mmu.h>
+#include "vgic.h"
+
+/*
+ * Initialization rules: there are multiple stages to the vgic
+ * initialization, both for the distributor and the CPU interfaces.
+ *
+ * Distributor:
+ *
+ * - kvm_vgic_early_init(): initialization of static data that doesn't
+ *   depend on any sizing information or emulation type. No allocation
+ *   is allowed there.
+ *
+ * - vgic_init(): allocation and initialization of the generic data
+ *   structures that depend on sizing information (number of CPUs,
+ *   number of interrupts). Also initializes the vcpu specific data
+ *   structures. Can be executed lazily for GICv2.
+ *
+ * CPU Interface:
+ *
+ * - kvm_vgic_cpu_early_init(): initialization of static data that
+ *   doesn't depend on any sizing information or emulation type. No
+ *   allocation is allowed there.
+ */
+
+/* EARLY INIT */
+
+/*
+ * Those 2 functions should not be needed anymore but they
+ * still are called from arm.c
+ */
+void kvm_vgic_early_init(struct kvm *kvm)
+{
+}
+
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
+{
+}
+
+/* CREATION */
+
+/**
+ * kvm_vgic_create: triggered by the instantiation of the VGIC device by
+ * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
+ * or through the generic KVM_CREATE_DEVICE API ioctl.
+ * irqchip_in_kernel() tells you if this function succeeded or not.
+ * @kvm: kvm struct pointer
+ * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
+ */
+int kvm_vgic_create(struct kvm *kvm, u32 type)
+{
+	int i, vcpu_lock_idx = -1, ret;
+	struct kvm_vcpu *vcpu;
+
+	mutex_lock(&kvm->lock);
+
+	if (irqchip_in_kernel(kvm)) {
+		ret = -EEXIST;
+		goto out;
+	}
+
+	/*
+	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
+	 * which had no chance yet to check the availability of the GICv2
+	 * emulation. So check this here again. KVM_CREATE_DEVICE does
+	 * the proper checks already.
+	 */
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+		!kvm_vgic_global_state.can_emulate_gicv2) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs are run while we create the vgic.
+	 */
+	ret = -EBUSY;
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!mutex_trylock(&vcpu->mutex))
+			goto out_unlock;
+		vcpu_lock_idx = i;
+	}
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (vcpu->arch.has_run_once)
+			goto out_unlock;
+	}
+	ret = 0;
+
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
+		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
+	else
+		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;
+
+	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
+		ret = -E2BIG;
+		goto out_unlock;
+	}
+
+	kvm->arch.vgic.in_kernel = true;
+	kvm->arch.vgic.vgic_model = type;
+
+	/*
+	 * kvm_vgic_global_state.vctrl_base is set on vgic probe (kvm_arch_init)
+	 * it is stored in distributor struct for asm save/restore purpose
+	 */
+	kvm->arch.vgic.vctrl_base = kvm_vgic_global_state.vctrl_base;
+
+	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
+
+out_unlock:
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+		mutex_unlock(&vcpu->mutex);
+	}
+
+out:
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+/* INIT/DESTROY */
+
+/**
+ * kvm_vgic_dist_init: initialize the dist data structures
+ * @kvm: kvm struct pointer
+ * @nr_spis: number of spis, frozen by caller
+ */
+static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
+	int i;
+
+	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
+	if (!dist->spis)
+		return  -ENOMEM;
+
+	/*
+	 * In the following code we do not take the irq struct lock since
+	 * no other action on irq structs can happen while the VGIC is
+	 * not initialized yet:
+	 * If someone wants to inject an interrupt or does an MMIO access, we
+	 * require prior initialization in case of a virtual GICv3 or trigger
+	 * initialization when using a virtual GICv2.
+	 */
+	for (i = 0; i < nr_spis; i++) {
+		struct vgic_irq *irq = &dist->spis[i];
+
+		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
+		INIT_LIST_HEAD(&irq->ap_list);
+		spin_lock_init(&irq->irq_lock);
+		irq->vcpu = NULL;
+		irq->target_vcpu = vcpu0;
+		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+			irq->targets = 0;
+		else
+			irq->mpidr = 0;
+	}
+	return 0;
+}
+
+/**
+ * kvm_vgic_vcpu_init: initialize the vcpu data structures and
+ * enable the VCPU interface
+ * @vcpu: the VCPU whose VGIC should be initialized
+ */
+static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	int i;
+
+	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+	spin_lock_init(&vgic_cpu->ap_list_lock);
+
+	/*
+	 * Enable and configure all SGIs to be edge-triggered and
+	 * configure all PPIs as level-triggered.
+	 */
+	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+
+		INIT_LIST_HEAD(&irq->ap_list);
+		spin_lock_init(&irq->irq_lock);
+		irq->intid = i;
+		irq->vcpu = NULL;
+		irq->target_vcpu = vcpu;
+		irq->targets = 1U << vcpu->vcpu_id;
+		if (vgic_irq_is_sgi(i)) {
+			/* SGIs */
+			irq->enabled = 1;
+			irq->config = VGIC_CONFIG_EDGE;
+		} else {
+			/* PPIs */
+			irq->config = VGIC_CONFIG_LEVEL;
+		}
+	}
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_enable(vcpu);
+	else
+		vgic_v3_enable(vcpu);
+}
+
+/*
+ * vgic_init: allocates and initializes dist and vcpu data structures
+ * depending on two dimensioning parameters:
+ * - the number of spis
+ * - the number of vcpus
+ * The function is generally called when nr_spis has been explicitly set
+ * by the guest through the KVM DEVICE API. If not, nr_spis is set to 256.
+ * vgic_initialized() returns true when this function has succeeded.
+ * Must be called with kvm->lock held!
+ */
+int vgic_init(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int ret = 0, i;
+
+	if (vgic_initialized(kvm))
+		return 0;
+
+	/* freeze the number of spis */
+	if (!dist->nr_spis)
+		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;
+
+	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
+	if (ret)
+		goto out;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vgic_vcpu_init(vcpu);
+
+	dist->initialized = true;
+out:
+	return ret;
+}
+
+static void kvm_vgic_dist_destroy(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+
+	mutex_lock(&kvm->lock);
+
+	dist->ready = false;
+	dist->initialized = false;
+
+	kfree(dist->spis);
+	kfree(dist->redist_iodevs);
+	dist->nr_spis = 0;
+
+	mutex_unlock(&kvm->lock);
+}
+
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+}
+
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_vgic_dist_destroy(kvm);
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vgic_vcpu_destroy(vcpu);
+}
+
+/**
+ * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
+ * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
+ * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
+ * @kvm: kvm struct pointer
+ */
+int vgic_lazy_init(struct kvm *kvm)
+{
+	int ret = 0;
+
+	if (unlikely(!vgic_initialized(kvm))) {
+		/*
+		 * We only provide the automatic initialization of the VGIC
+		 * for the legacy case of a GICv2. Any other type must
+		 * be explicitly initialized once set up with the respective
+		 * KVM device call.
+		 */
+		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+			return -EBUSY;
+
+		mutex_lock(&kvm->lock);
+		ret = vgic_init(kvm);
+		mutex_unlock(&kvm->lock);
+	}
+
+	return ret;
+}
+
+/* RESOURCE MAPPING */
+
+/**
+ * kvm_vgic_map_resources: map the MMIO regions depending on the VGIC model
+ * exposed to the guest; called on the first VCPU run.
+ * Also map the virtual CPU interface into the VM.
+ * v2/v3 derivatives call vgic_init if not already done.
+ * vgic_ready() returns true if this function has succeeded.
+ * @kvm: kvm struct pointer
+ */
+int kvm_vgic_map_resources(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int ret = 0;
+
+	mutex_lock(&kvm->lock);
+	if (!irqchip_in_kernel(kvm))
+		goto out;
+
+	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+		ret = vgic_v2_map_resources(kvm);
+	else
+		ret = vgic_v3_map_resources(kvm);
+out:
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+/* GENERIC PROBE */
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+			   unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		vgic_init_maintenance_interrupt(NULL);
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		disable_percpu_irq(kvm_vgic_global_state.maint_irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+	.notifier_call = vgic_cpu_notify,
+};
+
+static irqreturn_t vgic_maintenance_handler(int irq, void *data)
+{
+	/*
+	 * We cannot rely on the vgic maintenance interrupt to be
+	 * delivered synchronously. This means we can only use it to
+	 * exit the VM, and we perform the handling of EOIed
+	 * interrupts on the exit path (see vgic_process_maintenance).
+	 */
+	return IRQ_HANDLED;
+}
+
+/**
+ * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
+ * according to the host GIC model. Accordingly calls either
+ * vgic_v2/v3_probe, which registers the KVM_DEVICE that can be
+ * instantiated by userspace later on.
+ */
+int kvm_vgic_hyp_init(void)
+{
+	const struct gic_kvm_info *gic_kvm_info;
+	int ret;
+
+	gic_kvm_info = gic_get_kvm_info();
+	if (!gic_kvm_info)
+		return -ENODEV;
+
+	if (!gic_kvm_info->maint_irq) {
+		kvm_err("No vgic maintenance irq\n");
+		return -ENXIO;
+	}
+
+	switch (gic_kvm_info->type) {
+	case GIC_V2:
+		ret = vgic_v2_probe(gic_kvm_info);
+		break;
+	case GIC_V3:
+		ret = vgic_v3_probe(gic_kvm_info);
+		break;
+	default:
+		ret = -ENODEV;
+	}
+
+	if (ret)
+		return ret;
+
+	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
+				 vgic_maintenance_handler,
+				 "vgic", kvm_get_running_vcpus());
+	if (ret) {
+		kvm_err("Cannot register interrupt %d\n",
+			kvm_vgic_global_state.maint_irq);
+		return ret;
+	}
+
+	ret = __register_cpu_notifier(&vgic_cpu_nb);
+	if (ret) {
+		kvm_err("Cannot register vgic CPU notifier\n");
+		goto out_free_irq;
+	}
+
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
+	return 0;
+
+out_free_irq:
+	free_percpu_irq(kvm_vgic_global_state.maint_irq,
+			kvm_get_running_vcpus());
+	return ret;
+}
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
new file mode 100644
index 0000000..c675513
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+		    struct kvm_kernel_irq_routing_entry *entries,
+		    int gsi)
+{
+	return 0;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned int irqchip,
+			 unsigned int pin)
+{
+	return pin;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+		u32 irq, int level, bool line_status)
+{
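+	/*
+	 * The GSI numbering used here starts at the first SPI, so GSI 0
+	 * maps to INTID 32 (VGIC_NR_PRIVATE_IRQS), as computed below.
+	 */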
+	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
+
+	trace_kvm_set_irq(irq, level, irq_source_id);
+
+	BUG_ON(!vgic_initialized(kvm));
+
+	return kvm_vgic_inject_irq(kvm, 0, spi, level);
+}
+
+/* MSI not implemented yet */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		struct kvm *kvm, int irq_source_id,
+		int level, bool line_status)
+{
+	return 0;
+}
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
new file mode 100644
index 0000000..0130c4b
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -0,0 +1,431 @@
+/*
+ * VGIC: KVM DEVICE API
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_mmu.h>
+#include "vgic.h"
+
+/* common helpers */
+
+static int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
+			     phys_addr_t addr, phys_addr_t alignment)
+{
+	if (addr & ~KVM_PHYS_MASK)
+		return -E2BIG;
+
+	if (!IS_ALIGNED(addr, alignment))
+		return -EINVAL;
+
+	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
+		return -EEXIST;
+
+	return 0;
+}
+
+/**
+ * kvm_vgic_addr - set or get vgic VM base addresses
+ * @kvm:   pointer to the vm struct
+ * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
+ * @addr:  pointer to address value
+ * @write: if true, set the address in the VM address space; if false, read
+ *         the address
+ *
+ * Set or get the vgic base addresses for the distributor and the virtual CPU
+ * interface in the VM physical address space.  These addresses are properties
+ * of the emulated core/SoC and therefore user space initially knows this
+ * information.
+ * Check them for sanity (alignment, double assignment). We can't check for
+ * overlapping regions in case of a virtual GICv3 here, since we don't know
+ * the number of VCPUs yet, so we defer this check to map_resources().
+ */
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+	int r = 0;
+	struct vgic_dist *vgic = &kvm->arch.vgic;
+	int type_needed;
+	phys_addr_t *addr_ptr, alignment;
+
+	mutex_lock(&kvm->lock);
+	switch (type) {
+	case KVM_VGIC_V2_ADDR_TYPE_DIST:
+		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+		addr_ptr = &vgic->vgic_dist_base;
+		alignment = SZ_4K;
+		break;
+	case KVM_VGIC_V2_ADDR_TYPE_CPU:
+		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+		addr_ptr = &vgic->vgic_cpu_base;
+		alignment = SZ_4K;
+		break;
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+	case KVM_VGIC_V3_ADDR_TYPE_DIST:
+		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+		addr_ptr = &vgic->vgic_dist_base;
+		alignment = SZ_64K;
+		break;
+	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+		addr_ptr = &vgic->vgic_redist_base;
+		alignment = SZ_64K;
+		break;
+#endif
+	default:
+		r = -ENODEV;
+		goto out;
+	}
+
+	if (vgic->vgic_model != type_needed) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	if (write) {
+		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
+		if (!r)
+			*addr_ptr = *addr;
+	} else {
+		*addr = *addr_ptr;
+	}
+
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+static int vgic_set_common_attr(struct kvm_device *dev,
+				struct kvm_device_attr *attr)
+{
+	int r;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+		u64 addr;
+		unsigned long type = (unsigned long)attr->attr;
+
+		if (copy_from_user(&addr, uaddr, sizeof(addr)))
+			return -EFAULT;
+
+		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
+		return (r == -ENODEV) ? -ENXIO : r;
+	}
+	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 val;
+		int ret = 0;
+
+		if (get_user(val, uaddr))
+			return -EFAULT;
+
+		/*
+		 * We require:
+		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
+		 * - at most 1024 interrupts
+		 * - a multiple of 32 interrupts
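+		 * For example, val == 64 (32 SPIs) passes all three checks,
+		 * while val == 32 (no SPIs) and val == 100 (not a multiple
+		 * of 32) are rejected.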
+		 */
+		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
+		    val > VGIC_MAX_RESERVED ||
+		    (val & 31))
+			return -EINVAL;
+
+		mutex_lock(&dev->kvm->lock);
+
+		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
+			ret = -EBUSY;
+		else
+			dev->kvm->arch.vgic.nr_spis =
+				val - VGIC_NR_PRIVATE_IRQS;
+
+		mutex_unlock(&dev->kvm->lock);
+
+		return ret;
+	}
+	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+		switch (attr->attr) {
+		case KVM_DEV_ARM_VGIC_CTRL_INIT:
+			mutex_lock(&dev->kvm->lock);
+			r = vgic_init(dev->kvm);
+			mutex_unlock(&dev->kvm->lock);
+			return r;
+		}
+		break;
+	}
+	}
+
+	return -ENXIO;
+}
+
+static int vgic_get_common_attr(struct kvm_device *dev,
+				struct kvm_device_attr *attr)
+{
+	int r = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+		u64 addr;
+		unsigned long type = (unsigned long)attr->attr;
+
+		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+		if (r)
+			return (r == -ENODEV) ? -ENXIO : r;
+
+		if (copy_to_user(uaddr, &addr, sizeof(addr)))
+			return -EFAULT;
+		break;
+	}
+	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+		r = put_user(dev->kvm->arch.vgic.nr_spis +
+			     VGIC_NR_PRIVATE_IRQS, uaddr);
+		break;
+	}
+	}
+
+	return r;
+}
+
+static int vgic_create(struct kvm_device *dev, u32 type)
+{
+	return kvm_vgic_create(dev->kvm, type);
+}
+
+static void vgic_destroy(struct kvm_device *dev)
+{
+	kfree(dev);
+}
+
+void kvm_register_vgic_device(unsigned long type)
+{
+	switch (type) {
+	case KVM_DEV_TYPE_ARM_VGIC_V2:
+		kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+					KVM_DEV_TYPE_ARM_VGIC_V2);
+		break;
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+	case KVM_DEV_TYPE_ARM_VGIC_V3:
+		kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
+					KVM_DEV_TYPE_ARM_VGIC_V3);
+		break;
+#endif
+	}
+}
+
+/**
+ * vgic_attr_regs_access: allows user space to read/write VGIC registers
+ *
+ * @dev: kvm device handle
+ * @attr: kvm device attribute
+ * @reg: pointer to the value to be read or written
+ * @is_write: true for a write access, false for a read
+ */
+static int vgic_attr_regs_access(struct kvm_device *dev,
+				 struct kvm_device_attr *attr,
+				 u32 *reg, bool is_write)
+{
+	gpa_t addr;
+	int cpuid, ret, c;
+	struct kvm_vcpu *vcpu, *tmp_vcpu;
+	int vcpu_lock_idx = -1;
+
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	mutex_lock(&dev->kvm->lock);
+
+	ret = vgic_init(dev->kvm);
+	if (ret)
+		goto out;
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs are run and fiddle with the vgic state while we
+	 * access it.
+	 */
+	ret = -EBUSY;
+	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+		if (!mutex_trylock(&tmp_vcpu->mutex))
+			goto out;
+		vcpu_lock_idx = c;
+	}
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
+		mutex_unlock(&tmp_vcpu->mutex);
+	}
+
+	mutex_unlock(&dev->kvm->lock);
+	return ret;
+}
+
+/* V2 ops */
+
+static int vgic_v2_set_attr(struct kvm_device *dev,
+			    struct kvm_device_attr *attr)
+{
+	int ret;
+
+	ret = vgic_set_common_attr(dev, attr);
+	if (ret != -ENXIO)
+		return ret;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 reg;
+
+		if (get_user(reg, uaddr))
+			return -EFAULT;
+
+		return vgic_attr_regs_access(dev, attr, &reg, true);
+	}
+	}
+
+	return -ENXIO;
+}
+
+static int vgic_v2_get_attr(struct kvm_device *dev,
+			    struct kvm_device_attr *attr)
+{
+	int ret;
+
+	ret = vgic_get_common_attr(dev, attr);
+	if (ret != -ENXIO)
+		return ret;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 reg = 0;
+
+		ret = vgic_attr_regs_access(dev, attr, &reg, false);
+		if (ret)
+			return ret;
+		return put_user(reg, uaddr);
+	}
+	}
+
+	return -ENXIO;
+}
+
+static int vgic_v2_has_attr(struct kvm_device *dev,
+			    struct kvm_device_attr *attr)
+{
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR:
+		switch (attr->attr) {
+		case KVM_VGIC_V2_ADDR_TYPE_DIST:
+		case KVM_VGIC_V2_ADDR_TYPE_CPU:
+			return 0;
+		}
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		return vgic_v2_has_attr_regs(dev, attr);
+	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+		return 0;
+	case KVM_DEV_ARM_VGIC_GRP_CTRL:
+		switch (attr->attr) {
+		case KVM_DEV_ARM_VGIC_CTRL_INIT:
+			return 0;
+		}
+	}
+	return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+	.name = "kvm-arm-vgic-v2",
+	.create = vgic_create,
+	.destroy = vgic_destroy,
+	.set_attr = vgic_v2_set_attr,
+	.get_attr = vgic_v2_get_attr,
+	.has_attr = vgic_v2_has_attr,
+};
+
+/* V3 ops */
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+
+static int vgic_v3_set_attr(struct kvm_device *dev,
+			    struct kvm_device_attr *attr)
+{
+	return vgic_set_common_attr(dev, attr);
+}
+
+static int vgic_v3_get_attr(struct kvm_device *dev,
+			    struct kvm_device_attr *attr)
+{
+	return vgic_get_common_attr(dev, attr);
+}
+
+static int vgic_v3_has_attr(struct kvm_device *dev,
+			    struct kvm_device_attr *attr)
+{
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR:
+		switch (attr->attr) {
+		case KVM_VGIC_V3_ADDR_TYPE_DIST:
+		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+			return 0;
+		}
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+		return 0;
+	case KVM_DEV_ARM_VGIC_GRP_CTRL:
+		switch (attr->attr) {
+		case KVM_DEV_ARM_VGIC_CTRL_INIT:
+			return 0;
+		}
+	}
+	return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v3_ops = {
+	.name = "kvm-arm-vgic-v3",
+	.create = vgic_create,
+	.destroy = vgic_destroy,
+	.set_attr = vgic_v3_set_attr,
+	.get_attr = vgic_v3_get_attr,
+	.has_attr = vgic_v3_has_attr,
+};
+
+#endif /* CONFIG_KVM_ARM_VGIC_V3 */
+
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
new file mode 100644
index 0000000..a213936
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -0,0 +1,446 @@
+/*
+ * VGICv2 MMIO handling functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/iodev.h>
+#include <kvm/arm_vgic.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
+					    gpa_t addr, unsigned int len)
+{
+	u32 value;
+
+	switch (addr & 0x0c) {
+	case GIC_DIST_CTRL:
+		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
+		break;
+	case GIC_DIST_CTR:
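+		/*
+		 * GICD_TYPER: ITLinesNumber (bits [4:0]) encodes the number
+		 * of supported interrupts as 32 * (N + 1), and CPUNumber
+		 * (bits [7:5]) holds the number of vCPUs minus one.
+		 */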
+		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+		value = (value >> 5) - 1;
+		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+		break;
+	case GIC_DIST_IIDR:
+		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+		break;
+	default:
+		return 0;
+	}
+
+	return value;
+}
+
+static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len,
+				    unsigned long val)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	bool was_enabled = dist->enabled;
+
+	switch (addr & 0x0c) {
+	case GIC_DIST_CTRL:
+		dist->enabled = val & GICD_ENABLE;
+		if (!was_enabled && dist->enabled)
+			vgic_kick_vcpus(vcpu->kvm);
+		break;
+	case GIC_DIST_CTR:
+	case GIC_DIST_IIDR:
+		/* Nothing to do */
+		return;
+	}
+}
+
+static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
+				 gpa_t addr, unsigned int len,
+				 unsigned long val)
+{
+	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
+	int intid = val & 0xf;
+	int targets = (val >> 16) & 0xff;
+	int mode = (val >> 24) & 0x03;
+	int c;
+	struct kvm_vcpu *vcpu;
+
+	switch (mode) {
+	case 0x0:		/* as specified by targets */
+		break;
+	case 0x1:
+		targets = (1U << nr_vcpus) - 1;			/* all, ... */
+		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
+		break;
+	case 0x2:		/* this very vCPU only */
+		targets = (1U << source_vcpu->vcpu_id);
+		break;
+	case 0x3:		/* reserved */
+		return;
+	}
+
+	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
+		struct vgic_irq *irq;
+
+		if (!(targets & (1U << c)))
+			continue;
+
+		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
+
+		spin_lock(&irq->irq_lock);
+		irq->pending = true;
+		irq->source |= 1U << source_vcpu->vcpu_id;
+
+		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+	}
+}
+
+static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
+					   gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+	int i;
+	u64 val = 0;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		val |= (u64)irq->targets << (i * 8);
+	}
+
+	return val;
+}
+
+static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
+				   gpa_t addr, unsigned int len,
+				   unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+	int i;
+
+	/* GICD_ITARGETSR[0-7] are read-only */
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		return;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
+		int target;
+
+		spin_lock(&irq->irq_lock);
+
+		irq->targets = (val >> (i * 8)) & 0xff;
+		target = irq->targets ? __ffs(irq->targets) : 0;
+		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
+
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
+					    gpa_t addr, unsigned int len)
+{
+	u32 intid = addr & 0x0f;
+	int i;
+	u64 val = 0;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		val |= (u64)irq->source << (i * 8);
+	}
+	return val;
+}
+
+static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
+				     gpa_t addr, unsigned int len,
+				     unsigned long val)
+{
+	u32 intid = addr & 0x0f;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+
+		irq->source &= ~((val >> (i * 8)) & 0xff);
+		if (!irq->source)
+			irq->pending = false;
+
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
+				     gpa_t addr, unsigned int len,
+				     unsigned long val)
+{
+	u32 intid = addr & 0x0f;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+
+		irq->source |= (val >> (i * 8)) & 0xff;
+
+		if (irq->source) {
+			irq->pending = true;
+			vgic_queue_irq_unlock(vcpu->kvm, irq);
+		} else {
+			spin_unlock(&irq->irq_lock);
+		}
+	}
+}
+
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_set_vmcr(vcpu, vmcr);
+	else
+		vgic_v3_set_vmcr(vcpu, vmcr);
+}
+
+static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_get_vmcr(vcpu, vmcr);
+	else
+		vgic_v3_get_vmcr(vcpu, vmcr);
+}
+
+#define GICC_ARCH_VERSION_V2	0x2
+
+/* These are for userland accesses only; there is no guest-facing emulation. */
+static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
+					   gpa_t addr, unsigned int len)
+{
+	struct vgic_vmcr vmcr;
+	u32 val;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+
+	switch (addr & 0xff) {
+	case GIC_CPU_CTRL:
+		val = vmcr.ctlr;
+		break;
+	case GIC_CPU_PRIMASK:
+		val = vmcr.pmr;
+		break;
+	case GIC_CPU_BINPOINT:
+		val = vmcr.bpr;
+		break;
+	case GIC_CPU_ALIAS_BINPOINT:
+		val = vmcr.abpr;
+		break;
+	case GIC_CPU_IDENT:
+		val = ((PRODUCT_ID_KVM << 20) |
+		       (GICC_ARCH_VERSION_V2 << 16) |
+		       IMPLEMENTER_ARM);
+		break;
+	default:
+		return 0;
+	}
+
+	return val;
+}
+
+static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
+				   gpa_t addr, unsigned int len,
+				   unsigned long val)
+{
+	struct vgic_vmcr vmcr;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+
+	switch (addr & 0xff) {
+	case GIC_CPU_CTRL:
+		vmcr.ctlr = val;
+		break;
+	case GIC_CPU_PRIMASK:
+		vmcr.pmr = val;
+		break;
+	case GIC_CPU_BINPOINT:
+		vmcr.bpr = val;
+		break;
+	case GIC_CPU_ALIAS_BINPOINT:
+		vmcr.abpr = val;
+		break;
+	}
+
+	vgic_set_vmcr(vcpu, &vmcr);
+}
+
+static const struct vgic_register_region vgic_v2_dist_registers[] = {
+	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
+		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
+		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
+		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
+		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
+		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
+		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
+		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
+		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
+		vgic_mmio_read_target, vgic_mmio_write_target, 8,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
+		vgic_mmio_read_config, vgic_mmio_write_config, 2,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
+		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
+		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
+		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+};
+
+static const struct vgic_register_region vgic_v2_cpu_registers[] = {
+	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
+		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
+		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
+		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
+		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
+		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+		VGIC_ACCESS_32bit),
+};
+
+unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
+{
+	dev->regions = vgic_v2_dist_registers;
+	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
+
+	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
+
+	return SZ_4K;
+}
+
+int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+	const struct vgic_register_region *regions;
+	gpa_t addr;
+	int nr_regions, i, len;
+
+	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		regions = vgic_v2_dist_registers;
+		nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		regions = vgic_v2_cpu_registers;
+		nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
+		break;
+	default:
+		return -ENXIO;
+	}
+
+	/* We only support aligned 32-bit accesses. */
+	if (addr & 3)
+		return -ENXIO;
+
+	for (i = 0; i < nr_regions; i++) {
+		if (regions[i].bits_per_irq)
+			len = (regions[i].bits_per_irq * nr_irqs) / 8;
+		else
+			len = regions[i].len;
+
+		if (regions[i].reg_offset <= addr &&
+		    regions[i].reg_offset + len > addr)
+			return 0;
+	}
+
+	return -ENXIO;
+}
+
+/*
+ * When userland tries to access the VGIC register handlers, we need to
+ * create a usable struct vgic_io_device to be passed to the handlers and we
+ * have to set up a buffer similar to what would have happened if a guest MMIO
+ * access occurred, including doing endian conversions on BE systems.
+ */
+static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
+			bool is_write, int offset, u32 *val)
+{
+	unsigned int len = 4;
+	u8 buf[4];
+	int ret;
+
+	if (is_write) {
+		vgic_data_host_to_mmio_bus(buf, len, *val);
+		ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
+	} else {
+		ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);
+		if (!ret)
+			*val = vgic_data_mmio_bus_to_host(buf, len);
+	}
+
+	return ret;
+}
+
+int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+			  int offset, u32 *val)
+{
+	struct vgic_io_device dev = {
+		.regions = vgic_v2_cpu_registers,
+		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
+	};
+
+	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
+}
+
+int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+			 int offset, u32 *val)
+{
+	struct vgic_io_device dev = {
+		.regions = vgic_v2_dist_registers,
+		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
+	};
+
+	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
new file mode 100644
index 0000000..a0c515a
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -0,0 +1,455 @@
+/*
+ * VGICv3 MMIO handling functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/iodev.h>
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_emulate.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+/* extract @num bytes at @offset bytes offset in data */
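+/* e.g. extract_bytes(0x1122334455667788, 2, 2) == 0x5566 */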
+static unsigned long extract_bytes(unsigned long data, unsigned int offset,
+				   unsigned int num)
+{
+	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
+}
+
+static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
+					    gpa_t addr, unsigned int len)
+{
+	u32 value = 0;
+
+	switch (addr & 0x0c) {
+	case GICD_CTLR:
+		if (vcpu->kvm->arch.vgic.enabled)
+			value |= GICD_CTLR_ENABLE_SS_G1;
+		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+		break;
+	case GICD_TYPER:
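+		/*
+		 * As for GICv2, ITLinesNumber encodes the number of
+		 * interrupts as 32 * (N + 1); IDbits (bits [23:19])
+		 * advertises the number of supported INTID bits minus one.
+		 */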
+		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+		value = (value >> 5) - 1;
+		value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
+		break;
+	case GICD_IIDR:
+		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+		break;
+	default:
+		return 0;
+	}
+
+	return value;
+}
+
+static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len,
+				    unsigned long val)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	bool was_enabled = dist->enabled;
+
+	switch (addr & 0x0c) {
+	case GICD_CTLR:
+		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+
+		if (!was_enabled && dist->enabled)
+			vgic_kick_vcpus(vcpu->kvm);
+		break;
+	case GICD_TYPER:
+	case GICD_IIDR:
+		return;
+	}
+}
+
+static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
+					    gpa_t addr, unsigned int len)
+{
+	int intid = VGIC_ADDR_TO_INTID(addr, 64);
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+
+	if (!irq)
+		return 0;
+
+	/* The upper word is RAZ for us. */
+	if (addr & 4)
+		return 0;
+
+	return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+}
+
+static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len,
+				    unsigned long val)
+{
+	int intid = VGIC_ADDR_TO_INTID(addr, 64);
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+
+	if (!irq)
+		return;
+
+	/* The upper word is WI for us since we don't implement Aff3. */
+	if (addr & 4)
+		return;
+
+	spin_lock(&irq->irq_lock);
+
+	/* We only care about and preserve Aff0, Aff1 and Aff2. */
+	irq->mpidr = val & GENMASK(23, 0);
+	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
+
+	spin_unlock(&irq->irq_lock);
+}
+
+static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+					      gpa_t addr, unsigned int len)
+{
+	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+	int target_vcpu_id = vcpu->vcpu_id;
+	u64 value;
+
+	value = (mpidr & GENMASK(23, 0)) << 32;
+	value |= ((target_vcpu_id & 0xffff) << 8);
+	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+		value |= GICR_TYPER_LAST;
+
+	return extract_bytes(value, addr & 7, len);
+}
+
+static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
+					     gpa_t addr, unsigned int len)
+{
+	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+}
+
+static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
+					      gpa_t addr, unsigned int len)
+{
+	switch (addr & 0xffff) {
+	case GICD_PIDR2:
+		/* report a GICv3 compliant implementation */
+		return 0x3b;
+	}
+
+	return 0;
+}
+
+/*
+ * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
+ * redistributors, while SPIs are covered by registers in the distributor
+ * block. Accesses to private IRQs through this block are therefore ignored.
+ * We take some special care here to fix the calculation of the register
+ * offset.
+ */
+#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, bpi, acc)	\
+	{								\
+		.reg_offset = off,					\
+		.bits_per_irq = bpi,					\
+		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
+		.access_flags = acc,					\
+		.read = vgic_mmio_read_raz,				\
+		.write = vgic_mmio_write_wi,				\
+	}, {								\
+		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
+		.bits_per_irq = bpi,					\
+		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
+		.access_flags = acc,					\
+		.read = rd,						\
+		.write = wr,						\
+	}
+
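+/*
+ * As an illustration: for a 1-bit-per-IRQ register such as GICD_ISENABLER
+ * this expands to a RAZ/WI region covering the first 32 interrupts (bytes
+ * 0x0-0x3, the private IRQs handled in the redistributor) followed by a
+ * region wiring up the real handlers for the remaining SPIs.
+ */
+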
+static const struct vgic_register_region vgic_v3_dist_registers[] = {
+	REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
+		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
+		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
+		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
+		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
+		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
+		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
+		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
+		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
+		vgic_mmio_read_config, vgic_mmio_write_config, 2,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
+		vgic_mmio_read_irouter, vgic_mmio_write_irouter, 64,
+		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
+		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
+		VGIC_ACCESS_32bit),
+};
+
+static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
+	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
+		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
+		VGIC_ACCESS_32bit),
+};
+
+static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
+	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
+		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
+		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ISPENDR0,
+		vgic_mmio_read_pending, vgic_mmio_write_spending, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ICPENDR0,
+		vgic_mmio_read_pending, vgic_mmio_write_cpending, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0,
+		vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0,
+		vgic_mmio_read_active, vgic_mmio_write_cactive, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
+		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
+		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
+		vgic_mmio_read_config, vgic_mmio_write_config, 8,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+		VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
+		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+		VGIC_ACCESS_32bit),
+};
+
+unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
+{
+	dev->regions = vgic_v3_dist_registers;
+	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
+
+	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
+
+	return SZ_64K;
+}
+
+int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
+{
+	int nr_vcpus = atomic_read(&kvm->online_vcpus);
+	struct kvm_vcpu *vcpu;
+	struct vgic_io_device *devices;
+	int c, ret = 0;
+
+	devices = kmalloc(sizeof(struct vgic_io_device) * nr_vcpus * 2,
+			  GFP_KERNEL);
+	if (!devices)
+		return -ENOMEM;
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		gpa_t rd_base = redist_base_address + c * SZ_64K * 2;
+		gpa_t sgi_base = rd_base + SZ_64K;
+		struct vgic_io_device *rd_dev = &devices[c * 2];
+		struct vgic_io_device *sgi_dev = &devices[c * 2 + 1];
+
+		kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
+		rd_dev->base_addr = rd_base;
+		rd_dev->regions = vgic_v3_rdbase_registers;
+		rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
+		rd_dev->redist_vcpu = vcpu;
+
+		mutex_lock(&kvm->slots_lock);
+		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
+					      SZ_64K, &rd_dev->dev);
+		mutex_unlock(&kvm->slots_lock);
+
+		if (ret)
+			break;
+
+		kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
+		sgi_dev->base_addr = sgi_base;
+		sgi_dev->regions = vgic_v3_sgibase_registers;
+		sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
+		sgi_dev->redist_vcpu = vcpu;
+
+		mutex_lock(&kvm->slots_lock);
+		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
+					      SZ_64K, &sgi_dev->dev);
+		mutex_unlock(&kvm->slots_lock);
+		if (ret) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+						  &rd_dev->dev);
+			break;
+		}
+	}
+
+	if (ret) {
+		/* The current c failed, so unwind starting from the previous one. */
+		for (c--; c >= 0; c--) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+						  &devices[c * 2].dev);
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+						  &devices[c * 2 + 1].dev);
+		}
+		kfree(devices);
+	} else {
+		kvm->arch.vgic.redist_iodevs = devices;
+	}
+
+	return ret;
+}
+
+/*
+ * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
+ * generation register ICC_SGI1R_EL1) with a given VCPU.
+ * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
+ * return -1.
+ */
+static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
+{
+	unsigned long affinity;
+	int level0;
+
+	/*
+	 * Split the current VCPU's MPIDR into affinity level 0 and the
+	 * rest as this is what we have to compare against.
+	 */
+	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
+	affinity &= ~MPIDR_LEVEL_MASK;
+
+	/* bail out if the upper three levels don't match */
+	if (sgi_aff != affinity)
+		return -1;
+
+	/* Is this VCPU's bit set in the mask ? */
+	if (!(sgi_cpu_mask & BIT(level0)))
+		return -1;
+
+	return level0;
+}
+
+/*
+ * The ICC_SGI* registers encode the affinity differently from the MPIDR,
+ * so provide a wrapper to use the existing defines to isolate a certain
+ * affinity level.
+ */
+#define SGI_AFFINITY_LEVEL(reg, level) \
+	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
+	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+
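+/*
+ * For example, SGI_AFFINITY_LEVEL(reg, 1) extracts the Aff1 field of
+ * ICC_SGI1R_EL1 and shifts it to bits [15:8], where Aff1 lives in an MPIDR
+ * value, so the per-level results can simply be ORed together.
+ */
+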
+/**
+ * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
+ * @vcpu: The VCPU requesting a SGI
+ * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
+ *
+ * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
+ * This will trap in sys_regs.c and call this function.
+ * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
+ * target processors as well as a bitmask of 16 Aff0 CPUs.
+ * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
+ * check for matching ones. If this bit is set, we signal all, but not the
+ * calling VCPU.
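+ *
+ * For example, a write with IRM == 0, Aff3/Aff2/Aff1 all zero and a target
+ * list of 0b101 raises the SGI on the VCPUs whose MPIDR Aff0 is 0 or 2.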
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *c_vcpu;
+	u16 target_cpus;
+	u64 mpidr;
+	int sgi, c;
+	int vcpu_id = vcpu->vcpu_id;
+	bool broadcast;
+
+	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
+	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+
+	/*
+	 * We iterate over all VCPUs to find the MPIDRs matching the request.
+	 * If we have handled one CPU, we clear its bit to detect early
+	 * if we are already finished. This avoids iterating through all
+ * VCPUs when most of the time we just signal a single VCPU.
+	 */
+	kvm_for_each_vcpu(c, c_vcpu, kvm) {
+		struct vgic_irq *irq;
+
+		/* Exit early if we have dealt with all requested CPUs */
+		if (!broadcast && target_cpus == 0)
+			break;
+
+		/* Don't signal the calling VCPU */
+		if (broadcast && c == vcpu_id)
+			continue;
+
+		if (!broadcast) {
+			int level0;
+
+			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+			if (level0 == -1)
+				continue;
+
+			/* remove this matching VCPU from the mask */
+			target_cpus &= ~BIT(level0);
+		}
+
+		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
+
+		spin_lock(&irq->irq_lock);
+		irq->pending = true;
+
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	}
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
new file mode 100644
index 0000000..9f6fab7
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -0,0 +1,524 @@
+/*
+ * VGIC MMIO handling functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bsearch.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/iodev.h>
+#include <kvm/arm_vgic.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
+				 gpa_t addr, unsigned int len)
+{
+	return 0;
+}
+
+unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
+				 gpa_t addr, unsigned int len)
+{
+	return -1UL;
+}
+
+void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
+			unsigned int len, unsigned long val)
+{
+	/* Ignore */
+}
+
+/*
+ * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
+ * of the enabled bit, so there is only one function for both here.
+ */
+unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 value = 0;
+	int i;
+
+	/* Loop over all IRQs affected by this read */
+	for (i = 0; i < len * 8; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		if (irq->enabled)
+			value |= (1U << i);
+	}
+
+	return value;
+}
+
+void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+		irq->enabled = true;
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	}
+}
+
+void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+
+		irq->enabled = false;
+
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+				     gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 value = 0;
+	int i;
+
+	/* Loop over all IRQs affected by this read */
+	for (i = 0; i < len * 8; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		if (irq->pending)
+			value |= (1U << i);
+	}
+
+	return value;
+}
+
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+		irq->pending = true;
+		if (irq->config == VGIC_CONFIG_LEVEL)
+			irq->soft_pending = true;
+
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	}
+}
+
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+
+		if (irq->config == VGIC_CONFIG_LEVEL) {
+			irq->soft_pending = false;
+			irq->pending = irq->line_level;
+		} else {
+			irq->pending = false;
+		}
+
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 value = 0;
+	int i;
+
+	/* Loop over all IRQs affected by this read */
+	for (i = 0; i < len * 8; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		if (irq->active)
+			value |= (1U << i);
+	}
+
+	return value;
+}
+
+static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+				    bool new_active_state)
+{
+	spin_lock(&irq->irq_lock);
+	/*
+	 * If this virtual IRQ was written into a list register, we
+	 * have to make sure the CPU that runs the VCPU thread has
+	 * synced back LR state to the struct vgic_irq.  We can only
+	 * know this for sure when either this irq is not assigned to
+	 * anyone's AP list anymore, or the VCPU thread is not
+	 * running on any CPUs.
+	 *
+	 * In the opposite case, we know the VCPU thread may be on its
+	 * way back from the guest and still has to sync back this
+	 * IRQ, so we release and re-acquire the spin_lock to let the
+	 * other thread sync back the IRQ.
+	 */
+	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+	       irq->vcpu->cpu != -1) /* VCPU thread is running */
+		cond_resched_lock(&irq->irq_lock);
+
+	irq->active = new_active_state;
+	if (new_active_state)
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	else
+		spin_unlock(&irq->irq_lock);
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts, we have to stop all the VCPUs because interrupts can
+ * be migrated while we don't hold the IRQ locks and we don't want to be
+ * chasing moving targets.
+ *
+ * For private interrupts, we only have to make sure the single and only VCPU
+ * that can potentially queue the IRQ is stopped.
+ */
+static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_halt_vcpu(vcpu);
+	else
+		kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_change_active_prepare */
+static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_resume_vcpu(vcpu);
+	else
+		kvm_arm_resume_guest(vcpu->kvm);
+}
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	vgic_change_active_prepare(vcpu, intid);
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		vgic_mmio_change_active(vcpu, irq, false);
+	}
+	vgic_change_active_finish(vcpu, intid);
+}
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	vgic_change_active_prepare(vcpu, intid);
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		vgic_mmio_change_active(vcpu, irq, true);
+	}
+	vgic_change_active_finish(vcpu, intid);
+}
+
+unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
+				      gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+	int i;
+	u64 val = 0;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		val |= (u64)irq->priority << (i * 8);
+	}
+
+	return val;
+}
+
+/*
+ * We currently don't handle changing the priority of an interrupt that
+ * is already pending on a VCPU. If there is a need for this, we would
+ * need to make this VCPU exit and re-evaluate the priorities, potentially
+ * leading to this interrupt getting presented now to the guest (if it has
+ * been masked by the priority mask before).
+ */
+void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+	int i;
+
+	for (i = 0; i < len; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		spin_lock(&irq->irq_lock);
+		/* Narrow the priority range to what we actually support */
+		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
+	u32 value = 0;
+	int i;
+
+	for (i = 0; i < len * 4; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
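+		/* bit [2i + 1] set means the interrupt is edge-triggered */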
+		if (irq->config == VGIC_CONFIG_EDGE)
+			value |= (2U << (i * 2));
+	}
+
+	return value;
+}
+
+void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
+			    gpa_t addr, unsigned int len,
+			    unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
+	int i;
+
+	for (i = 0; i < len * 4; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		/*
+		 * The configuration cannot be changed for SGIs in general,
+		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
+		 * code relies on PPIs being level triggered, so we also
+		 * make them read-only here.
+		 */
+		if (intid + i < VGIC_NR_PRIVATE_IRQS)
+			continue;
+
+		spin_lock(&irq->irq_lock);
+		if (test_bit(i * 2 + 1, &val)) {
+			irq->config = VGIC_CONFIG_EDGE;
+		} else {
+			irq->config = VGIC_CONFIG_LEVEL;
+			irq->pending = irq->line_level | irq->soft_pending;
+		}
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+static int match_region(const void *key, const void *elt)
+{
+	const unsigned int offset = (unsigned long)key;
+	const struct vgic_register_region *region = elt;
+
+	if (offset < region->reg_offset)
+		return -1;
+
+	if (offset >= region->reg_offset + region->len)
+		return 1;
+
+	return 0;
+}
+
+/* Find the proper register handler entry given a certain address offset. */
+static const struct vgic_register_region *
+vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions,
+		      unsigned int offset)
+{
+	return bsearch((void *)(uintptr_t)offset, region, nr_regions,
+		       sizeof(region[0]), match_region);
+}
+
+/*
+ * kvm_mmio_read_buf() returns a value in a format where it can be converted
+ * to a byte array and be directly observed as the guest wanted it to appear
+ * in memory if it had done the store itself, which is LE for the GIC, as the
+ * guest knows the GIC is always LE.
+ *
+ * We convert this value to the CPUs native format to deal with it as a data
+ * value.
+ */
+unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
+{
+	unsigned long data = kvm_mmio_read_buf(val, len);
+
+	switch (len) {
+	case 1:
+		return data;
+	case 2:
+		return le16_to_cpu(data);
+	case 4:
+		return le32_to_cpu(data);
+	default:
+		return le64_to_cpu(data);
+	}
+}
+
+/*
+ * kvm_mmio_write_buf() expects a value in a format such that if converted to
+ * a byte array it is observed as the guest would see it if it could perform
+ * the load directly.  Since the GIC is LE, and the guest knows this, the
+ * guest expects a value in little endian format.
+ *
+ * We convert the data value from the CPUs native format to LE so that the
+ * value is returned in the proper format.
+ */
+void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
+				unsigned long data)
+{
+	switch (len) {
+	case 1:
+		break;
+	case 2:
+		data = cpu_to_le16(data);
+		break;
+	case 4:
+		data = cpu_to_le32(data);
+		break;
+	default:
+		data = cpu_to_le64(data);
+	}
+
+	kvm_mmio_write_buf(buf, len, data);
+}
+
+static
+struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
+{
+	return container_of(dev, struct vgic_io_device, dev);
+}
+
+static bool check_region(const struct vgic_register_region *region,
+			 gpa_t addr, int len)
+{
+	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
+		return true;
+	if ((region->access_flags & VGIC_ACCESS_32bit) &&
+	    len == sizeof(u32) && !(addr & 3))
+		return true;
+	if ((region->access_flags & VGIC_ACCESS_64bit) &&
+	    len == sizeof(u64) && !(addr & 7))
+		return true;
+
+	return false;
+}
+
+static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+			      gpa_t addr, int len, void *val)
+{
+	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
+	const struct vgic_register_region *region;
+	struct kvm_vcpu *r_vcpu;
+	unsigned long data;
+
+	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+				       addr - iodev->base_addr);
+	if (!region || !check_region(region, addr, len)) {
+		memset(val, 0, len);
+		return 0;
+	}
+
+	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
+	data = region->read(r_vcpu, addr, len);
+	vgic_data_host_to_mmio_bus(val, len, data);
+	return 0;
+}
+
+static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+			       gpa_t addr, int len, const void *val)
+{
+	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
+	const struct vgic_register_region *region;
+	struct kvm_vcpu *r_vcpu;
+	unsigned long data = vgic_data_mmio_bus_to_host(val, len);
+
+	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+				       addr - iodev->base_addr);
+	if (!region)
+		return 0;
+
+	if (!check_region(region, addr, len))
+		return 0;
+
+	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
+	region->write(r_vcpu, addr, len, data);
+	return 0;
+}
+
+struct kvm_io_device_ops kvm_io_gic_ops = {
+	.read = dispatch_mmio_read,
+	.write = dispatch_mmio_write,
+};
+
+int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+			     enum vgic_type type)
+{
+	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
+	int ret = 0;
+	unsigned int len;
+
+	switch (type) {
+	case VGIC_V2:
+		len = vgic_v2_init_dist_iodev(io_device);
+		break;
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+	case VGIC_V3:
+		len = vgic_v3_init_dist_iodev(io_device);
+		break;
+#endif
+	default:
+		BUG_ON(1);
+	}
+
+	io_device->base_addr = dist_base_address;
+	io_device->redist_vcpu = NULL;
+
+	mutex_lock(&kvm->slots_lock);
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+				      len, &io_device->dev);
+	mutex_unlock(&kvm->slots_lock);
+
+	return ret;
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
new file mode 100644
index 0000000..8509014
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __KVM_ARM_VGIC_MMIO_H__
+#define __KVM_ARM_VGIC_MMIO_H__
+
+struct vgic_register_region {
+	unsigned int reg_offset;
+	unsigned int len;
+	unsigned int bits_per_irq;
+	unsigned int access_flags;
+	unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
+			      unsigned int len);
+	void (*write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
+		      unsigned long val);
+};
+
+extern struct kvm_io_device_ops kvm_io_gic_ops;
+
+#define VGIC_ACCESS_8bit	1
+#define VGIC_ACCESS_32bit	2
+#define VGIC_ACCESS_64bit	4
+
+/*
+ * Generate a mask that covers the number of bytes required to address
+ * up to 1024 interrupts, each represented by <bits> bits. This assumes
+ * that <bits> is a power of two.
+ */
+#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
+
+/*
+ * (addr & mask) gives us the byte offset for the INT ID, so we want to
+ * divide this by the 'bytes per irq' value, which is given by '(bits) / 8',
+ * to get the INT ID.  But we do this with fixed-point arithmetic, taking
+ * advantage of the fact that dividing by a fraction equals multiplying by
+ * the inverted fraction, and scale up both the numerator and denominator
+ * by 8 to support at most 64 bits per IRQ:
+ */
+#define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
+					64 / (bits) / 8)
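+/*
+ * Worked example: with 1 bit per IRQ, VGIC_ADDR_IRQ_MASK(1) is 0x7f, so
+ * 128 bytes cover all 1024 one-bit entries.  With 2 bits per IRQ (e.g. the
+ * config registers), a byte offset of 4 gives (4 & 0xff) * 64 / 2 / 8 = 16,
+ * i.e. the second 32-bit register starts at INTID 16.
+ */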
+
+/*
+ * Some VGIC registers store per-IRQ information, with a different number
+ * of bits per IRQ. For those registers the _WITH_BITS_PER_IRQ macro below
+ * is used.
+ * The _WITH_LENGTH version instantiates registers with a fixed length
+ * and is mutually exclusive with the _PER_IRQ version.
+ */
+#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc)		\
+	{								\
+		.reg_offset = off,					\
+		.bits_per_irq = bpi,					\
+		.len = bpi * 1024 / 8,					\
+		.access_flags = acc,					\
+		.read = rd,						\
+		.write = wr,						\
+	}
+
+#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
+	{								\
+		.reg_offset = off,					\
+		.bits_per_irq = 0,					\
+		.len = length,						\
+		.access_flags = acc,					\
+		.read = rd,						\
+		.write = wr,						\
+	}
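+/*
+ * Illustrative sketch (not taken from this patch): a distributor register
+ * table entry for the 1-bit-per-IRQ set-enable registers at offset 0x100
+ * could look like:
+ *   REGISTER_DESC_WITH_BITS_PER_IRQ(0x100, vgic_mmio_read_enable,
+ *                                   vgic_mmio_write_senable, 1, VGIC_ACCESS_32bit)
+ * which describes a 128-byte region handled by the enable accessors below.
+ */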
+
+int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
+				  struct vgic_register_region *reg_desc,
+				  struct vgic_io_device *region,
+				  int nr_irqs, bool offset_private);
+
+unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
+
+void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
+				unsigned long data);
+
+unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
+				 gpa_t addr, unsigned int len);
+
+unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
+				 gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
+			unsigned int len, unsigned long val);
+
+unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val);
+
+void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val);
+
+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+				     gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val);
+
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val);
+
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val);
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val);
+
+unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
+				      gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val);
+
+unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
+			    gpa_t addr, unsigned int len,
+			    unsigned long val);
+
+unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
+
+unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
+
+#endif
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
new file mode 100644
index 0000000..e31405e
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+	*val = (*val >> 32) | (*val << 32);
+#endif
+	return (unsigned long *)val;
+}
+
+void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	if (cpuif->vgic_misr & GICH_MISR_EOI) {
+		u64 eisr = cpuif->vgic_eisr;
+		unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
+		int lr;
+
+		for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
+			u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
+
+			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
+
+			kvm_notify_acked_irq(vcpu->kvm, 0,
+					     intid - VGIC_NR_PRIVATE_IRQS);
+		}
+	}
+
+	/* Unconditionally disable the underflow maintenance IRQ */
+	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
+	/*
+	 * In the next iterations of the vcpu loop, if we sync the
+	 * vgic state after flushing it, but before entering the guest
+	 * (this happens for pending signals and vmid rollovers), then
+	 * make sure we don't pick up any old maintenance interrupts
+	 * here.
+	 */
+	cpuif->vgic_eisr = 0;
+}
+
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	cpuif->vgic_hcr |= GICH_HCR_UIE;
+}
+
+/*
+ * transfer the content of the LRs back into the corresponding ap_list:
+ * - active bit is transferred as is
+ * - pending bit is
+ *   - transferred as is in case of edge sensitive IRQs
+ *   - set to the line-level (resample time) for level sensitive IRQs
+ */
+void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+	int lr;
+
+	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
+		u32 val = cpuif->vgic_lr[lr];
+		u32 intid = val & GICH_LR_VIRTUALID;
+		struct vgic_irq *irq;
+
+		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+
+		spin_lock(&irq->irq_lock);
+
+		/* Always preserve the active bit */
+		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
+
+		/* Edge is the only case where we preserve the pending bit */
+		if (irq->config == VGIC_CONFIG_EDGE &&
+		    (val & GICH_LR_PENDING_BIT)) {
+			irq->pending = true;
+
+			if (vgic_irq_is_sgi(intid)) {
+				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
+
+				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+				irq->source |= (1 << cpuid);
+			}
+		}
+
+		/*
+		 * Clear soft pending state when level irqs have been acked.
+		 * Always regenerate the pending state.
+		 */
+		if (irq->config == VGIC_CONFIG_LEVEL) {
+			if (!(val & GICH_LR_PENDING_BIT))
+				irq->soft_pending = false;
+
+			irq->pending = irq->line_level || irq->soft_pending;
+		}
+
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+/*
+ * Populates the particular LR with the state of a given IRQ:
+ * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
+ * - for a level sensitive IRQ the pending state value is unchanged;
+ *   it is dictated directly by the input level
+ *
+ * If @irq describes an SGI with multiple sources, we choose the
+ * lowest-numbered source VCPU and clear that bit in the source bitmap.
+ *
+ * The irq_lock must be held by the caller.
+ */
+void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+	u32 val = irq->intid;
+
+	if (irq->pending) {
+		val |= GICH_LR_PENDING_BIT;
+
+		if (irq->config == VGIC_CONFIG_EDGE)
+			irq->pending = false;
+
+		if (vgic_irq_is_sgi(irq->intid)) {
+			u32 src = ffs(irq->source);
+
+			BUG_ON(!src);
+			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+			irq->source &= ~(1 << (src - 1));
+			if (irq->source)
+				irq->pending = true;
+		}
+	}
+
+	if (irq->active)
+		val |= GICH_LR_ACTIVE_BIT;
+
+	if (irq->hw) {
+		val |= GICH_LR_HW;
+		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+	} else {
+		if (irq->config == VGIC_CONFIG_LEVEL)
+			val |= GICH_LR_EOI;
+	}
+
+	/* The GICv2 LR only holds five bits of priority. */
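+	/* e.g. an 8-bit priority of 0xa0 is truncated to 0x14 (0xa0 >> 3) */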
+	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
+
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
+}
+
+void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
+}
+
+void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr;
+
+	vmcr  = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
+		GICH_VMCR_ALIAS_BINPOINT_MASK;
+	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
+		GICH_VMCR_BINPOINT_MASK;
+	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
+		GICH_VMCR_PRIMASK_MASK;
+
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
+void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
+			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
+			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
+			GICH_VMCR_BINPOINT_SHIFT;
+	vmcrp->pmr  = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
+			GICH_VMCR_PRIMASK_SHIFT;
+}
+
+void vgic_v2_enable(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * By forcing VMCR to zero, the GIC will restore the binary
+	 * points to their reset values. Anything else resets to zero
+	 * anyway.
+	 */
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
+
+	/* Get the show on the road... */
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
+}
+
+/* check for overlapping regions and for regions crossing the end of memory */
+static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
+{
+	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
+		return false;
+	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
+		return false;
+
+	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
+		return true;
+	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
+		return true;
+
+	return false;
+}
+
+int vgic_v2_map_resources(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int ret = 0;
+
+	if (vgic_ready(kvm))
+		goto out;
+
+	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
+		kvm_err("Need to set vgic cpu and dist addresses first\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
+		kvm_err("VGIC CPU and dist frames overlap\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Initialize the vgic if this hasn't already been done on demand by
+	 * accessing the vgic state from userspace.
+	 */
+	ret = vgic_init(kvm);
+	if (ret) {
+		kvm_err("Unable to initialize VGIC dynamic data structures\n");
+		goto out;
+	}
+
+	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
+	if (ret) {
+		kvm_err("Unable to register VGIC MMIO regions\n");
+		goto out;
+	}
+
+	ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+				    kvm_vgic_global_state.vcpu_base,
+				    KVM_VGIC_V2_CPU_SIZE, true);
+	if (ret) {
+		kvm_err("Unable to remap VGIC CPU to VCPU\n");
+		goto out;
+	}
+
+	dist->ready = true;
+
+out:
+	if (ret)
+		kvm_vgic_destroy(kvm);
+	return ret;
+}
+
+/**
+ * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
+ * @info:	pointer to the GIC KVM info structure describing the hardware
+ *
+ * Returns 0 if a GICv2 has been found, returns an error code otherwise
+ */
+int vgic_v2_probe(const struct gic_kvm_info *info)
+{
+	int ret;
+	u32 vtr;
+
+	if (!info->vctrl.start) {
+		kvm_err("GICH not present in the firmware table\n");
+		return -ENXIO;
+	}
+
+	if (!PAGE_ALIGNED(info->vcpu.start)) {
+		kvm_err("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)info->vcpu.start);
+		return -ENXIO;
+	}
+
+	if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
+		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&info->vcpu),
+			PAGE_SIZE);
+		return -ENXIO;
+	}
+
+	kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
+						   resource_size(&info->vctrl));
+	if (!kvm_vgic_global_state.vctrl_base) {
+		kvm_err("Cannot ioremap GICH\n");
+		return -ENOMEM;
+	}
+
+	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
+	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;
+
+	ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
+				     kvm_vgic_global_state.vctrl_base +
+					 resource_size(&info->vctrl),
+				     info->vctrl.start);
+
+	if (ret) {
+		kvm_err("Cannot map VCTRL into hyp\n");
+		iounmap(kvm_vgic_global_state.vctrl_base);
+		return ret;
+	}
+
+	kvm_vgic_global_state.can_emulate_gicv2 = true;
+	kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+
+	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
+	kvm_vgic_global_state.type = VGIC_V2;
+	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
+
+	kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+
+	return 0;
+}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
new file mode 100644
index 0000000..346b4ad
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -0,0 +1,334 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_asm.h>
+
+#include "vgic.h"
+
+void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
+	if (cpuif->vgic_misr & ICH_MISR_EOI) {
+		unsigned long eisr_bmap = cpuif->vgic_eisr;
+		int lr;
+
+		for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
+			u32 intid;
+			u64 val = cpuif->vgic_lr[lr];
+
+			if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+				intid = val & ICH_LR_VIRTUAL_ID_MASK;
+			else
+				intid = val & GICH_LR_VIRTUALID;
+
+			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
+
+			kvm_notify_acked_irq(vcpu->kvm, 0,
+					     intid - VGIC_NR_PRIVATE_IRQS);
+		}
+
+		/*
+		 * In the next iterations of the vcpu loop, if we sync
+		 * the vgic state after flushing it, but before
+		 * entering the guest (this happens for pending
+		 * signals and vmid rollovers), then make sure we
+		 * don't pick up any old maintenance interrupts here.
+		 */
+		cpuif->vgic_eisr = 0;
+	}
+
+	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+}
+
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	cpuif->vgic_hcr |= ICH_HCR_UIE;
+}
+
+void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
+	int lr;
+
+	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
+		u64 val = cpuif->vgic_lr[lr];
+		u32 intid;
+		struct vgic_irq *irq;
+
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+			intid = val & ICH_LR_VIRTUAL_ID_MASK;
+		else
+			intid = val & GICH_LR_VIRTUALID;
+		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+
+		spin_lock(&irq->irq_lock);
+
+		/* Always preserve the active bit */
+		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+
+		/* Edge is the only case where we preserve the pending bit */
+		if (irq->config == VGIC_CONFIG_EDGE &&
+		    (val & ICH_LR_PENDING_BIT)) {
+			irq->pending = true;
+
+			if (vgic_irq_is_sgi(intid) &&
+			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
+
+				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+				irq->source |= (1 << cpuid);
+			}
+		}
+
+		/*
+		 * Clear soft pending state when level irqs have been acked.
+		 * Always regenerate the pending state.
+		 */
+		if (irq->config == VGIC_CONFIG_LEVEL) {
+			if (!(val & ICH_LR_PENDING_BIT))
+				irq->soft_pending = false;
+
+			irq->pending = irq->line_level || irq->soft_pending;
+		}
+
+		spin_unlock(&irq->irq_lock);
+	}
+}
+
+/* Requires the irq to be locked already */
+void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
+	u64 val = irq->intid;
+
+	if (irq->pending) {
+		val |= ICH_LR_PENDING_BIT;
+
+		if (irq->config == VGIC_CONFIG_EDGE)
+			irq->pending = false;
+
+		if (vgic_irq_is_sgi(irq->intid) &&
+		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+			u32 src = ffs(irq->source);
+
+			BUG_ON(!src);
+			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+			irq->source &= ~(1 << (src - 1));
+			if (irq->source)
+				irq->pending = true;
+		}
+	}
+
+	if (irq->active)
+		val |= ICH_LR_ACTIVE_BIT;
+
+	if (irq->hw) {
+		val |= ICH_LR_HW;
+		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+	} else {
+		if (irq->config == VGIC_CONFIG_LEVEL)
+			val |= ICH_LR_EOI;
+	}
+
+	/*
+	 * We currently only support Group1 interrupts, which is a
+	 * known defect. This needs to be addressed at some point.
+	 */
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		val |= ICH_LR_GROUP;
+
+	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
+
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
+}
+
+void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
+}
+
+void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr;
+
+	vmcr  = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
+	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
+	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
+	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
+
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
+}
+
+void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
+
+	vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
+	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
+	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
+	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+}
+
+void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	/*
+	 * By forcing VMCR to zero, the GIC will restore the binary
+	 * points to their reset values. Anything else resets to zero
+	 * anyway.
+	 */
+	vgic_v3->vgic_vmcr = 0;
+	vgic_v3->vgic_elrsr = ~0;
+
+	/*
+	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
+	 * way, so we force SRE to 1 to demonstrate this to the guest.
+	 * This goes with the spec allowing the value to be RAO/WI.
+	 */
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+	else
+		vgic_v3->vgic_sre = 0;
+
+	/* Get the show on the road... */
+	vgic_v3->vgic_hcr = ICH_HCR_EN;
+}
+
+/* check for overlapping regions and for regions crossing the end of memory */
+static bool vgic_v3_check_base(struct kvm *kvm)
+{
+	struct vgic_dist *d = &kvm->arch.vgic;
+	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;
+
+	redist_size *= atomic_read(&kvm->online_vcpus);
+
+	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
+		return false;
+	if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
+		return false;
+
+	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
+		return true;
+	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
+		return true;
+
+	return false;
+}
+
+int vgic_v3_map_resources(struct kvm *kvm)
+{
+	int ret = 0;
+	struct vgic_dist *dist = &kvm->arch.vgic;
+
+	if (vgic_ready(kvm))
+		goto out;
+
+	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+		kvm_err("Need to set vgic distributor addresses first\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!vgic_v3_check_base(kvm)) {
+		kvm_err("VGIC redist and dist frames overlap\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * For a VGICv3 we require the userland to explicitly initialize
+	 * the VGIC before we need to use it.
+	 */
+	if (!vgic_initialized(kvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
+	if (ret) {
+		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
+		goto out;
+	}
+
+	ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
+	if (ret) {
+		kvm_err("Unable to register VGICv3 redist MMIO regions\n");
+		goto out;
+	}
+
+	dist->ready = true;
+
+out:
+	if (ret)
+		kvm_vgic_destroy(kvm);
+	return ret;
+}
+
+/**
+ * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
+ * @info:	pointer to the GIC KVM info structure describing the hardware
+ *
+ * Returns 0 if a GICv3 has been found, returns an error code otherwise
+ */
+int vgic_v3_probe(const struct gic_kvm_info *info)
+{
+	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+
+	/*
+	 * The ListRegs field is 5 bits, but there is an architectural
+	 * maximum of 16 list registers. Just ignore bit 4...
+	 */
+	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
+	kvm_vgic_global_state.can_emulate_gicv2 = false;
+
+	if (!info->vcpu.start) {
+		kvm_info("GICv3: no GICV resource entry\n");
+		kvm_vgic_global_state.vcpu_base = 0;
+	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
+		pr_warn("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)info->vcpu.start);
+		kvm_vgic_global_state.vcpu_base = 0;
+	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
+		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&info->vcpu),
+			PAGE_SIZE);
+		kvm_vgic_global_state.vcpu_base = 0;
+	} else {
+		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
+		kvm_vgic_global_state.can_emulate_gicv2 = true;
+		kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
+	}
+	if (kvm_vgic_global_state.vcpu_base == 0)
+		kvm_info("disabling GICv2 emulation\n");
+	kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
+
+	kvm_vgic_global_state.vctrl_base = NULL;
+	kvm_vgic_global_state.type = VGIC_V3;
+	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
+
+	return 0;
+}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
new file mode 100644
index 0000000..69b61ab
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/list_sort.h>
+
+#include "vgic.h"
+
+#define CREATE_TRACE_POINTS
+#include "../trace.h"
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
+#else
+#define DEBUG_SPINLOCK_BUG_ON(p)
+#endif
+
+struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
+
+/*
+ * Locking order is always:
+ *   vgic_cpu->ap_list_lock
+ *     vgic_irq->irq_lock
+ *
+ * (that is, always take the ap_list_lock before the struct vgic_irq lock).
+ *
+ * When taking more than one ap_list_lock at the same time, always take the
+ * lowest numbered VCPU's ap_list_lock first, so:
+ *   vcpuX->vcpu_id < vcpuY->vcpu_id:
+ *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ */
+
+struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			      u32 intid)
+{
+	/* SGIs and PPIs */
+	if (intid <= VGIC_MAX_PRIVATE)
+		return &vcpu->arch.vgic_cpu.private_irqs[intid];
+
+	/* SPIs */
+	if (intid <= VGIC_MAX_SPI)
+		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+
+	/* LPIs are not yet covered */
+	if (intid >= VGIC_MIN_LPI)
+		return NULL;
+
+	WARN(1, "Looking up struct vgic_irq for reserved INTID");
+	return NULL;
+}
+
+/**
+ * vgic_target_oracle - compute the target vcpu for an irq
+ *
+ * @irq:	The irq to route. Must be already locked.
+ *
+ * Based on the current state of the interrupt (enabled, pending,
+ * active, vcpu and target_vcpu), compute the next vcpu this should be
+ * given to. Return NULL if this shouldn't be injected at all.
+ *
+ * Requires the IRQ lock to be held.
+ */
+static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
+{
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+
+	/* If the interrupt is active, it must stay on the current vcpu */
+	if (irq->active)
+		return irq->vcpu ? : irq->target_vcpu;
+
+	/*
+	 * If the IRQ is not active but enabled and pending, we should direct
+	 * it to its configured target VCPU.
+	 * If the distributor is disabled, pending interrupts shouldn't be
+	 * forwarded.
+	 */
+	if (irq->enabled && irq->pending) {
+		if (unlikely(irq->target_vcpu &&
+			     !irq->target_vcpu->kvm->arch.vgic.enabled))
+			return NULL;
+
+		return irq->target_vcpu;
+	}
+
+	/*
+	 * If the IRQ is neither active nor both pending and enabled, it
+	 * should not be queued to any VCPU.
+	 */
+	return NULL;
+}
+
+/*
+ * The order of items in the ap_lists defines how we'll pack things in LRs as
+ * well, the first items in the list being the first things populated in the
+ * LRs.
+ *
+ * A hard rule is that active interrupts can never be pushed out of the LRs
+ * (and therefore take priority) since we cannot reliably trap on deactivation
+ * of IRQs and therefore they have to be present in the LRs.
+ *
+ * Otherwise things should be sorted by the priority field and the GIC
+ * hardware support will take care of preemption of priority groups etc.
+ *
+ * Return negative if "a" sorts before "b", 0 to preserve order, and positive
+ * to sort "b" before "a".
+ */
+static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
+	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
+	bool penda, pendb;
+	int ret;
+
+	spin_lock(&irqa->irq_lock);
+	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+
+	if (irqa->active || irqb->active) {
+		ret = (int)irqb->active - (int)irqa->active;
+		goto out;
+	}
+
+	penda = irqa->enabled && irqa->pending;
+	pendb = irqb->enabled && irqb->pending;
+
+	if (!penda || !pendb) {
+		ret = (int)pendb - (int)penda;
+		goto out;
+	}
+
+	/* Both pending and enabled, sort by priority */
+	ret = irqa->priority - irqb->priority;
+out:
+	spin_unlock(&irqb->irq_lock);
+	spin_unlock(&irqa->irq_lock);
+	return ret;
+}
+
+/* Must be called with the ap_list_lock held */
+static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
+}
+
+/*
+ * Only valid injection if changing level for level-triggered IRQs or for a
+ * rising edge.
+ */
+static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
+{
+	switch (irq->config) {
+	case VGIC_CONFIG_LEVEL:
+		return irq->line_level != level;
+	case VGIC_CONFIG_EDGE:
+		return level;
+	}
+
+	return false;
+}
+
+/*
+ * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
+ * Do the queuing if necessary, taking the right locks in the right order.
+ * Returns true when the IRQ was queued, false otherwise.
+ *
+ * Needs to be entered with the IRQ lock already held, but will return
+ * with all locks dropped.
+ */
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
+{
+	struct kvm_vcpu *vcpu;
+
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+
+retry:
+	vcpu = vgic_target_oracle(irq);
+	if (irq->vcpu || !vcpu) {
+		/*
+		 * If this IRQ is already on a VCPU's ap_list, then it
+		 * cannot be moved or modified and there is no more work for
+		 * us to do.
+		 *
+		 * Otherwise, if the irq is not pending and enabled, it does
+		 * not need to be inserted into an ap_list and there is also
+		 * no more work for us to do.
+		 */
+		spin_unlock(&irq->irq_lock);
+		return false;
+	}
+
+	/*
+	 * We must unlock the irq lock to take the ap_list_lock where
+	 * we are going to insert this new pending interrupt.
+	 */
+	spin_unlock(&irq->irq_lock);
+
+	/* someone can do stuff here, which we re-check below */
+
+	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	spin_lock(&irq->irq_lock);
+
+	/*
+	 * Did something change behind our backs?
+	 *
+	 * There are two cases:
+	 * 1) The irq lost its pending state or was disabled behind our
+	 *    backs and/or it was queued to another VCPU's ap_list.
+	 * 2) Someone changed the affinity on this irq behind our
+	 *    backs and we are now holding the wrong ap_list_lock.
+	 *
+	 * In both cases, drop the locks and retry.
+	 */
+
+	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
+		spin_unlock(&irq->irq_lock);
+		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+
+		spin_lock(&irq->irq_lock);
+		goto retry;
+	}
+
+	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
+	irq->vcpu = vcpu;
+
+	spin_unlock(&irq->irq_lock);
+	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+
+	kvm_vcpu_kick(vcpu);
+
+	return true;
+}
+
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+				   unsigned int intid, bool level,
+				   bool mapped_irq)
+{
+	struct kvm_vcpu *vcpu;
+	struct vgic_irq *irq;
+	int ret;
+
+	trace_vgic_update_irq_pending(cpuid, intid, level);
+
+	ret = vgic_lazy_init(kvm);
+	if (ret)
+		return ret;
+
+	vcpu = kvm_get_vcpu(kvm, cpuid);
+	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
+		return -EINVAL;
+
+	irq = vgic_get_irq(kvm, vcpu, intid);
+	if (!irq)
+		return -EINVAL;
+
+	if (irq->hw != mapped_irq)
+		return -EINVAL;
+
+	spin_lock(&irq->irq_lock);
+
+	if (!vgic_validate_injection(irq, level)) {
+		/* Nothing to see here, move along... */
+		spin_unlock(&irq->irq_lock);
+		return 0;
+	}
+
+	if (irq->config == VGIC_CONFIG_LEVEL) {
+		irq->line_level = level;
+		irq->pending = level || irq->soft_pending;
+	} else {
+		irq->pending = true;
+	}
+
+	vgic_queue_irq_unlock(kvm, irq);
+
+	return 0;
+}
+
+/**
+ * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
+ * @kvm:     The VM structure pointer
+ * @cpuid:   The CPU for PPIs
+ * @intid:   The INTID to inject a new state to.
+ * @level:   Edge-triggered:  true:  to trigger the interrupt
+ *			      false: to ignore the call
+ *	     Level-sensitive  true:  raise the input signal
+ *			      false: lower the input signal
+ *
+ * The VGIC is not concerned with devices being active-LOW or active-HIGH for
+ * level-sensitive interrupts.  You can think of the level parameter as 1
+ * being HIGH and 0 being LOW and all devices being active-HIGH.
+ */
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+			bool level)
+{
+	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
+}
+
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+			       bool level)
+{
+	return vgic_update_irq_pending(kvm, cpuid, intid, level, true);
+}
+
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+
+	BUG_ON(!irq);
+
+	spin_lock(&irq->irq_lock);
+
+	irq->hw = true;
+	irq->hwintid = phys_irq;
+
+	spin_unlock(&irq->irq_lock);
+
+	return 0;
+}
+
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+
+	BUG_ON(!irq);
+
+	if (!vgic_initialized(vcpu->kvm))
+		return -EAGAIN;
+
+	spin_lock(&irq->irq_lock);
+
+	irq->hw = false;
+	irq->hwintid = 0;
+
+	spin_unlock(&irq->irq_lock);
+
+	return 0;
+}
+
+/**
+ * vgic_prune_ap_list - Remove non-relevant interrupts from the list
+ *
+ * @vcpu: The VCPU pointer
+ *
+ * Go over the list of "interesting" interrupts, and prune those that we
+ * won't have to consider in the near future.
+ */
+static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq, *tmp;
+
+retry:
+	spin_lock(&vgic_cpu->ap_list_lock);
+
+	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+
+		spin_lock(&irq->irq_lock);
+
+		BUG_ON(vcpu != irq->vcpu);
+
+		target_vcpu = vgic_target_oracle(irq);
+
+		if (!target_vcpu) {
+			/*
+			 * We don't need to process this interrupt any
+			 * further, move it off the list.
+			 */
+			list_del(&irq->ap_list);
+			irq->vcpu = NULL;
+			spin_unlock(&irq->irq_lock);
+			continue;
+		}
+
+		if (target_vcpu == vcpu) {
+			/* We're on the right CPU */
+			spin_unlock(&irq->irq_lock);
+			continue;
+		}
+
+		/* This interrupt looks like it has to be migrated. */
+
+		spin_unlock(&irq->irq_lock);
+		spin_unlock(&vgic_cpu->ap_list_lock);
+
+		/*
+		 * Ensure locking order by always locking the smallest
+		 * ID first.
+		 */
+		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
+			vcpuA = vcpu;
+			vcpuB = target_vcpu;
+		} else {
+			vcpuA = target_vcpu;
+			vcpuB = vcpu;
+		}
+
+		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				 SINGLE_DEPTH_NESTING);
+		spin_lock(&irq->irq_lock);
+
+		/*
+		 * If the affinity has been preserved, move the
+		 * interrupt around. Otherwise, it means things have
+		 * changed while the interrupt was unlocked, and we
+		 * need to replay this.
+		 *
+		 * In all cases, we cannot trust the list not to have
+		 * changed, so we restart from the beginning.
+		 */
+		if (target_vcpu == vgic_target_oracle(irq)) {
+			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
+
+			list_del(&irq->ap_list);
+			irq->vcpu = target_vcpu;
+			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+		}
+
+		spin_unlock(&irq->irq_lock);
+		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		goto retry;
+	}
+
+	spin_unlock(&vgic_cpu->ap_list_lock);
+}
+
+static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_process_maintenance(vcpu);
+	else
+		vgic_v3_process_maintenance(vcpu);
+}
+
+static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_fold_lr_state(vcpu);
+	else
+		vgic_v3_fold_lr_state(vcpu);
+}
+
+/* Requires the irq_lock to be held. */
+static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
+				    struct vgic_irq *irq, int lr)
+{
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_populate_lr(vcpu, irq, lr);
+	else
+		vgic_v3_populate_lr(vcpu, irq, lr);
+}
+
+static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_clear_lr(vcpu, lr);
+	else
+		vgic_v3_clear_lr(vcpu, lr);
+}
+
+static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_set_underflow(vcpu);
+	else
+		vgic_v3_set_underflow(vcpu);
+}
+
+/* Requires the ap_list_lock to be held. */
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+	int count = 0;
+
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		spin_lock(&irq->irq_lock);
+		/* GICv2 SGIs can count for more than one... */
+		if (vgic_irq_is_sgi(irq->intid) && irq->source)
+			count += hweight8(irq->source);
+		else
+			count++;
+		spin_unlock(&irq->irq_lock);
+	}
+	return count;
+}
+
+/* Requires the VCPU's ap_list_lock to be held. */
+static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+	int count = 0;
+
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
+		vgic_set_underflow(vcpu);
+		vgic_sort_ap_list(vcpu);
+	}
+
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		spin_lock(&irq->irq_lock);
+
+		if (unlikely(vgic_target_oracle(irq) != vcpu))
+			goto next;
+
+		/*
+		 * If we get an SGI with multiple sources, try to get
+		 * them in all at once.
+		 */
+		do {
+			vgic_populate_lr(vcpu, irq, count++);
+		} while (irq->source && count < kvm_vgic_global_state.nr_lr);
+
+next:
+		spin_unlock(&irq->irq_lock);
+
+		if (count == kvm_vgic_global_state.nr_lr)
+			break;
+	}
+
+	vcpu->arch.vgic_cpu.used_lrs = count;
+
+	/* Nuke remaining LRs */
+	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
+		vgic_clear_lr(vcpu, count);
+}
+
+/* Sync back the hardware VGIC state into our emulation after a guest's run. */
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	vgic_process_maintenance_interrupt(vcpu);
+	vgic_fold_lr_state(vcpu);
+	vgic_prune_ap_list(vcpu);
+}
+
+/* Flush our emulation state into the GIC hardware before entering the guest. */
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	vgic_flush_lr_state(vcpu);
+	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+}
+
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+	bool pending = false;
+
+	if (!vcpu->kvm->arch.vgic.enabled)
+		return false;
+
+	spin_lock(&vgic_cpu->ap_list_lock);
+
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		spin_lock(&irq->irq_lock);
+		pending = irq->pending && irq->enabled;
+		spin_unlock(&irq->irq_lock);
+
+		if (pending)
+			break;
+	}
+
+	spin_unlock(&vgic_cpu->ap_list_lock);
+
+	return pending;
+}
+
+void vgic_kick_vcpus(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	int c;
+
+	/*
+	 * We've injected an interrupt, time to find out who deserves
+	 * a good kick...
+	 */
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		if (kvm_vgic_vcpu_pending_irq(vcpu))
+			kvm_vcpu_kick(vcpu);
+	}
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+	bool map_is_active;
+
+	spin_lock(&irq->irq_lock);
+	map_is_active = irq->hw && irq->active;
+	spin_unlock(&irq->irq_lock);
+
+	return map_is_active;
+}
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
new file mode 100644
index 0000000..7b300ca
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __KVM_ARM_VGIC_NEW_H__
+#define __KVM_ARM_VGIC_NEW_H__
+
+#include <linux/irqchip/arm-gic-common.h>
+
+#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
+#define IMPLEMENTER_ARM		0x43b
+
+#define VGIC_ADDR_UNDEF		(-1)
+#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)
+
+#define INTERRUPT_ID_BITS_SPIS	10
+#define VGIC_PRI_BITS		5
+
+#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
+
+struct vgic_vmcr {
+	u32	ctlr;
+	u32	abpr;
+	u32	bpr;
+	u32	pmr;
+};
+
+struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			      u32 intid);
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
+void vgic_kick_vcpus(struct kvm *kvm);
+
+void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
+void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
+void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
+void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+			 int offset, u32 *val);
+int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+			  int offset, u32 *val);
+void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v2_enable(struct kvm_vcpu *vcpu);
+int vgic_v2_probe(const struct gic_kvm_info *info);
+int vgic_v2_map_resources(struct kvm *kvm);
+int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+			     enum vgic_type);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
+void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
+void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
+void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v3_enable(struct kvm_vcpu *vcpu);
+int vgic_v3_probe(const struct gic_kvm_info *info);
+int vgic_v3_map_resources(struct kvm *kvm);
+int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+#else
+static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void vgic_v3_populate_lr(struct kvm_vcpu *vcpu,
+				       struct vgic_irq *irq, int lr)
+{
+}
+
+static inline void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+}
+
+static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline
+void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+}
+
+static inline
+void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+}
+
+static inline void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int vgic_v3_probe(const struct gic_kvm_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int vgic_v3_map_resources(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
+static inline int vgic_register_redist_iodevs(struct kvm *kvm,
+					      gpa_t dist_base_address)
+{
+	return -ENODEV;
+}
+#endif
+
+void kvm_register_vgic_device(unsigned long type);
+int vgic_lazy_init(struct kvm *kvm);
+int vgic_init(struct kvm *kvm);
+
+#endif
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index fe84e1a..8db197b 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -40,7 +40,7 @@
 
 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
 					lockdep_is_held(&kvm->irq_lock));
-	if (gsi < irq_rt->nr_rt_entries) {
+	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
 			entries[n] = *e;
 			++n;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index dd4ac9d..02e98f3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -63,6 +63,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
 
+/* Worst case buffer size needed for holding an integer. */
+#define ITOA_MAX_LEN 12
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -100,6 +103,9 @@
 struct dentry *kvm_debugfs_dir;
 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
+static int kvm_debugfs_num_entries;
+static const struct file_operations *stat_fops_per_vm[];
+
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
 #ifdef CONFIG_KVM_COMPAT
@@ -542,6 +548,58 @@
 	kvfree(slots);
 }
 
+static void kvm_destroy_vm_debugfs(struct kvm *kvm)
+{
+	int i;
+
+	if (!kvm->debugfs_dentry)
+		return;
+
+	debugfs_remove_recursive(kvm->debugfs_dentry);
+
+	for (i = 0; i < kvm_debugfs_num_entries; i++)
+		kfree(kvm->debugfs_stat_data[i]);
+	kfree(kvm->debugfs_stat_data);
+}
+
+static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
+{
+	char dir_name[ITOA_MAX_LEN * 2];
+	struct kvm_stat_data *stat_data;
+	struct kvm_stats_debugfs_item *p;
+
+	if (!debugfs_initialized())
+		return 0;
+
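+	/*
+	 * Each VM gets its own stats directory, named "<pid>-<VM fd>",
+	 * under the top-level kvm debugfs directory.
+	 */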
+	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
+	kvm->debugfs_dentry = debugfs_create_dir(dir_name,
+						 kvm_debugfs_dir);
+	if (!kvm->debugfs_dentry)
+		return -ENOMEM;
+
+	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
+					 sizeof(*kvm->debugfs_stat_data),
+					 GFP_KERNEL);
+	if (!kvm->debugfs_stat_data)
+		return -ENOMEM;
+
+	for (p = debugfs_entries; p->name; p++) {
+		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
+		if (!stat_data)
+			return -ENOMEM;
+
+		stat_data->kvm = kvm;
+		stat_data->offset = p->offset;
+		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
+		if (!debugfs_create_file(p->name, 0444,
+					 kvm->debugfs_dentry,
+					 stat_data,
+					 stat_fops_per_vm[p->kind]))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
 static struct kvm *kvm_create_vm(unsigned long type)
 {
 	int r, i;
@@ -647,6 +705,7 @@
 	int i;
 	struct mm_struct *mm = kvm->mm;
 
+	kvm_destroy_vm_debugfs(kvm);
 	kvm_arch_sync_events(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
@@ -2876,7 +2935,7 @@
 	case KVM_SET_GSI_ROUTING: {
 		struct kvm_irq_routing routing;
 		struct kvm_irq_routing __user *urouting;
-		struct kvm_irq_routing_entry *entries;
+		struct kvm_irq_routing_entry *entries = NULL;
 
 		r = -EFAULT;
 		if (copy_from_user(&routing, argp, sizeof(routing)))
@@ -2886,15 +2945,17 @@
 			goto out;
 		if (routing.flags)
 			goto out;
-		r = -ENOMEM;
-		entries = vmalloc(routing.nr * sizeof(*entries));
-		if (!entries)
-			goto out;
-		r = -EFAULT;
-		urouting = argp;
-		if (copy_from_user(entries, urouting->entries,
-				   routing.nr * sizeof(*entries)))
-			goto out_free_irq_routing;
+		if (routing.nr) {
+			r = -ENOMEM;
+			entries = vmalloc(routing.nr * sizeof(*entries));
+			if (!entries)
+				goto out;
+			r = -EFAULT;
+			urouting = argp;
+			if (copy_from_user(entries, urouting->entries,
+					   routing.nr * sizeof(*entries)))
+				goto out_free_irq_routing;
+		}
 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
 					routing.flags);
 out_free_irq_routing:
@@ -2999,8 +3060,15 @@
 	}
 #endif
 	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
-	if (r < 0)
+	if (r < 0) {
 		kvm_put_kvm(kvm);
+		return r;
+	}
+
+	if (kvm_create_vm_debugfs(kvm, r) < 0) {
+		kvm_put_kvm(kvm);
+		return -ENOMEM;
+	}
 
 	return r;
 }
@@ -3425,15 +3493,114 @@
 	.notifier_call = kvm_cpu_hotplug,
 };
 
+static int kvm_debugfs_open(struct inode *inode, struct file *file,
+			   int (*get)(void *, u64 *), int (*set)(void *, u64),
+			   const char *fmt)
+{
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
+					  inode->i_private;
+
+	/*
+	 * The debugfs files are a reference to the kvm struct which
+	 * is still valid when kvm_destroy_vm is called.
+	 * To avoid the race between open and the removal of the debugfs
+	 * directory we test against the users count.
+	 */
+	if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0))
+		return -ENOENT;
+
+	if (simple_attr_open(inode, file, get, set, fmt)) {
+		kvm_put_kvm(stat_data->kvm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int kvm_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
+					  inode->i_private;
+
+	simple_attr_release(inode, file);
+	kvm_put_kvm(stat_data->kvm);
+
+	return 0;
+}
+
+static int vm_stat_get_per_vm(void *data, u64 *val)
+{
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
+	*val = *(u32 *)((void *)stat_data->kvm + stat_data->offset);
+
+	return 0;
+}
+
+static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
+{
+	__simple_attr_check_format("%llu\n", 0ull);
+	return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
+				NULL, "%llu\n");
+}
+
+static const struct file_operations vm_stat_get_per_vm_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vm_stat_get_per_vm_open,
+	.release = kvm_debugfs_release,
+	.read    = simple_attr_read,
+	.write   = simple_attr_write,
+	.llseek  = generic_file_llseek,
+};
+
+static int vcpu_stat_get_per_vm(void *data, u64 *val)
+{
+	int i;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+	struct kvm_vcpu *vcpu;
+
+	*val = 0;
+
+	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
+		*val += *(u32 *)((void *)vcpu + stat_data->offset);
+
+	return 0;
+}
+
+static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
+{
+	__simple_attr_check_format("%llu\n", 0ull);
+	return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
+				 NULL, "%llu\n");
+}
+
+static const struct file_operations vcpu_stat_get_per_vm_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vcpu_stat_get_per_vm_open,
+	.release = kvm_debugfs_release,
+	.read    = simple_attr_read,
+	.write   = simple_attr_write,
+	.llseek  = generic_file_llseek,
+};
+
+static const struct file_operations *stat_fops_per_vm[] = {
+	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
+	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
+};
+
 static int vm_stat_get(void *_offset, u64 *val)
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
+	struct kvm_stat_data stat_tmp = {.offset = offset};
+	u64 tmp_val;
 
 	*val = 0;
 	spin_lock(&kvm_lock);
-	list_for_each_entry(kvm, &vm_list, vm_list)
-		*val += *(u32 *)((void *)kvm + offset);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		stat_tmp.kvm = kvm;
+		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+		*val += tmp_val;
+	}
 	spin_unlock(&kvm_lock);
 	return 0;
 }
@@ -3444,15 +3611,16 @@
 {
 	unsigned offset = (long)_offset;
 	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
-	int i;
+	struct kvm_stat_data stat_tmp = {.offset = offset};
+	u64 tmp_val;
 
 	*val = 0;
 	spin_lock(&kvm_lock);
-	list_for_each_entry(kvm, &vm_list, vm_list)
-		kvm_for_each_vcpu(i, vcpu, kvm)
-			*val += *(u32 *)((void *)vcpu + offset);
-
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		stat_tmp.kvm = kvm;
+		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+		*val += tmp_val;
+	}
 	spin_unlock(&kvm_lock);
 	return 0;
 }
@@ -3473,7 +3641,8 @@
 	if (kvm_debugfs_dir == NULL)
 		goto out;
 
-	for (p = debugfs_entries; p->name; ++p) {
+	kvm_debugfs_num_entries = 0;
+	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
 		if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
 					 (void *)(long)p->offset,
 					 stat_fops[p->kind]))